git.sesse.net Git - bcachefs-tools-debian/commitdiff
New upstream release
author    Jonathan Carter <jcc@debian.org>
          Tue, 29 Nov 2022 07:43:56 +0000 (09:43 +0200)
committer Jonathan Carter <jcc@debian.org>
          Tue, 29 Nov 2022 07:43:56 +0000 (09:43 +0200)
97 files changed:
Makefile
cmd_kill_btree_node.c [new file with mode: 0644]
cmd_list_journal.c
cmd_migrate.c
debian/.gitignore [new file with mode: 0644]
debian/changelog
debian/files
include/linux/atomic.h
include/linux/kernel.h
include/linux/mean_and_variance.h
include/linux/rcupdate.h
include/linux/wait.h
include/trace/events/bcachefs.h
libbcachefs/alloc_background.c
libbcachefs/alloc_background.h
libbcachefs/alloc_foreground.c
libbcachefs/alloc_foreground.h
libbcachefs/alloc_types.h
libbcachefs/backpointers.c
libbcachefs/backpointers.h
libbcachefs/bcachefs.h
libbcachefs/bcachefs_format.h
libbcachefs/bcachefs_ioctl.h
libbcachefs/bkey.c
libbcachefs/bkey_methods.c
libbcachefs/bkey_methods.h
libbcachefs/bkey_sort.c
libbcachefs/bset.h
libbcachefs/btree_cache.c
libbcachefs/btree_gc.c
libbcachefs/btree_io.c
libbcachefs/btree_io.h
libbcachefs/btree_iter.c
libbcachefs/btree_iter.h
libbcachefs/btree_key_cache.c
libbcachefs/btree_key_cache.h
libbcachefs/btree_locking.c
libbcachefs/btree_locking.h
libbcachefs/btree_types.h
libbcachefs/btree_update_interior.c
libbcachefs/btree_update_interior.h
libbcachefs/btree_update_leaf.c
libbcachefs/buckets.c
libbcachefs/buckets.h
libbcachefs/buckets_waiting_for_journal.c
libbcachefs/checksum.c
libbcachefs/checksum.h
libbcachefs/data_update.c
libbcachefs/data_update.h
libbcachefs/dirent.h
libbcachefs/ec.h
libbcachefs/errcode.c
libbcachefs/errcode.h
libbcachefs/error.c
libbcachefs/error.h
libbcachefs/extents.c
libbcachefs/extents.h
libbcachefs/fifo.h
libbcachefs/fs-io.c
libbcachefs/fs.c
libbcachefs/fs.h
libbcachefs/fsck.c
libbcachefs/inode.c
libbcachefs/inode.h
libbcachefs/io.c
libbcachefs/io.h
libbcachefs/io_types.h
libbcachefs/journal.h
libbcachefs/journal_io.c
libbcachefs/keylist.h
libbcachefs/lru.h
libbcachefs/move.c
libbcachefs/move.h
libbcachefs/movinggc.c
libbcachefs/nocow_locking.c [new file with mode: 0644]
libbcachefs/nocow_locking.h [new file with mode: 0644]
libbcachefs/opts.h
libbcachefs/quota.c
libbcachefs/quota.h
libbcachefs/rebalance.c
libbcachefs/recovery.c
libbcachefs/reflink.c
libbcachefs/reflink.h
libbcachefs/replicas_types.h
libbcachefs/subvolume.c
libbcachefs/subvolume.h
libbcachefs/super.h
libbcachefs/sysfs.c
libbcachefs/tests.c
libbcachefs/two_state_shared_lock.c [new file with mode: 0644]
libbcachefs/two_state_shared_lock.h [new file with mode: 0644]
libbcachefs/util.h
libbcachefs/xattr.h
linux/mean_and_variance.c
linux/printbuf_userspace.c
linux/six.c
linux/wait.c

diff --git a/Makefile b/Makefile
index d460a6d3d0b3e6ab742ec670f950b65c2fb403ad..49f06cf6b4702df30f2f36359b9f524e74e7b772 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,20 @@ PREFIX?=/usr/local
 PKG_CONFIG?=pkg-config
 INSTALL=install
 
-CFLAGS+=-std=gnu89 -O2 -g -MMD -Wall -fPIC                     \
+ifeq ("$(origin V)", "command line")
+  BUILD_VERBOSE = $(V)
+endif
+ifndef BUILD_VERBOSE
+  BUILD_VERBOSE = 0
+endif
+
+ifeq ($(BUILD_VERBOSE),1)
+  Q =
+else
+  Q = @
+endif
+
+CFLAGS+=-std=gnu11 -O2 -g -MMD -Wall -fPIC                     \
        -Wno-pointer-sign                                       \
        -fno-strict-aliasing                                    \
        -fno-delete-null-pointer-checks                         \
@@ -24,7 +37,7 @@ LDFLAGS+=$(CFLAGS) $(EXTRA_LDFLAGS)
 PYTEST_ARGS?=
 PYTEST_CMD?=$(shell \
        command -v pytest-3 \
-       || which pytest-3 \
+       || which pytest-3 2>/dev/null \
 )
 PYTEST:=$(PYTEST_CMD) $(PYTEST_ARGS)
 
@@ -104,7 +117,14 @@ DEPS=$(SRCS:.c=.d)
 -include $(DEPS)
 
 OBJS=$(SRCS:.c=.o)
+
+%.o: %.c
+       @echo "    [CC]     $@"
+       $(Q)$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $<
+
 bcachefs: $(filter-out ./tests/%.o, $(OBJS))
+       @echo "    [LD]     $@"
+       $(Q)$(CC) $(LDFLAGS) $+ $(LOADLIBES) $(LDLIBS) -o $@
 
 RUST_SRCS=$(shell find rust-src/ -type f -iname '*.rs')
 MOUNT_SRCS=$(filter %mount, $(RUST_SRCS))
@@ -115,7 +135,8 @@ debug: bcachefs
 MOUNT_OBJ=$(filter-out ./bcachefs.o ./tests/%.o ./cmd_%.o , $(OBJS))
 libbcachefs.so: LDFLAGS+=-shared
 libbcachefs.so: $(MOUNT_OBJ)
-       $(CC) $(LDFLAGS) $+ -o $@ $(LDLIBS)
+       @echo "    [CC]     $@"
+       $(Q)$(CC) $(LDFLAGS) $+ -o $@ $(LDLIBS)
 
 MOUNT_TOML=rust-src/mount/Cargo.toml
 mount.bcachefs: lib $(MOUNT_SRCS)
@@ -127,13 +148,16 @@ mount.bcachefs: lib $(MOUNT_SRCS)
 
 
 tests/test_helper: $(filter ./tests/%.o, $(OBJS))
+       @echo "    [LD]     $@"
+       $(Q)$(CC) $(LDFLAGS) $+ $(LOADLIBES) $(LDLIBS) -o $@
 
 # If the version string differs from the last build, update the last version
 ifneq ($(VERSION),$(shell cat .version 2>/dev/null))
 .PHONY: .version
 endif
 .version:
-       echo '$(VERSION)' > $@
+       @echo "  [VERS]    $@"
+       $(Q)echo '$(VERSION)' > $@
 
 # Rebuild the 'version' command any time the version string changes
 cmd_version.o : .version
@@ -156,8 +180,9 @@ install: bcachefs lib
 
 .PHONY: clean
 clean:
-       $(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version $(OBJS) $(DEPS) $(DOCGENERATED)
-       $(RM) -rf rust-src/*/target
+       @echo "Cleaning all"
+       $(Q)$(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version *.tar.xz $(OBJS) $(DEPS) $(DOCGENERATED)
+       $(Q)$(RM) -rf rust-src/*/target
 
 .PHONY: deb
 deb: all
@@ -215,3 +240,20 @@ update-bcachefs-sources:
 .PHONY: update-commit-bcachefs-sources
 update-commit-bcachefs-sources: update-bcachefs-sources
        git commit -m "Update bcachefs sources to $(shell git -C $(LINUX_DIR) show --oneline --no-patch)"
+
+SRCTARXZ = bcachefs-tools-$(VERSION).tar.xz
+SRCDIR=bcachefs-tools-$(VERSION)
+
+.PHONY: tarball
+tarball: $(SRCTARXZ)
+
+$(SRCTARXZ) : .gitcensus
+       $(Q)tar --transform "s,^,$(SRCDIR)/," -Jcf $(SRCDIR).tar.xz  \
+           `cat .gitcensus` 
+       @echo Wrote: $@
+
+.PHONY: .gitcensus
+.gitcensus:
+       $(Q)if test -d .git; then \
+         git ls-files > .gitcensus; \
+       fi
diff --git a/cmd_kill_btree_node.c b/cmd_kill_btree_node.c
new file mode 100644
index 0000000..a0e0fc9
--- /dev/null
+++ b/cmd_kill_btree_node.c
@@ -0,0 +1,119 @@
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "cmds.h"
+#include "libbcachefs.h"
+#include "tools-util.h"
+
+#include "libbcachefs/bcachefs.h"
+#include "libbcachefs/btree_iter.h"
+#include "libbcachefs/error.h"
+#include "libbcachefs/super.h"
+
+static void kill_btree_node_usage(void)
+{
+       puts("bcachefs kill_btree_node - make btree nodes unreadable\n"
+            "Usage: bcachefs kill_btree_node [OPTION]... <devices>\n"
+            "\n"
+            "Options:\n"
+            "  -b (extents|inodes|dirents|xattrs)    Btree to delete from\n"
+            "  -l level                              Levle to delete from (0 == leaves)\n"
+            "  -i index                              Index of btree node to kill\n"
+            "  -h                                    Display this help and exit\n"
+            "Report bugs to <linux-bcachefs@vger.kernel.org>");
+}
+
+int cmd_kill_btree_node(int argc, char *argv[])
+{
+       struct bch_opts opts = bch2_opts_empty();
+       enum btree_id btree_id = 0;
+       unsigned level = 0;
+       u64 node_index = 0;
+       int opt;
+
+       opt_set(opts, read_only,        true);
+
+       while ((opt = getopt(argc, argv, "b:l:i:h")) != -1)
+               switch (opt) {
+               case 'b':
+                       btree_id = read_string_list_or_die(optarg,
+                                               bch2_btree_ids, "btree id");
+                       break;
+               case 'l':
+                       if (kstrtouint(optarg, 10, &level) || level >= BTREE_MAX_DEPTH)
+                               die("invalid level");
+                       break;
+               case 'i':
+                       if (kstrtoull(optarg, 10, &node_index))
+                               die("invalid index %s", optarg);
+                       break;
+               case 'h':
+                       kill_btree_node_usage();
+                       exit(EXIT_SUCCESS);
+               }
+       args_shift(optind);
+
+       if (!argc)
+               die("Please supply device(s)");
+
+       struct bch_fs *c = bch2_fs_open(argv, argc, opts);
+       if (IS_ERR(c))
+               die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c)));
+
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct btree *b;
+       int ret;
+       void *zeroes;
+
+       ret = posix_memalign(&zeroes, c->opts.block_size, c->opts.block_size);
+       if (ret)
+               die("error %s from posix_memalign", strerror(ret));
+
+       bch2_trans_init(&trans, c, 0, 0);
+
+       __for_each_btree_node(&trans, iter, btree_id, POS_MIN, 0, level, 0, b, ret) {
+               if (b->c.level != level)
+                       continue;
+
+               if (!node_index) {
+                       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
+                       const struct bch_extent_ptr *ptr;
+
+                       struct printbuf buf = PRINTBUF;
+
+                       bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+                       bch_info(c, "killing btree node %s", buf.buf);
+                       printbuf_exit(&buf);
+
+                       bkey_for_each_ptr(ptrs, ptr) {
+                               struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+                               ret = pwrite(ca->disk_sb.bdev->bd_fd, zeroes,
+                                            c->opts.block_size, ptr->offset << 9);
+                               if (ret != c->opts.block_size) {
+                                       bch_err(c, "pwrite error: expected %u got %i %s",
+                                               c->opts.block_size, ret, strerror(errno));
+                                       ret = EXIT_FAILURE;
+                                       goto done;
+                               }
+                       }
+                       goto done;
+               }
+
+               node_index--;
+       }
+       if (ret)
+               bch_err(c, "error %i walking btree nodes", ret);
+       else
+               bch_err(c, "node at specified index not found");
+       ret = EXIT_FAILURE;
+done:
+       bch2_trans_iter_exit(&trans, &iter);
+       bch2_trans_exit(&trans);
+
+       bch2_fs_stop(c);
+       return ret;
+}
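
Note: one detail worth flagging in the pwrite() loop above: posix_memalign()
returns uninitialized memory, so the buffer named "zeroes" is never actually
cleared (in practice it is usually zero because fresh pages from the kernel
are). A self-contained version of the pattern needs an explicit memset().
The helper below is an illustrative sketch, not code from this patch; it
assumes 512-byte sectors, hence the << 9 byte conversion:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Zero block_size bytes at the given 512-byte sector of an open fd. */
static int zero_block_at_sector(int fd, uint64_t sector, size_t block_size)
{
        void *buf;
        int ret = posix_memalign(&buf, block_size, block_size);

        if (ret)
                return ret;

        memset(buf, 0, block_size);     /* posix_memalign() does not zero */

        ssize_t w = pwrite(fd, buf, block_size, (off_t)(sector << 9));

        free(buf);
        return w == (ssize_t) block_size ? 0 : -1;
}
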
diff --git a/cmd_list_journal.c b/cmd_list_journal.c
index 869d3341dba1f5db32a425e99a2a0246da2f3da9..0836ebfc3989236a5677a0bc3680157451cb09f6 100644
--- a/cmd_list_journal.c
+++ b/cmd_list_journal.c
@@ -5,7 +5,6 @@
 
 #include "cmds.h"
 #include "libbcachefs.h"
-#include "qcow2.h"
 #include "tools-util.h"
 
 #include "libbcachefs/bcachefs.h"
@@ -138,109 +137,3 @@ int cmd_list_journal(int argc, char *argv[])
        bch2_fs_stop(c);
        return 0;
 }
-
-static void kill_btree_node_usage(void)
-{
-       puts("bcachefs kill_btree_node - make btree nodes unreadable\n"
-            "Usage: bcachefs kill_btree_node [OPTION]... <devices>\n"
-            "\n"
-            "Options:\n"
-            "  -b (extents|inodes|dirents|xattrs)    Btree to delete from\n"
-            "  -l level                              Levle to delete from (0 == leaves)\n"
-            "  -i index                              Index of btree node to kill\n"
-            "  -h                                    Display this help and exit\n"
-            "Report bugs to <linux-bcachefs@vger.kernel.org>");
-}
-
-int cmd_kill_btree_node(int argc, char *argv[])
-{
-       struct bch_opts opts = bch2_opts_empty();
-       enum btree_id btree_id = 0;
-       unsigned level = 0;
-       u64 node_index = 0;
-       int opt;
-
-       opt_set(opts, read_only,        true);
-
-       while ((opt = getopt(argc, argv, "b:l:i:h")) != -1)
-               switch (opt) {
-               case 'b':
-                       btree_id = read_string_list_or_die(optarg,
-                                               bch2_btree_ids, "btree id");
-                       break;
-               case 'l':
-                       if (kstrtouint(optarg, 10, &level) || level >= BTREE_MAX_DEPTH)
-                               die("invalid level");
-                       break;
-               case 'i':
-                       if (kstrtoull(optarg, 10, &node_index))
-                               die("invalid index %s", optarg);
-                       break;
-               case 'h':
-                       kill_btree_node_usage();
-                       exit(EXIT_SUCCESS);
-               }
-       args_shift(optind);
-
-       if (!argc)
-               die("Please supply device(s)");
-
-       struct bch_fs *c = bch2_fs_open(argv, argc, opts);
-       if (IS_ERR(c))
-               die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c)));
-
-       struct btree_trans trans;
-       struct btree_iter iter;
-       struct btree *b;
-       int ret;
-       void *zeroes;
-
-       ret = posix_memalign(&zeroes, c->opts.block_size, c->opts.block_size);
-       if (ret)
-               die("error %s from posix_memalign", strerror(ret));
-
-       bch2_trans_init(&trans, c, 0, 0);
-
-       __for_each_btree_node(&trans, iter, btree_id, POS_MIN, 0, level, 0, b, ret) {
-               if (b->c.level != level)
-                       continue;
-
-               if (!node_index) {
-                       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
-                       const struct bch_extent_ptr *ptr;
-
-                       struct printbuf buf = PRINTBUF;
-
-                       bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
-                       bch_info(c, "killing btree node %s", buf.buf);
-                       printbuf_exit(&buf);
-
-                       bkey_for_each_ptr(ptrs, ptr) {
-                               struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-
-                               ret = pwrite(ca->disk_sb.bdev->bd_fd, zeroes,
-                                            c->opts.block_size, ptr->offset << 9);
-                               if (ret != c->opts.block_size) {
-                                       bch_err(c, "pwrite error: expected %u got %i %s",
-                                               c->opts.block_size, ret, strerror(errno));
-                                       ret = EXIT_FAILURE;
-                                       goto done;
-                               }
-                       }
-                       goto done;
-               }
-
-               node_index--;
-       }
-       if (ret)
-               bch_err(c, "error %i walking btree nodes", ret);
-       else
-               bch_err(c, "node at specified index not found");
-       ret = EXIT_FAILURE;
-done:
-       bch2_trans_iter_exit(&trans, &iter);
-       bch2_trans_exit(&trans);
-
-       bch2_fs_stop(c);
-       return ret;
-}
diff --git a/cmd_migrate.c b/cmd_migrate.c
index 3ba51c0c2ebd5d280adc7062c9d96e28473157e1..707f13e1ca1248d59b32c15cfda06a86cc155847 100644
--- a/cmd_migrate.c
+++ b/cmd_migrate.c
@@ -265,6 +265,7 @@ static void write_data(struct bch_fs *c,
        op.nr_replicas  = 1;
        op.subvol       = 1;
        op.pos          = SPOS(dst_inode->bi_inum, dst_offset >> 9, U32_MAX);
+       op.flags |= BCH_WRITE_SYNC;
 
        int ret = bch2_disk_reservation_get(c, &op.res, len >> 9,
                                            c->opts.data_replicas, 0);
@@ -272,7 +273,6 @@ static void write_data(struct bch_fs *c,
                die("error reserving space in new filesystem: %s", strerror(-ret));
 
        closure_call(&op.cl, bch2_write, NULL, &cl);
-       closure_sync(&cl);
 
        dst_inode->bi_sectors += len >> 9;
 }
diff --git a/debian/.gitignore b/debian/.gitignore
new file mode 100644
index 0000000..04bfc77
--- /dev/null
+++ b/debian/.gitignore
@@ -0,0 +1,3 @@
+bcachefs-tools*
+debhelper-build-stamp
+files
diff --git a/debian/changelog b/debian/changelog
index a5c17b2a2b4171add4de1da48aa64bb9049532df..5d7601e022ae3d77f80449e9e172f805de773df5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+bcachefs-tools (24-1) unstable; urgency=medium
+
+  * New upstream release 
+
+ -- Jonathan Carter <jcc@debian.org>  Tue, 29 Nov 2022 09:40:27 +0200
+
 bcachefs-tools (23-1) unstable; urgency=medium
 
   * New upstream release
diff --git a/debian/files b/debian/files
index 1af54c83d794911c5606292af19d114fe87f87a9..a5d4aa23f8d2ceb1b4fdc5ec1992b43c5bacf6ea 100644
--- a/debian/files
+++ b/debian/files
@@ -1 +1 @@
-bcachefs-tools_23-1_source.buildinfo utils optional
+bcachefs-tools_24-1_source.buildinfo utils optional
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 38a364c07c1b619668f7823fa9d546944022369e..a9852fa1f99a3370f0baa1c489dada515e1402a2 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -54,6 +54,8 @@ typedef struct {
 #define __ATOMIC_ADD_RETURN_RELEASE(v, p)                              \
                                        __atomic_add_fetch(p, v, __ATOMIC_RELEASE)
 #define __ATOMIC_SUB_RETURN(v, p)      __atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_SUB_RETURN_RELEASE(v, p)                              \
+                                       __atomic_sub_fetch(p, v, __ATOMIC_RELEASE)
 
 #define xchg(p, v)                     __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
 #define xchg_acquire(p, v)             __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
@@ -123,6 +125,11 @@ do {                                                                       \
        ({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
 #endif
 
+#ifndef __ATOMIC_SUB_RETURN_RELEASE
+#define __ATOMIC_SUB_RETURN_RELEASE(i, v)                              \
+       ({ smp_mb__before_atomic(); __ATOMIC_SUB_RETURN(i, v); })
+#endif
+
 #ifndef __ATOMIC_SUB
 #define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
 #endif
@@ -164,6 +171,11 @@ static inline i_type a_type##_add_return_release(i_type i, a_type##_t *v)\
        return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter);             \
 }                                                                      \
                                                                        \
+static inline i_type a_type##_sub_return_release(i_type i, a_type##_t *v)\
+{                                                                      \
+       return __ATOMIC_SUB_RETURN_RELEASE(i, &v->counter);             \
+}                                                                      \
+                                                                       \
 static inline i_type a_type##_sub_return(i_type i, a_type##_t *v)      \
 {                                                                      \
        return __ATOMIC_SUB_RETURN(i, &v->counter);                     \
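
Note: under this userspace shim, the generated sub_return_release helpers
reduce to a single GCC/Clang __atomic builtin. A minimal standalone sketch
of the semantics (the typedef and function below are illustrative, mirroring
the int-sized atomic_t case):

typedef struct { int counter; } atomic_t;

/* Subtract i and return the new value with RELEASE ordering: all prior
 * writes by this thread become visible before the counter update. */
static inline int atomic_sub_return_release(int i, atomic_t *v)
{
        return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELEASE);
}
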
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d31b5f5622c255923635086b477e7c2375ce0927..b2c1751c56e2fcdffb87d7ef34c49571a0f11e36 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -229,6 +229,8 @@ static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *
 }
 
 struct printbuf;
+extern void prt_u64(struct printbuf *out, u64 num);
+
 extern __printf(2, 0) void prt_vprintf(struct printbuf *out, const char *fmt, va_list args);
 extern __printf(2, 3) void prt_printf(struct printbuf *out, const char *fmt, ...);
 
index 3d62abe75976f3780e86d0ba435c4ceb9adc071b..cbac6ac88117ee350fc1286eefdaf29b852b8036 100644 (file)
@@ -155,7 +155,7 @@ struct mean_and_variance_weighted {
        u64 variance;
 };
 
-inline s64 fast_divpow2(s64 n, u8 d);
+s64 fast_divpow2(s64 n, u8 d);
 
 struct mean_and_variance mean_and_variance_update(struct mean_and_variance s1, s64 v1);
        s64              mean_and_variance_get_mean(struct mean_and_variance s);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ae292241c82c5f8ce361443d1f53a3e91e4737a0..3db40cb49cb39b41d5f6e3c60fd7fdcb428c53e5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -4,6 +4,8 @@
 #include <urcu.h>
 #include <linux/compiler.h>
 
+#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
+
 #define rcu_dereference_check(p, c)    rcu_dereference(p)
 #define rcu_dereference_raw(p)         rcu_dereference(p)
 #define rcu_dereference_protected(p, c)        rcu_dereference(p)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d1d33e67d9e812d73dc79ec96bcbb9e6e0d99f6f..d30fb10d63f5b38b6076015d9d97ab47755eb931 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -24,6 +24,7 @@ typedef struct {
 } wait_queue_head_t;
 
 void wake_up(wait_queue_head_t *);
+void wake_up_all(wait_queue_head_t *);
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index d3d9e965e7020efb96b4eff9a03f89a19d3a88f6..47ba750d7937c84559e31585bb3a7391a37a6d0a 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -344,25 +344,29 @@ DEFINE_EVENT(btree_node, btree_node_free,
 TRACE_EVENT(btree_reserve_get_fail,
        TP_PROTO(const char *trans_fn,
                 unsigned long caller_ip,
-                size_t required),
-       TP_ARGS(trans_fn, caller_ip, required),
+                size_t required,
+                int ret),
+       TP_ARGS(trans_fn, caller_ip, required, ret),
 
        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
                __field(unsigned long,          caller_ip       )
                __field(size_t,                 required        )
+               __array(char,                   ret, 32         )
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->required       = required;
+               strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
        ),
 
-       TP_printk("%s %pS required %zu",
+       TP_printk("%s %pS required %zu ret %s",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 __entry->required)
+                 __entry->required,
+                 __entry->ret)
 );
 
 DEFINE_EVENT(btree_node, btree_node_compact,
@@ -411,7 +415,7 @@ TRACE_EVENT(btree_path_relock_fail,
        TP_fast_assign(
                struct btree *b = btree_path_node(path, level);
 
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = path->level;
@@ -462,7 +466,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
        TP_fast_assign(
                struct six_lock_count c;
 
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->level                  = level;
@@ -524,7 +528,7 @@ TRACE_EVENT(bucket_alloc,
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->user           = user;
                __entry->bucket         = bucket;
        ),
@@ -542,14 +546,11 @@ TRACE_EVENT(bucket_alloc_fail,
                 u64 avail,
                 u64 copygc_wait_amount,
                 s64 copygc_waiting_for,
-                u64 seen,
-                u64 open,
-                u64 need_journal_commit,
-                u64 nouse,
+                struct bucket_alloc_state *s,
                 bool nonblocking,
                 const char *err),
        TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
-               seen, open, need_journal_commit, nouse, nonblocking, err),
+               s, nonblocking, err),
 
        TP_STRUCT__entry(
                __field(dev_t,                  dev                     )
@@ -568,17 +569,17 @@ TRACE_EVENT(bucket_alloc_fail,
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+               strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
                __entry->free           = free;
                __entry->avail          = avail;
                __entry->copygc_wait_amount     = copygc_wait_amount;
                __entry->copygc_waiting_for     = copygc_waiting_for;
-               __entry->seen           = seen;
-               __entry->open           = open;
-               __entry->need_journal_commit = need_journal_commit;
-               __entry->nouse          = nouse;
+               __entry->seen           = s->buckets_seen;
+               __entry->open           = s->skipped_open;
+               __entry->need_journal_commit = s->skipped_need_journal_commit;
+               __entry->nouse          = s->skipped_nouse;
                __entry->nonblocking    = nonblocking;
-               strlcpy(__entry->err, err, sizeof(__entry->err));
+               strscpy(__entry->err, err, sizeof(__entry->err));
        ),
 
        TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
@@ -616,7 +617,7 @@ TRACE_EVENT(discard_buckets,
                __entry->open                   = open;
                __entry->need_journal_commit    = need_journal_commit;
                __entry->discarded              = discarded;
-               strlcpy(__entry->err, err, sizeof(__entry->err));
+               strscpy(__entry->err, err, sizeof(__entry->err));
        ),
 
        TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
@@ -766,7 +767,7 @@ DECLARE_EVENT_CLASS(transaction_event,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
        ),
 
@@ -811,7 +812,7 @@ TRACE_EVENT(trans_restart_journal_preres_get,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->flags                  = flags;
        ),
@@ -871,7 +872,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                TRACE_BPOS_assign(pos, path->pos)
@@ -918,7 +919,7 @@ TRACE_EVENT(trans_restart_upgrade,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
@@ -1027,7 +1028,7 @@ TRACE_EVENT(trans_restart_would_deadlock_write,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
        ),
 
        TP_printk("%s", __entry->trans_fn)
@@ -1046,7 +1047,7 @@ TRACE_EVENT(trans_restart_mem_realloced,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip      = caller_ip;
                __entry->bytes          = bytes;
        ),
@@ -1075,7 +1076,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
                __entry->caller_ip              = caller_ip;
 
                __entry->btree_id       = path->btree_id;
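
Note: the strlcpy() -> strscpy() conversions in these tracepoints are not
cosmetic. strlcpy() always walks the whole source string (its return value
is strlen(src)), so it can read past the end of a source buffer that is not
NUL-terminated; strscpy() is bounded by the destination size and reports
truncation instead. A rough userspace sketch of the strscpy() contract (not
the kernel implementation):

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

/* Bounded copy: always NUL-terminates when count > 0, never reads src
 * beyond what fits, returns chars copied or -E2BIG on truncation. */
static ssize_t strscpy_sketch(char *dst, const char *src, size_t count)
{
        size_t i;

        if (!count)
                return -E2BIG;

        for (i = 0; i < count - 1 && src[i]; i++)
                dst[i] = src[i];
        dst[i] = '\0';

        return src[i] ? -E2BIG : (ssize_t) i;
}
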
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index 796b9f5afe8c66c3067888f3f4426246b5a6e94d..742313c2cfc2b8c22f592ed45a4bbcafc2b68fe8 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -279,6 +279,22 @@ int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
                return -EINVAL;
        }
 
+       /*
+        * XXX this is wrong, we'll be checking updates that happened from
+        * before BCH_FS_CHECK_BACKPOINTERS_DONE
+        */
+       if (rw == WRITE && test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+               unsigned i, bp_len = 0;
+
+               for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
+                       bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;
+
+               if (bp_len > a.v->dirty_sectors) {
+                       prt_printf(err, "too many backpointers");
+                       return -EINVAL;
+               }
+       }
+
        if (rw == WRITE) {
                if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
                        prt_printf(err, "invalid data type (got %u should be %u)",
diff --git a/libbcachefs/alloc_background.h b/libbcachefs/alloc_background.h
index 044bc72992d4186d551da21ba63defa09334da5e..ee683bdde956c7b08268feaf9d2bfe019d7b6ba8 100644
--- a/libbcachefs/alloc_background.h
+++ b/libbcachefs/alloc_background.h
@@ -103,34 +103,34 @@ int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c, int, struct pr
 void bch2_alloc_v4_swab(struct bkey_s);
 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_alloc (struct bkey_ops) {                \
+#define bch2_bkey_ops_alloc ((struct bkey_ops) {       \
        .key_invalid    = bch2_alloc_v1_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v2_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v3_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
-#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) {     \
+#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {    \
        .key_invalid    = bch2_alloc_v4_invalid,        \
        .val_to_text    = bch2_alloc_to_text,           \
        .swab           = bch2_alloc_v4_swab,           \
        .trans_trigger  = bch2_trans_mark_alloc,        \
        .atomic_trigger = bch2_mark_alloc,              \
-}
+})
 
 static inline bool bkey_is_alloc(const struct bkey *k)
 {
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index a9e0c7397292fcab2e6dbcac08b7dd335c6c0147..c4f971c12a5100175a81da9bf0eef2dac3dc7166 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -14,6 +14,7 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
 #include "btree_iter.h"
 #include "btree_update.h"
 #include "btree_gc.h"
@@ -194,26 +195,29 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
                                              u64 bucket,
                                              enum alloc_reserve reserve,
                                              struct bch_alloc_v4 *a,
-                                             u64 *skipped_open,
-                                             u64 *skipped_need_journal_commit,
-                                             u64 *skipped_nouse,
+                                             struct bucket_alloc_state *s,
                                              struct closure *cl)
 {
        struct open_bucket *ob;
 
        if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
-               (*skipped_nouse)++;
+               s->skipped_nouse++;
                return NULL;
        }
 
        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
-               (*skipped_open)++;
+               s->skipped_open++;
                return NULL;
        }
 
        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
-               (*skipped_need_journal_commit)++;
+               s->skipped_need_journal_commit++;
+               return NULL;
+       }
+
+       if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+               s->skipped_nocow++;
                return NULL;
        }
 
@@ -233,7 +237,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
        /* Recheck under lock: */
        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
                spin_unlock(&c->freelist_lock);
-               (*skipped_open)++;
+               s->skipped_open++;
                return NULL;
        }
 
@@ -273,9 +277,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
                                            enum alloc_reserve reserve, u64 free_entry,
-                                           u64 *skipped_open,
-                                           u64 *skipped_need_journal_commit,
-                                           u64 *skipped_nouse,
+                                           struct bucket_alloc_state *s,
                                            struct bkey_s_c freespace_k,
                                            struct closure *cl)
 {
@@ -333,11 +335,30 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
                goto err;
        }
 
-       ob = __try_alloc_bucket(c, ca, b, reserve, &a,
-                               skipped_open,
-                               skipped_need_journal_commit,
-                               skipped_nouse,
-                               cl);
+       if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+               struct bch_backpointer bp;
+               u64 bp_offset = 0;
+
+               ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+                                               &bp_offset, &bp,
+                                               BTREE_ITER_NOPRESERVE);
+               if (ret) {
+                       ob = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (bp_offset != U64_MAX) {
+                       /*
+                        * Bucket may have data in it - we don't call
+                        * bch2_trans_inconsistent() because fsck hasn't
+                        * finished yet
+                        */
+                       ob = NULL;
+                       goto err;
+               }
+       }
+
+       ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
        if (!ob)
                iter.path->preserve = false;
 err:
@@ -383,11 +404,7 @@ static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
                        struct bch_dev *ca,
                        enum alloc_reserve reserve,
-                       u64 *cur_bucket,
-                       u64 *buckets_seen,
-                       u64 *skipped_open,
-                       u64 *skipped_need_journal_commit,
-                       u64 *skipped_nouse,
+                       struct bucket_alloc_state *s,
                        struct closure *cl)
 {
        struct btree_iter iter;
@@ -395,10 +412,10 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
        struct open_bucket *ob = NULL;
        int ret;
 
-       *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
-       *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
+       s->cur_bucket = max_t(u64, s->cur_bucket, ca->mi.first_bucket);
+       s->cur_bucket = max_t(u64, s->cur_bucket, ca->new_fs_bucket_idx);
 
-       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
                           BTREE_ITER_SLOTS, k, ret) {
                struct bch_alloc_v4 a;
 
@@ -414,19 +431,15 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
                if (a.data_type != BCH_DATA_free)
                        continue;
 
-               (*buckets_seen)++;
+               s->buckets_seen++;
 
-               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
-                                       skipped_open,
-                                       skipped_need_journal_commit,
-                                       skipped_nouse,
-                                       cl);
+               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);
 
-       *cur_bucket = iter.pos.offset;
+       s->cur_bucket = iter.pos.offset;
 
        return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
 }
@@ -434,11 +447,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
                                                   struct bch_dev *ca,
                                                   enum alloc_reserve reserve,
-                                                  u64 *cur_bucket,
-                                                  u64 *buckets_seen,
-                                                  u64 *skipped_open,
-                                                  u64 *skipped_need_journal_commit,
-                                                  u64 *skipped_nouse,
+                                                  struct bucket_alloc_state *s,
                                                   struct closure *cl)
 {
        struct btree_iter iter;
@@ -454,25 +463,21 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
         * at previously
         */
        for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
-                                    POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
+                                    POS(ca->dev_idx, s->cur_bucket), 0, k, ret) {
                if (k.k->p.inode != ca->dev_idx)
                        break;
 
-               for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
-                    *cur_bucket < k.k->p.offset;
-                    (*cur_bucket)++) {
+               for (s->cur_bucket = max(s->cur_bucket, bkey_start_offset(k.k));
+                    s->cur_bucket < k.k->p.offset;
+                    s->cur_bucket++) {
                        ret = btree_trans_too_many_iters(trans);
                        if (ret)
                                break;
 
-                       (*buckets_seen)++;
+                       s->buckets_seen++;
 
                        ob = try_alloc_bucket(trans, ca, reserve,
-                                             *cur_bucket,
-                                             skipped_open,
-                                             skipped_need_journal_commit,
-                                             skipped_nouse,
-                                             k, cl);
+                                             s->cur_bucket, s, k, cl);
                        if (ob)
                                break;
                }
@@ -502,11 +507,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
        bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
        u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
        u64 avail;
-       u64 cur_bucket = start;
-       u64 buckets_seen = 0;
-       u64 skipped_open = 0;
-       u64 skipped_need_journal_commit = 0;
-       u64 skipped_nouse = 0;
+       struct bucket_alloc_state s = { .cur_bucket = start };
        bool waiting = false;
 again:
        bch2_dev_usage_read_fast(ca, usage);
@@ -545,31 +546,19 @@ again:
        }
 
        ob = likely(ca->mi.freespace_initialized)
-               ? bch2_bucket_alloc_freelist(trans, ca, reserve,
-                                       &cur_bucket,
-                                       &buckets_seen,
-                                       &skipped_open,
-                                       &skipped_need_journal_commit,
-                                       &skipped_nouse,
-                                       cl)
-               : bch2_bucket_alloc_early(trans, ca, reserve,
-                                       &cur_bucket,
-                                       &buckets_seen,
-                                       &skipped_open,
-                                       &skipped_need_journal_commit,
-                                       &skipped_nouse,
-                                       cl);
-
-       if (skipped_need_journal_commit * 2 > avail)
+               ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
+               : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+
+       if (s.skipped_need_journal_commit * 2 > avail)
                bch2_journal_flush_async(&c->journal, NULL);
 
        if (!ob && !freespace_initialized && start) {
-               start = cur_bucket = 0;
+               start = s.cur_bucket = 0;
                goto again;
        }
 
        if (!freespace_initialized)
-               ca->bucket_alloc_trans_early_cursor = cur_bucket;
+               ca->bucket_alloc_trans_early_cursor = s.cur_bucket;
 err:
        if (!ob)
                ob = ERR_PTR(-BCH_ERR_no_buckets_found);
@@ -584,10 +573,7 @@ err:
                                avail,
                                bch2_copygc_wait_amount(c),
                                c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-                               buckets_seen,
-                               skipped_open,
-                               skipped_need_journal_commit,
-                               skipped_nouse,
+                               &s,
                                cl == NULL,
                                bch2_err_str(PTR_ERR(ob)));
 
@@ -1129,16 +1115,17 @@ out:
 /*
  * Get us an open_bucket we can allocate from, return with it locked:
  */
-struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *trans,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+                                  unsigned target,
+                                  unsigned erasure_code,
+                                  struct write_point_specifier write_point,
+                                  struct bch_devs_list *devs_have,
+                                  unsigned nr_replicas,
+                                  unsigned nr_replicas_required,
+                                  enum alloc_reserve reserve,
+                                  unsigned flags,
+                                  struct closure *cl,
+                                  struct write_point **wp_ret)
 {
        struct bch_fs *c = trans->c;
        struct write_point *wp;
@@ -1160,7 +1147,7 @@ retry:
        write_points_nr = c->write_points_nr;
        have_cache      = false;
 
-       wp = writepoint_find(trans, write_point.v);
+       *wp_ret = wp = writepoint_find(trans, write_point.v);
 
        if (wp->data_type == BCH_DATA_user)
                ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
@@ -1217,7 +1204,7 @@ alloc_done:
 
        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
 
-       return wp;
+       return 0;
 err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
@@ -1235,39 +1222,13 @@ err:
        if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
            bch2_err_matches(ret, BCH_ERR_freelist_empty))
                return cl
-                       ? ERR_PTR(-EAGAIN)
-                       : ERR_PTR(-BCH_ERR_ENOSPC_bucket_alloc);
+                       ? -EAGAIN
+                       : -BCH_ERR_ENOSPC_bucket_alloc;
 
        if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
-               return ERR_PTR(-EROFS);
-
-       return ERR_PTR(ret);
-}
-
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
-{
-       struct write_point *wp;
-
-       bch2_trans_do(c, NULL, NULL, 0,
-                     PTR_ERR_OR_ZERO(wp = bch2_alloc_sectors_start_trans(&trans, target,
-                                                       erasure_code,
-                                                       write_point,
-                                                       devs_have,
-                                                       nr_replicas,
-                                                       nr_replicas_required,
-                                                       reserve,
-                                                       flags, cl)));
-       return wp;
+               return -EROFS;
 
+       return ret;
 }
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
@@ -1338,6 +1299,10 @@ static inline void writepoint_init(struct write_point *wp,
 {
        mutex_init(&wp->lock);
        wp->data_type = type;
+
+       INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+       INIT_LIST_HEAD(&wp->writes);
+       spin_lock_init(&wp->writes_lock);
 }
 
 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
diff --git a/libbcachefs/alloc_foreground.h b/libbcachefs/alloc_foreground.h
index 6de63a351fa881f2547bb01e0f5ee593bfbb8410..16490ffbd2c72133e4441c96a6e4e6eaa305c1a5 100644
--- a/libbcachefs/alloc_foreground.h
+++ b/libbcachefs/alloc_foreground.h
@@ -136,22 +136,15 @@ int bch2_bucket_alloc_set(struct bch_fs *, struct open_buckets *,
                      unsigned, unsigned *, bool *, enum alloc_reserve,
                      unsigned, struct closure *);
 
-struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *,
-                                            unsigned, unsigned,
-                                            struct write_point_specifier,
-                                            struct bch_devs_list *,
-                                            unsigned, unsigned,
-                                            enum alloc_reserve,
-                                            unsigned,
-                                            struct closure *);
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *,
-                                            unsigned, unsigned,
-                                            struct write_point_specifier,
-                                            struct bch_devs_list *,
-                                            unsigned, unsigned,
-                                            enum alloc_reserve,
-                                            unsigned,
-                                            struct closure *);
+int bch2_alloc_sectors_start_trans(struct btree_trans *,
+                                  unsigned, unsigned,
+                                  struct write_point_specifier,
+                                  struct bch_devs_list *,
+                                  unsigned, unsigned,
+                                  enum alloc_reserve,
+                                  unsigned,
+                                  struct closure *,
+                                  struct write_point **);
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
diff --git a/libbcachefs/alloc_types.h b/libbcachefs/alloc_types.h
index e078584d46f6562372bccc0303ed71109cb3bcee..271b4bf2b95ea1859194f9a00926bba6e5b25a48 100644
--- a/libbcachefs/alloc_types.h
+++ b/libbcachefs/alloc_types.h
@@ -8,6 +8,15 @@
 #include "clock_types.h"
 #include "fifo.h"
 
+struct bucket_alloc_state {
+       u64     cur_bucket;
+       u64     buckets_seen;
+       u64     skipped_open;
+       u64     skipped_need_journal_commit;
+       u64     skipped_nocow;
+       u64     skipped_nouse;
+};
+
 struct ec_bucket_buf;
 
 #define BCH_ALLOC_RESERVES()           \
@@ -78,6 +87,11 @@ struct write_point {
 
        struct open_buckets     ptrs;
        struct dev_stripe_state stripe;
+
+       struct work_struct      index_update_work;
+
+       struct list_head        writes;
+       spinlock_t              writes_lock;
 };
 
 struct write_point_specifier {
diff --git a/libbcachefs/backpointers.c b/libbcachefs/backpointers.c
index d74de1df7aa3433a9f2c284e96ab66bc9086f5db..614811eafa59f409568e29758746c3d3d75b0912 100644
--- a/libbcachefs/backpointers.c
+++ b/libbcachefs/backpointers.c
@@ -9,8 +9,6 @@
 
 #include <linux/mm.h>
 
-#define MAX_EXTENT_COMPRESS_RATIO_SHIFT                10
-
 /*
  * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
  * btree:
@@ -43,27 +41,6 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
        return ret;
 }
 
-void bch2_extent_ptr_to_bp(struct bch_fs *c,
-                          enum btree_id btree_id, unsigned level,
-                          struct bkey_s_c k, struct extent_ptr_decoded p,
-                          struct bpos *bucket_pos, struct bch_backpointer *bp)
-{
-       enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
-       s64 sectors = level ? btree_sectors(c) : k.k->size;
-       u32 bucket_offset;
-
-       *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
-       *bp = (struct bch_backpointer) {
-               .btree_id       = btree_id,
-               .level          = level,
-               .data_type      = data_type,
-               .bucket_offset  = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
-                       p.crc.offset,
-               .bucket_len     = ptr_disk_sectors(sectors, p),
-               .pos            = k.k->p,
-       };
-}
-
 static bool extent_matches_bp(struct bch_fs *c,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c k,
@@ -984,9 +961,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
                        break;
 
                if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
-                       bch_verbose(c, "check_extents_to_backpointers(): alloc info does not fit in ram,"
-                                   "running in multiple passes with %zu nodes per pass",
-                                   btree_nodes_fit_in_ram(c));
+                       bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+                                   __func__, btree_nodes_fit_in_ram(c));
 
                if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;
@@ -1099,9 +1075,8 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 
                if (!bbpos_cmp(start, BBPOS_MIN) &&
                    bbpos_cmp(end, BBPOS_MAX))
-                       bch_verbose(c, "check_backpointers_to_extents(): extents do not fit in ram,"
-                                   "running in multiple passes with %zu nodes per pass",
-                                   btree_nodes_fit_in_ram(c));
+                       bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+                                   __func__, btree_nodes_fit_in_ram(c));
 
                if (bbpos_cmp(start, BBPOS_MIN) ||
                    bbpos_cmp(end, BBPOS_MAX)) {
diff --git a/libbcachefs/backpointers.h b/libbcachefs/backpointers.h
index 1c97e364c53200170d89485752124c6c15bd420b..48a48b75c0ac137b22f566568024ebb8525c241b 100644
--- a/libbcachefs/backpointers.h
+++ b/libbcachefs/backpointers.h
@@ -2,6 +2,7 @@
 #ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
 #define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
 
+#include "buckets.h"
 #include "super.h"
 
 int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k,
@@ -10,15 +11,34 @@ void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *)
 void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 void bch2_backpointer_swab(struct bkey_s);
 
-#define bch2_bkey_ops_backpointer (struct bkey_ops) {  \
+#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
        .key_invalid    = bch2_backpointer_invalid,     \
        .val_to_text    = bch2_backpointer_k_to_text,   \
        .swab           = bch2_backpointer_swab,        \
-}
+})
+
+#define MAX_EXTENT_COMPRESS_RATIO_SHIFT                10
 
-void bch2_extent_ptr_to_bp(struct bch_fs *, enum btree_id, unsigned,
-                          struct bkey_s_c, struct extent_ptr_decoded,
-                          struct bpos *, struct bch_backpointer *);
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
+                          enum btree_id btree_id, unsigned level,
+                          struct bkey_s_c k, struct extent_ptr_decoded p,
+                          struct bpos *bucket_pos, struct bch_backpointer *bp)
+{
+       enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
+       s64 sectors = level ? btree_sectors(c) : k.k->size;
+       u32 bucket_offset;
+
+       *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
+       *bp = (struct bch_backpointer) {
+               .btree_id       = btree_id,
+               .level          = level,
+               .data_type      = data_type,
+               .bucket_offset  = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
+                       p.crc.offset,
+               .bucket_len     = ptr_disk_sectors(sectors, p),
+               .pos            = k.k->p,
+       };
+}
 
 int bch2_bucket_backpointer_del(struct btree_trans *, struct bkey_i_alloc_v4 *,
                                struct bch_backpointer, struct bkey_s_c);
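
Note: the bucket_offset field built above combines two quantities in one
u64: the bucket-relative offset shifted up by MAX_EXTENT_COMPRESS_RATIO_SHIFT,
plus the crc offset. A small illustrative helper (the function name and the
example values are not from the patch):

#include <stdint.h>

#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10

/* Pack a bucket-relative offset and a crc offset the same way
 * bch2_extent_ptr_to_bp() does above. */
static inline uint64_t bp_pack_offset(uint32_t bucket_offset, uint32_t crc_offset)
{
        return ((uint64_t) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT)
                + crc_offset;
}

/* e.g. bucket_offset = 5, crc_offset = 3 gives (5 << 10) + 3 = 5123 */
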
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index 33186fa82682e037af78dcb3543d5bbf3049e26b..33341008016b7811c7a75d43200f91d414ae3671 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
 #include "bcachefs_format.h"
 #include "errcode.h"
 #include "fifo.h"
+#include "nocow_locking.h"
 #include "opts.h"
 #include "util.h"
 
@@ -226,15 +227,31 @@ do {                                                                      \
         dynamic_fault("bcachefs:meta:write:" name)
 
 #ifdef __KERNEL__
-#define bch2_log_msg(_c, fmt)          "bcachefs (%s): " fmt, ((_c)->name)
-#define bch2_fmt(_c, fmt)              bch2_log_msg(_c, fmt "\n")
-#define bch2_fmt_inum(_c, _inum, fmt)  "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
+#define BCACHEFS_LOG_PREFIX
+#endif
+
+#ifdef BCACHEFS_LOG_PREFIX
+
+#define bch2_log_msg(_c, fmt)                  "bcachefs (%s): " fmt, ((_c)->name)
+#define bch2_fmt_dev(_ca, fmt)                 "bcachefs (%s): " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt)          "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)                  \
+        "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
+
 #else
-#define bch2_log_msg(_c, fmt)          fmt
-#define bch2_fmt(_c, fmt)              fmt "\n"
-#define bch2_fmt_inum(_c, _inum, fmt)  "inum %llu: " fmt "\n", (_inum)
+
+#define bch2_log_msg(_c, fmt)                  fmt
+#define bch2_fmt_dev(_ca, fmt)                 "%s: " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt)          "inum %llu: " fmt "\n", (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)                          \
+        "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
+
 #endif
 
+#define bch2_fmt(_c, fmt)              bch2_log_msg(_c, fmt "\n")
+
 #define bch_info(c, fmt, ...) \
        printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
 #define bch_notice(c, fmt, ...) \
@@ -243,13 +260,28 @@ do {                                                                      \
        printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
 #define bch_warn_ratelimited(c, fmt, ...) \
        printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
+
 #define bch_err(c, fmt, ...) \
        printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev(ca, fmt, ...) \
+       printk(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset(ca, _offset, fmt, ...) \
+       printk(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
+#define bch_err_inum(c, _inum, fmt, ...) \
+       printk(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
+       printk(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
 
 #define bch_err_ratelimited(c, fmt, ...) \
        printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev_ratelimited(ca, fmt, ...) \
+       printk_ratelimited(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
+       printk_ratelimited(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
 #define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
        printk_ratelimited(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
+       printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
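The message-formatting macros gain device- and offset-scoped variants, and the "bcachefs (%s): " prefix is now gated on BCACHEFS_LOG_PREFIX, defined only for kernel builds, where messages land in a shared log. An illustrative expansion, assuming inum and offset are u64 variables:

bch_err_inum_offset(c, inum, offset, "checksum error");

/* with BCACHEFS_LOG_PREFIX defined, this expands roughly to: */
printk(KERN_ERR "bcachefs (%s inum %llu offset %llu): checksum error\n",
       c->name, inum, offset);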
 
 #define bch_verbose(c, fmt, ...)                                       \
 do {                                                                   \
@@ -282,7 +314,7 @@ do {                                                                        \
                "When reading btree nodes, read all replicas and "      \
                "compare them")
 
-/* Parameters that should only be compiled in in debug mode: */
+/* Parameters that should only be compiled in debug mode: */
 #define BCH_DEBUG_PARAMS_DEBUG()                                       \
        BCH_DEBUG_PARAM(expensive_debug_checks,                         \
                "Enables various runtime debugging checks that "        \
@@ -345,7 +377,8 @@ BCH_DEBUG_PARAMS_DEBUG()
        x(journal_flush_seq)                    \
        x(blocked_journal)                      \
        x(blocked_allocate)                     \
-       x(blocked_allocate_open_bucket)
+       x(blocked_allocate_open_bucket)         \
+       x(nocow_lock_contended)
 
 enum bch_time_stats {
 #define x(name) BCH_TIME_##name,
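Adding an entry to the x-macro list above is all that is needed to grow this enum: each x(name) expands to a BCH_TIME_##name constant, and the same list generates the matching name strings for the time-stats output elsewhere. Sketch of the expansion:

enum bch_time_stats {
        /* ... earlier entries ... */
        BCH_TIME_blocked_allocate,
        BCH_TIME_blocked_allocate_open_bucket,
        BCH_TIME_nocow_lock_contended,
        BCH_TIME_STAT_NR
};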
@@ -444,6 +477,7 @@ struct bch_dev {
        struct bch_sb           *sb_read_scratch;
        int                     sb_write_error;
        dev_t                   dev;
+       atomic_t                flush_seq;
 
        struct bch_devs_mask    self;
 
@@ -707,6 +741,13 @@ struct bch_fs {
        struct workqueue_struct *btree_interior_update_worker;
        struct work_struct      btree_interior_update_work;
 
+       /* btree_io.c: */
+       spinlock_t              btree_write_error_lock;
+       struct btree_write_stats {
+               atomic64_t      nr;
+               atomic64_t      bytes;
+       }                       btree_write_stats[BTREE_WRITE_TYPE_NR];
+
        /* btree_iter.c: */
        struct mutex            btree_trans_lock;
        struct list_head        btree_trans_list;
@@ -819,6 +860,8 @@ struct bch_fs {
        struct bio_set          bio_write;
        struct mutex            bio_bounce_pages_lock;
        mempool_t               bio_bounce_pages;
+       struct bucket_nocow_lock_table
+                               nocow_locks;
        struct rhashtable       promote_table;
 
        mempool_t               compression_bounce[2];
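bch_fs grows a table of nocow bucket locks, used by the new in-place-write path to exclude concurrent movers from a bucket being written without copy-on-write. The table itself lives in the new nocow_locking.h; conceptually it is a small fixed hash table keyed by bucket, so no per-bucket state is allocated. A self-contained sketch of the idea (size and names assumed, not the actual implementation):

#include <linux/atomic.h>
#include <linux/hash.h>

#define BUCKET_NOCOW_LOCKS_BITS 10      /* assumed table size */
#define BUCKET_NOCOW_LOCKS      (1U << BUCKET_NOCOW_LOCKS_BITS)

struct bucket_nocow_lock_table_sketch {
        atomic_long_t l[BUCKET_NOCOW_LOCKS];    /* stand-in for the real lock type */
};

static inline atomic_long_t *
bucket_nocow_lock_sketch(struct bucket_nocow_lock_table_sketch *t,
                         unsigned dev, u64 bucket)
{
        u64 v = ((u64) dev << 56) | bucket;     /* fold device into the key */

        return &t->l[hash_64(v, BUCKET_NOCOW_LOCKS_BITS)];
}

Hash collisions simply share a lock, which costs some false contention but never correctness.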
@@ -880,11 +923,7 @@ struct bch_fs {
        struct bio_set          writepage_bioset;
        struct bio_set          dio_write_bioset;
        struct bio_set          dio_read_bioset;
-
-
-       atomic64_t              btree_writes_nr;
-       atomic64_t              btree_writes_sectors;
-       spinlock_t              btree_write_error_lock;
+       struct bio_set          nocow_flush_bioset;
 
        /* ERRORS */
        struct list_head        fsck_errors;
index bfcb75a361cb4c1edf231ca0fce8ae235dad2984..0aa522b7f0f29a210c8a0a7b52691726cbc16955 100644 (file)
@@ -147,7 +147,7 @@ struct bpos {
 #else
 #error edit for your odd byteorder.
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 #define KEY_INODE_MAX                  ((__u64)~0ULL)
 #define KEY_OFFSET_MAX                 ((__u64)~0ULL)
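The long-form GCC attributes are replaced throughout this header with the kernel's standard shorthand macros; the generated layout is identical, since they are defined as:

#define __packed        __attribute__((__packed__))
#define __aligned(x)    __attribute__((__aligned__(x)))

This substitution repeats mechanically through the rest of the on-disk format definitions below.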
@@ -181,7 +181,7 @@ struct bversion {
        __u32           hi;
        __u64           lo;
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 struct bkey {
        /* Size of combined key and value, in u64s */
@@ -214,7 +214,7 @@ struct bkey {
 
        __u8            pad[1];
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bkey_packed {
        __u64           _data[0];
@@ -248,7 +248,7 @@ struct bkey_packed {
         * to the same size as struct bkey should hopefully be safest.
         */
        __u8            pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BKEY_U64s                      (sizeof(struct bkey) / sizeof(__u64))
 #define BKEY_U64s_MAX                  U8_MAX
@@ -478,7 +478,7 @@ struct bch_set {
 struct bch_csum {
        __le64                  lo;
        __le64                  hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_EXTENT_ENTRY_TYPES()               \
        x(ptr,                  0)              \
@@ -515,7 +515,7 @@ struct bch_extent_crc32 {
                                _compressed_size:7,
                                type:2;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC32_SIZE_MAX         (1U << 7)
 #define CRC32_NONCE_MAX                0
@@ -541,7 +541,7 @@ struct bch_extent_crc64 {
                                type:3;
 #endif
        __u64                   csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC64_SIZE_MAX         (1U << 9)
 #define CRC64_NONCE_MAX                ((1U << 10) - 1)
@@ -565,7 +565,7 @@ struct bch_extent_crc128 {
                                type:4;
 #endif
        struct bch_csum         csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define CRC128_SIZE_MAX                (1U << 13)
 #define CRC128_NONCE_MAX       ((1U << 13) - 1)
@@ -578,7 +578,7 @@ struct bch_extent_ptr {
        __u64                   type:1,
                                cached:1,
                                unused:1,
-                               reservation:1,
+                               unwritten:1,
                                offset:44, /* 8 petabytes */
                                dev:8,
                                gen:8;
@@ -586,12 +586,12 @@ struct bch_extent_ptr {
        __u64                   gen:8,
                                dev:8,
                                offset:44,
-                               reservation:1,
+                               unwritten:1,
                                unused:1,
                                cached:1,
                                type:1;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_extent_stripe_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
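The previously unused reservation bit in bch_extent_ptr is repurposed as unwritten, marking space that has been allocated but never written (fallocate-style unwritten extents; the matching unwritten_extents metadata version appears further down in this file). The on-disk bit position is unchanged, so existing pointers remain readable. Illustratively, using the struct as defined above, a reader can synthesize zeroes for such a pointer instead of issuing I/O:

static inline bool ptr_is_unwritten(const struct bch_extent_ptr *ptr)
{
        return ptr->unwritten;  /* no data on disk yet: treat as zeroes */
}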
@@ -643,7 +643,7 @@ struct bch_btree_ptr {
 
        __u64                   _data[0];
        struct bch_extent_ptr   start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_btree_ptr_v2 {
        struct bch_val          v;
@@ -655,7 +655,7 @@ struct bch_btree_ptr_v2 {
        struct bpos             min_key;
        __u64                   _data[0];
        struct bch_extent_ptr   start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
 
@@ -664,7 +664,7 @@ struct bch_extent {
 
        __u64                   _data[0];
        union bch_extent_entry  start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_reservation {
        struct bch_val          v;
@@ -672,7 +672,7 @@ struct bch_reservation {
        __le32                  generation;
        __u8                    nr_replicas;
        __u8                    pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Maximum size (in u64s) a single pointer could be: */
 #define BKEY_EXTENT_PTR_U64s_MAX\
@@ -706,7 +706,7 @@ struct bch_inode {
        __le32                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_inode_v2 {
        struct bch_val          v;
@@ -716,7 +716,7 @@ struct bch_inode_v2 {
        __le64                  bi_flags;
        __le16                  bi_mode;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_inode_v3 {
        struct bch_val          v;
@@ -728,7 +728,7 @@ struct bch_inode_v3 {
        __le64                  bi_size;
        __le64                  bi_version;
        __u8                    fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define INODEv3_FIELDS_START_INITIAL   6
 #define INODEv3_FIELDS_START_CUR       (offsetof(struct bch_inode_v3, fields) / sizeof(u64))
@@ -738,7 +738,7 @@ struct bch_inode_generation {
 
        __le32                  bi_generation;
        __le32                  pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /*
  * bi_subvol and bi_parent_subvol are only set for subvolume roots:
@@ -794,7 +794,8 @@ struct bch_inode_generation {
        x(bi_dir,                       64)     \
        x(bi_dir_offset,                64)     \
        x(bi_subvol,                    32)     \
-       x(bi_parent_subvol,             32)
+       x(bi_parent_subvol,             32)     \
+       x(bi_nocow,                     8)
 
 /* subset of BCH_INODE_FIELDS */
 #define BCH_INODE_OPTS()                       \
@@ -806,7 +807,8 @@ struct bch_inode_generation {
        x(promote_target,               16)     \
        x(foreground_target,            16)     \
        x(background_target,            16)     \
-       x(erasure_code,                 16)
+       x(erasure_code,                 16)     \
+       x(nocow,                        8)
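nocow becomes both an on-disk inode field (bi_nocow) and an inode option, alongside the filesystem-wide BCH_SB_NOCOW superblock bit added below. A hedged sketch of the usual per-inode option fallback, with names assumed from the existing opts machinery rather than taken from this diff:

static inline bool inode_opt_nocow(struct bch_fs *c,
                                   struct bch_inode_unpacked *inode)
{
        /* per-inode setting wins; otherwise fall back to the fs default */
        return inode->bi_nocow ?: BCH_SB_NOCOW(c->disk_sb.sb);
}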
 
 enum inode_opt_id {
 #define x(name, ...)                           \
@@ -891,7 +893,7 @@ struct bch_dirent {
        __u8                    d_type;
 
        __u8                    d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define DT_SUBVOL      16
 #define BCH_DT_MAX     17
@@ -914,7 +916,7 @@ struct bch_xattr {
        __u8                    x_name_len;
        __le16                  x_val_len;
        __u8                    x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Bucket/allocation information: */
 
@@ -923,7 +925,7 @@ struct bch_alloc {
        __u8                    fields;
        __u8                    gen;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_FIELDS_V1()                  \
        x(read_time,            16)             \
@@ -948,7 +950,7 @@ struct bch_alloc_v2 {
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_FIELDS_V2()                  \
        x(read_time,            64)             \
@@ -967,7 +969,7 @@ struct bch_alloc_v3 {
        __u8                    oldest_gen;
        __u8                    data_type;
        __u8                    data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
 LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
@@ -985,7 +987,7 @@ struct bch_alloc_v4 {
        __u64                   io_time[2];
        __u32                   stripe;
        __u32                   nr_external_backpointers;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_ALLOC_V4_U64s_V0   6
 #define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(u64))
@@ -1005,7 +1007,7 @@ struct bch_backpointer {
        __u64                   bucket_offset:40;
        __u32                   bucket_len;
        struct bpos             pos;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Quotas: */
 
@@ -1030,7 +1032,7 @@ struct bch_quota_counter {
 struct bch_quota {
        struct bch_val          v;
        struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Erasure coding */
 
@@ -1046,7 +1048,7 @@ struct bch_stripe {
        __u8                    pad;
 
        struct bch_extent_ptr   ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* Reflink: */
 
@@ -1063,14 +1065,14 @@ struct bch_reflink_p {
         */
        __le32                  front_pad;
        __le32                  back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_reflink_v {
        struct bch_val          v;
        __le64                  refcount;
        union bch_extent_entry  start[0];
        __u64                   _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_indirect_inline_data {
        struct bch_val          v;
@@ -1127,7 +1129,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags,  1,  2)
 struct bch_lru {
        struct bch_val          v;
        __le64                  idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define LRU_ID_STRIPES         (1U << 16)
 
@@ -1326,19 +1328,19 @@ struct bch_replicas_entry_v0 {
        __u8                    data_type;
        __u8                    nr_devs;
        __u8                    devs[];
-} __attribute__((packed));
+} __packed;
 
 struct bch_sb_field_replicas_v0 {
        struct bch_sb_field     field;
        struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_replicas_entry {
        __u8                    data_type;
        __u8                    nr_devs;
        __u8                    nr_required;
        __u8                    devs[];
-} __attribute__((packed));
+} __packed;
 
 #define replicas_entry_bytes(_i)                                       \
        (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
@@ -1346,7 +1348,7 @@ struct bch_replicas_entry {
 struct bch_sb_field_replicas {
        struct bch_sb_field     field;
        struct bch_replicas_entry entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_quota: */
 
@@ -1363,7 +1365,7 @@ struct bch_sb_quota_type {
 struct bch_sb_field_quota {
        struct bch_sb_field             field;
        struct bch_sb_quota_type        q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_disk_groups: */
 
@@ -1372,7 +1374,7 @@ struct bch_sb_field_quota {
 struct bch_disk_group {
        __u8                    label[BCH_SB_LABEL_SIZE];
        __le64                  flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
@@ -1381,7 +1383,7 @@ LE64_BITMASK(BCH_GROUP_PARENT,            struct bch_disk_group, flags[0], 6, 24)
 struct bch_sb_field_disk_groups {
        struct bch_sb_field     field;
        struct bch_disk_group   entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /* BCH_SB_FIELD_counters */
 
@@ -1543,7 +1545,8 @@ struct bch_sb_field_journal_seq_blacklist {
        x(alloc_v4,                     20)             \
        x(new_data_types,               21)             \
        x(backpointers,                 22)             \
-       x(inode_v3,                     23)
+       x(inode_v3,                     23)             \
+       x(unwritten_extents,            24)
 
 enum bcachefs_metadata_version {
        bcachefs_metadata_version_min = 9,
@@ -1565,7 +1568,7 @@ struct bch_sb_layout {
        __u8                    nr_superblocks;
        __u8                    pad[5];
        __le64                  sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #define BCH_SB_LAYOUT_SECTOR   7
 
@@ -1616,7 +1619,7 @@ struct bch_sb {
                struct bch_sb_field start[0];
                __le64          _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 /*
  * Flags:
@@ -1692,6 +1695,7 @@ LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
 LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
 /* Obsolete, always enabled: */
 LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW,             struct bch_sb, flags[4], 33, 34);
 
 /*
  * Features:
@@ -1899,6 +1903,7 @@ enum bch_compression_opts {
 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
 {
        __le64 ret;
+
        memcpy(&ret, &sb->uuid, sizeof(ret));
        return ret;
 }
@@ -1973,26 +1978,26 @@ enum {
 struct jset_entry_usage {
        struct jset_entry       entry;
        __le64                  v;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_data_usage {
        struct jset_entry       entry;
        __le64                  v;
        struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_clock {
        struct jset_entry       entry;
        __u8                    rw;
        __u8                    pad[7];
        __le64                  time;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_dev_usage_type {
        __le64                  buckets;
        __le64                  sectors;
        __le64                  fragmented;
-} __attribute__((packed));
+} __packed;
 
 struct jset_entry_dev_usage {
        struct jset_entry       entry;
@@ -2003,7 +2008,7 @@ struct jset_entry_dev_usage {
        __le64                  _buckets_unavailable; /* No longer used */
 
        struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
 
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
@@ -2014,7 +2019,7 @@ static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage
 struct jset_entry_log {
        struct jset_entry       entry;
        u8                      d[];
-} __attribute__((packed));
+} __packed;
 
 /*
  * On disk format for a journal entry:
@@ -2049,7 +2054,7 @@ struct jset {
                struct jset_entry start[0];
                __u64           _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(JSET_CSUM_TYPE,   struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN,  struct jset, flags, 4, 5);
@@ -2112,7 +2117,7 @@ struct bset {
                struct bkey_packed start[0];
                __u64           _data[0];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE32_BITMASK(BSET_CSUM_TYPE,   struct bset, flags, 0, 4);
 
@@ -2145,7 +2150,7 @@ struct btree_node {
 
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 LE64_BITMASK(BTREE_NODE_ID,    struct btree_node, flags,  0,  4);
 LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags,  4,  8);
@@ -2166,6 +2171,6 @@ struct btree_node_entry {
 
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 #endif /* _BCACHEFS_FORMAT_H */
index b2edabf58260d4ea1e312aae44cfee68b25fa810..ad47a506a907651bf1ead3bfa0c29c3efb04cc05 100644 (file)
@@ -208,7 +208,7 @@ struct bch_ioctl_data {
                __u64           pad[8];
        };
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 enum bch_data_event {
        BCH_DATA_EVENT_PROGRESS = 0,
@@ -224,7 +224,7 @@ struct bch_ioctl_data_progress {
 
        __u64                   sectors_done;
        __u64                   sectors_total;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_ioctl_data_event {
        __u8                    type;
@@ -233,12 +233,12 @@ struct bch_ioctl_data_event {
        struct bch_ioctl_data_progress p;
        __u64                   pad2[15];
        };
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 struct bch_replicas_usage {
        __u64                   sectors;
        struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 
 static inline struct bch_replicas_usage *
 replicas_usage_next(struct bch_replicas_usage *u)
index f7e5d0c377eb0cc6443244de8110c2d6423c752f..630df060fbe934b5ff8d936eed91b13b792f8f81 100644 (file)
@@ -17,9 +17,6 @@
 
 const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
 
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
-                             const struct bkey_packed *);
-
 void bch2_bkey_packed_to_binary_text(struct printbuf *out,
                                     const struct bkey_format *f,
                                     const struct bkey_packed *k)
index 14d910a3077ffa49a6522423563bc1de0df632fc..6939d74d705e00b05dee159b3ae1969b3882c6cb 100644 (file)
@@ -29,13 +29,13 @@ static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_deleted (struct bkey_ops) {      \
+#define bch2_bkey_ops_deleted ((struct bkey_ops) {     \
        .key_invalid = deleted_key_invalid,             \
-}
+})
 
-#define bch2_bkey_ops_whiteout (struct bkey_ops) {     \
+#define bch2_bkey_ops_whiteout ((struct bkey_ops) {    \
        .key_invalid = deleted_key_invalid,             \
-}
+})
 
 static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                 int rw, struct printbuf *err)
@@ -49,9 +49,9 @@ static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_error (struct bkey_ops) {                \
+#define bch2_bkey_ops_error ((struct bkey_ops) {       \
        .key_invalid = empty_val_key_invalid,           \
-}
+})
 
 static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                   int rw, struct printbuf *err)
@@ -65,13 +65,13 @@ static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
-#define bch2_bkey_ops_cookie (struct bkey_ops) {       \
+#define bch2_bkey_ops_cookie ((struct bkey_ops) {      \
        .key_invalid = key_type_cookie_invalid,         \
-}
+})
 
-#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) {        \
+#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
        .key_invalid = empty_val_key_invalid,           \
-}
+})
 
 static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                        int rw, struct printbuf *err)
@@ -89,10 +89,10 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
               datalen, min(datalen, 32U), d.v->data);
 }
 
-#define bch2_bkey_ops_inline_data (struct bkey_ops) {  \
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
        .key_invalid    = key_type_inline_data_invalid, \
        .val_to_text    = key_type_inline_data_to_text, \
-}
+})
 
 static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                int rw, struct printbuf *err)
@@ -112,10 +112,10 @@ static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_
        return true;
 }
 
-#define bch2_bkey_ops_set (struct bkey_ops) {          \
+#define bch2_bkey_ops_set ((struct bkey_ops) {         \
        .key_invalid    = key_type_set_invalid,         \
        .key_merge      = key_type_set_merge,           \
-}
+})
 
 const struct bkey_ops bch2_bkey_ops[] = {
 #define x(name, nr) [KEY_TYPE_##name]  = bch2_bkey_ops_##name,
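The extra parentheses around these compound literals are not cosmetic: braces, unlike parentheses, do not group for the C preprocessor, so an ops macro containing a comma could previously be split into multiple arguments if it was ever passed through another macro. A standalone demonstration:

#include <stdio.h>

struct ops { int a, b; };

#define OPS_OLD  (struct ops) { .a = 1, .b = 2 }        /* old style */
#define OPS_NEW  ((struct ops) { .a = 1, .b = 2 })      /* new style */

#define IDENTITY(x) x

int main(void)
{
        /*
         * IDENTITY(OPS_OLD) fails to compile: the comma between .a and
         * .b splits the expansion into two macro arguments. OPS_NEW
         * passes through as a single argument.
         */
        struct ops o = IDENTITY(OPS_NEW);

        printf("%d %d\n", o.a, o.b);
        return 0;
}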
@@ -440,6 +440,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
                    btree_id == BTREE_ID_inodes) {
                        if (!bkey_packed(k)) {
                                struct bkey_i *u = packed_to_bkey(k);
+
                                swap(u->k.p.inode, u->k.p.offset);
                        } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
                                   f->bits_per_field[BKEY_FIELD_OFFSET]) {
index db894b40d2ca4180e1e91f398cc3c7021fc68491..4739b3c32cff6df783dfcf16e6d219d310f08f28 100644 (file)
@@ -18,7 +18,7 @@ extern const char * const bch2_bkey_types[];
  *
  * When invalid, error string is returned via @err. @rw indicates whether key is
  * being read or written; more aggressive checks can be enabled when rw == WRITE.
-*/
+ */
 struct bkey_ops {
        int             (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
                                       int rw, struct printbuf *err);
index 8518054a23817cbf06bd6b8c371162096995fabb..557a79cad98670b5f753f6e7d71372b4418cf7a8 100644 (file)
@@ -144,6 +144,8 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
                else
                        bch2_bkey_unpack(src, (void *) out, in);
 
+               out->needs_whiteout = false;
+
                btree_keys_account_key_add(&nr, 0, out);
                out = bkey_next(out);
        }
@@ -178,7 +180,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
                        continue;
 
                while ((next = sort_iter_peek(iter)) &&
-                      !bch2_bkey_cmp_packed(iter->b, in, next)) {
+                      !bch2_bkey_cmp_packed_inlined(iter->b, in, next)) {
                        BUG_ON(in->needs_whiteout &&
                               next->needs_whiteout);
                        needs_whiteout |= in->needs_whiteout;
index 72e6376bce2af705ee8519abbd3af3202b2cbd53..acef143091d045cc24bd930bd370bc5a2783ad73 100644 (file)
@@ -432,6 +432,11 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
                                                struct btree *,
                                                struct bkey *);
 
+#define for_each_btree_node_key(b, k, iter)                            \
+       for (bch2_btree_node_iter_init_from_start((iter), (b));         \
+            (k = bch2_btree_node_iter_peek((iter), (b)));              \
+            bch2_btree_node_iter_advance(iter, b))
+
 #define for_each_btree_node_key_unpack(b, k, iter, unpacked)           \
        for (bch2_btree_node_iter_init_from_start((iter), (b));         \
             (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
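The new for_each_btree_node_key walks the packed keys of a node directly, where the existing _unpack variant below it decodes each key into a caller-supplied struct bkey. Typical use, sketched from the macro's expansion:

struct btree_node_iter iter;
struct bkey_packed *k;

for_each_btree_node_key(b, k, &iter) {
        /* k is a struct bkey_packed *; unpack only if the full key is needed */
}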
index 8dd2db4121a6b57db2a2c20fa4c9f3e0ad781428..2ca0a9d8226b1610f3b842f21a1d035f61685728 100644 (file)
@@ -118,7 +118,9 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
 static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 {
-       struct btree *b = kzalloc(sizeof(struct btree), gfp);
+       struct btree *b;
+
+       b = kzalloc(sizeof(struct btree), gfp);
        if (!b)
                return NULL;
 
@@ -136,7 +138,9 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 {
        struct btree_cache *bc = &c->btree_cache;
-       struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL);
+       struct btree *b;
+
+       b = __btree_node_mem_alloc(c, GFP_KERNEL);
        if (!b)
                return NULL;
 
@@ -155,6 +159,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 {
        int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
        BUG_ON(ret);
 
        /* Cause future lookups for this node to fail: */
@@ -275,9 +280,11 @@ wait_on_io:
                 * the post write cleanup:
                 */
                if (bch2_verify_btree_ondisk)
-                       bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
+                       bch2_btree_node_write(c, b, SIX_LOCK_intent,
+                                             BTREE_WRITE_cache_reclaim);
                else
-                       __bch2_btree_node_write(c, b, 0);
+                       __bch2_btree_node_write(c, b,
+                                               BTREE_WRITE_cache_reclaim);
 
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
@@ -384,7 +391,7 @@ restart:
                           six_trylock_read(&b->c.lock)) {
                        list_move(&bc->live, &b->list);
                        mutex_unlock(&bc->lock);
-                       __bch2_btree_node_write(c, b, 0);
+                       __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
                        six_unlock_read(&b->c.lock);
                        if (touched >= nr)
                                goto out_nounlock;
@@ -1113,7 +1120,7 @@ wait_on_io:
        btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
 
        if (btree_node_dirty(b)) {
-               __bch2_btree_node_write(c, b, 0);
+               __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
                goto wait_on_io;
index 801a09f6fc1141f595cfc89b2cc4b151fbb85978..20e804ecb104727bc3d6bccfc8ae59cc5c5d407c 100644 (file)
@@ -199,7 +199,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
        struct bkey_i_btree_ptr_v2 *new;
        int ret;
 
-       new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+       new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -228,7 +228,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
        if (ret)
                return ret;
 
-       new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+       new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
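kmalloc_array() is the overflow-checked form of the open-coded multiplication; its behaviour is roughly (sketch, after linux/overflow.h):

static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t gfp)
{
        size_t bytes;

        if (check_mul_overflow(n, size, &bytes))
                return NULL;    /* fail instead of allocating a wrapped-around size */
        return kmalloc(bytes, gfp);
}

Here the count is a compile-time constant, so this is about style and staying warning-clean rather than a real overflow risk.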
 
index dd6b536ced6a918ebd1152da352d05609c9658a6..48f213fcf8930a64d8b468914a8551bcbd3604c4 100644 (file)
@@ -450,6 +450,24 @@ void bch2_btree_build_aux_trees(struct btree *b)
                                t == bset_tree_last(b));
 }
 
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+       unsigned mid_u64s_bits =
+               (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+       return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
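Working in log space, the threshold is the arithmetic mean of the exponents, i.e. the geometric mean of the sizes: if, say, btree_max_u64s(c) is about 2^15 and BTREE_WRITE_SET_U64s_BITS is 9 (values assumed here purely for illustration), the middle bset triggers a full compaction once it exceeds 2^12 u64s.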
+
 /*
  * @bch_btree_init_next - initialize a new (unwritten) bset that can then be
  * inserted into
@@ -467,19 +485,14 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 
        EBUG_ON(!(b->c.lock.state.seq & 1));
        BUG_ON(bset_written(b, bset(b, &b->set[1])));
+       BUG_ON(btree_node_just_written(b));
 
        if (b->nsets == MAX_BSETS &&
-           !btree_node_write_in_flight(b)) {
-               unsigned log_u64s[] = {
-                       ilog2(bset_u64s(&b->set[0])),
-                       ilog2(bset_u64s(&b->set[1])),
-                       ilog2(bset_u64s(&b->set[2])),
-               };
-
-               if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
-                       bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
-                       reinit_iter = true;
-               }
+           !btree_node_write_in_flight(b) &&
+           should_compact_all(c, b)) {
+               bch2_btree_node_write(c, b, SIX_LOCK_write,
+                                     BTREE_WRITE_init_next_bset);
+               reinit_iter = true;
        }
 
        if (b->nsets == MAX_BSETS &&
@@ -1216,6 +1229,7 @@ static void btree_node_read_endio(struct bio *bio)
 
        if (rb->have_ioref) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
                bch2_latency_acct(ca, rb->start_time, READ);
        }
 
@@ -1403,6 +1417,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
 
        if (rb->have_ioref) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
                bch2_latency_acct(ca, rb->start_time, READ);
        }
 
@@ -1626,6 +1641,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
        struct btree_write *w = btree_prev_write(b);
        unsigned long old, new, v;
+       unsigned type = 0;
 
        bch2_btree_complete_write(c, b, w);
 
@@ -1644,6 +1660,9 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
                        new |=  (1U << BTREE_NODE_write_in_flight_inner);
                        new |=  (1U << BTREE_NODE_just_written);
                        new ^=  (1U << BTREE_NODE_write_idx);
+
+                       type = new & BTREE_WRITE_TYPE_MASK;
+                       new &= ~BTREE_WRITE_TYPE_MASK;
                } else {
                        new &= ~(1U << BTREE_NODE_write_in_flight);
                        new &= ~(1U << BTREE_NODE_write_in_flight_inner);
@@ -1651,7 +1670,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
        } while ((v = cmpxchg(&b->flags, old, new)) != old);
 
        if (new & (1U << BTREE_NODE_write_in_flight))
-               __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
+               __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
        else
                wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
 }
@@ -1783,7 +1802,8 @@ static void btree_write_submit(struct work_struct *work)
        bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
                ptr->offset += wbio->sector_offset;
 
-       bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
+       bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+                                 &tmp.k, false);
 }
 
 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
@@ -1800,6 +1820,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
        bool used_mempool;
        unsigned long old, new;
        bool validate_before_checksum = false;
+       enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
        void *data;
        int ret;
 
@@ -1835,6 +1856,10 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
                if (old & (1 << BTREE_NODE_write_in_flight))
                        return;
 
+               if (flags & BTREE_WRITE_ONLY_IF_NEED)
+                       type = new & BTREE_WRITE_TYPE_MASK;
+               new &= ~BTREE_WRITE_TYPE_MASK;
+
                new &= ~(1 << BTREE_NODE_dirty);
                new &= ~(1 << BTREE_NODE_need_write);
                new |=  (1 << BTREE_NODE_write_in_flight);
@@ -1846,6 +1871,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
        if (new & (1U << BTREE_NODE_need_write))
                return;
 do_write:
+       BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
        atomic_dec(&c->btree_cache.dirty);
 
        BUG_ON(btree_node_fake(b));
@@ -2020,8 +2047,8 @@ do_write:
                bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
                        cpu_to_le16(b->written);
 
-       atomic64_inc(&c->btree_writes_nr);
-       atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+       atomic64_inc(&c->btree_write_stats[type].nr);
+       atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
 
        INIT_WORK(&wbio->work, btree_write_submit);
        queue_work(c->io_complete_wq, &wbio->work);
@@ -2149,3 +2176,33 @@ bool bch2_btree_flush_all_writes(struct bch_fs *c)
 {
        return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
 }
+
+const char * const bch2_btree_write_types[] = {
+#define x(t, n) [n] = #t,
+       BCH_BTREE_WRITE_TYPES()
+       NULL
+};
+
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       printbuf_tabstop_push(out, 20);
+       printbuf_tabstop_push(out, 10);
+
+       prt_tab(out);
+       prt_str(out, "nr");
+       prt_tab(out);
+       prt_str(out, "size");
+       prt_newline(out);
+
+       for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+               u64 nr          = atomic64_read(&c->btree_write_stats[i].nr);
+               u64 bytes       = atomic64_read(&c->btree_write_stats[i].bytes);
+
+               prt_printf(out, "%s:", bch2_btree_write_types[i]);
+               prt_tab(out);
+               prt_u64(out, nr);
+               prt_tab(out);
+               prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+               prt_newline(out);
+       }
+}
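This new sysfs hook replaces the two global counters removed from bch_fs with a per-write-type breakdown; note the "size" column is the mean bytes per write (bytes / nr), printed human-readable. With the tabstops above, the output might render along these lines (numbers invented):

                    nr        size
initial:            7         48.5k
init_next_bset:     12        16.0k
cache_reclaim:      3         4.0k
journal_reclaim:    28        8.0k
interior:           5         24.0k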
index 8af853642123df33276aad4cf1bad547001e7e6a..4b1810ad7d912dd15f28498256b061ed57a86ece 100644 (file)
@@ -139,8 +139,12 @@ void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
-#define BTREE_WRITE_ONLY_IF_NEED       (1U << 0)
-#define BTREE_WRITE_ALREADY_STARTED    (1U << 1)
+enum btree_write_flags {
+       __BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
+       __BTREE_WRITE_ALREADY_STARTED,
+};
+#define BTREE_WRITE_ONLY_IF_NEED       (1U << __BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED    (1U << __BTREE_WRITE_ALREADY_STARTED)
 
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
@@ -219,4 +223,6 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
                bn->min_key = bpos_nosnap_successor(bn->min_key);
 }
 
+void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_BTREE_IO_H */
index d18346a5d58d066bafd86509c2aeeb61d6ab41ed..b6a761dba4d06d6246c4e0595cfb019413e1d4a3 100644 (file)
@@ -214,6 +214,7 @@ err:
 
        if (p) {
                struct bkey uk = bkey_unpack_key(l->b, p);
+
                bch2_bkey_to_text(&buf2, &uk);
        } else {
                prt_printf(&buf2, "(none)");
@@ -221,6 +222,7 @@ err:
 
        if (k) {
                struct bkey uk = bkey_unpack_key(l->b, k);
+
                bch2_bkey_to_text(&buf3, &uk);
        } else {
                prt_printf(&buf3, "(none)");
@@ -644,9 +646,9 @@ static inline void __btree_path_level_init(struct btree_path *path,
                bch2_btree_node_iter_peek(&l->iter, l->b);
 }
 
-inline void bch2_btree_path_level_init(struct btree_trans *trans,
-                                      struct btree_path *path,
-                                      struct btree *b)
+void bch2_btree_path_level_init(struct btree_trans *trans,
+                               struct btree_path *path,
+                               struct btree *b)
 {
        BUG_ON(path->cached);
 
@@ -1089,7 +1091,7 @@ static int btree_path_traverse_one(struct btree_trans *trans,
                                   unsigned long trace_ip)
 {
        unsigned depth_want = path->level;
-       int ret = trans->restarted;
+       int ret = -((int) trans->restarted);
 
        if (unlikely(ret))
                goto out;
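trans->restarted holds a positive private errcode (a BCH_ERR_transaction_restart_* value) naming why the transaction must restart; negating it here makes the traversal's early return follow the usual negative-error convention instead of handing callers a positive value.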
@@ -1170,11 +1172,10 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
                btree_path_traverse_one(trans, path, flags, _RET_IP_);
 }
 
-static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
+static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
                            struct btree_path *src)
 {
        unsigned i, offset = offsetof(struct btree_path, pos);
-       int cmp = btree_path_cmp(dst, src);
 
        memcpy((void *) dst + offset,
               (void *) src + offset,
@@ -1186,9 +1187,6 @@ static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
                if (t != BTREE_NODE_UNLOCKED)
                        six_lock_increment(&dst->l[i].b->c.lock, t);
        }
-
-       if (cmp)
-               bch2_btree_path_check_sort_fast(trans, dst, cmp);
 }
 
 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
@@ -1201,38 +1199,31 @@ static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btr
        return new;
 }
 
+__flatten
 struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
                         struct btree_path *path, bool intent,
                         unsigned long ip)
 {
-       if (path->ref > 1 || path->preserve) {
-               __btree_path_put(path, intent);
-               path = btree_path_clone(trans, path, intent);
-               path->preserve = false;
+       __btree_path_put(path, intent);
+       path = btree_path_clone(trans, path, intent);
+       path->preserve = false;
 #ifdef CONFIG_BCACHEFS_DEBUG
-               path->ip_allocated = ip;
+       path->ip_allocated = ip;
 #endif
-               btree_trans_verify_sorted(trans);
-       }
-
-       path->should_be_locked = false;
+       btree_trans_verify_sorted(trans);
        return path;
 }
 
 struct btree_path * __must_check
-bch2_btree_path_set_pos(struct btree_trans *trans,
+__bch2_btree_path_set_pos(struct btree_trans *trans,
                   struct btree_path *path, struct bpos new_pos,
-                  bool intent, unsigned long ip)
+                  bool intent, unsigned long ip, int cmp)
 {
-       int cmp = bpos_cmp(new_pos, path->pos);
        unsigned l = path->level;
 
        EBUG_ON(trans->restarted);
        EBUG_ON(!path->ref);
 
-       if (!cmp)
-               return path;
-
        path = bch2_btree_path_make_mut(trans, path, intent, ip);
 
        path->pos = new_pos;
@@ -1556,7 +1547,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
        return path;
 }
 
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
 {
 
        struct btree_path_level *l = path_l(path);
@@ -1801,7 +1792,8 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
        if (bpos_cmp(start_pos, iter->journal_pos) < 0)
                iter->journal_idx = 0;
 
-       k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
+       k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
+                                       iter->path->level,
                                        start_pos, end_pos,
                                        &iter->journal_idx);
 
@@ -1823,7 +1815,7 @@ struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
 {
        struct bkey_i *next_journal =
                bch2_btree_journal_peek(trans, iter, iter->path->pos,
-                               k.k ? k.k->p : iter->path->l[0].b->key.k.p);
+                               k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
 
        if (next_journal) {
                iter->k = next_journal->k;
@@ -2537,6 +2529,18 @@ static inline void btree_path_swap(struct btree_trans *trans,
        btree_path_verify_sorted_ref(trans, r);
 }
 
+static inline struct btree_path *sib_btree_path(struct btree_trans *trans,
+                                               struct btree_path *path, int sib)
+{
+       unsigned idx = (unsigned) path->sorted_idx + sib;
+
+       EBUG_ON(sib != -1 && sib != 1);
+
+       return idx < trans->nr_sorted
+               ? trans->paths + trans->sorted[idx]
+               : NULL;
+}
+
 static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *trans,
                                                   struct btree_path *path,
                                                   int cmp)
@@ -2546,9 +2550,7 @@ static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *
 
        EBUG_ON(!cmp);
 
-       while ((n = cmp < 0
-               ? prev_btree_path(trans, path)
-               : next_btree_path(trans, path)) &&
+       while ((n = sib_btree_path(trans, path, cmp)) &&
               (cmp2 = btree_path_cmp(n, path)) &&
               cmp2 != cmp)
                btree_path_swap(trans, n, path);
@@ -2902,7 +2904,7 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
        bch2_trans_alloc_paths(trans, c);
 
        s = btree_trans_stats(trans);
-       if (s) {
+       if (s && s->max_mem) {
                unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
 
                trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
@@ -2913,9 +2915,9 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
                } else {
                        trans->mem_bytes = expected_mem_bytes;
                }
-
-               trans->nr_max_paths = s->nr_max_paths;
        }
+       if (s)
+               trans->nr_max_paths = s->nr_max_paths;
 
        trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
 
index 0775cfa2be9a37a0a042bd56f52217fc3e5d0ffc..8ed5aee2d6a0b66627060048e2e6160c3b320bf3 100644 (file)
@@ -146,19 +146,31 @@ bch2_btree_path_make_mut(struct btree_trans *trans,
 }
 
 struct btree_path * __must_check
-bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
-                       struct bpos, bool, unsigned long);
+__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
+                       struct bpos, bool, unsigned long, int);
+
+static inline struct btree_path * __must_check
+bch2_btree_path_set_pos(struct btree_trans *trans,
+                  struct btree_path *path, struct bpos new_pos,
+                  bool intent, unsigned long ip)
+{
+       int cmp = bpos_cmp(new_pos, path->pos);
+
+       return cmp
+               ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
+               : path;
+}
+
 int __must_check bch2_btree_path_traverse(struct btree_trans *,
                                          struct btree_path *, unsigned);
 struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
                                 unsigned, unsigned, unsigned, unsigned long);
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 
 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
                                        struct btree_iter *, struct bpos);
 
-inline void bch2_btree_path_level_init(struct btree_trans *,
-                                      struct btree_path *, struct btree *);
+void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_trans_verify_paths(struct btree_trans *);
@@ -335,6 +347,11 @@ __btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter
        return b;
 }
 
+/*
+ * XXX
+ * this does not handle transaction restarts from bch2_btree_iter_next_node()
+ * correctly
+ */
 #define __for_each_btree_node(_trans, _iter, _btree_id, _start,                \
                              _locks_want, _depth, _flags, _b, _ret)    \
        for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
@@ -461,11 +478,11 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
                                                                        \
        while (1) {                                                     \
                u32 _restart_count = bch2_trans_begin(_trans);          \
+                                                                       \
+               _ret = 0;                                               \
                (_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));   \
-               if (!(_k).k) {                                          \
-                       _ret = 0;                                       \
+               if (!(_k).k)                                            \
                        break;                                          \
-               }                                                       \
                                                                        \
                _ret = bkey_err(_k) ?: (_do);                           \
                if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
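Hoisting _ret = 0 above the peek resets the macro's result on every pass, so its exit value is well-defined on every path out of the loop rather than only on the NULL-key exit.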
index cd52dd5a2890e44263f7f2925bddb8951cbcba8b..634c67318a8da84cdaca37f6060dcbf42732df8e 100644 (file)
@@ -104,6 +104,7 @@ static void bkey_cached_free(struct btree_key_cache *bc,
        six_unlock_intent(&ck->c.lock);
 }
 
+#ifdef __KERNEL__
 static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
                                                   struct bkey_cached *ck)
 {
@@ -119,17 +120,18 @@ static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
 
        list_move(&ck->list, &bc->freed_nonpcpu);
 }
+#endif
 
 static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
                                         struct bkey_cached *ck)
 {
-       struct btree_key_cache_freelist *f;
-       bool freed = false;
-
        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
 
        if (!ck->c.lock.readers) {
 #ifdef __KERNEL__
+               struct btree_key_cache_freelist *f;
+               bool freed = false;
+
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
 
@@ -188,16 +190,18 @@ static void bkey_cached_free_fast(struct btree_key_cache *bc,
 }
 
 static struct bkey_cached *
-bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
+bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
+                 bool *was_new)
 {
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck = NULL;
-       struct btree_key_cache_freelist *f;
        bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
 
        if (!pcpu_readers) {
 #ifdef __KERNEL__
+               struct btree_key_cache_freelist *f;
+
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
                if (f->nr)
@@ -271,6 +275,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
                ck->c.cached = true;
                BUG_ON(!six_trylock_intent(&ck->c.lock));
                BUG_ON(!six_trylock_write(&ck->c.lock));
+               *was_new = true;
                return ck;
        }
 
@@ -309,9 +314,9 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck;
-       bool was_new = true;
+       bool was_new = false;
 
-       ck = bkey_cached_alloc(trans, path);
+       ck = bkey_cached_alloc(trans, path, &was_new);
        if (IS_ERR(ck))
                return ck;
 
@@ -324,7 +329,6 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                }
 
                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
-               was_new = false;
        } else {
                if (path->btree_id == BTREE_ID_subvolumes)
                        six_lock_pcpu_alloc(&ck->c.lock);
@@ -345,12 +349,12 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                if (likely(was_new)) {
                        six_unlock_write(&ck->c.lock);
                        six_unlock_intent(&ck->c.lock);
-                       mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
                        kfree(ck);
                } else {
                        bkey_cached_free_fast(bc, ck);
                }
 
+               mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
                return NULL;
        }
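bkey_cached_alloc() now reports through *was_new whether it returned a freshly kzalloc'd entry or one recycled from a freelist, instead of btree_key_cache_create() guessing with a default. That matters on the failure path here: a brand-new entry can simply be kfree'd, while a recycled one must go back via bkey_cached_free_fast(), and the btree node lock mark is now cleared in both cases.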
 
index 670746e72dabae9cb3d56a5cbe69360ca4fdbe7d..eccea15fca792614eb0e9fe782224016cea7df28 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_BTREE_KEY_CACHE_H
 #define _BCACHEFS_BTREE_KEY_CACHE_H
 
index 9d090437d8f6a5d6cb373076bdeceeb0a41499bd..dce2dc0cc0c555a7e34873d05d6b0cd4c14cb191 100644 (file)
@@ -173,10 +173,9 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
        }
 
        if (unlikely(!best)) {
-               struct bch_fs *c = g->g->trans->c;
                struct printbuf buf = PRINTBUF;
 
-               bch_err(c, "cycle of nofail locks");
+               prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
 
                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;
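Building the "cycle of nofail locks" report into a single printbuf (note bch2_fmt() supplies the prefix and trailing newline) lets the header and the cycle that follows it be emitted as one unit, rather than interleaving with other CPUs' log output.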
index bf8d1880673b224a09390e911dc50dfff03bb3b5..fb237c95ee13cb5d91e606207461fbfde5c7e2b7 100644 (file)
@@ -296,6 +296,7 @@ static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
                                              struct btree_bkey_cached_common *b)
 {
        int ret = __btree_node_lock_write(trans, path, b, true);
+
        BUG_ON(ret);
 }
 
index 892d1231755164dede6deafd9b61ad65148d9919..6d44b2ea144060e85d499360d44cd276d50fccff 100644 (file)
@@ -326,7 +326,7 @@ struct btree_key_cache {
 struct bkey_cached_key {
        u32                     btree_id;
        struct bpos             pos;
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 
 #define BKEY_CACHED_ACCESSED           0
 #define BKEY_CACHED_DIRTY              1
@@ -456,6 +456,23 @@ struct btree_trans {
        struct replicas_delta_list *fs_usage_deltas;
 };
 
+#define BCH_BTREE_WRITE_TYPES()                                                \
+       x(initial,              0)                                      \
+       x(init_next_bset,       1)                                      \
+       x(cache_reclaim,        2)                                      \
+       x(journal_reclaim,      3)                                      \
+       x(interior,             4)
+
+enum btree_write_type {
+#define x(t, n) BTREE_WRITE_##t,
+       BCH_BTREE_WRITE_TYPES()
+#undef x
+       BTREE_WRITE_TYPE_NR,
+};
+
+#define BTREE_WRITE_TYPE_MASK  (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
+#define BTREE_WRITE_TYPE_BITS  ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
+
 #define BTREE_FLAGS()                                                  \
        x(read_in_flight)                                               \
        x(read_error)                                                   \
@@ -475,6 +492,8 @@ struct btree_trans {
        x(never_write)
 
 enum btree_flags {
+       /* First bits for btree node write type */
+       BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
 #define x(flag)        BTREE_NODE_##flag,
        BTREE_FLAGS()
 #undef x
index 40debf7563f8d5fdecd6185c3823d7dfda72b1cd..d4f1920ccf94ef662c0d336a6b9716895a03214a 100644 (file)
@@ -246,6 +246,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
        struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
        unsigned nr_reserve;
        enum alloc_reserve alloc_reserve;
+       int ret;
 
        if (flags & BTREE_INSERT_USE_RESERVE) {
                nr_reserve      = 0;
@@ -268,7 +269,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
        mutex_unlock(&c->btree_reserve_cache_lock);
 
 retry:
-       wp = bch2_alloc_sectors_start_trans(trans,
+       ret = bch2_alloc_sectors_start_trans(trans,
                                      c->opts.metadata_target ?:
                                      c->opts.foreground_target,
                                      0,
@@ -276,9 +277,9 @@ retry:
                                      &devs_have,
                                      res->nr_replicas,
                                      c->opts.metadata_replicas_required,
-                                     alloc_reserve, 0, cl);
-       if (IS_ERR(wp))
-               return ERR_CAST(wp);
+                                     alloc_reserve, 0, cl, &wp);
+       if (unlikely(ret))
+               return ERR_PTR(ret);
 
        if (wp->sectors_free < btree_sectors(c)) {
                struct open_bucket *ob;
@@ -377,14 +378,19 @@ static void btree_set_max(struct btree *b, struct bpos pos)
        b->data->max_key = pos;
 }
 
-struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
-                                                 struct btree_trans *trans,
-                                                 struct btree *b,
-                                                 struct bkey_format format)
+static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
+                                                      struct btree_trans *trans,
+                                                      struct btree *b)
 {
-       struct btree *n;
+       struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
+       struct bkey_format format = bch2_btree_calc_format(b);
 
-       n = bch2_btree_node_alloc(as, trans, b->c.level);
+       /*
+        * The keys might expand with the new format - if they wouldn't fit in
+        * the btree node anymore, use the old format for now:
+        */
+       if (!bch2_btree_node_format_fits(as->c, b, &format))
+               format = b->format;
 
        SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
 
@@ -397,27 +403,9 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
        bch2_btree_sort_into(as->c, n, b);
 
        btree_node_reset_sib_u64s(n);
-
-       n->key.k.p = b->key.k.p;
        return n;
 }
 
-static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
-                                                      struct btree_trans *trans,
-                                                      struct btree *b)
-{
-       struct bkey_format new_f = bch2_btree_calc_format(b);
-
-       /*
-        * The keys might expand with the new format - if they wouldn't fit in
-        * the btree node anymore, use the old format for now:
-        */
-       if (!bch2_btree_node_format_fits(as->c, b, &new_f))
-               new_f = b->format;
-
-       return __bch2_btree_node_alloc_replacement(as, trans, b, new_f);
-}
-
 static struct btree *__btree_root_alloc(struct btree_update *as,
                                struct btree_trans *trans, unsigned level)
 {
@@ -665,7 +653,7 @@ static void btree_update_nodes_written(struct btree_update *as)
        bch2_trans_unlock(&trans);
 
        bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
-                            "error %i in btree_update_nodes_written()", ret);
+                            "%s(): error %s", __func__, bch2_err_str(ret));
 err:
        if (as->b) {
                struct btree_path *path;
@@ -1178,7 +1166,8 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
        }
 
        if (ret) {
-               trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
+               trace_and_count(c, btree_reserve_get_fail, trans->fn,
+                               _RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
                goto err;
        }
 
@@ -1269,6 +1258,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
        struct bch_fs *c = as->c;
        struct bkey_packed *k;
        struct printbuf buf = PRINTBUF;
+       unsigned long old, new, v;
 
        BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
               !btree_ptr_sectors_written(insert));
@@ -1306,7 +1296,15 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
 
        bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
        set_btree_node_dirty_acct(c, b);
-       set_btree_node_need_write(b);
+
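+       /* Set need_write and record this as an interior-update write, atomically: */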
+       v = READ_ONCE(b->flags);
+       do {
+               old = new = v;
+
+               new &= ~BTREE_WRITE_TYPE_MASK;
+               new |= BTREE_WRITE_interior;
+               new |= 1 << BTREE_NODE_need_write;
+       } while ((v = cmpxchg(&b->flags, old, new)) != old);
 
        printbuf_exit(&buf);
 }
@@ -1329,8 +1327,12 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
                ;
 
        while (!bch2_keylist_empty(keys)) {
-               bch2_insert_fixup_btree_ptr(as, trans, path, b,
-                               &node_iter, bch2_keylist_front(keys));
+               struct bkey_i *k = bch2_keylist_front(keys);
+
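+               /* Keys past this node's max key belong to the next split node: */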
+               if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+                       break;
+
+               bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
                bch2_keylist_pop_front(keys);
        }
 }
@@ -1339,109 +1341,91 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
  * node)
  */
-static struct btree *__btree_split_node(struct btree_update *as,
-                                       struct btree_trans *trans,
-                                       struct btree *n1)
+static void __btree_split_node(struct btree_update *as,
+                              struct btree_trans *trans,
+                              struct btree *b,
+                              struct btree *n[2])
 {
-       struct bkey_format_state s;
-       size_t nr_packed = 0, nr_unpacked = 0;
-       struct btree *n2;
-       struct bset *set1, *set2;
-       struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL;
+       struct bkey_packed *k;
        struct bpos n1_pos;
+       struct btree_node_iter iter;
+       struct bset *bsets[2];
+       struct bkey_format_state format[2];
+       struct bkey_packed *out[2];
+       struct bkey uk;
+       unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+       int i;
 
-       n2 = bch2_btree_node_alloc(as, trans, n1->c.level);
+       for (i = 0; i < 2; i++) {
+               BUG_ON(n[i]->nsets != 1);
 
-       n2->data->max_key       = n1->data->max_key;
-       n2->data->format        = n1->format;
-       SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
-       n2->key.k.p = n1->key.k.p;
+               bsets[i] = btree_bset_first(n[i]);
+               out[i] = bsets[i]->start;
 
-       set1 = btree_bset_first(n1);
-       set2 = btree_bset_first(n2);
+               SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
+               bch2_bkey_format_init(&format[i]);
+       }
 
-       /*
-        * Has to be a linear search because we don't have an auxiliary
-        * search tree yet
-        */
-       k = set1->start;
-       while (1) {
-               struct bkey_packed *n = bkey_next(k);
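+       /* First pass: pick the pivot (~3/5 of the u64s go to n[0]) and build each node's key format: */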
+       u64s = 0;
+       for_each_btree_node_key(b, k, &iter) {
+               if (bkey_deleted(k))
+                       continue;
+
+               i = u64s >= n1_u64s;
+               u64s += k->u64s;
+               uk = bkey_unpack_key(b, k);
+               if (!i)
+                       n1_pos = uk.p;
+               bch2_bkey_format_add_key(&format[i], &uk);
+       }
 
-               if (n == vstruct_last(set1))
-                       break;
-               if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
-                       break;
+       btree_set_min(n[0], b->data->min_key);
+       btree_set_max(n[0], n1_pos);
+       btree_set_min(n[1], bpos_successor(n1_pos));
+       btree_set_max(n[1], b->data->max_key);
 
-               if (bkey_packed(k))
-                       nr_packed++;
-               else
-                       nr_unpacked++;
+       for (i = 0; i < 2; i++) {
+               bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
+               bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
 
-               prev = k;
-               k = n;
+               n[i]->data->format = bch2_bkey_format_done(&format[i]);
+               btree_node_set_format(n[i], n[i]->data->format);
        }
 
-       BUG_ON(!prev);
-       set2_start      = k;
-       set2_end        = vstruct_last(set1);
-
-       set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data);
-       set_btree_bset_end(n1, n1->set);
-
-       n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
-       n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
-       n1->nr.packed_keys      = nr_packed;
-       n1->nr.unpacked_keys    = nr_unpacked;
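+       /* Second pass: copy keys to their destination node, repacking into the new format when possible: */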
+       u64s = 0;
+       for_each_btree_node_key(b, k, &iter) {
+               if (bkey_deleted(k))
+                       continue;
 
-       n1_pos = bkey_unpack_pos(n1, prev);
-       if (as->c->sb.version < bcachefs_metadata_version_snapshot)
-               n1_pos.snapshot = U32_MAX;
+               i = u64s >= n1_u64s;
+               u64s += k->u64s;
 
-       btree_set_max(n1, n1_pos);
-       btree_set_min(n2, bpos_successor(n1->key.k.p));
+               if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
+                                       ? &b->format : &bch2_bkey_format_current, k))
+                       out[i]->format = KEY_FORMAT_LOCAL_BTREE;
+               else
+                       bch2_bkey_unpack(b, (void *) out[i], k);
 
-       bch2_bkey_format_init(&s);
-       bch2_bkey_format_add_pos(&s, n2->data->min_key);
-       bch2_bkey_format_add_pos(&s, n2->data->max_key);
+               out[i]->needs_whiteout = false;
 
-       for (k = set2_start; k != set2_end; k = bkey_next(k)) {
-               struct bkey uk = bkey_unpack_key(n1, k);
-               bch2_bkey_format_add_key(&s, &uk);
+               btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
+               out[i] = bkey_next(out[i]);
        }
 
-       n2->data->format = bch2_bkey_format_done(&s);
-       btree_node_set_format(n2, n2->data->format);
-
-       out = set2->start;
-       memset(&n2->nr, 0, sizeof(n2->nr));
-
-       for (k = set2_start; k != set2_end; k = bkey_next(k)) {
-               BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k)
-                                      ? &n1->format : &bch2_bkey_format_current, k));
-               out->format = KEY_FORMAT_LOCAL_BTREE;
-               btree_keys_account_key_add(&n2->nr, 0, out);
-               out = bkey_next(out);
-       }
+       for (i = 0; i < 2; i++) {
+               bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);
 
-       set2->u64s = cpu_to_le16((u64 *) out - set2->_data);
-       set_btree_bset_end(n2, n2->set);
+               BUG_ON(!bsets[i]->u64s);
 
-       BUG_ON(!set1->u64s);
-       BUG_ON(!set2->u64s);
+               set_btree_bset_end(n[i], n[i]->set);
 
-       btree_node_reset_sib_u64s(n1);
-       btree_node_reset_sib_u64s(n2);
+               btree_node_reset_sib_u64s(n[i]);
 
-       bch2_verify_btree_nr_keys(n1);
-       bch2_verify_btree_nr_keys(n2);
+               bch2_verify_btree_nr_keys(n[i]);
 
-       if (n1->c.level) {
-               btree_node_interior_verify(as->c, n1);
-               btree_node_interior_verify(as->c, n2);
+               if (b->c.level)
+                       btree_node_interior_verify(as->c, n[i]);
        }
-
-       return n2;
 }
 
 /*
@@ -1461,41 +1445,17 @@ static void btree_split_insert_keys(struct btree_update *as,
                                    struct btree *b,
                                    struct keylist *keys)
 {
-       struct btree_node_iter node_iter;
-       struct bkey_i *k = bch2_keylist_front(keys);
-       struct bkey_packed *src, *dst, *n;
-       struct bset *i;
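+       /* Only insert keys up to this node's max key; the rest belong to the sibling: */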
+       if (!bch2_keylist_empty(keys) &&
+           bpos_cmp(bch2_keylist_front(keys)->k.p,
+                    b->data->max_key) <= 0) {
+               struct btree_node_iter node_iter;
 
-       bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
+               bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
 
-       __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+               __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
 
-       /*
-        * We can't tolerate whiteouts here - with whiteouts there can be
-        * duplicate keys, and it would be rather bad if we picked a duplicate
-        * for the pivot:
-        */
-       i = btree_bset_first(b);
-       src = dst = i->start;
-       while (src != vstruct_last(i)) {
-               n = bkey_next(src);
-               if (!bkey_deleted(src)) {
-                       memmove_u64s_down(dst, src, src->u64s);
-                       dst = bkey_next(dst);
-               }
-               src = n;
+               btree_node_interior_verify(as->c, b);
        }
-
-       /* Also clear out the unwritten whiteouts area: */
-       b->whiteout_u64s = 0;
-
-       i->u64s = cpu_to_le16((u64 *) dst - i->_data);
-       set_btree_bset_end(b, b->set);
-
-       BUG_ON(b->nsets != 1 ||
-              b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
-
-       btree_node_interior_verify(as->c, b);
 }
 
 static int btree_split(struct btree_update *as, struct btree_trans *trans,
@@ -1514,15 +1474,21 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
 
        bch2_btree_interior_update_will_free_node(as, b);
 
-       n1 = bch2_btree_node_alloc_replacement(as, trans, b);
-
-       if (keys)
-               btree_split_insert_keys(as, trans, path, n1, keys);
+       if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
+               struct btree *n[2];
 
-       if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
                trace_and_count(c, btree_node_split, c, b);
 
-               n2 = __btree_split_node(as, trans, n1);
+               n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
+               n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
+
+               __btree_split_node(as, trans, b, n);
+
+               if (keys) {
+                       btree_split_insert_keys(as, trans, path, n1, keys);
+                       btree_split_insert_keys(as, trans, path, n2, keys);
+                       BUG_ON(!bch2_keylist_empty(keys));
+               }
 
                bch2_btree_build_aux_trees(n2);
                bch2_btree_build_aux_trees(n1);
@@ -1571,6 +1537,13 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
        } else {
                trace_and_count(c, btree_node_compact, c, b);
 
+               n1 = bch2_btree_node_alloc_replacement(as, trans, b);
+
+               if (keys) {
+                       btree_split_insert_keys(as, trans, path, n1, keys);
+                       BUG_ON(!bch2_keylist_empty(keys));
+               }
+
                bch2_btree_build_aux_trees(n1);
                bch2_btree_update_add_new_node(as, n1);
                six_unlock_write(&n1->c.lock);
@@ -1838,10 +1811,10 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
                bch2_bpos_to_text(&buf1, prev->data->max_key);
                bch2_bpos_to_text(&buf2, next->data->min_key);
                bch_err(c,
-                       "btree topology error in btree merge:\n"
+                       "%s(): btree topology error:\n"
                        "  prev ends at   %s\n"
                        "  next starts at %s",
-                       buf1.buf, buf2.buf);
+                       __func__, buf1.buf, buf2.buf);
                printbuf_exit(&buf1);
                printbuf_exit(&buf2);
                bch2_topology_error(c);
index dabe815965445484d2a24c7ab801d7bf0e19049a..2e6d220c3bcd6e005889b22a60361fb34f32c1aa 100644 (file)
@@ -282,6 +282,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
        struct bkey_packed k;
 
        BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+       EBUG_ON(btree_node_just_written(b));
 
        if (!bkey_pack_pos(&k, pos, b)) {
                struct bkey *u = (void *) &k;
index 3a68382013e79620351e0ba17bcd0e45708f4779..05c1b28fd0889a54abac36443cac3744103f1676 100644 (file)
@@ -178,6 +178,8 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
                    w->journal.seq != seq)
                        break;
 
+               new &= ~BTREE_WRITE_TYPE_MASK;
+               new |= BTREE_WRITE_journal_reclaim;
                new |= 1 << BTREE_NODE_need_write;
        } while ((v = cmpxchg(&b->flags, old, new)) != old);
 
@@ -289,7 +291,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
        return 0;
 }
 
-static inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
                                             unsigned flags)
 {
        struct bch_fs *c = trans->c;
@@ -721,33 +723,34 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
        return ret;
 }
 
+static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
+{
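+       /* Unwind: write-unlock the nodes we already locked, newest first: */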
+       while (--i >= trans->updates) {
+               if (same_leaf_as_prev(trans, i))
+                       continue;
+
+               bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
+       }
+
+       trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
+       return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
+}
+
 static inline int trans_lock_write(struct btree_trans *trans)
 {
        struct btree_insert_entry *i;
-       int ret;
 
        trans_for_each_update(trans, i) {
                if (same_leaf_as_prev(trans, i))
                        continue;
 
-               ret = bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c);
-               if (ret)
-                       goto fail;
+               if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
+                       return trans_lock_write_fail(trans, i);
 
                bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
        }
 
        return 0;
-fail:
-       while (--i >= trans->updates) {
-               if (same_leaf_as_prev(trans, i))
-                       continue;
-
-               bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
-       }
-
-       trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
-       return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
 
 static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
@@ -758,6 +761,33 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
                bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
 }
 
+static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
+                                                  struct btree_insert_entry *i,
+                                                  struct printbuf *err)
+{
+       struct bch_fs *c = trans->c;
+       int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
+
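+       /* Report the offending key and why it's invalid, then mark the fs inconsistent: */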
+       printbuf_reset(err);
+       prt_printf(err, "invalid bkey on insert from %s -> %ps",
+                  trans->fn, (void *) i->ip_allocated);
+       prt_newline(err);
+       printbuf_indent_add(err, 2);
+
+       bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
+       prt_newline(err);
+
+       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+                         i->bkey_type, rw, err);
+       bch2_print_string_as_lines(KERN_ERR, err->buf);
+
+       bch2_inconsistent_error(c);
+       bch2_dump_trans_updates(trans);
+       printbuf_exit(err);
+
+       return -EINVAL;
+}
+
 /*
  * Get journal reservation, take write locks, and attempt to do btree update(s):
  */
@@ -772,24 +802,9 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
        int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
 
        trans_for_each_update(trans, i) {
-               if (bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
-                                     i->bkey_type, rw, &buf)) {
-                       printbuf_reset(&buf);
-                       prt_printf(&buf, "invalid bkey on insert from %s -> %ps",
-                              trans->fn, (void *) i->ip_allocated);
-                       prt_newline(&buf);
-                       printbuf_indent_add(&buf, 2);
-
-                       bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
-                       prt_newline(&buf);
-
-                       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
-                                         i->bkey_type, rw, &buf);
-
-                       bch2_trans_inconsistent(trans, "%s", buf.buf);
-                       printbuf_exit(&buf);
-                       return -EINVAL;
-               }
+               if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+                                              i->bkey_type, rw, &buf)))
+                       return bch2_trans_commit_bkey_invalid(trans, i, &buf);
                btree_insert_entry_checks(trans, i);
        }
 
index 116711fc01fb30f501ad206fb9c6a7f70362ca3f..bf01837e13623a302c2a3ef58d791d71b83f861a 100644 (file)
@@ -575,7 +575,8 @@ int bch2_mark_alloc(struct btree_trans *trans,
                                            -((s64) old_a.cached_sectors),
                                            journal_seq, gc);
                if (ret) {
-                       bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
+                       bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+                                           __func__);
                        return ret;
                }
        }
@@ -959,7 +960,8 @@ int bch2_mark_extent(struct btree_trans *trans,
                                ret = update_cached_sectors(c, k, p.ptr.dev,
                                                disk_sectors, journal_seq, true);
                                if (ret) {
-                                       bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
+                                       bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+                                                           __func__);
                                        return ret;
                                }
                        }
@@ -987,7 +989,7 @@ int bch2_mark_extent(struct btree_trans *trans,
                        struct printbuf buf = PRINTBUF;
 
                        bch2_bkey_val_to_text(&buf, c, k);
-                       bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+                       bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
                        printbuf_exit(&buf);
                        return ret;
                }
@@ -1261,23 +1263,24 @@ void fs_usage_apply_warn(struct btree_trans *trans,
        struct btree_insert_entry *i;
        struct printbuf buf = PRINTBUF;
 
-       bch_err(c, "disk usage increased %lli more than %u sectors reserved",
-               should_not_have_added, disk_res_sectors);
+       prt_printf(&buf,
+                  bch2_fmt(c, "disk usage increased %lli more than %u sectors reserved"),
+                  should_not_have_added, disk_res_sectors);
 
        trans_for_each_update(trans, i) {
                struct bkey_s_c old = { &i->old_k, i->old_v };
 
-               pr_err("while inserting");
-               printbuf_reset(&buf);
+               prt_str(&buf, "new ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
-               pr_err("  %s", buf.buf);
-               pr_err("overlapping with");
-               printbuf_reset(&buf);
+               prt_newline(&buf);
+
+               prt_str(&buf, "old ");
                bch2_bkey_val_to_text(&buf, c, old);
-               pr_err("  %s", buf.buf);
+               prt_newline(&buf);
        }
 
        __WARN();
+       bch2_print_string_as_lines(KERN_ERR, buf.buf);
        printbuf_exit(&buf);
 }
 
@@ -1947,7 +1950,7 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 
 #define SECTORS_CACHE  1024
 
-int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                              u64 sectors, int flags)
 {
        struct bch_fs_pcpu *pcpu;
index 56c06ccde14f8a39a19ded9475d6861624f35d63..b6a1db7619ff659666eb385595a0beb7c983daf9 100644 (file)
@@ -259,15 +259,39 @@ int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
 {
-       this_cpu_sub(*c->online_reserved, res->sectors);
-       res->sectors = 0;
+       if (res->sectors) {
+               this_cpu_sub(*c->online_reserved, res->sectors);
+               res->sectors = 0;
+       }
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL            (1 << 0)
 
-int bch2_disk_reservation_add(struct bch_fs *,
-                             struct disk_reservation *,
-                             u64, int);
+int __bch2_disk_reservation_add(struct bch_fs *,
+                               struct disk_reservation *,
+                               u64, int);
+
+static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+                                           u64 sectors, int flags)
+{
+#ifdef __KERNEL__
+       u64 old, new;
+
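+       /* Fast path: take sectors from this CPU's pool, falling back to the locked path when short: */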
+       do {
+               old = this_cpu_read(c->pcpu->sectors_available);
+               if (sectors > old)
+                       return __bch2_disk_reservation_add(c, res, sectors, flags);
+
+               new = old - sectors;
+       } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+
+       this_cpu_add(*c->online_reserved, sectors);
+       res->sectors                    += sectors;
+       return 0;
+#else
+       return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
+}
 
 static inline struct disk_reservation
 bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
index 2e5b955080de46676ca04dc2796a97c55e840f60..0f4ef9e5a431f4f84e80a347196a03f7d71a8946 100644 (file)
@@ -3,6 +3,7 @@
 #include "bcachefs.h"
 #include "buckets_waiting_for_journal.h"
 #include <linux/random.h>
+#include <linux/siphash.h>
 
 static inline struct bucket_hashed *
 bucket_hash(struct buckets_waiting_for_journal_table *t,
@@ -10,7 +11,7 @@ bucket_hash(struct buckets_waiting_for_journal_table *t,
 {
        unsigned h = siphash_1u64(dev_bucket, &t->hash_seeds[hash_seed_idx]);
 
-       BUG_ON(!is_power_of_2(t->size));
+       EBUG_ON(!is_power_of_2(t->size));
 
        return t->d + (h & (t->size - 1));
 }
index 3268e8d48603372f6f6178e346393edb068ede7c..43d22fe8131b00d720ed58702da1696577027f1b 100644 (file)
@@ -316,7 +316,7 @@ struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
        return __bch2_checksum_bio(c, type, nonce, bio, &iter);
 }
 
-int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
                     struct nonce nonce, struct bio *bio)
 {
        struct bio_vec bv;
index c86c3c05d62054a66faffa4c5c2cde81ab5de1e8..409ad534d9f40c23efd94817349ca1eeffb4ff44 100644 (file)
@@ -61,8 +61,16 @@ int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
                        struct bch_extent_crc_unpacked *,
                        unsigned, unsigned, unsigned);
 
-int bch2_encrypt_bio(struct bch_fs *, unsigned,
-                    struct nonce, struct bio *);
+int __bch2_encrypt_bio(struct bch_fs *, unsigned,
+                      struct nonce, struct bio *);
+
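+/* Encrypting is a no-op unless the checksum type implies encryption: */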
+static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+                                  struct nonce nonce, struct bio *bio)
+{
+       return bch2_csum_type_is_encryption(type)
+               ? __bch2_encrypt_bio(c, type, nonce, bio)
+               : 0;
+}
 
 int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
                        struct bch_key *);
@@ -78,27 +86,30 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
 {
        switch (type) {
        case BCH_CSUM_OPT_none:
-            return BCH_CSUM_none;
+               return BCH_CSUM_none;
        case BCH_CSUM_OPT_crc32c:
-            return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
+               return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
        case BCH_CSUM_OPT_crc64:
-            return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
+               return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
        case BCH_CSUM_OPT_xxhash:
-            return BCH_CSUM_xxhash;
+               return BCH_CSUM_xxhash;
        default:
-            BUG();
+               BUG();
        }
 }
 
 static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
-                                                        unsigned opt)
+                                                        struct bch_io_opts opts)
 {
+       if (opts.nocow)
+               return 0;
+
        if (c->sb.encryption_type)
                return c->opts.wide_macs
                        ? BCH_CSUM_chacha20_poly1305_128
                        : BCH_CSUM_chacha20_poly1305_80;
 
-       return bch2_csum_opt_to_type(opt, true);
+       return bch2_csum_opt_to_type(opts.data_checksum, true);
 }
 
 static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
index b75ff07e59210175fd1a18bad2eef5f2bd08018d..301552889ec8898ff550d32deedfb90d2fd691d6 100644 (file)
@@ -97,10 +97,10 @@ static void bch2_bkey_mark_dev_cached(struct bkey_s k, unsigned dev)
                        ptr->cached = true;
 }
 
-static int bch2_data_update_index_update(struct bch_write_op *op)
+static int __bch2_data_update_index_update(struct btree_trans *trans,
+                                          struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
-       struct btree_trans trans;
        struct btree_iter iter;
        struct data_update *m =
                container_of(op, struct data_update, op);
@@ -112,9 +112,7 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
        bch2_bkey_buf_init(&_insert);
        bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
 
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
-       bch2_trans_iter_init(&trans, &iter, m->btree_id,
+       bch2_trans_iter_init(trans, &iter, m->btree_id,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
@@ -131,7 +129,7 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
                s64 i_sectors_delta = 0, disk_sectors_delta = 0;
                unsigned i;
 
-               bch2_trans_begin(&trans);
+               bch2_trans_begin(trans);
 
                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
@@ -202,7 +200,7 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
                bch2_extent_normalize(c, bkey_i_to_s(insert));
 
-               ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
+               ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
                                                 &i_sectors_delta,
                                                 &disk_sectors_delta);
@@ -220,12 +218,12 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
 
                next_pos = insert->k.p;
 
-               ret   = insert_snapshot_whiteouts(&trans, m->btree_id,
+               ret   = insert_snapshot_whiteouts(trans, m->btree_id,
                                                  k.k->p, insert->k.p) ?:
-                       bch2_trans_update(&trans, &iter, insert,
+                       bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
-                       bch2_trans_commit(&trans, &op->res,
-                               op_journal_seq(op),
+                       bch2_trans_commit(trans, &op->res,
+                               NULL,
                                BTREE_INSERT_NOFAIL|
                                m->data_opts.btree_insert_flags);
                if (!ret) {
@@ -261,17 +259,28 @@ nomatch:
                goto next;
        }
 out:
-       bch2_trans_iter_exit(&trans, &iter);
-       bch2_trans_exit(&trans);
+       bch2_trans_iter_exit(trans, &iter);
        bch2_bkey_buf_exit(&_insert, c);
        bch2_bkey_buf_exit(&_new, c);
        BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
        return ret;
 }
 
+int bch2_data_update_index_update(struct bch_write_op *op)
+{
+       struct bch_fs *c = op->c;
+       struct btree_trans trans;
+       int ret;
+
+       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+       ret = __bch2_data_update_index_update(&trans, op);
+       bch2_trans_exit(&trans);
+
+       return ret;
+}
+
 void bch2_data_update_read_done(struct data_update *m,
-                               struct bch_extent_crc_unpacked crc,
-                               struct closure *cl)
+                               struct bch_extent_crc_unpacked crc)
 {
        /* write bio must own pages: */
        BUG_ON(!m->op.wbio.bio.bi_vcnt);
@@ -279,18 +288,105 @@ void bch2_data_update_read_done(struct data_update *m,
        m->op.crc = crc;
        m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
 
-       closure_call(&m->op.cl, bch2_write, NULL, cl);
+       closure_call(&m->op.cl, bch2_write, NULL, NULL);
 }
 
 void bch2_data_update_exit(struct data_update *update)
 {
        struct bch_fs *c = update->op.c;
+       struct bkey_ptrs_c ptrs =
+               bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+       const struct bch_extent_ptr *ptr;
+
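+       /* Drop the bucket nocow locks taken in bch2_data_update_init(): */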
+       bkey_for_each_ptr(ptrs, ptr)
+               bch2_bucket_nocow_unlock(&c->nocow_locks,
+                                      PTR_BUCKET_POS(c, ptr), 0);
 
        bch2_bkey_buf_exit(&update->k, c);
        bch2_disk_reservation_put(c, &update->op.res);
        bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
 }
 
+void bch2_update_unwritten_extent(struct btree_trans *trans,
+                                 struct data_update *update)
+{
+       struct bch_fs *c = update->op.c;
+       struct bio *bio = &update->op.wbio.bio;
+       struct bkey_i_extent *e;
+       struct write_point *wp;
+       struct bch_extent_ptr *ptr;
+       struct closure cl;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       int ret;
+
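+       /* Reallocate the unwritten extent and update the index; no data is read or written: */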
+       closure_init_stack(&cl);
+       bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
+
+       while (bio_sectors(bio)) {
+               unsigned sectors = bio_sectors(bio);
+
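+               /* Recheck that the extent we're updating hasn't changed under us: */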
+               bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
+                                    BTREE_ITER_SLOTS);
+               ret = lockrestart_do(trans, ({
+                       k = bch2_btree_iter_peek_slot(&iter);
+                       bkey_err(k);
+               }));
+               bch2_trans_iter_exit(trans, &iter);
+
+               if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
+                       break;
+
+               e = bkey_extent_init(update->op.insert_keys.top);
+               e->k.p = update->op.pos;
+
+               ret = bch2_alloc_sectors_start_trans(trans,
+                               update->op.target,
+                               false,
+                               update->op.write_point,
+                               &update->op.devs_have,
+                               update->op.nr_replicas,
+                               update->op.nr_replicas,
+                               update->op.alloc_reserve,
+                               0, &cl, &wp);
+               if (ret == -EAGAIN) {
+                       bch2_trans_unlock(trans);
+                       closure_sync(&cl);
+                       continue;
+               }
+
+               if (ret)
+                       return;
+
+               sectors = min(sectors, wp->sectors_free);
+
+               bch2_key_resize(&e->k, sectors);
+
+               bch2_open_bucket_get(c, wp, &update->op.open_buckets);
+               bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+               bch2_alloc_sectors_done(c, wp);
+
+               bio_advance(bio, sectors << 9);
+               update->op.pos.offset += sectors;
+
+               extent_for_each_ptr(extent_i_to_s(e), ptr)
+                       ptr->unwritten = true;
+               bch2_keylist_push(&update->op.insert_keys);
+
+               ret = __bch2_data_update_index_update(trans, &update->op);
+
+               bch2_open_buckets_put(c, &update->op.open_buckets);
+
+               if (ret)
+                       break;
+       }
+
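+       /* A blocked allocation may still hold a closure ref; unlock and wait for it: */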
+       if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
+               bch2_trans_unlock(trans);
+               closure_sync(&cl);
+       }
+}
+
 int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
                          struct write_point_specifier wp,
                          struct bch_io_opts io_opts,
@@ -317,14 +413,13 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
        m->op.flags     |= BCH_WRITE_PAGES_STABLE|
                BCH_WRITE_PAGES_OWNED|
                BCH_WRITE_DATA_ENCODED|
-               BCH_WRITE_FROM_INTERNAL|
+               BCH_WRITE_MOVE|
                m->data_opts.write_flags;
        m->op.compression_type =
                bch2_compression_opt_to_type[io_opts.background_compression ?:
                                             io_opts.compression];
        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
                m->op.alloc_reserve = RESERVE_movinggc;
-       m->op.index_update_fn   = bch2_data_update_index_update;
 
        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
@@ -353,6 +448,9 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
                        m->op.incompressible = true;
 
                i++;
+
+               bch2_bucket_nocow_lock(&c->nocow_locks,
+                                      PTR_BUCKET_POS(c, &p.ptr), 0);
        }
 
        if (reserve_sectors) {
@@ -368,6 +466,10 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
                hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;
 
        BUG_ON(!m->op.nr_replicas);
+
+       /* Special handling required: */
+       if (bkey_extent_is_unwritten(k))
+               return -BCH_ERR_unwritten_extent_update;
        return 0;
 }
 
index 6793aa577cd207f55af29fb3dba8f9f954748002..f304c33662266ec1edf563c7c424ef9c250ff1d1 100644 (file)
@@ -26,11 +26,13 @@ struct data_update {
        struct bch_write_op     op;
 };
 
+int bch2_data_update_index_update(struct bch_write_op *);
+
 void bch2_data_update_read_done(struct data_update *,
-                               struct bch_extent_crc_unpacked,
-                               struct closure *);
+                               struct bch_extent_crc_unpacked);
 
 void bch2_data_update_exit(struct data_update *);
+void bch2_update_unwritten_extent(struct btree_trans *, struct data_update *);
 int bch2_data_update_init(struct bch_fs *, struct data_update *,
                          struct write_point_specifier,
                          struct bch_io_opts, struct data_update_opts,
index b1466932c76873c2d326f9d87507195e26fcb769..1a2c9108f864ee5eee32472ef4b133f1030f8786 100644 (file)
@@ -9,10 +9,10 @@ extern const struct bch_hash_desc bch2_dirent_hash_desc;
 int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_dirent (struct bkey_ops) {       \
+#define bch2_bkey_ops_dirent ((struct bkey_ops) {      \
        .key_invalid    = bch2_dirent_invalid,          \
        .val_to_text    = bch2_dirent_to_text,          \
-}
+})
 
 struct qstr;
 struct file;
index 3e2b22c00a3e7adf19f166b1254452c79af001d7..aba1e82bc889f7e7c6d878d7831fb0dc0c99b20a 100644 (file)
@@ -10,13 +10,13 @@ int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
 void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
                         struct bkey_s_c);
 
-#define bch2_bkey_ops_stripe (struct bkey_ops) {       \
+#define bch2_bkey_ops_stripe ((struct bkey_ops) {      \
        .key_invalid    = bch2_stripe_invalid,          \
        .val_to_text    = bch2_stripe_to_text,          \
        .swab           = bch2_ptr_swab,                \
        .trans_trigger  = bch2_trans_mark_stripe,       \
        .atomic_trigger = bch2_mark_stripe,             \
-}
+})
 
 static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
 {
index cc9ce0be356e20931a44df8e6a0ce0944be799a0..dc906fc9176fecf9f8a7240ff78780a742367735 100644 (file)
@@ -23,6 +23,7 @@ static unsigned bch2_errcode_parents[] = {
 const char *bch2_err_str(int err)
 {
        const char *errstr;
+
        err = abs(err);
 
        BUG_ON(err >= BCH_ERR_MAX);
index 9f293040b25384d6823c4ccedc669ae63fbd4b71..93b515ebea4248848a6fc40d6003475c00036acf 100644 (file)
@@ -63,7 +63,8 @@
        x(BCH_ERR_fsck,                 fsck_repair_unimplemented)              \
        x(BCH_ERR_fsck,                 fsck_repair_impossible)                 \
        x(0,                            need_snapshot_cleanup)                  \
-       x(0,                            need_topology_repair)
+       x(0,                            need_topology_repair)                   \
+       x(0,                            unwritten_extent_update)
 
 enum bch_errcode {
        BCH_ERR_START           = 2048,
index 2fb5102ee31d16da84e83a37b32a0a0872254284..3e49d72d65b5e8db791cfa6254529e9217fce4e6 100644 (file)
@@ -125,8 +125,10 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
                s->nr++;
        }
 
+#ifdef BCACHEFS_LOG_PREFIX
        if (!strncmp(fmt, "bcachefs:", 9))
                prt_printf(out, bch2_log_msg(c, ""));
+#endif
 
        va_start(args, fmt);
        prt_vprintf(out, fmt, args);
index bbf9b6d85b4dcf55d794afd88ed52d228e40eba8..dae72620dae36e4bc8b23298010766f9e2b074c3 100644 (file)
@@ -186,36 +186,25 @@ void bch2_io_error_work(struct work_struct *);
 /* Does the error handling without logging a message */
 void bch2_io_error(struct bch_dev *);
 
-/* Logs message and handles the error: */
-#define bch2_dev_io_error(ca, fmt, ...)                                        \
-do {                                                                   \
-       printk_ratelimited(KERN_ERR "bcachefs (%s): " fmt,              \
-               (ca)->name, ##__VA_ARGS__);                             \
-       bch2_io_error(ca);                                              \
-} while (0)
-
-#define bch2_dev_inum_io_error(ca, _inum, _offset, fmt, ...)           \
-do {                                                                   \
-       printk_ratelimited(KERN_ERR "bcachefs (%s inum %llu offset %llu): " fmt,\
-               (ca)->name, (_inum), (_offset), ##__VA_ARGS__);         \
-       bch2_io_error(ca);                                              \
-} while (0)
-
 #define bch2_dev_io_err_on(cond, ca, ...)                              \
 ({                                                                     \
        bool _ret = (cond);                                             \
                                                                        \
-       if (_ret)                                                       \
-               bch2_dev_io_error(ca, __VA_ARGS__);                     \
+       if (_ret) {                                                     \
+               bch_err_dev_ratelimited(ca, __VA_ARGS__);               \
+               bch2_io_error(ca);                                      \
+       }                                                               \
        _ret;                                                           \
 })
 
-#define bch2_dev_inum_io_err_on(cond, ca, _inum, _offset, ...)         \
+#define bch2_dev_inum_io_err_on(cond, ca, ...)                         \
 ({                                                                     \
        bool _ret = (cond);                                             \
                                                                        \
-       if (_ret)                                                       \
-               bch2_dev_inum_io_error(ca, _inum, _offset, __VA_ARGS__);\
+       if (_ret) {                                                     \
+               bch_err_inum_offset_ratelimited(ca, __VA_ARGS__);       \
+               bch2_io_error(ca);                                      \
+       }                                                               \
        _ret;                                                           \
 })
 
index 9e2a4ed48b42bedf70f373c564452d3a66c3b4f8..1274d506085db54277a5a7d6455a1f1e47a8bd81 100644 (file)
@@ -117,6 +117,13 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                return -EIO;
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+               /*
+                * Unwritten extent: no need to actually read, treat it as a
+                * hole and return 0s:
+                */
+               if (p.ptr.unwritten)
+                       return 0;
+
                ca = bch_dev_bkey_exists(c, p.ptr.dev);
 
                /*
@@ -270,6 +277,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
                    rp.ptr.offset + rp.crc.offset ||
                    lp.ptr.dev                  != rp.ptr.dev ||
                    lp.ptr.gen                  != rp.ptr.gen ||
+                   lp.ptr.unwritten            != rp.ptr.unwritten ||
                    lp.has_ec                   != rp.has_ec)
                        return false;
 
@@ -657,22 +665,21 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
        return replicas;
 }
 
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
-                                          struct extent_ptr_decoded p)
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
 {
        unsigned durability = 0;
        struct bch_dev *ca;
 
-       if (p.ptr.cached)
+       if (p->ptr.cached)
                return 0;
 
-       ca = bch_dev_bkey_exists(c, p.ptr.dev);
+       ca = bch_dev_bkey_exists(c, p->ptr.dev);
 
        if (ca->mi.state != BCH_MEMBER_STATE_failed)
                durability = max_t(unsigned, durability, ca->mi.durability);
 
-       if (p.has_ec)
-               durability += p.ec.redundancy;
+       if (p->has_ec)
+               durability += p->ec.redundancy;
 
        return durability;
 }
@@ -685,7 +692,7 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
        unsigned durability = 0;
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-               durability += bch2_extent_ptr_durability(c, p);
+               durability += bch2_extent_ptr_durability(c, &p);
 
        return durability;
 }
@@ -923,20 +930,31 @@ bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
  */
 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
 {
-       struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
-       struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
-       const union bch_extent_entry *entry1, *entry2;
-       struct extent_ptr_decoded p1, p2;
+       if (k1.k->type != k2.k->type)
+               return false;
+
+       if (bkey_extent_is_direct_data(k1.k)) {
+               struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
+               struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
+               const union bch_extent_entry *entry1, *entry2;
+               struct extent_ptr_decoded p1, p2;
 
-       bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
-               bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
+               if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
+                       return false;
+
+               bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
+                       bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
                        if (p1.ptr.dev          == p2.ptr.dev &&
                            p1.ptr.gen          == p2.ptr.gen &&
                            (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
                            (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
                                return true;
 
-       return false;
+               return false;
+       } else {
+               /* KEY_TYPE_deleted, etc. */
+               return true;
+       }
 }
 
 bool bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1,
@@ -1005,10 +1023,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                                u32 offset;
                                u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
 
-                               prt_printf(out, "ptr: %u:%llu:%u gen %u%s", ptr->dev,
-                                      b, offset, ptr->gen,
-                                      ptr->cached ? " cached" : "");
-
+                               prt_printf(out, "ptr: %u:%llu:%u gen %u",
+                                          ptr->dev, b, offset, ptr->gen);
+                               if (ptr->cached)
+                                       prt_str(out, " cached");
+                               if (ptr->unwritten)
+                                       prt_str(out, " unwritten");
                                if (ca && ptr_stale(ca, ptr))
                                        prt_printf(out, " stale");
                        }
@@ -1097,6 +1117,7 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
        unsigned size_ondisk = k.k->size;
        unsigned nonce = UINT_MAX;
        unsigned nr_ptrs = 0;
+       bool unwritten = false;
        int ret;
 
        if (bkey_is_btree_ptr(k.k))
@@ -1121,6 +1142,18 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
                                                 false, err);
                        if (ret)
                                return ret;
+
+                       if (nr_ptrs && unwritten != entry->ptr.unwritten) {
+                               prt_printf(err, "extent with unwritten and written ptrs");
+                               return -EINVAL;
+                       }
+
+                       if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
+                               prt_printf(err, "has unwritten ptrs");
+                               return -EINVAL;
+                       }
+
+                       unwritten = entry->ptr.unwritten;
                        nr_ptrs++;
                        break;
                case BCH_EXTENT_ENTRY_crc32:
index 3c17b81130bbfbdba9130dc3c224597115e4cf4d..84737af6496de68da9eaa7dda451d0f85c4be2d5 100644 (file)
@@ -198,6 +198,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
@@ -205,6 +206,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+
                return (struct bkey_ptrs_c) {
                        e.v->start,
                        extent_entry_last(e)
@@ -212,6 +214,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_stripe: {
                struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&s.v->ptrs[0]),
                        to_entry(&s.v->ptrs[s.v->nr_blocks]),
@@ -227,6 +230,7 @@ static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
        }
        case KEY_TYPE_btree_ptr_v2: {
                struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
+
                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
@@ -342,7 +346,7 @@ out:                                                                        \
 
 #define extent_for_each_entry_from(_e, _entry, _start)                 \
        __bkey_extent_entry_for_each_from(_start,                       \
-                               extent_entry_last(_e),_entry)
+                               extent_entry_last(_e), _entry)
 
 #define extent_for_each_entry(_e, _entry)                              \
        extent_for_each_entry_from(_e, _entry, (_e).v->start)
@@ -376,28 +380,28 @@ void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s
 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
                              int, struct bkey_s);
 
-#define bch2_bkey_ops_btree_ptr (struct bkey_ops) {            \
+#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {           \
        .key_invalid    = bch2_btree_ptr_invalid,               \
        .val_to_text    = bch2_btree_ptr_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
-#define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) {         \
+#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {                \
        .key_invalid    = bch2_btree_ptr_v2_invalid,            \
        .val_to_text    = bch2_btree_ptr_v2_to_text,            \
        .swab           = bch2_ptr_swab,                        \
        .compat         = bch2_btree_ptr_v2_compat,             \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 /* KEY_TYPE_extent: */
 
 bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_extent (struct bkey_ops) {               \
+#define bch2_bkey_ops_extent ((struct bkey_ops) {              \
        .key_invalid    = bch2_bkey_ptrs_invalid,               \
        .val_to_text    = bch2_bkey_ptrs_to_text,               \
        .swab           = bch2_ptr_swab,                        \
@@ -405,7 +409,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
        .key_merge      = bch2_extent_merge,                    \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 /* KEY_TYPE_reservation: */
 
@@ -414,13 +418,13 @@ int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
 void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_reservation (struct bkey_ops) {          \
+#define bch2_bkey_ops_reservation ((struct bkey_ops) {         \
        .key_invalid    = bch2_reservation_invalid,             \
        .val_to_text    = bch2_reservation_to_text,             \
        .key_merge      = bch2_reservation_merge,               \
        .trans_trigger  = bch2_trans_mark_reservation,          \
        .atomic_trigger = bch2_mark_reservation,                \
-}
+})
 
 /* Extent checksum entries: */
 
@@ -506,6 +510,23 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k)
        }
 }
 
+static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
+{
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       const struct bch_extent_ptr *ptr;
+
+       bkey_for_each_ptr(ptrs, ptr)
+               if (ptr->unwritten)
+                       return true;
+       return false;
+}
+
+static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
+{
+       return k.k->type == KEY_TYPE_reservation ||
+               bkey_extent_is_unwritten(k);
+}
+
 static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
 {
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
@@ -575,6 +596,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c);
 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
 
 unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
 
 void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
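[Editor's note] The bch2_bkey_ops_* hunks above (and the matching ones in inode.h further down) wrap each compound literal in an extra set of parentheses: ((struct bkey_ops) { ... }) instead of (struct bkey_ops) { ... }. This is standard macro hygiene: the expansion becomes a single parenthesized expression, so it can be dropped into any expression context without surrounding tokens re-binding part of it. A minimal compilable sketch with hypothetical names, not from this tree:

#include <stdio.h>

struct ops {
	const char	*name;
	int		(*probe)(void);
};

static int probe_a(void) { return 42; }

/* whole expansion parenthesized, as the bkey_ops macros now are: */
#define OPS_A ((struct ops) { .name = "a", .probe = probe_a })

int main(void)
{
	/* usable anywhere an expression is: member access on the
	 * temporary, function argument, conditional operand, ... */
	printf("%s -> %d\n", OPS_A.name, OPS_A.probe());
	return 0;
}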
index cdb272708a4bdacf94093a7c0351570189abf973..66b945be10c2309a9e758b228b146047b20674e2 100644 (file)
@@ -65,7 +65,7 @@ do {                                                                  \
           (((p) - (fifo)->data)))
 
 #define fifo_entry_idx(fifo, p)        (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
-#define fifo_idx_entry(fifo, i)        (fifo)->data[((fifo)->front + (i)) & (fifo)->mask]
+#define fifo_idx_entry(fifo, i)        ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
 
 #define fifo_push_back_ref(f)                                          \
        (fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
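[Editor's note] fifo_idx_entry() gets the same parenthesization treatment, so the expansion behaves as one lvalue expression at any use site. The indexing itself relies on the fifo capacity being a power of two, with front and back as free-running counters that are masked on every access. A toy userspace model of that scheme (names and sizes illustrative):

#include <assert.h>

/* power-of-two ring buffer: mask == size - 1 */
struct fifo {
	unsigned	front, back, mask;
	int		data[8];
};

#define fifo_idx_entry(f, i)	((f)->data[((f)->front + (i)) & (f)->mask])

int main(void)
{
	struct fifo f = { .front = 6, .back = 6, .mask = 7 };

	f.data[f.back++ & f.mask] = 10;		/* slot 6 */
	f.data[f.back++ & f.mask] = 20;		/* slot 7 */
	f.data[f.back++ & f.mask] = 30;		/* wraps to slot 0 */

	assert(fifo_idx_entry(&f, 2) == 30);	/* third entry from the front */
	return 0;
}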
index 706180b97a7711138bfb9af59952c748fad6c7a3..dc2352df8685cf14541e307907fc5f6ebbe93074 100644 (file)
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
+struct nocow_flush {
+       struct closure  *cl;
+       struct bch_dev  *ca;
+       struct bio      bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+       struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+       closure_put(bio->cl);
+       percpu_ref_put(&bio->ca->io_ref);
+       bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+                                               struct bch_inode_info *inode,
+                                               struct closure *cl)
+{
+       struct nocow_flush *bio;
+       struct bch_dev *ca;
+       struct bch_devs_mask devs;
+       unsigned dev;
+
+       dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+       if (dev == BCH_SB_MEMBERS_MAX)
+               return;
+
+       devs = inode->ei_devs_need_flush;
+       memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+       for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+               rcu_read_lock();
+               ca = rcu_dereference(c->devs[dev]);
+               if (ca && !percpu_ref_tryget(&ca->io_ref))
+                       ca = NULL;
+               rcu_read_unlock();
+
+               if (!ca)
+                       continue;
+
+               bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+                                                   REQ_OP_FLUSH,
+                                                   GFP_KERNEL,
+                                                   &c->nocow_flush_bioset),
+                                  struct nocow_flush, bio);
+               bio->cl                 = cl;
+               bio->ca                 = ca;
+               bio->bio.bi_end_io      = nocow_flush_endio;
+               closure_bio_submit(&bio->bio, cl);
+       }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+                                        struct bch_inode_info *inode)
+{
+       struct closure cl;
+
+       closure_init_stack(&cl);
+       bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+       closure_sync(&cl);
+
+       return 0;
+}
+
 static inline bool bio_full(struct bio *bio, unsigned len)
 {
        if (bio->bi_vcnt >= bio->bi_max_vecs)
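[Editor's note] This is the new nocow-flush machinery: after nocow writes, a journal flush alone does not make the data durable on the devices written to, so the inode records which devices completed nocow writes (ei_devs_need_flush, added to bch_inode_info below) and fsync issues an empty REQ_OP_FLUSH bio to each of them, from the nocow_flush bioset registered at the end of this file. A userspace sketch of just the bookkeeping half, using C11 atomics; all names here are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_DEVS 32	/* the kernel uses a bch_devs_mask bitmap instead */

/* one bit per device that completed a nocow write since the last flush */
static _Atomic unsigned long devs_need_flush;

static void note_nocow_write(unsigned dev)
{
	atomic_fetch_or(&devs_need_flush, 1UL << dev);
}

static void flush_devices(void)
{
	/* claim the whole set at once -- the kernel code copies and then
	 * clears ei_devs_need_flush before submitting the flush bios */
	unsigned long devs = atomic_exchange(&devs_need_flush, 0);

	for (unsigned dev = 0; dev < MAX_DEVS; dev++)
		if (devs & (1UL << dev))
			printf("FLUSH -> dev %u\n", dev);	/* stands in for REQ_OP_FLUSH */
}

int main(void)
{
	note_nocow_write(0);
	note_nocow_write(3);
	flush_devices();	/* flushes devices 0 and 3 */
	flush_devices();	/* nothing left to do */
	return 0;
}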
@@ -65,7 +131,6 @@ struct quota_res {
 };
 
 struct bch_writepage_io {
-       struct closure                  cl;
        struct bch_inode_info           *inode;
 
        /* must be last: */
@@ -73,11 +138,14 @@ struct bch_writepage_io {
 };
 
 struct dio_write {
-       struct completion               done;
        struct kiocb                    *req;
+       struct address_space            *mapping;
+       struct bch_inode_info           *inode;
        struct mm_struct                *mm;
        unsigned                        loop:1,
+                                       extending:1,
                                        sync:1,
+                                       flush:1,
                                        free_iov:1;
        struct quota_res                quota_res;
        u64                             written;
@@ -98,7 +166,7 @@ struct dio_read {
 };
 
 /* pagecache_block must be held */
-static int write_invalidate_inode_pages_range(struct address_space *mapping,
+static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
                                              loff_t start, loff_t end)
 {
        int ret;
@@ -130,24 +198,29 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping,
 
 #ifdef CONFIG_BCACHEFS_QUOTA
 
-static void bch2_quota_reservation_put(struct bch_fs *c,
-                                      struct bch_inode_info *inode,
-                                      struct quota_res *res)
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+                                        struct bch_inode_info *inode,
+                                        struct quota_res *res)
 {
-       if (!res->sectors)
-               return;
-
-       mutex_lock(&inode->ei_quota_lock);
        BUG_ON(res->sectors > inode->ei_quota_reserved);
 
        bch2_quota_acct(c, inode->ei_qid, Q_SPC,
                        -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
        inode->ei_quota_reserved -= res->sectors;
-       mutex_unlock(&inode->ei_quota_lock);
-
        res->sectors = 0;
 }
 
+static void bch2_quota_reservation_put(struct bch_fs *c,
+                                      struct bch_inode_info *inode,
+                                      struct quota_res *res)
+{
+       if (res->sectors) {
+               mutex_lock(&inode->ei_quota_lock);
+               __bch2_quota_reservation_put(c, inode, res);
+               mutex_unlock(&inode->ei_quota_lock);
+       }
+}
+
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
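[Editor's note] bch2_quota_reservation_put() is split into a bare __-prefixed helper that assumes ei_quota_lock is held and a wrapper that takes the lock; i_sectors_acct() gets the same split in the next hunk. This is the usual kernel convention for letting a caller that already holds the lock batch several updates under one acquisition, which the new bch2_dio_write_end() further down does. A generic sketch of the pattern:

#include <pthread.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static long counter_a, counter_b;

/* __ prefix: caller must hold counter_lock */
static void __counter_add(long *c, long v)
{
	*c += v;
}

/* convenience wrapper: takes the lock itself */
static void counter_add(long *c, long v)
{
	if (!v)			/* skip the lock entirely for no-op updates */
		return;

	pthread_mutex_lock(&counter_lock);
	__counter_add(c, v);
	pthread_mutex_unlock(&counter_lock);
}

/* a caller updating both counters pays for one lock acquisition: */
static void counter_add_both(long a, long b)
{
	pthread_mutex_lock(&counter_lock);
	__counter_add(&counter_a, a);
	__counter_add(&counter_b, b);
	pthread_mutex_unlock(&counter_lock);
}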
@@ -170,11 +243,13 @@ static int bch2_quota_reservation_add(struct bch_fs *c,
 
 #else
 
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+                                        struct bch_inode_info *inode,
+                                        struct quota_res *res) {}
+
 static void bch2_quota_reservation_put(struct bch_fs *c,
                                       struct bch_inode_info *inode,
-                                      struct quota_res *res)
-{
-}
+                                      struct quota_res *res) {}
 
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
@@ -225,13 +300,9 @@ int __must_check bch2_write_inode_size(struct bch_fs *c,
        return bch2_write_inode(c, inode, inode_set_size, &s, fields);
 }
 
-static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                           struct quota_res *quota_res, s64 sectors)
 {
-       if (!sectors)
-               return;
-
-       mutex_lock(&inode->ei_quota_lock);
        bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
                                "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
@@ -249,7 +320,16 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
        }
 #endif
-       mutex_unlock(&inode->ei_quota_lock);
+}
+
+static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+                          struct quota_res *quota_res, s64 sectors)
+{
+       if (sectors) {
+               mutex_lock(&inode->ei_quota_lock);
+               __i_sectors_acct(c, inode, quota_res, sectors);
+               mutex_unlock(&inode->ei_quota_lock);
+       }
 }
 
 /* page state: */
@@ -327,11 +407,11 @@ static struct bch_page_state *bch2_page_state_create(struct page *page,
        return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
 }
 
-static unsigned bkey_to_sector_state(const struct bkey *k)
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
 {
-       if (k->type == KEY_TYPE_reservation)
+       if (bkey_extent_is_reservation(k))
                return SECTOR_RESERVED;
-       if (bkey_extent_is_allocation(k))
+       if (bkey_extent_is_allocation(k.k))
                return SECTOR_ALLOCATED;
        return SECTOR_UNALLOCATED;
 }
@@ -382,7 +462,7 @@ retry:
                           SPOS(inum.inum, offset, snapshot),
                           BTREE_ITER_SLOTS, k, ret) {
                unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
-               unsigned state = bkey_to_sector_state(k.k);
+               unsigned state = bkey_to_sector_state(k);
 
                while (pg_idx < nr_pages) {
                        struct page *page = pages[pg_idx];
@@ -422,7 +502,7 @@ static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
        struct bio_vec bv;
        unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
                ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
-       unsigned state = bkey_to_sector_state(k.k);
+       unsigned state = bkey_to_sector_state(k);
 
        bio_for_each_segment(bv, bio, iter)
                __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
@@ -750,25 +830,25 @@ vm_fault_t bch2_page_fault(struct vm_fault *vmf)
        if (fdm > mapping) {
                struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
 
-               if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
+               if (bch2_pagecache_add_tryget(inode))
                        goto got_lock;
 
-               bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
+               bch2_pagecache_block_put(fdm_host);
 
-               bch2_pagecache_add_get(&inode->ei_pagecache_lock);
-               bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+               bch2_pagecache_add_get(inode);
+               bch2_pagecache_add_put(inode);
 
-               bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
+               bch2_pagecache_block_get(fdm_host);
 
                /* Signal that lock has been dropped: */
                set_fdm_dropped_locks();
                return VM_FAULT_SIGBUS;
        }
 
-       bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_get(inode);
 got_lock:
        ret = filemap_fault(vmf);
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
 
        return ret;
 }
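[Editor's note] The bch2_page_fault() hunk above is the deadlock-avoidance dance for a task faulting on a mapping while doing dio (faults_disabled_mapping, "fdm"): the pagecache locks are apparently ordered by mapping address, as the fdm > mapping test suggests, so when the faulting mapping sorts below the one whose block-lock is already held, the add-lock is only trylocked; on contention everything is dropped and the operation restarted, signalled via VM_FAULT_SIGBUS plus set_fdm_dropped_locks(). The general shape, as a self-contained sketch:

#include <pthread.h>
#include <stdbool.h>

/* Acquire a second lock while holding another, keeping a global
 * (address) order.  If the wanted lock sorts below the held one,
 * only trylock it; on contention, drop what we hold and tell the
 * caller to restart from scratch. */
static bool lock_second(pthread_mutex_t *held, pthread_mutex_t *wanted)
{
	if (wanted > held) {		/* correct order: just block */
		pthread_mutex_lock(wanted);
		return true;
	}

	if (pthread_mutex_trylock(wanted) == 0)
		return true;

	pthread_mutex_unlock(held);	/* wrong order and contended: back off */
	return false;			/* caller retries both locks in order */
}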
@@ -796,7 +876,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
         * a write_invalidate_inode_pages_range() that works without dropping
         * page lock before invalidating page
         */
-       bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_get(inode);
 
        lock_page(page);
        isize = i_size_read(&inode->v);
@@ -829,7 +909,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
        wait_for_stable_page(page);
        ret = VM_FAULT_LOCKED;
 out:
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
        sb_end_pagefault(inode->v.i_sb);
 
        return ret;
@@ -1073,7 +1153,9 @@ err:
                goto retry;
 
        if (ret) {
-               bch_err_inum_ratelimited(c, inum.inum,
+               bch_err_inum_offset_ratelimited(c,
+                               iter.pos.inode,
+                               iter.pos.offset << 9,
                                "read error %i from btree lookup", ret);
                rbio->bio.bi_status = BLK_STS_IOERR;
                bio_endio(&rbio->bio);
@@ -1097,7 +1179,7 @@ void bch2_readahead(struct readahead_control *ractl)
 
        bch2_trans_init(&trans, c, 0, 0);
 
-       bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_get(inode);
 
        while ((page = readpage_iter_next(&readpages_iter))) {
                pgoff_t index = readpages_iter.offset + readpages_iter.idx;
@@ -1120,7 +1202,7 @@ void bch2_readahead(struct readahead_control *ractl)
                           &readpages_iter);
        }
 
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
 
        bch2_trans_exit(&trans);
        kfree(readpages_iter.pages);
@@ -1200,18 +1282,10 @@ static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs
        };
 }
 
-static void bch2_writepage_io_free(struct closure *cl)
-{
-       struct bch_writepage_io *io = container_of(cl,
-                                       struct bch_writepage_io, cl);
-
-       bio_put(&io->op.wbio.bio);
-}
-
-static void bch2_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
 {
-       struct bch_writepage_io *io = container_of(cl,
-                                       struct bch_writepage_io, cl);
+       struct bch_writepage_io *io =
+               container_of(op, struct bch_writepage_io, op);
        struct bch_fs *c = io->op.c;
        struct bio *bio = &io->op.wbio.bio;
        struct bvec_iter_all iter;
@@ -1273,7 +1347,7 @@ static void bch2_writepage_io_done(struct closure *cl)
                        end_page_writeback(bvec->bv_page);
        }
 
-       closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+       bio_put(&io->op.wbio.bio);
 }
 
 static void bch2_writepage_do_io(struct bch_writepage_state *w)
@@ -1281,8 +1355,7 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w)
        struct bch_writepage_io *io = w->io;
 
        w->io = NULL;
-       closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
-       continue_at(&io->cl, bch2_writepage_io_done, NULL);
+       closure_call(&io->op.cl, bch2_write, NULL, NULL);
 }
 
 /*
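[Editor's note] bch2_writepage_io_done() is converted from a closure continuation (with a separate destructor) into a plain end_io callback on the write op, and the embedded struct closure is dropped from bch_writepage_io; completion recovers the container with container_of(). A standalone miniature of that callback shape (names illustrative, not the real API):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct write_op {
	int	error;
	void	(*end_io)(struct write_op *);
};

struct writepage_io {
	int		npages;		/* container state */
	struct write_op	op;		/* embedded; "must be last" in the real struct */
};

static void writepage_io_done(struct write_op *op)
{
	struct writepage_io *io = container_of(op, struct writepage_io, op);

	printf("done: %d pages, error %d\n", io->npages, op->error);
}

static void write_submit(struct write_op *op)
{
	op->error = 0;			/* pretend the IO completed */
	if (op->end_io)
		op->end_io(op);
}

int main(void)
{
	struct writepage_io io = { .npages = 4, .op.end_io = writepage_io_done };

	write_submit(&io.op);
	return 0;
}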
@@ -1304,9 +1377,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
                                              &c->writepage_bioset),
                             struct bch_writepage_io, op.wbio.bio);
 
-       closure_init(&w->io->cl, NULL);
        w->io->inode            = inode;
-
        op                      = &w->io->op;
        bch2_write_op_init(op, c, w->opts);
        op->target              = w->opts.foreground_target;
@@ -1315,6 +1386,8 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
        op->write_point         = writepoint_hashed(inode->ei_last_dirtied);
        op->subvol              = inode->ei_subvol;
        op->pos                 = POS(inode->v.i_ino, sector);
+       op->end_io              = bch2_writepage_io_done;
+       op->devs_need_flush     = &inode->ei_devs_need_flush;
        op->wbio.bio.bi_iter.bi_sector = sector;
        op->wbio.bio.bi_opf     = wbc_to_write_flags(wbc);
 }
@@ -1437,8 +1510,13 @@ do_io:
                                     sectors << 9, offset << 9));
 
                /* Check for writing past i_size: */
-               WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
-                            round_up(i_size, block_bytes(c)));
+               WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+                         round_up(i_size, block_bytes(c)) &&
+                         !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+                         "writing past i_size: %llu > %llu (unrounded %llu)\n",
+                         bio_end_sector(&w->io->op.wbio.bio) << 9,
+                         round_up(i_size, block_bytes(c)),
+                         i_size);
 
                w->io->op.res.sectors += reserved_sectors;
                w->io->op.i_sectors_delta -= dirty_sectors;
@@ -1490,7 +1568,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
        bch2_page_reservation_init(c, inode, res);
        *fsdata = res;
 
-       bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_get(inode);
 
        page = grab_cache_page_write_begin(mapping, index);
        if (!page)
@@ -1547,7 +1625,7 @@ err:
        put_page(page);
        *pagep = NULL;
 err_unlock:
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
        kfree(res);
        *fsdata = NULL;
        return bch2_err_class(ret);
@@ -1591,7 +1669,7 @@ int bch2_write_end(struct file *file, struct address_space *mapping,
 
        unlock_page(page);
        put_page(page);
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
 
        bch2_page_reservation_put(c, inode, res);
        kfree(res);
@@ -1666,10 +1744,21 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
                                goto out;
                }
 
+               /*
+                * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+                * supposed to write as much as we have disk space for.
+                *
+                * On failure here we should still write out a partial page if
+                * we aren't completely out of disk space - we don't do that
+                * yet:
+                */
                ret = bch2_page_reservation_get(c, inode, page, &res,
                                                pg_offset, pg_len);
-               if (ret)
-                       goto out;
+               if (unlikely(ret)) {
+                       if (!reserved)
+                               goto out;
+                       break;
+               }
 
                reserved += pg_len;
        }
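[Editor's note] Per the new XXX comment, POSIX (exercised by fstests generic/275) wants a write that runs out of space to succeed partially rather than fail outright. The hunk above implements half of that: a failed page reservation no longer aborts the write once something has already been reserved, and in the following hunk the copy loop is bounded by reserved rather than the requested len. A skeleton of that control flow; reserve_chunk() and copy_chunk() are hypothetical stand-ins:

#include <stddef.h>

/* hypothetical primitives standing in for page reservation and copying */
extern int reserve_chunk(size_t want, size_t *got);
extern size_t copy_chunk(size_t want);

static long buffered_write(size_t len)
{
	size_t reserved = 0, copied = 0, chunk;
	int ret = 0;

	while (reserved < len) {
		ret = reserve_chunk(len - reserved, &chunk);
		if (ret) {
			if (!reserved)
				return ret;	/* nothing reserved: fail outright */
			break;			/* else: write the partial amount */
		}
		reserved += chunk;
	}

	while (copied < reserved)
		copied += copy_chunk(reserved - copied);

	return copied ? (long) copied : ret;
}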
@@ -1678,10 +1767,10 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
                for (i = 0; i < nr_pages; i++)
                        flush_dcache_page(pages[i]);
 
-       while (copied < len) {
+       while (copied < reserved) {
                struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
                unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
-               unsigned pg_len = min_t(unsigned, len - copied,
+               unsigned pg_len = min_t(unsigned, reserved - copied,
                                        PAGE_SIZE - pg_offset);
                unsigned pg_copied = copy_page_from_iter_atomic(page,
                                                pg_offset, pg_len, iter);
@@ -1749,7 +1838,7 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t written = 0;
        int ret = 0;
 
-       bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_get(inode);
 
        do {
                unsigned offset = pos & (PAGE_SIZE - 1);
@@ -1807,7 +1896,7 @@ again:
                balance_dirty_pages_ratelimited(mapping);
        } while (iov_iter_count(iter));
 
-       bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_add_put(inode);
 
        return written ? written : ret;
 }
@@ -1970,11 +2059,13 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        if (iocb->ki_flags & IOCB_DIRECT) {
                struct blk_plug plug;
 
-               ret = filemap_write_and_wait_range(mapping,
-                                       iocb->ki_pos,
-                                       iocb->ki_pos + count - 1);
-               if (ret < 0)
-                       goto out;
+               if (unlikely(mapping->nrpages)) {
+                       ret = filemap_write_and_wait_range(mapping,
+                                               iocb->ki_pos,
+                                               iocb->ki_pos + count - 1);
+                       if (ret < 0)
+                               goto out;
+               }
 
                file_accessed(file);
 
@@ -1985,9 +2076,9 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                if (ret >= 0)
                        iocb->ki_pos += ret;
        } else {
-               bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+               bch2_pagecache_add_get(inode);
                ret = generic_file_read_iter(iocb, iter);
-               bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+               bch2_pagecache_add_put(inode);
        }
 out:
        return bch2_err_class(ret);
@@ -2039,31 +2130,162 @@ err:
        return err ? false : ret;
 }
 
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
+{
+       struct bch_fs *c = dio->op.c;
+       struct bch_inode_info *inode = dio->inode;
+       struct bio *bio = &dio->op.wbio.bio;
+
+       return bch2_check_range_allocated(c, inode_inum(inode),
+                               dio->op.pos.offset, bio_sectors(bio),
+                               dio->op.opts.data_replicas,
+                               dio->op.opts.compression != 0);
+}
+
 static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
 
-static long bch2_dio_write_loop(struct dio_write *dio)
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+       struct iovec *iov = dio->inline_vecs;
+
+       if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+               iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+                                   GFP_KERNEL);
+               if (unlikely(!iov))
+                       return -ENOMEM;
+
+               dio->free_iov = true;
+       }
+
+       memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
+       dio->iter.iov = iov;
+       return 0;
+}
+
+static void bch2_dio_write_flush_done(struct closure *cl)
+{
+       struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+       struct bch_fs *c = dio->op.c;
+
+       closure_debug_destroy(cl);
+
+       dio->op.error = bch2_journal_error(&c->journal);
+
+       bch2_dio_write_done(dio);
+}
+
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+       struct bch_fs *c = dio->op.c;
+       struct bch_inode_unpacked inode;
+       int ret;
+
+       dio->flush = 0;
+
+       closure_init(&dio->op.cl, NULL);
+
+       if (!dio->op.error) {
+               ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+               if (ret) {
+                       dio->op.error = ret;
+               } else {
+                       bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+                       bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+               }
+       }
+
+       if (dio->sync) {
+               closure_sync(&dio->op.cl);
+               closure_debug_destroy(&dio->op.cl);
+       } else {
+               continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
+       }
+}
+
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
 {
-       bool kthread = (current->flags & PF_KTHREAD) != 0;
        struct kiocb *req = dio->req;
-       struct address_space *mapping = req->ki_filp->f_mapping;
-       struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
-       struct bch_fs *c = inode->v.i_sb->s_fs_info;
+       struct bch_inode_info *inode = dio->inode;
+       bool sync = dio->sync;
+       long ret;
+
+       if (unlikely(dio->flush)) {
+               bch2_dio_write_flush(dio);
+               if (!sync)
+                       return -EIOCBQUEUED;
+       }
+
+       bch2_pagecache_block_put(inode);
+
+       if (dio->free_iov)
+               kfree(dio->iter.iov);
+
+       ret = dio->op.error ?: ((long) dio->written << 9);
+       bio_put(&dio->op.wbio.bio);
+
+       /* inode->i_dio_count is our ref on inode and thus bch_fs */
+       inode_dio_end(&inode->v);
+
+       if (ret < 0)
+               ret = bch2_err_class(ret);
+
+       if (!sync) {
+               req->ki_complete(req, ret);
+               ret = -EIOCBQUEUED;
+       }
+       return ret;
+}
+
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+       struct bch_fs *c = dio->op.c;
+       struct kiocb *req = dio->req;
+       struct bch_inode_info *inode = dio->inode;
        struct bio *bio = &dio->op.wbio.bio;
        struct bvec_iter_all iter;
        struct bio_vec *bv;
+
+       req->ki_pos     += (u64) dio->op.written << 9;
+       dio->written    += dio->op.written;
+
+       if (dio->extending) {
+               spin_lock(&inode->v.i_lock);
+               if (req->ki_pos > inode->v.i_size)
+                       i_size_write(&inode->v, req->ki_pos);
+               spin_unlock(&inode->v.i_lock);
+       }
+
+       if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+               mutex_lock(&inode->ei_quota_lock);
+               __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+               __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+               mutex_unlock(&inode->ei_quota_lock);
+       }
+
+       if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+               bio_for_each_segment_all(bv, bio, iter)
+                       put_page(bv->bv_page);
+
+       if (unlikely(dio->op.error))
+               set_bit(EI_INODE_ERROR, &inode->ei_flags);
+}
+
+static long bch2_dio_write_loop(struct dio_write *dio)
+{
+       struct bch_fs *c = dio->op.c;
+       struct kiocb *req = dio->req;
+       struct address_space *mapping = dio->mapping;
+       struct bch_inode_info *inode = dio->inode;
+       struct bio *bio = &dio->op.wbio.bio;
        unsigned unaligned, iter_count;
        bool sync = dio->sync, dropped_locks;
        long ret;
 
-       if (dio->loop)
-               goto loop;
-
        while (1) {
                iter_count = dio->iter.count;
 
-               if (kthread && dio->mm)
-                       kthread_use_mm(dio->mm);
-               BUG_ON(current->faults_disabled_mapping);
+               EBUG_ON(current->faults_disabled_mapping);
                current->faults_disabled_mapping = mapping;
 
                ret = bio_iov_iter_get_pages(bio, &dio->iter);
@@ -2071,8 +2293,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                dropped_locks = fdm_dropped_locks();
 
                current->faults_disabled_mapping = NULL;
-               if (kthread && dio->mm)
-                       kthread_unuse_mm(dio->mm);
 
                /*
                 * If the fault handler returned an error but also signalled
@@ -2110,116 +2330,88 @@ static long bch2_dio_write_loop(struct dio_write *dio)
                }
 
                bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
-               dio->op.end_io          = bch2_dio_write_loop_async;
+               dio->op.end_io          = sync
+                       ? NULL
+                       : bch2_dio_write_loop_async;
                dio->op.target          = dio->op.opts.foreground_target;
                dio->op.write_point     = writepoint_hashed((unsigned long) current);
                dio->op.nr_replicas     = dio->op.opts.data_replicas;
                dio->op.subvol          = inode->ei_subvol;
                dio->op.pos             = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+               dio->op.devs_need_flush = &inode->ei_devs_need_flush;
 
-               if ((req->ki_flags & IOCB_DSYNC) &&
-                   !c->opts.journal_flush_disabled)
-                       dio->op.flags |= BCH_WRITE_FLUSH;
+               if (sync)
+                       dio->op.flags |= BCH_WRITE_SYNC;
                dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
 
+               ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+                                                bio_sectors(bio), true);
+               if (unlikely(ret))
+                       goto err;
+
                ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
                                                dio->op.opts.data_replicas, 0);
                if (unlikely(ret) &&
-                   !bch2_check_range_allocated(c, inode_inum(inode),
-                               dio->op.pos.offset, bio_sectors(bio),
-                               dio->op.opts.data_replicas,
-                               dio->op.opts.compression != 0))
+                   !bch2_dio_write_check_allocated(dio))
                        goto err;
 
                task_io_account_write(bio->bi_iter.bi_size);
 
-               if (!dio->sync && !dio->loop && dio->iter.count) {
-                       struct iovec *iov = dio->inline_vecs;
+               if (unlikely(dio->iter.count) &&
+                   !dio->sync &&
+                   !dio->loop &&
+                   bch2_dio_write_copy_iov(dio))
+                       dio->sync = sync = true;
 
-                       if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
-                               iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
-                                                   GFP_KERNEL);
-                               if (unlikely(!iov)) {
-                                       dio->sync = sync = true;
-                                       goto do_io;
-                               }
-
-                               dio->free_iov = true;
-                       }
-
-                       memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
-                       dio->iter.iov = iov;
-               }
-do_io:
                dio->loop = true;
                closure_call(&dio->op.cl, bch2_write, NULL, NULL);
 
-               if (sync)
-                       wait_for_completion(&dio->done);
-               else
+               if (!sync)
                        return -EIOCBQUEUED;
-loop:
-               i_sectors_acct(c, inode, &dio->quota_res,
-                              dio->op.i_sectors_delta);
-               req->ki_pos += (u64) dio->op.written << 9;
-               dio->written += dio->op.written;
-
-               spin_lock(&inode->v.i_lock);
-               if (req->ki_pos > inode->v.i_size)
-                       i_size_write(&inode->v, req->ki_pos);
-               spin_unlock(&inode->v.i_lock);
-
-               if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
-                       bio_for_each_segment_all(bv, bio, iter)
-                               put_page(bv->bv_page);
-               bio->bi_vcnt = 0;
 
-               if (dio->op.error) {
-                       set_bit(EI_INODE_ERROR, &inode->ei_flags);
-                       break;
-               }
+               bch2_dio_write_end(dio);
 
-               if (!dio->iter.count)
+               if (likely(!dio->iter.count) || dio->op.error)
                        break;
 
                bio_reset(bio, NULL, REQ_OP_WRITE);
-               reinit_completion(&dio->done);
        }
-
-       ret = dio->op.error ?: ((long) dio->written << 9);
+out:
+       return bch2_dio_write_done(dio);
 err:
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       bch2_quota_reservation_put(c, inode, &dio->quota_res);
+       dio->op.error = ret;
 
-       if (dio->free_iov)
-               kfree(dio->iter.iov);
+       if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+               struct bvec_iter_all iter;
+               struct bio_vec *bv;
 
-       if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
                bio_for_each_segment_all(bv, bio, iter)
                        put_page(bv->bv_page);
-       bio_put(bio);
-
-       /* inode->i_dio_count is our ref on inode and thus bch_fs */
-       inode_dio_end(&inode->v);
-
-       if (ret < 0)
-               ret = bch2_err_class(ret);
-
-       if (!sync) {
-               req->ki_complete(req, ret);
-               ret = -EIOCBQUEUED;
        }
-       return ret;
+
+       bch2_quota_reservation_put(c, inode, &dio->quota_res);
+       goto out;
 }
 
 static void bch2_dio_write_loop_async(struct bch_write_op *op)
 {
        struct dio_write *dio = container_of(op, struct dio_write, op);
+       struct mm_struct *mm = dio->mm;
 
-       if (dio->sync)
-               complete(&dio->done);
-       else
-               bch2_dio_write_loop(dio);
+       bch2_dio_write_end(dio);
+
+       if (likely(!dio->iter.count) || dio->op.error) {
+               bch2_dio_write_done(dio);
+               return;
+       }
+
+       bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
+
+       if (mm)
+               kthread_use_mm(mm);
+       bch2_dio_write_loop(dio);
+       if (mm)
+               kthread_unuse_mm(mm);
 }
 
 static noinline
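[Editor's note] bch2_dio_write_loop_async() now re-enters the write loop directly from IO completion, which may run in a kernel worker thread; since the next pass calls bio_iov_iter_get_pages() on the user's iovec, it must temporarily adopt the submitting task's address space. That is what the kthread_use_mm()/kthread_unuse_mm() pairing is for, with dio->mm captured as current->mm at submission time. In outline, simplified from the code above:

/* completion context: dio->mm was saved at submit time */
static void dio_continue_from_worker(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	if (mm)
		kthread_use_mm(mm);	/* make the user's mappings reachable */
	bch2_dio_write_loop(dio);	/* may pin more user pages */
	if (mm)
		kthread_unuse_mm(mm);
}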
@@ -2257,7 +2449,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
                goto err;
 
        inode_dio_begin(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_block_get(inode);
 
        extending = req->ki_pos + iter->count > inode->v.i_size;
        if (!extending) {
@@ -2271,26 +2463,27 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
                               GFP_KERNEL,
                               &c->dio_write_bioset);
        dio = container_of(bio, struct dio_write, op.wbio.bio);
-       init_completion(&dio->done);
        dio->req                = req;
+       dio->mapping            = mapping;
+       dio->inode              = inode;
        dio->mm                 = current->mm;
        dio->loop               = false;
+       dio->extending          = extending;
        dio->sync               = is_sync_kiocb(req) || extending;
+       dio->flush              = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
        dio->free_iov           = false;
        dio->quota_res.sectors  = 0;
        dio->written            = 0;
        dio->iter               = *iter;
+       dio->op.c               = c;
 
-       ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
-                                        iter->count >> 9, true);
-       if (unlikely(ret))
-               goto err_put_bio;
-
-       ret = write_invalidate_inode_pages_range(mapping,
-                                       req->ki_pos,
-                                       req->ki_pos + iter->count - 1);
-       if (unlikely(ret))
-               goto err_put_bio;
+       if (unlikely(mapping->nrpages)) {
+               ret = write_invalidate_inode_pages_range(mapping,
+                                               req->ki_pos,
+                                               req->ki_pos + iter->count - 1);
+               if (unlikely(ret))
+                       goto err_put_bio;
+       }
 
        ret = bch2_dio_write_loop(dio);
 err:
@@ -2298,8 +2491,7 @@ err:
                inode_unlock(&inode->v);
        return ret;
 err_put_bio:
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
-       bch2_quota_reservation_put(c, inode, &dio->quota_res);
+       bch2_pagecache_block_put(inode);
        bio_put(bio);
        inode_dio_end(&inode->v);
        goto err;
@@ -2351,19 +2543,21 @@ out:
  * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
  * insert trigger: look up the btree inode instead
  */
-static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
+static int bch2_flush_inode(struct bch_fs *c,
+                           struct bch_inode_info *inode)
 {
-       struct bch_inode_unpacked inode;
+       struct bch_inode_unpacked u;
        int ret;
 
        if (c->opts.journal_flush_disabled)
                return 0;
 
-       ret = bch2_inode_find_by_inum(c, inum, &inode);
+       ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
        if (ret)
                return ret;
 
-       return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+       return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+               bch2_inode_flush_nocow_writes(c, inode);
 }
 
 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -2374,7 +2568,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 
        ret = file_write_and_wait_range(file, start, end);
        ret2 = sync_inode_metadata(&inode->v, 1);
-       ret3 = bch2_flush_inode(c, inode_inum(inode));
+       ret3 = bch2_flush_inode(c, inode);
 
        return bch2_err_class(ret ?: ret2 ?: ret3);
 }
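[Editor's note] bch2_fsync() evaluates all three steps unconditionally, so a data-writeback failure does not skip the metadata or journal flush, and then reports the first nonzero error using GNU C's binary conditional a ?: b, which yields a when it is nonzero and evaluates it only once. Illustrated standalone:

#include <stdio.h>

/* GNU C extension: x ?: y is x ? x : y, with x evaluated once */
static int first_error(int a, int b, int c)
{
	return a ?: b ?: c;
}

int main(void)
{
	printf("%d\n", first_error(0, 0, -5));	/* -5 */
	printf("%d\n", first_error(-2, 0, -5));	/* -2 */
	printf("%d\n", first_error(0, 0, 0));	/* 0: no errors */
	return 0;
}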
@@ -2602,7 +2796,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
        }
 
        inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_block_get(inode);
 
        ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
        if (ret)
@@ -2617,8 +2811,10 @@ int bch2_truncate(struct user_namespace *mnt_userns,
        if (ret)
                goto err;
 
-       WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
-               inode->v.i_size < inode_u.bi_size);
+       WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+                 inode->v.i_size < inode_u.bi_size,
+                 "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+                 (u64) inode->v.i_size, inode_u.bi_size);
 
        if (iattr->ia_size > inode->v.i_size) {
                ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
@@ -2681,7 +2877,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
 
        ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
 err:
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_block_put(inode);
        return bch2_err_class(ret);
 }
 
@@ -2920,7 +3116,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
        struct btree_trans trans;
        struct btree_iter iter;
        struct bpos end_pos = POS(inode->v.i_ino, end_sector);
-       unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
+       struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
        int ret = 0;
 
        bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
@@ -2931,9 +3127,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 
        while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
                s64 i_sectors_delta = 0;
-               struct disk_reservation disk_res = { 0 };
                struct quota_res quota_res = { 0 };
-               struct bkey_i_reservation reservation;
                struct bkey_s_c k;
                unsigned sectors;
                u32 snapshot;
@@ -2952,8 +3146,8 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        goto bkey_err;
 
                /* already reserved */
-               if (k.k->type == KEY_TYPE_reservation &&
-                   bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
+               if (bkey_extent_is_reservation(k) &&
+                   bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }
@@ -2964,16 +3158,12 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                        continue;
                }
 
-               bkey_reservation_init(&reservation.k_i);
-               reservation.k.type      = KEY_TYPE_reservation;
-               reservation.k.p         = k.k->p;
-               reservation.k.size      = k.k->size;
-
-               bch2_cut_front(iter.pos,        &reservation.k_i);
-               bch2_cut_back(end_pos,          &reservation.k_i);
+               /*
+                * XXX: for nocow mode, we should promote shared extents to
+                * unshared here
+                */
 
-               sectors = reservation.k.size;
-               reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
+               sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
 
                if (!bkey_extent_is_allocation(k.k)) {
                        ret = bch2_quota_reservation_add(c, inode,
@@ -2983,26 +3173,15 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                                goto bkey_err;
                }
 
-               if (reservation.v.nr_replicas < replicas ||
-                   bch2_bkey_sectors_compressed(k)) {
-                       ret = bch2_disk_reservation_get(c, &disk_res, sectors,
-                                                       replicas, 0);
-                       if (unlikely(ret))
-                               goto bkey_err;
-
-                       reservation.v.nr_replicas = disk_res.nr_replicas;
-               }
-
-               ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
-                                        &reservation.k_i,
-                               &disk_res, NULL,
-                               0, &i_sectors_delta, true);
+               ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
+                                           sectors, opts, &i_sectors_delta,
+                                           writepoint_hashed((unsigned long) current));
                if (ret)
                        goto bkey_err;
+
                i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
-               bch2_disk_reservation_put(c, &disk_res);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
        }
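[Editor's note] __bchfs_fallocate() no longer synthesizes KEY_TYPE_reservation keys (with cut_front/cut_back and a manual disk reservation); it calls the new bch2_extent_fallocate() helper, which allocates unwritten extents -- the same representation the new bkey_extent_is_reservation() treats as reserved. Each pass covers from the iterator position to whichever comes first, the end of the existing key or the end of the fallocate range, roughly:

/* simplified to plain u64 sector offsets, names illustrative */
static unsigned long long sectors_this_pass(unsigned long long pos,
					    unsigned long long key_end,
					    unsigned long long range_end)
{
	unsigned long long end = key_end < range_end ? key_end : range_end;

	return end - pos;	/* what bpos_min(k.k->p, end_pos) computes above */
}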
@@ -3094,7 +3273,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
 
        inode_lock(&inode->v);
        inode_dio_wait(&inode->v);
-       bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+       bch2_pagecache_block_get(inode);
 
        ret = file_modified(file);
        if (ret)
@@ -3111,7 +3290,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
        else
                ret = -EOPNOTSUPP;
 err:
-       bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+       bch2_pagecache_block_put(inode);
        inode_unlock(&inode->v);
        percpu_ref_put(&c->writes);
 
@@ -3243,7 +3422,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
 
        if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
            IS_SYNC(file_inode(file_dst)))
-               ret = bch2_flush_inode(c, inode_inum(dst));
+               ret = bch2_flush_inode(c, dst);
 err:
        bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
@@ -3499,6 +3678,7 @@ loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
 
 void bch2_fs_fsio_exit(struct bch_fs *c)
 {
+       bioset_exit(&c->nocow_flush_bioset);
        bioset_exit(&c->dio_write_bioset);
        bioset_exit(&c->dio_read_bioset);
        bioset_exit(&c->writepage_bioset);
@@ -3518,7 +3698,9 @@ int bch2_fs_fsio_init(struct bch_fs *c)
                        BIOSET_NEED_BVECS) ||
            bioset_init(&c->dio_write_bioset,
                        4, offsetof(struct dio_write, op.wbio.bio),
-                       BIOSET_NEED_BVECS))
+                       BIOSET_NEED_BVECS) ||
+           bioset_init(&c->nocow_flush_bioset,
+                       1, offsetof(struct nocow_flush, bio), 0))
                ret = -ENOMEM;
 
        pr_verbose_init(c->opts, "ret %i", ret);
index 186faa54b590f1e736c1b660d5c83d1bc56cdbda..8621738fd071358eb0f29fbc4e4de4577ee3fce4 100644 (file)
@@ -43,58 +43,6 @@ static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
                                struct bch_inode_unpacked *,
                                struct bch_subvolume *);
 
-static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
-{
-       BUG_ON(atomic_long_read(&lock->v) == 0);
-
-       if (atomic_long_sub_return_release(i, &lock->v) == 0)
-               wake_up_all(&lock->wait);
-}
-
-static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
-{
-       long v = atomic_long_read(&lock->v), old;
-
-       do {
-               old = v;
-
-               if (i > 0 ? v < 0 : v > 0)
-                       return false;
-       } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
-                                       old, old + i)) != old);
-       return true;
-}
-
-static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
-{
-       wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
-}
-
-void bch2_pagecache_add_put(struct pagecache_lock *lock)
-{
-       __pagecache_lock_put(lock, 1);
-}
-
-bool bch2_pagecache_add_tryget(struct pagecache_lock *lock)
-{
-       return __pagecache_lock_tryget(lock, 1);
-}
-
-void bch2_pagecache_add_get(struct pagecache_lock *lock)
-{
-       __pagecache_lock_get(lock, 1);
-}
-
-void bch2_pagecache_block_put(struct pagecache_lock *lock)
-{
-       __pagecache_lock_put(lock, -1);
-}
-
-void bch2_pagecache_block_get(struct pagecache_lock *lock)
-{
-       __pagecache_lock_get(lock, -1);
-}
-
 void bch2_inode_update_after_write(struct btree_trans *trans,
                                   struct bch_inode_info *inode,
                                   struct bch_inode_unpacked *bi,
@@ -862,6 +810,9 @@ static int bch2_fill_extent(struct bch_fs *c,
                        int flags2 = 0;
                        u64 offset = p.ptr.offset;
 
+                       if (p.ptr.unwritten)
+                               flags2 |= FIEMAP_EXTENT_UNWRITTEN;
+
                        if (p.crc.compression_type)
                                flags2 |= FIEMAP_EXTENT_ENCODED;
                        else
@@ -1409,7 +1360,7 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
 
        inode_init_once(&inode->v);
        mutex_init(&inode->ei_update_lock);
-       pagecache_lock_init(&inode->ei_pagecache_lock);
+       two_state_lock_init(&inode->ei_pagecache_lock);
        mutex_init(&inode->ei_quota_lock);
 
        return &inode->v;
index 9f4b57e30e2a7d14e8c66598c2203cbf970d319e..6b91bbe911162466063d00e4ec41889003d28ced 100644 (file)
@@ -6,31 +6,11 @@
 #include "opts.h"
 #include "str_hash.h"
 #include "quota_types.h"
+#include "two_state_shared_lock.h"
 
 #include <linux/seqlock.h>
 #include <linux/stat.h>
 
-/*
- * Two-state lock - can be taken for add or block - both states are shared,
- * like read side of rwsem, but conflict with other state:
- */
-struct pagecache_lock {
-       atomic_long_t           v;
-       wait_queue_head_t       wait;
-};
-
-static inline void pagecache_lock_init(struct pagecache_lock *lock)
-{
-       atomic_long_set(&lock->v, 0);
-       init_waitqueue_head(&lock->wait);
-}
-
-void bch2_pagecache_add_put(struct pagecache_lock *);
-bool bch2_pagecache_add_tryget(struct pagecache_lock *);
-void bch2_pagecache_add_get(struct pagecache_lock *);
-void bch2_pagecache_block_put(struct pagecache_lock *);
-void bch2_pagecache_block_get(struct pagecache_lock *);
-
 struct bch_inode_info {
        struct inode            v;
        unsigned long           ei_flags;
@@ -39,17 +19,35 @@ struct bch_inode_info {
        u64                     ei_quota_reserved;
        unsigned long           ei_last_dirtied;
 
-       struct pagecache_lock   ei_pagecache_lock;
+       two_state_lock_t        ei_pagecache_lock;
 
        struct mutex            ei_quota_lock;
        struct bch_qid          ei_qid;
 
        u32                     ei_subvol;
 
+       /*
+        * When we've been doing nocow writes we'll need to issue flushes to the
+        * underlying block devices
+        *
+        * XXX: a device may have had a flush issued by some other codepath. It
+        * would be better to keep for each device a sequence number that's
+        * incremented when we issue a cache flush, and track here the sequence
+        * number that needs flushing.
+        */
+       struct bch_devs_mask    ei_devs_need_flush;
+
        /* copy of inode in btree: */
        struct bch_inode_unpacked ei_inode;
 };
 
+#define bch2_pagecache_add_put(i)      bch2_two_state_unlock(&(i)->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_tryget(i)   bch2_two_state_trylock(&(i)->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_get(i)      bch2_two_state_lock(&(i)->ei_pagecache_lock, 0)
+
+#define bch2_pagecache_block_put(i)    bch2_two_state_unlock(&(i)->ei_pagecache_lock, 1)
+#define bch2_pagecache_block_get(i)    bch2_two_state_lock(&(i)->ei_pagecache_lock, 1)
+
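[Editor's note] These wrappers map the old pagecache-lock API onto the new generic two_state_lock_t (added in this release as libbcachefs/two_state_shared_lock.[ch]): state 0 is "add", state 1 is "block"; each state is shared like the read side of an rwsem, but the two states exclude each other. A userspace sketch of the core, modelled on the pagecache_lock implementation removed from fs.c above -- a signed counter that is positive while held for state 0 and negative while held for state 1:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_long	v;	/* > 0: held for state 0; < 0: held for state 1 */
} two_state_lock_t;

static bool two_state_trylock(two_state_lock_t *lock, int state)
{
	long i = state ? -1 : 1;
	long v = atomic_load(&lock->v);

	do {
		if (i > 0 ? v < 0 : v > 0)	/* other state held: conflict */
			return false;
	} while (!atomic_compare_exchange_weak(&lock->v, &v, v + i));

	return true;
}

static void two_state_lock(two_state_lock_t *lock, int state)
{
	while (!two_state_trylock(lock, state))
		;	/* the kernel version sleeps on a waitqueue instead */
}

static void two_state_unlock(two_state_lock_t *lock, int state)
{
	atomic_fetch_sub(&lock->v, state ? -1 : 1);
	/* kernel version: wake waiters when the count reaches zero */
}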
 static inline subvol_inum inode_inum(struct bch_inode_info *inode)
 {
        return (subvol_inum) {
@@ -96,7 +94,7 @@ do {                                                                  \
                        if ((_locks) & INODE_LOCK)                      \
                                down_write_nested(&a[i]->v.i_rwsem, i); \
                        if ((_locks) & INODE_PAGECACHE_BLOCK)           \
-                               bch2_pagecache_block_get(&a[i]->ei_pagecache_lock);\
+                               bch2_pagecache_block_get(a[i]);\
                        if ((_locks) & INODE_UPDATE_LOCK)                       \
                                mutex_lock_nested(&a[i]->ei_update_lock, i);\
                }                                                       \
@@ -114,7 +112,7 @@ do {                                                                        \
                        if ((_locks) & INODE_LOCK)                      \
                                up_write(&a[i]->v.i_rwsem);             \
                        if ((_locks) & INODE_PAGECACHE_BLOCK)           \
-                               bch2_pagecache_block_put(&a[i]->ei_pagecache_lock);\
+                               bch2_pagecache_block_put(a[i]);\
                        if ((_locks) & INODE_UPDATE_LOCK)                       \
                                mutex_unlock(&a[i]->ei_update_lock);    \
                }                                                       \
index ca95d85b73488ef849c54ab26e044d72a7e76e32..ffc2671cece6a60867fec46f522e68fe81fd3713 100644 (file)
@@ -321,7 +321,7 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
        bch2_trans_iter_exit(trans, &iter);
 err:
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from __remove_dirent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
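[Editor's note] Throughout fsck.c, error messages switch from hard-coded function names ("error from __remove_dirent()") to the __func__ predefined identifier, which cannot go stale when a function is renamed and lets many call sites share one format string. The same idiom in miniature (log_err() is a hypothetical stand-in for bch_err()):

#include <stdio.h>

#define log_err(fmt, ...)	fprintf(stderr, fmt "\n", ##__VA_ARGS__)

static int remove_dirent(void)
{
	int ret = -2;	/* pretend something failed */

	if (ret)
		log_err("%s(): error %d", __func__, ret);
	return ret;
}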
 
@@ -506,7 +506,7 @@ static int snapshots_seen_add(struct bch_fs *c, struct snapshots_seen *s, u32 id
                        break;
 
                if (i->equiv == n.equiv) {
-                       bch_err(c, "adding duplicate snapshot in snapshots_seen_add()");
+                       bch_err(c, "%s(): adding duplicate snapshot", __func__);
                        return -EINVAL;
                }
        }
@@ -848,8 +848,7 @@ out:
        printbuf_exit(&buf);
        return ret;
 bad_hash:
-       if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, "
-                    "hashed to %llu\n%s",
+       if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
                     bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
                     (printbuf_reset(&buf),
                      bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
@@ -1000,7 +999,7 @@ static int check_inode(struct btree_trans *trans,
 err:
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_inode(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1026,7 +1025,7 @@ static int check_inodes(struct bch_fs *c, bool full)
        bch2_trans_exit(&trans);
        snapshots_seen_exit(&s);
        if (ret)
-               bch_err(c, "error from check_inodes(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1159,7 +1158,7 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
        }
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_i_sectors(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        if (!ret && trans_was_restarted(trans, restart_count))
                ret = -BCH_ERR_transaction_restart_nested;
        return ret;
@@ -1258,8 +1257,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
                        continue;
 
                if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
-                               k.k->type != KEY_TYPE_reservation &&
-                               k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9, c,
+                               k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
+                               !bkey_extent_is_reservation(k), c,
                                "extent type past end of inode %llu:%u, i_size %llu\n  %s",
                                i->inode.bi_inum, i->snapshot, i->inode.bi_size,
                                (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1295,7 +1294,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_extent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1337,7 +1336,7 @@ static int check_extents(struct bch_fs *c)
        snapshots_seen_exit(&s);
 
        if (ret)
-               bch_err(c, "error from check_extents(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1376,7 +1375,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
        }
 fsck_err:
        if (ret)
-               bch_err(c, "error from check_subdir_count(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        if (!ret && trans_was_restarted(trans, restart_count))
                ret = -BCH_ERR_transaction_restart_nested;
        return ret;
@@ -1497,7 +1496,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_target(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1667,7 +1666,7 @@ fsck_err:
        printbuf_exit(&buf);
 
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_dirent(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1706,7 +1705,7 @@ static int check_dirents(struct bch_fs *c)
        inode_walker_exit(&target);
 
        if (ret)
-               bch_err(c, "error from check_dirents(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1742,7 +1741,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
        ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
 fsck_err:
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(c, "error from check_xattr(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -1774,7 +1773,7 @@ static int check_xattrs(struct bch_fs *c)
        bch2_trans_exit(&trans);
 
        if (ret)
-               bch_err(c, "error from check_xattrs(): %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
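
Every hunk in fsck.c above makes the same substitution: a hand-typed function name in the error message becomes the compiler-provided __func__, which cannot drift out of sync when a function is renamed. A minimal, self-contained sketch of the pattern (log_err() is an invented stand-in for bch_err(), which also takes a struct bch_fs *):

    #include <stdio.h>

    /* Invented stand-in for bch_err(): just prints to stderr. */
    #define log_err(fmt, ...) fprintf(stderr, fmt "\n", __VA_ARGS__)

    static int check_demo(void)
    {
            int ret = -22;

            /* __func__ expands to "check_demo" here, so renaming the
             * function automatically updates the message: */
            if (ret)
                    log_err("%s(): error %d", __func__, ret);
            return ret;
    }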
 
index 1a0d2608c058662d1f8d1238f5093aa22fd7ce11..a91465ef3abb4b309e9a5bf8d4fca1bd3f4daaf2 100644 (file)
@@ -733,7 +733,6 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
         * iterator:
         */
        bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
-                            BTREE_ITER_NOT_EXTENTS|
                             BTREE_ITER_INTENT);
 
        while (1) {
@@ -756,6 +755,14 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
                bkey_init(&delete.k);
                delete.k.p = iter.pos;
 
+               if (iter.flags & BTREE_ITER_IS_EXTENTS) {
+                       bch2_key_resize(&delete.k, k.k->p.offset - iter.pos.offset);
+
+                       ret = bch2_extent_trim_atomic(trans, &iter, &delete);
+                       if (ret)
+                               goto err;
+               }
+
                ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
                      bch2_trans_commit(trans, NULL, NULL,
                                        BTREE_INSERT_NOFAIL);
index 2915f4f96f4bb3c2fc8f2190ffad7fe86a8d2408..be016043aa8218e68f866ccb5893383cab83abbf 100644 (file)
@@ -12,26 +12,26 @@ int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct pr
 int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_inode (struct bkey_ops) {                \
+#define bch2_bkey_ops_inode ((struct bkey_ops) {       \
        .key_invalid    = bch2_inode_invalid,           \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
-#define bch2_bkey_ops_inode_v2 (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_v2_invalid,        \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
-#define bch2_bkey_ops_inode_v3 (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_v3_invalid,        \
        .val_to_text    = bch2_inode_to_text,           \
        .trans_trigger  = bch2_trans_mark_inode,        \
        .atomic_trigger = bch2_mark_inode,              \
-}
+})
 
 static inline bool bkey_is_inode(const struct bkey *k)
 {
@@ -44,10 +44,10 @@ int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c,
                                  int, struct printbuf *);
 void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_inode_generation (struct bkey_ops) {     \
+#define bch2_bkey_ops_inode_generation ((struct bkey_ops) {    \
        .key_invalid    = bch2_inode_generation_invalid,        \
        .val_to_text    = bch2_inode_generation_to_text,        \
-}
+})
 
 #if 0
 typedef struct {
@@ -78,7 +78,7 @@ struct bkey_inode_buf {
 #define x(_name, _bits)                + 8 + _bits / 8
        u8              _pad[0 + BCH_INODE_FIELDS_v3()];
 #undef  x
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 
 void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
 int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
@@ -157,6 +157,8 @@ io_opts(struct bch_fs *c, struct bch_inode_unpacked *inode)
        struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
 
        bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
+       if (opts.nocow)
+               opts.compression = opts.background_compression = opts.data_checksum = opts.erasure_code;
        return opts;
 }
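
The only change to the bch2_bkey_ops_* macros earlier in this file is an outer pair of parentheses around each compound literal: the expansion becomes a single parenthesized expression, the usual defensive style for macros that expand to a value, so it composes safely wherever an expression is expected. A small self-contained illustration (struct and macro names invented):

    struct demo_ops { int key_invalid; };

    /* Expands to one parenthesized expression: */
    #define DEMO_OPS ((struct demo_ops) { .key_invalid = 1 })

    static int demo(void)
    {
            struct demo_ops ops = DEMO_OPS;  /* usable as an initializer... */

            return ops.key_invalid + DEMO_OPS.key_invalid;  /* ...and inline */
    }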
 
index 5971569e31336043ce4bd69dda9b8028b703b6df..0ff835e8d1b4fbc49bac8426d016098f0c57a27e 100644 (file)
@@ -16,6 +16,7 @@
 #include "checksum.h"
 #include "compress.h"
 #include "clock.h"
+#include "data_update.h"
 #include "debug.h"
 #include "disk_groups.h"
 #include "ec.h"
@@ -232,17 +233,71 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
        return ret;
 }
 
+static int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
+                                            struct btree_iter *extent_iter,
+                                            u64 new_i_size,
+                                            s64 i_sectors_delta)
+{
+       struct btree_iter iter;
+       struct bkey_s_c inode_k;
+       struct bkey_s_c_inode_v3 inode;
+       struct bkey_i_inode_v3 *new_inode;
+       int ret;
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
+                            SPOS(0,
+                                 extent_iter->pos.inode,
+                                 extent_iter->snapshot),
+                            BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+       inode_k = bch2_btree_iter_peek_slot(&iter);
+       ret = bkey_err(inode_k);
+       if (unlikely(ret))
+               goto err;
+
+       ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT;
+       if (unlikely(ret))
+               goto err;
+
+       if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) {
+               inode_k = bch2_inode_to_v3(trans, inode_k);
+               ret = bkey_err(inode_k);
+               if (unlikely(ret))
+                       goto err;
+       }
+
+       inode = bkey_s_c_to_inode_v3(inode_k);
+
+       new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k));
+       ret = PTR_ERR_OR_ZERO(new_inode);
+       if (unlikely(ret))
+               goto err;
+
+       bkey_reassemble(&new_inode->k_i, inode.s_c);
+
+       if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+           new_i_size > le64_to_cpu(inode.v->bi_size))
+               new_inode->v.bi_size = cpu_to_le64(new_i_size);
+
+       le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
+
+       new_inode->k.p.snapshot = iter.snapshot;
+
+       ret = bch2_trans_update(trans, &iter, &new_inode->k_i,
+                               BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+err:
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
+}
+
 int bch2_extent_update(struct btree_trans *trans,
                       subvol_inum inum,
                       struct btree_iter *iter,
                       struct bkey_i *k,
                       struct disk_reservation *disk_res,
-                      u64 *journal_seq,
                       u64 new_i_size,
                       s64 *i_sectors_delta_total,
                       bool check_enospc)
 {
-       struct btree_iter inode_iter = { NULL };
        struct bpos next_pos;
        bool usage_increasing;
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
@@ -262,7 +317,6 @@ int bch2_extent_update(struct btree_trans *trans,
        if (ret)
                return ret;
 
-       new_i_size = min(k->k.p.offset << 9, new_i_size);
        next_pos = k->k.p;
 
        ret = bch2_sum_sector_overwrites(trans, iter, k,
@@ -282,66 +336,149 @@ int bch2_extent_update(struct btree_trans *trans,
                        return ret;
        }
 
-       if (new_i_size || i_sectors_delta) {
-               struct bkey_s_c k;
-               struct bkey_s_c_inode_v3 inode;
-               struct bkey_i_inode_v3 *new_inode;
-               bool i_size_update;
+       /*
+        * Note:
+        * We always have to do an inode update - even when i_size/i_sectors
+        * aren't changing - for fsync to work properly; fsync relies on
+        * inode->bi_journal_seq which is updated by the trigger code:
+        */
+       ret =   bch2_extent_update_i_size_sectors(trans, iter,
+                                                 min(k->k.p.offset << 9, new_i_size),
+                                                 i_sectors_delta) ?:
+               bch2_trans_update(trans, iter, k, 0) ?:
+               bch2_trans_commit(trans, disk_res, NULL,
+                               BTREE_INSERT_NOCHECK_RW|
+                               BTREE_INSERT_NOFAIL);
+       if (unlikely(ret))
+               return ret;
 
-               bch2_trans_iter_init(trans, &inode_iter, BTREE_ID_inodes,
-                                    SPOS(0, inum.inum, iter->snapshot),
-                                    BTREE_ITER_INTENT|BTREE_ITER_CACHED);
-               k = bch2_btree_iter_peek_slot(&inode_iter);
-               ret = bkey_err(k);
-               if (unlikely(ret))
-                       goto err;
+       if (i_sectors_delta_total)
+               *i_sectors_delta_total += i_sectors_delta;
+       bch2_btree_iter_set_pos(iter, next_pos);
+       return 0;
+}
+
+/* Overwrites whatever was present with zeroes: */
+int bch2_extent_fallocate(struct btree_trans *trans,
+                         subvol_inum inum,
+                         struct btree_iter *iter,
+                         unsigned sectors,
+                         struct bch_io_opts opts,
+                         s64 *i_sectors_delta,
+                         struct write_point_specifier write_point)
+{
+       struct bch_fs *c = trans->c;
+       struct disk_reservation disk_res = { 0 };
+       struct closure cl;
+       struct open_buckets open_buckets;
+       struct bkey_s_c k;
+       struct bkey_buf old, new;
+       bool have_reservation = false;
+       bool unwritten = opts.nocow &&
+           c->sb.version >= bcachefs_metadata_version_unwritten_extents;
+       int ret;
+
+       bch2_bkey_buf_init(&old);
+       bch2_bkey_buf_init(&new);
+       closure_init_stack(&cl);
+       open_buckets.nr = 0;
+retry:
+       k = bch2_btree_iter_peek_slot(iter);
+       ret = bkey_err(k);
+       if (ret)
+               return ret;
 
-               ret = bkey_is_inode(k.k) ? 0 : -ENOENT;
+       sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
+
+       if (!have_reservation) {
+               unsigned new_replicas =
+                       max(0, (int) opts.data_replicas -
+                           (int) bch2_bkey_nr_ptrs_fully_allocated(k));
+               /*
+                * Get a disk reservation before (in the nocow case) calling
+                * into the allocator:
+                */
+               ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
                if (unlikely(ret))
-                       goto err;
+                       goto out;
 
-               if (unlikely(k.k->type != KEY_TYPE_inode_v3)) {
-                       k = bch2_inode_to_v3(trans, k);
-                       ret = bkey_err(k);
-                       if (unlikely(ret))
-                               goto err;
+               bch2_bkey_buf_reassemble(&old, c, k);
+       }
+
+       if (have_reservation) {
+               if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
+                       goto out;
+
+               bch2_key_resize(&new.k->k, sectors);
+       } else if (!unwritten) {
+               struct bkey_i_reservation *reservation;
+
+               bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
+               reservation = bkey_reservation_init(new.k);
+               reservation->k.p = iter->pos;
+               bch2_key_resize(&reservation->k, sectors);
+               reservation->v.nr_replicas = opts.data_replicas;
+       } else {
+               struct bkey_i_extent *e;
+               struct bch_devs_list devs_have;
+               struct write_point *wp;
+               struct bch_extent_ptr *ptr;
+
+               devs_have.nr = 0;
+
+               bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
+
+               e = bkey_extent_init(new.k);
+               e->k.p = iter->pos;
+
+               ret = bch2_alloc_sectors_start_trans(trans,
+                               opts.foreground_target,
+                               false,
+                               write_point,
+                               &devs_have,
+                               opts.data_replicas,
+                               opts.data_replicas,
+                               RESERVE_none, 0, &cl, &wp);
+               if (ret == -EAGAIN) {
+                       bch2_trans_unlock(trans);
+                       closure_sync(&cl);
+                       goto retry;
                }
+               if (ret)
+                       return ret;
 
-               inode = bkey_s_c_to_inode_v3(k);
-               i_size_update = !(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
-                       new_i_size > le64_to_cpu(inode.v->bi_size);
+               sectors = min(sectors, wp->sectors_free);
 
-               if (!i_sectors_delta && !i_size_update)
-                       goto no_inode_update;
+               bch2_key_resize(&e->k, sectors);
 
-               new_inode = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
-               ret = PTR_ERR_OR_ZERO(new_inode);
-               if (unlikely(ret))
-                       goto err;
+               bch2_open_bucket_get(c, wp, &open_buckets);
+               bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+               bch2_alloc_sectors_done(c, wp);
 
-               bkey_reassemble(&new_inode->k_i, k);
+               extent_for_each_ptr(extent_i_to_s(e), ptr)
+                       ptr->unwritten = true;
+       }
 
-               if (i_size_update)
-                       new_inode->v.bi_size = cpu_to_le64(new_i_size);
+       have_reservation = true;
 
-               le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
-               ret = bch2_trans_update(trans, &inode_iter, &new_inode->k_i, 0);
-               if (unlikely(ret))
-                       goto err;
+       ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
+                                0, i_sectors_delta, true);
+out:
+       if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
+               bch2_trans_unlock(trans);
+               closure_sync(&cl);
        }
-no_inode_update:
-       ret =   bch2_trans_update(trans, iter, k, 0) ?:
-               bch2_trans_commit(trans, disk_res, journal_seq,
-                               BTREE_INSERT_NOCHECK_RW|
-                               BTREE_INSERT_NOFAIL);
-       if (unlikely(ret))
-               goto err;
 
-       if (i_sectors_delta_total)
-               *i_sectors_delta_total += i_sectors_delta;
-       bch2_btree_iter_set_pos(iter, next_pos);
-err:
-       bch2_trans_iter_exit(trans, &inode_iter);
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+               bch2_trans_begin(trans);
+               goto retry;
+       }
+
+       bch2_open_buckets_put(c, &open_buckets);
+       bch2_disk_reservation_put(c, &disk_res);
+       bch2_bkey_buf_exit(&new, c);
+       bch2_bkey_buf_exit(&old, c);
+
        return ret;
 }
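
The rewritten commit sequence above chains its steps with GNU C's binary ?: operator: a ?: b evaluates to a when a is nonzero, so a chain of int-returning calls stops at the first nonzero error code. A toy, compilable version of the idiom (function names invented):

    static int step_a(void) { return 0;  }
    static int step_b(void) { return -5; }
    static int step_c(void) { return 0;  }

    static int chained(void)
    {
            /* step_c() never runs: step_b()'s -5 short-circuits the
             * rest of the chain (GCC/Clang extension). */
            return step_a() ?: step_b() ?: step_c();
    }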
 
@@ -394,8 +531,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
                bch2_cut_back(end_pos, &delete);
 
                ret = bch2_extent_update(trans, inum, iter, &delete,
-                               &disk_res, NULL,
-                               0, i_sectors_delta, false);
+                               &disk_res, 0, i_sectors_delta, false);
                bch2_disk_reservation_put(c, &disk_res);
        }
 
@@ -425,7 +561,7 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
        return ret;
 }
 
-int bch2_write_index_default(struct bch_write_op *op)
+static int bch2_write_index_default(struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
        struct bkey_buf sk;
@@ -462,7 +598,7 @@ int bch2_write_index_default(struct bch_write_op *op)
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
                ret = bch2_extent_update(&trans, inum, &iter, sk.k,
-                                        &op->res, op_journal_seq(op),
+                                        &op->res,
                                         op->new_i_size, &op->i_sectors_delta,
                                         op->flags & BCH_WRITE_CHECK_ENOSPC);
                bch2_trans_iter_exit(&trans, &iter);
@@ -488,7 +624,8 @@ int bch2_write_index_default(struct bch_write_op *op)
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                               enum bch_data_type type,
-                              const struct bkey_i *k)
+                              const struct bkey_i *k,
+                              bool nocow)
 {
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
        const struct bch_extent_ptr *ptr;
@@ -522,9 +659,11 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 
                n->c                    = c;
                n->dev                  = ptr->dev;
-               n->have_ioref           = bch2_dev_get_ioref(ca,
+               n->have_ioref           = nocow || bch2_dev_get_ioref(ca,
                                        type == BCH_DATA_btree ? READ : WRITE);
+               n->nocow                = nocow;
                n->submit_time          = local_clock();
+               n->inode_offset         = bkey_start_offset(&k->k);
                n->bio.bi_iter.bi_sector = ptr->offset;
 
                if (likely(n->have_ioref)) {
@@ -540,29 +679,22 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
        }
 }
 
-static void __bch2_write(struct closure *);
+static void __bch2_write(struct bch_write_op *);
 
 static void bch2_write_done(struct closure *cl)
 {
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
        struct bch_fs *c = op->c;
 
-       if (!op->error && (op->flags & BCH_WRITE_FLUSH))
-               op->error = bch2_journal_error(&c->journal);
-
        bch2_disk_reservation_put(c, &op->res);
        percpu_ref_put(&c->writes);
        bch2_keylist_free(&op->insert_keys, op->inline_keys);
 
        bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
 
-       if (op->end_io) {
-               EBUG_ON(cl->parent);
-               closure_debug_destroy(cl);
+       closure_debug_destroy(cl);
+       if (op->end_io)
                op->end_io(op);
-       } else {
-               closure_return(cl);
-       }
 }
 
 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
@@ -600,7 +732,7 @@ static void __bch2_write_index(struct bch_write_op *op)
        struct keylist *keys = &op->insert_keys;
        struct bkey_i *k;
        unsigned dev;
-       int ret;
+       int ret = 0;
 
        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                ret = bch2_write_drop_io_error_ptrs(op);
@@ -623,7 +755,10 @@ static void __bch2_write_index(struct bch_write_op *op)
 
        if (!bch2_keylist_empty(keys)) {
                u64 sectors_start = keylist_sectors(keys);
-               int ret = op->index_update_fn(op);
+
+               ret = !(op->flags & BCH_WRITE_MOVE)
+                       ? bch2_write_index_default(op)
+                       : bch2_data_update_index_update(op);
 
                BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
                BUG_ON(keylist_sectors(keys) && !ret);
@@ -631,9 +766,13 @@ static void __bch2_write_index(struct bch_write_op *op)
                op->written += sectors_start - keylist_sectors(keys);
 
                if (ret) {
-                       bch_err_inum_ratelimited(c, op->pos.inode,
-                               "write error while doing btree update: %s", bch2_err_str(ret));
-                       op->error = ret;
+                       struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
+
+                       bch_err_inum_offset_ratelimited(c,
+                               k->k.p.inode, k->k.p.offset << 9,
+                               "write error while doing btree update: %s",
+                               bch2_err_str(ret));
+                       goto err;
                }
        }
 out:
@@ -646,25 +785,45 @@ out:
 err:
        keys->top = keys->keys;
        op->error = ret;
+       op->flags |= BCH_WRITE_DONE;
        goto out;
 }
 
 static void bch2_write_index(struct closure *cl)
 {
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
-       struct bch_fs *c = op->c;
+       struct write_point *wp = op->wp;
+       struct workqueue_struct *wq = index_update_wq(op);
 
-       __bch2_write_index(op);
+       barrier();
+       op->btree_update_ready = true;
+       queue_work(wq, &wp->index_update_work);
+}
 
-       if (!(op->flags & BCH_WRITE_DONE)) {
-               continue_at(cl, __bch2_write, index_update_wq(op));
-       } else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
-               bch2_journal_flush_seq_async(&c->journal,
-                                            *op_journal_seq(op),
-                                            cl);
-               continue_at(cl, bch2_write_done, index_update_wq(op));
-       } else {
-               continue_at_nobarrier(cl, bch2_write_done, NULL);
+void bch2_write_point_do_index_updates(struct work_struct *work)
+{
+       struct write_point *wp =
+               container_of(work, struct write_point, index_update_work);
+       struct bch_write_op *op;
+
+       while (1) {
+               spin_lock(&wp->writes_lock);
+               op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
+               if (op && !op->btree_update_ready)
+                       op = NULL;
+               if (op)
+                       list_del(&op->wp_list);
+               spin_unlock(&wp->writes_lock);
+
+               if (!op)
+                       break;
+
+               __bch2_write_index(op);
+
+               if (!(op->flags & BCH_WRITE_DONE))
+                       __bch2_write(op);
+               else
+                       bch2_write_done(&op->cl);
        }
 }
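
bch2_write_point_do_index_updates() above drains a per-write-point list: pop one op under the spinlock, run the btree update with the lock dropped, repeat until the list is empty (the btree_update_ready check is specific to the real code and omitted here). The same pop-then-process shape as a self-contained toy, with a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>

    struct node { struct node *next; };

    static struct node *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void process(struct node *n) { (void)n; }

    static void drain(void)
    {
            for (;;) {
                    struct node *n;

                    pthread_mutex_lock(&lock);
                    n = head;
                    if (n)
                            head = n->next;
                    pthread_mutex_unlock(&lock);

                    if (!n)
                            break;
                    process(n);  /* runs without the lock held */
            }
    }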
 
@@ -679,13 +838,16 @@ static void bch2_write_endio(struct bio *bio)
 
        if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
                                    op->pos.inode,
-                                   op->pos.offset - bio_sectors(bio), /* XXX definitely wrong */
+                                   wbio->inode_offset << 9,
                                    "data write error: %s",
                                    bch2_blk_status_to_str(bio->bi_status))) {
                set_bit(wbio->dev, op->failed.d);
                op->flags |= BCH_WRITE_IO_ERROR;
        }
 
+       if (wbio->nocow)
+               set_bit(wbio->dev, op->devs_need_flush->d);
+
        if (wbio->have_ioref) {
                bch2_latency_acct(ca, wbio->submit_time, WRITE);
                percpu_ref_put(&ca->io_ref);
@@ -697,12 +859,12 @@ static void bch2_write_endio(struct bio *bio)
        if (wbio->put_bio)
                bio_put(bio);
 
-       if (parent)
+       if (parent) {
                bio_endio(&parent->bio);
-       else if (!(op->flags & BCH_WRITE_SKIP_CLOSURE_PUT))
-               closure_put(cl);
-       else
-               continue_at_nobarrier(cl, bch2_write_index, index_update_wq(op));
+               return;
+       }
+
+       closure_put(cl);
 }
 
 static void init_append_extent(struct bch_write_op *op,
@@ -1109,19 +1271,319 @@ err:
        return ret;
 }
 
-static void __bch2_write(struct closure *cl)
+static bool bch2_extent_is_writeable(struct bch_write_op *op,
+                                    struct bkey_s_c k)
+{
+       struct bch_fs *c = op->c;
+       struct bkey_s_c_extent e;
+       struct extent_ptr_decoded p;
+       const union bch_extent_entry *entry;
+       unsigned replicas = 0;
+
+       if (k.k->type != KEY_TYPE_extent)
+               return false;
+
+       e = bkey_s_c_to_extent(k);
+       extent_for_each_ptr_decode(e, p, entry) {
+               if (p.crc.csum_type ||
+                   crc_is_compressed(p.crc) ||
+                   p.has_ec)
+                       return false;
+
+               replicas += bch2_extent_ptr_durability(c, &p);
+       }
+
+       return replicas >= op->opts.data_replicas;
+}
+
+static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
+{
+       struct bch_fs *c = op->c;
+       const struct bch_extent_ptr *ptr;
+       struct bkey_i *k;
+
+       for_each_keylist_key(&op->insert_keys, k) {
+               struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
+
+               bkey_for_each_ptr(ptrs, ptr)
+                       bch2_bucket_nocow_unlock(&c->nocow_locks,
+                                              PTR_BUCKET_POS(c, ptr),
+                                              BUCKET_NOCOW_LOCK_UPDATE);
+       }
+}
+
+static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
+                                                 struct btree_iter *iter,
+                                                 struct bkey_i *orig,
+                                                 struct bkey_s_c k,
+                                                 u64 new_i_size)
+{
+       struct bkey_i *new;
+       struct bkey_ptrs ptrs;
+       struct bch_extent_ptr *ptr;
+       int ret;
+
+       if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
+               /* trace this */
+               return 0;
+       }
+
+       new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+       ret = PTR_ERR_OR_ZERO(new);
+       if (ret)
+               return ret;
+
+       bkey_reassemble(new, k);
+
+       bch2_cut_front(bkey_start_pos(&orig->k), new);
+       bch2_cut_back(orig->k.p, new);
+
+       ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+       bkey_for_each_ptr(ptrs, ptr)
+               ptr->unwritten = 0;
+
+       /*
+        * Note that we're not calling bch2_subvol_get_snapshot() in this path -
+        * that was done when we kicked off the write, and here it's important
+        * that we update the extent that we wrote to - even if a snapshot has
+        * since been created. The write is still outstanding, so we're ok
+        * w.r.t. snapshot atomicity:
+        */
+       return  bch2_extent_update_i_size_sectors(trans, iter,
+                                       min(new->k.p.offset << 9, new_i_size), 0) ?:
+               bch2_trans_update(trans, iter, new,
+                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+}
+
+static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
+{
+       struct bch_fs *c = op->c;
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_i *orig;
+       struct bkey_s_c k;
+       int ret;
+
+       bch2_trans_init(&trans, c, 0, 0);
+
+       for_each_keylist_key(&op->insert_keys, orig) {
+               ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
+                                    bkey_start_pos(&orig->k),
+                                    BTREE_ITER_INTENT, k,
+                                    NULL, NULL, BTREE_INSERT_NOFAIL, ({
+                       if (bkey_cmp(bkey_start_pos(k.k), orig->k.p) >= 0)
+                               break;
+
+                       bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
+               }));
+
+               if (ret) {
+                       struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
+
+                       bch_err_inum_offset_ratelimited(c,
+                               k->k.p.inode, k->k.p.offset << 9,
+                               "write error while doing btree update: %s",
+                               bch2_err_str(ret));
+                       op->error = ret;
+                       break;
+               }
+       }
+
+       bch2_trans_exit(&trans);
+}
+
+static void __bch2_nocow_write_done(struct bch_write_op *op)
+{
+       bch2_nocow_write_unlock(op);
+
+       if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
+               op->error = -EIO;
+       } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
+               bch2_nocow_write_convert_unwritten(op);
+}
+
+static void bch2_nocow_write_done(struct closure *cl)
 {
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+
+       __bch2_nocow_write_done(op);
+       bch2_write_done(cl);
+}
+
+static void bch2_nocow_write(struct bch_write_op *op)
+{
+       struct bch_fs *c = op->c;
+       struct btree_trans trans;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct bkey_ptrs_c ptrs;
+       const struct bch_extent_ptr *ptr, *ptr2;
+       u32 snapshot;
+       int ret;
+
+       if (op->flags & BCH_WRITE_MOVE)
+               return;
+
+       bch2_trans_init(&trans, c, 0, 0);
+retry:
+       bch2_trans_begin(&trans);
+
+       ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot);
+       if (unlikely(ret))
+               goto err;
+
+       bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+                            SPOS(op->pos.inode, op->pos.offset, snapshot),
+                            BTREE_ITER_SLOTS);
+       while (1) {
+               struct bio *bio = &op->wbio.bio;
+
+               k = bch2_btree_iter_peek_slot(&iter);
+               ret = bkey_err(k);
+               if (ret)
+                       break;
+
+               /* fall back to normal cow write path? */
+               if (unlikely(k.k->p.snapshot != snapshot ||
+                            !bch2_extent_is_writeable(op, k)))
+                       break;
+
+               if (bch2_keylist_realloc(&op->insert_keys,
+                                       op->inline_keys,
+                                       ARRAY_SIZE(op->inline_keys),
+                                       k.k->u64s))
+                       break;
+
+               /* Get iorefs before dropping btree locks: */
+               ptrs = bch2_bkey_ptrs_c(k);
+               bkey_for_each_ptr(ptrs, ptr)
+                       if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
+                               goto err_get_ioref;
+
+               /* Unlock before taking nocow locks, doing IO: */
+               bkey_reassemble(op->insert_keys.top, k);
+               bch2_trans_unlock(&trans);
+
+               bch2_cut_front(op->pos, op->insert_keys.top);
+               bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
+
+               ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(op->insert_keys.top));
+               bkey_for_each_ptr(ptrs, ptr) {
+                       bch2_bucket_nocow_lock(&c->nocow_locks,
+                                              PTR_BUCKET_POS(c, ptr),
+                                              BUCKET_NOCOW_LOCK_UPDATE);
+                       if (unlikely(ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+                               goto err_bucket_stale;
+
+                       if (ptr->unwritten)
+                               op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
+               }
+
+               bio = &op->wbio.bio;
+               if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
+                       bio = bio_split(bio, k.k->p.offset - op->pos.offset,
+                                       GFP_KERNEL, &c->bio_write);
+                       wbio_init(bio)->put_bio = true;
+                       bio->bi_opf = op->wbio.bio.bi_opf;
+               } else {
+                       op->flags |= BCH_WRITE_DONE;
+               }
+
+               op->pos.offset += bio_sectors(bio);
+               op->written += bio_sectors(bio);
+
+               bio->bi_end_io  = bch2_write_endio;
+               bio->bi_private = &op->cl;
+               bio->bi_opf |= REQ_OP_WRITE;
+               closure_get(&op->cl);
+               bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
+                                         op->insert_keys.top, true);
+
+               bch2_keylist_push(&op->insert_keys);
+               if (op->flags & BCH_WRITE_DONE)
+                       break;
+               bch2_btree_iter_advance(&iter);
+       }
+out:
+       bch2_trans_iter_exit(&trans, &iter);
+err:
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               goto retry;
+
+       if (ret) {
+               bch_err_inum_offset_ratelimited(c,
+                               op->pos.inode,
+                               op->pos.offset << 9,
+                               "%s: btree lookup error %s",
+                               __func__, bch2_err_str(ret));
+               op->error = ret;
+               op->flags |= BCH_WRITE_DONE;
+       }
+
+       bch2_trans_exit(&trans);
+
+       /* fallback to cow write path? */
+       if (!(op->flags & BCH_WRITE_DONE)) {
+               closure_sync(&op->cl);
+               __bch2_nocow_write_done(op);
+               op->insert_keys.top = op->insert_keys.keys;
+       } else if (op->flags & BCH_WRITE_SYNC) {
+               closure_sync(&op->cl);
+               bch2_nocow_write_done(&op->cl);
+       } else {
+               /*
+                * XXX
+                * needs to run out of process context because ei_quota_lock is
+                * a mutex
+                */
+               continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
+       }
+       return;
+err_get_ioref:
+       bkey_for_each_ptr(ptrs, ptr2) {
+               if (ptr2 == ptr)
+                       break;
+
+               percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+       }
+
+       /* Fall back to COW path: */
+       goto out;
+err_bucket_stale:
+       bkey_for_each_ptr(ptrs, ptr2) {
+               bch2_bucket_nocow_unlock(&c->nocow_locks,
+                                        PTR_BUCKET_POS(c, ptr2),
+                                        BUCKET_NOCOW_LOCK_UPDATE);
+               if (ptr2 == ptr)
+                       break;
+       }
+
+       bkey_for_each_ptr(ptrs, ptr2)
+               percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+
+       /* We can retry this: */
+       ret = BCH_ERR_transaction_restart;
+       goto out;
+}
+
+static void __bch2_write(struct bch_write_op *op)
+{
        struct bch_fs *c = op->c;
-       struct write_point *wp;
+       struct write_point *wp = NULL;
        struct bio *bio = NULL;
-       bool skip_put = true;
        unsigned nofs_flags;
        int ret;
 
        nofs_flags = memalloc_nofs_save();
+
+       if (unlikely(op->opts.nocow)) {
+               bch2_nocow_write(op);
+               if (op->flags & BCH_WRITE_DONE)
+                       goto out_nofs_restore;
+       }
 again:
        memset(&op->failed, 0, sizeof(op->failed));
+       op->btree_update_ready = false;
 
        do {
                struct bkey_i *key_to_write;
@@ -1131,126 +1593,96 @@ again:
                /* +1 for possible cache device: */
                if (op->open_buckets.nr + op->nr_replicas + 1 >
                    ARRAY_SIZE(op->open_buckets.v))
-                       goto flush_io;
+                       break;
 
                if (bch2_keylist_realloc(&op->insert_keys,
                                        op->inline_keys,
                                        ARRAY_SIZE(op->inline_keys),
                                        BKEY_EXTENT_U64s_MAX))
-                       goto flush_io;
+                       break;
 
                /*
                 * The copygc thread is now global, which means it's no longer
                 * freeing up space on specific disks, which means that
                 * allocations for specific disks may hang arbitrarily long:
                 */
-               wp = bch2_alloc_sectors_start(c,
-                       op->target,
-                       op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
-                       op->write_point,
-                       &op->devs_have,
-                       op->nr_replicas,
-                       op->nr_replicas_required,
-                       op->alloc_reserve,
-                       op->flags,
-                       (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
-                                     BCH_WRITE_ONLY_SPECIFIED_DEVS)) ? NULL : cl);
-               EBUG_ON(!wp);
-
-               if (IS_ERR(wp)) {
-                       if (unlikely(wp != ERR_PTR(-EAGAIN))) {
-                               ret = PTR_ERR(wp);
-                               goto err;
+               ret = bch2_trans_do(c, NULL, NULL, 0,
+                       bch2_alloc_sectors_start_trans(&trans,
+                               op->target,
+                               op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
+                               op->write_point,
+                               &op->devs_have,
+                               op->nr_replicas,
+                               op->nr_replicas_required,
+                               op->alloc_reserve,
+                               op->flags,
+                               (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
+                                             BCH_WRITE_ONLY_SPECIFIED_DEVS))
+                               ? NULL : &op->cl, &wp));
+               if (unlikely(ret)) {
+                       if (unlikely(ret != -EAGAIN)) {
+                               op->error = ret;
+                               op->flags |= BCH_WRITE_DONE;
                        }
 
-                       goto flush_io;
+                       break;
                }
 
-               /*
-                * It's possible for the allocator to fail, put us on the
-                * freelist waitlist, and then succeed in one of various retry
-                * paths: if that happens, we need to disable the skip_put
-                * optimization because otherwise there won't necessarily be a
-                * barrier before we free the bch_write_op:
-                */
-               if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
-                       skip_put = false;
-
                bch2_open_bucket_get(c, wp, &op->open_buckets);
                ret = bch2_write_extent(op, wp, &bio);
-               bch2_alloc_sectors_done(c, wp);
 
-               if (ret < 0)
-                       goto err;
+               bch2_alloc_sectors_done(c, wp);
 
-               if (ret) {
-                       skip_put = false;
-               } else {
-                       /*
-                        * for the skip_put optimization this has to be set
-                        * before we submit the bio:
-                        */
+               if (ret < 0) {
+                       op->error = ret;
                        op->flags |= BCH_WRITE_DONE;
+                       break;
                }
 
+               if (!ret)
+                       op->flags |= BCH_WRITE_DONE;
+
                bio->bi_end_io  = bch2_write_endio;
                bio->bi_private = &op->cl;
                bio->bi_opf |= REQ_OP_WRITE;
 
-               if (!skip_put)
-                       closure_get(bio->bi_private);
-               else
-                       op->flags |= BCH_WRITE_SKIP_CLOSURE_PUT;
+               closure_get(bio->bi_private);
 
                key_to_write = (void *) (op->insert_keys.keys_p +
                                         key_to_write_offset);
 
                bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
-                                         key_to_write);
+                                         key_to_write, false);
        } while (ret);
 
-       if (!skip_put)
-               continue_at(cl, bch2_write_index, index_update_wq(op));
-out:
-       memalloc_nofs_restore(nofs_flags);
-       return;
-err:
-       op->error = ret;
-       op->flags |= BCH_WRITE_DONE;
-
-       continue_at(cl, bch2_write_index, index_update_wq(op));
-       goto out;
-flush_io:
        /*
-        * If the write can't all be submitted at once, we generally want to
-        * block synchronously as that signals backpressure to the caller.
+        * Sync or no?
         *
-        * However, if we're running out of a workqueue, we can't block here
-        * because we'll be blocking other work items from completing:
+        * If we're running asynchronously, we may still want to block
+        * synchronously here if we weren't able to submit all of the IO at
+        * once, as that signals backpressure to the caller.
         */
-       if (current->flags & PF_WQ_WORKER) {
-               continue_at(cl, bch2_write_index, index_update_wq(op));
-               goto out;
-       }
-
-       closure_sync(cl);
-
-       if (!bch2_keylist_empty(&op->insert_keys)) {
+       if ((op->flags & BCH_WRITE_SYNC) || !(op->flags & BCH_WRITE_DONE)) {
+               closure_sync(&op->cl);
                __bch2_write_index(op);
 
-               if (op->error) {
-                       op->flags |= BCH_WRITE_DONE;
-                       continue_at_nobarrier(cl, bch2_write_done, NULL);
-                       goto out;
-               }
-       }
+               if (!(op->flags & BCH_WRITE_DONE))
+                       goto again;
+               bch2_write_done(&op->cl);
+       } else {
+               spin_lock(&wp->writes_lock);
+               op->wp = wp;
+               list_add_tail(&op->wp_list, &wp->writes);
+               spin_unlock(&wp->writes_lock);
 
-       goto again;
+               continue_at(&op->cl, bch2_write_index, NULL);
+       }
+out_nofs_restore:
+       memalloc_nofs_restore(nofs_flags);
 }
 
 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
 {
-       struct closure *cl = &op->cl;
        struct bio *bio = &op->wbio.bio;
        struct bvec_iter iter;
        struct bkey_i_inline_data *id;
@@ -1287,8 +1719,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
        op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
        op->flags |= BCH_WRITE_DONE;
 
-       continue_at_nobarrier(cl, bch2_write_index, NULL);
-       return;
+       __bch2_write_index(op);
 err:
        bch2_write_done(&op->cl);
 }
@@ -1316,6 +1747,7 @@ void bch2_write(struct closure *cl)
        struct bch_fs *c = op->c;
        unsigned data_len;
 
+       EBUG_ON(op->cl.parent);
        BUG_ON(!op->nr_replicas);
        BUG_ON(!op->write_point.v);
        BUG_ON(!bkey_cmp(op->pos, POS_MAX));
@@ -1325,8 +1757,10 @@ void bch2_write(struct closure *cl)
        wbio_init(bio)->put_bio = false;
 
        if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
-               bch_err_inum_ratelimited(c, op->pos.inode,
-                                        "misaligned write");
+               bch_err_inum_offset_ratelimited(c,
+                       op->pos.inode,
+                       op->pos.offset << 9,
+                       "misaligned write");
                op->error = -EIO;
                goto err;
        }
@@ -1349,24 +1783,19 @@ void bch2_write(struct closure *cl)
                return;
        }
 
-       continue_at_nobarrier(cl, __bch2_write, NULL);
+       __bch2_write(op);
        return;
 err:
        bch2_disk_reservation_put(c, &op->res);
 
-       if (op->end_io) {
-               EBUG_ON(cl->parent);
-               closure_debug_destroy(cl);
+       closure_debug_destroy(&op->cl);
+       if (op->end_io)
                op->end_io(op);
-       } else {
-               closure_return(cl);
-       }
 }
 
 /* Cache promotion on read */
 
 struct promote_op {
-       struct closure          cl;
        struct rcu_head         rcu;
        u64                     start_time;
 
@@ -1397,6 +1826,9 @@ static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
        if (bch2_bkey_has_target(c, k, opts.promote_target))
                return false;
 
+       if (bkey_extent_is_unwritten(k))
+               return false;
+
        if (bch2_target_congested(c, opts.promote_target)) {
                /* XXX trace this */
                return false;
@@ -1420,10 +1852,10 @@ static void promote_free(struct bch_fs *c, struct promote_op *op)
        kfree_rcu(op, rcu);
 }
 
-static void promote_done(struct closure *cl)
+static void promote_done(struct bch_write_op *wop)
 {
        struct promote_op *op =
-               container_of(cl, struct promote_op, cl);
+               container_of(wop, struct promote_op, write.op);
        struct bch_fs *c = op->write.op.c;
 
        bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
@@ -1435,7 +1867,6 @@ static void promote_done(struct closure *cl)
 
 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 {
-       struct closure *cl = &op->cl;
        struct bio *bio = &op->write.op.wbio.bio;
 
        trace_and_count(op->write.op.c, read_promote, &rbio->bio);
@@ -1448,9 +1879,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
               sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
        swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
 
-       closure_init(cl, NULL);
-       bch2_data_update_read_done(&op->write, rbio->pick.crc, cl);
-       closure_return_with_destructor(cl, promote_done);
+       bch2_data_update_read_done(&op->write, rbio->pick.crc);
 }
 
 static struct promote_op *__promote_alloc(struct bch_fs *c,
@@ -1515,6 +1944,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
                        },
                        btree_id, k);
        BUG_ON(ret);
+       op->write.op.end_io = promote_done;
 
        return op;
 err:
@@ -1914,20 +2344,25 @@ csum_err:
                goto out;
        }
 
-       bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector,
+       bch_err_inum_offset_ratelimited(ca,
+               rbio->read_pos.inode,
+               rbio->read_pos.offset << 9,
                "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
                rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
                csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
+       bch2_io_error(ca);
        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
        goto out;
 decompression_err:
-       bch_err_inum_ratelimited(c, rbio->read_pos.inode,
-                                "decompression error");
+       bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+                                       rbio->read_pos.offset << 9,
+                                       "decompression error");
        bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
        goto out;
 decrypt_err:
-       bch_err_inum_ratelimited(c, rbio->read_pos.inode,
-                                "decrypt error");
+       bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+                                       rbio->read_pos.offset << 9,
+                                       "decrypt error");
        bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
        goto out;
 }
@@ -2002,7 +2437,9 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
 
        if (k.k->type != KEY_TYPE_reflink_v &&
            k.k->type != KEY_TYPE_indirect_inline_data) {
-               bch_err_inum_ratelimited(trans->c, orig_k->k->k.p.inode,
+               bch_err_inum_offset_ratelimited(trans->c,
+                       orig_k->k->k.p.inode,
+                       orig_k->k->k.p.offset << 9,
                        "%llu len %u points to nonexistent indirect extent %llu",
                        orig_k->k->k.p.offset,
                        orig_k->k->k.size,
@@ -2088,8 +2525,9 @@ retry_pick:
                goto hole;
 
        if (pick_ret < 0) {
-               bch_err_inum_ratelimited(c, k.k->p.inode,
-                                        "no device to read from");
+               bch_err_inum_offset_ratelimited(c,
+                               read_pos.inode, read_pos.offset << 9,
+                               "no device to read from");
                goto err;
        }
 
@@ -2268,8 +2706,10 @@ get_bio:
 
        if (!rbio->pick.idx) {
                if (!rbio->have_ioref) {
-                       bch_err_inum_ratelimited(c, k.k->p.inode,
-                                                "no device to read from");
+                       bch_err_inum_offset_ratelimited(c,
+                                       read_pos.inode,
+                                       read_pos.offset << 9,
+                                       "no device to read from");
                        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
                        goto out;
                }
@@ -2437,8 +2877,9 @@ err:
        bch2_bkey_buf_exit(&sk, c);
 
        if (ret) {
-               bch_err_inum_ratelimited(c, inum.inum,
-                                        "read error %i from btree lookup", ret);
+               bch_err_inum_offset_ratelimited(c, inum.inum,
+                                               bvec_iter.bi_sector << 9,
+                                               "read error %i from btree lookup", ret);
                rbio->bio.bi_status = BLK_STS_IOERR;
                bch2_rbio_done(rbio);
        }
@@ -2456,6 +2897,11 @@ void bch2_fs_io_exit(struct bch_fs *c)
 
 int bch2_fs_io_init(struct bch_fs *c)
 {
+       unsigned i;
+
+       for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
+               two_state_lock_init(&c->nocow_locks.l[i]);
+
        if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
                        BIOSET_NEED_BVECS) ||
            bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
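
The err_get_ioref path in bch2_nocow_write() above unwinds a partially acquired set of references by walking the same pointer list again and stopping at the one that failed. The pattern in a self-contained toy (get_ref()/put_ref() are invented):

    #include <stdbool.h>

    static bool get_ref(int i) { return i != 2; }  /* pretend #2 fails */
    static void put_ref(int i) { (void)i; }

    static int acquire_all(int nr)
    {
            int i;

            for (i = 0; i < nr; i++)
                    if (!get_ref(i))
                            goto err;
            return 0;
    err:
            /* Release only the refs taken before the failing one: */
            while (i--)
                    put_ref(i);
            return -1;
    }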
index 3ae31758a01ee8b86bc441ea493a96d8187efc4a..68e4d7642d4ee38fdca2e35fc0af46c295e29b7e 100644 (file)
@@ -18,7 +18,7 @@ void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
 void bch2_latency_acct(struct bch_dev *, u64, int);
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
-                              enum bch_data_type, const struct bkey_i *);
+                              enum bch_data_type, const struct bkey_i *, bool);
 
 #define BLK_STS_REMOVED                ((__force blk_status_t)128)
 
@@ -27,28 +27,21 @@ const char *bch2_blk_status_to_str(blk_status_t);
 enum bch_write_flags {
        BCH_WRITE_ALLOC_NOWAIT          = (1 << 0),
        BCH_WRITE_CACHED                = (1 << 1),
-       BCH_WRITE_FLUSH                 = (1 << 2),
-       BCH_WRITE_DATA_ENCODED          = (1 << 3),
-       BCH_WRITE_PAGES_STABLE          = (1 << 4),
-       BCH_WRITE_PAGES_OWNED           = (1 << 5),
-       BCH_WRITE_ONLY_SPECIFIED_DEVS   = (1 << 6),
-       BCH_WRITE_WROTE_DATA_INLINE     = (1 << 7),
-       BCH_WRITE_FROM_INTERNAL         = (1 << 8),
-       BCH_WRITE_CHECK_ENOSPC          = (1 << 9),
+       BCH_WRITE_DATA_ENCODED          = (1 << 2),
+       BCH_WRITE_PAGES_STABLE          = (1 << 3),
+       BCH_WRITE_PAGES_OWNED           = (1 << 4),
+       BCH_WRITE_ONLY_SPECIFIED_DEVS   = (1 << 5),
+       BCH_WRITE_WROTE_DATA_INLINE     = (1 << 6),
+       BCH_WRITE_CHECK_ENOSPC          = (1 << 7),
+       BCH_WRITE_SYNC                  = (1 << 8),
+       BCH_WRITE_MOVE                  = (1 << 9),
 
        /* Internal: */
-       BCH_WRITE_JOURNAL_SEQ_PTR       = (1 << 10),
-       BCH_WRITE_SKIP_CLOSURE_PUT      = (1 << 11),
-       BCH_WRITE_DONE                  = (1 << 12),
-       BCH_WRITE_IO_ERROR              = (1 << 13),
+       BCH_WRITE_DONE                  = (1 << 10),
+       BCH_WRITE_IO_ERROR              = (1 << 11),
+       BCH_WRITE_CONVERT_UNWRITTEN     = (1 << 12),
 };
 
-static inline u64 *op_journal_seq(struct bch_write_op *op)
-{
-       return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
-               ? op->journal_seq_p : &op->journal_seq;
-}
-
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
        return op->alloc_reserve == RESERVE_movinggc
@@ -60,14 +53,15 @@ int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
                               struct bkey_i *, bool *, s64 *, s64 *);
 int bch2_extent_update(struct btree_trans *, subvol_inum,
                       struct btree_iter *, struct bkey_i *,
-                      struct disk_reservation *, u64 *, u64, s64 *, bool);
+                      struct disk_reservation *, u64, s64 *, bool);
+int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
+                         unsigned, struct bch_io_opts, s64 *,
+                         struct write_point_specifier);
 
 int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
                   subvol_inum, u64, s64 *);
 int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
 
-int bch2_write_index_default(struct bch_write_op *);
-
 static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
                                      struct bch_io_opts opts)
 {
@@ -76,7 +70,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
        op->flags               = 0;
        op->written             = 0;
        op->error               = 0;
-       op->csum_type           = bch2_data_checksum_type(c, opts.data_checksum);
+       op->csum_type           = bch2_data_checksum_type(c, opts);
        op->compression_type    = bch2_compression_opt_to_type[opts.compression];
        op->nr_replicas         = 0;
        op->nr_replicas_required = c->opts.data_replicas_required;
@@ -91,14 +85,15 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
        op->version             = ZERO_VERSION;
        op->write_point         = (struct write_point_specifier) { 0 };
        op->res                 = (struct disk_reservation) { 0 };
-       op->journal_seq         = 0;
        op->new_i_size          = U64_MAX;
        op->i_sectors_delta     = 0;
-       op->index_update_fn     = bch2_write_index_default;
+       op->devs_need_flush     = NULL;
 }
 
 void bch2_write(struct closure *);
 
+void bch2_write_point_do_index_updates(struct work_struct *);
+
 static inline struct bch_write_bio *wbio_init(struct bio *bio)
 {
        struct bch_write_bio *wbio = to_wbio(bio);
index 78bff13d36f27cb46c6a28c5bcc9dd65cb0ecac6..4e5d31060b5760a5bc4b8dbc511bcd54d6070030 100644 (file)
@@ -87,6 +87,7 @@ struct bch_write_bio {
        struct bch_write_bio    *parent;
 
        u64                     submit_time;
+       u64                     inode_offset;
 
        struct bch_devs_list    failed;
        u8                      dev;
@@ -95,6 +96,7 @@ struct bch_write_bio {
                                bounce:1,
                                put_bio:1,
                                have_ioref:1,
+                               nocow:1,
                                used_mempool:1,
                                first_btree_write:1;
 
@@ -117,6 +119,7 @@ struct bch_write_op {
        unsigned                nr_replicas_required:4;
        unsigned                alloc_reserve:3;
        unsigned                incompressible:1;
+       unsigned                btree_update_ready:1;
 
        struct bch_devs_list    devs_have;
        u16                     target;
@@ -132,28 +135,27 @@ struct bch_write_op {
 
        struct write_point_specifier write_point;
 
+       struct write_point      *wp;
+       struct list_head        wp_list;
+
        struct disk_reservation res;
 
        struct open_buckets     open_buckets;
 
-       /*
-        * If caller wants to flush but hasn't passed us a journal_seq ptr, we
-        * still need to stash the journal_seq somewhere:
-        */
-       union {
-               u64                     *journal_seq_p;
-               u64                     journal_seq;
-       };
        u64                     new_i_size;
        s64                     i_sectors_delta;
 
-       int                     (*index_update_fn)(struct bch_write_op *);
-
        struct bch_devs_mask    failed;
 
        struct keylist          insert_keys;
        u64                     inline_keys[BKEY_EXTENT_U64s_MAX * 2];
 
+       /*
+        * Bitmask of devices that have had nocow writes issued to them since
+        * last flush:
+        */
+       struct bch_devs_mask    *devs_need_flush;
+
        /* Must be last: */
        struct bch_write_bio    wbio;
 };
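
An aside on the new field: the comment above describes devs_need_flush as a bitmask of devices that have taken nocow writes since the last flush, so a sync write knows which devices still need a cache flush. A minimal userspace sketch of that bookkeeping (the names and the fixed device count are illustrative, not from this commit):

    #include <stdio.h>

    #define MAX_DEVS 64

    struct devs_mask { unsigned long long bits; }; /* stand-in for bch_devs_mask */

    static void mark_nocow_write(struct devs_mask *m, unsigned dev)
    {
            m->bits |= 1ULL << dev;         /* device now has unflushed nocow data */
    }

    static void flush_devs(struct devs_mask *m)
    {
            for (unsigned dev = 0; dev < MAX_DEVS; dev++)
                    if (m->bits & (1ULL << dev))
                            printf("issuing cache flush to device %u\n", dev);
            m->bits = 0;                    /* everything clean until the next write */
    }

    int main(void)
    {
            struct devs_mask m = { 0 };

            mark_nocow_write(&m, 0);
            mark_nocow_write(&m, 3);
            flush_devs(&m);                 /* flushes devices 0 and 3 */
            flush_devs(&m);                 /* nothing left to do */
            return 0;
    }
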
index 9428f4233997b0072f7fd473cf45b8e6575b80d0..51d29a01b7b2c0d2979872a9cbdbc2bfd7b389c9 100644 (file)
@@ -29,8 +29,8 @@
  *
  * Synchronous updates are specified by passing a closure (@flush_cl) to
  * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will will wait on the journal
- * write to complete (via closure_wait()).
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
  *
  * If the index update wasn't synchronous, the journal entry will be
  * written out after 10 ms have elapsed, by default (the delay_ms field
index c4922c64065323ebfe4703c7d782a8d2c1acc4c1..fb7d0bfab9e3c25a4f1aacd9f322ce5816d3e17b 100644 (file)
@@ -1269,10 +1269,10 @@ int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
                        struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
 
                        if (!i->ptrs[ptr].csum_good)
-                               printk(KERN_ERR "bcachefs (%s) sector %llu: invalid journal checksum, seq %llu%s\n",
-                                      ca->name, i->ptrs[ptr].sector,
-                                      le64_to_cpu(i->j.seq),
-                                      i->csum_good ? " (had good copy on another device)" : "");
+                               bch_err_dev_offset(ca, i->ptrs[ptr].sector,
+                                                  "invalid journal checksum, seq %llu%s",
+                                                  le64_to_cpu(i->j.seq),
+                                                  i->csum_good ? " (had good copy on another device)" : "");
                }
 
                ret = jset_validate(c,
index 195799bb20bcbfc91f206f9e0f0c1fde9d615324..635efb7e8228b96fae848a917214590306ae1538 100644 (file)
@@ -17,7 +17,6 @@ static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys)
 {
        if (l->keys_p != inline_keys)
                kfree(l->keys_p);
-       bch2_keylist_init(l, inline_keys);
 }
 
 static inline void bch2_keylist_push(struct keylist *l)
index 3decb7b1dde23b9ac4cfa2e26af4fdd73f03e67d..925c29b49b867ae45d9b5baf08159e0fe9d53b53 100644 (file)
@@ -5,10 +5,10 @@
 int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_lru (struct bkey_ops) {  \
+#define bch2_bkey_ops_lru ((struct bkey_ops) { \
        .key_invalid    = bch2_lru_invalid,     \
        .val_to_text    = bch2_lru_to_text,     \
-}
+})
 
 int bch2_lru_delete(struct btree_trans *, u64, u64, u64, struct bkey_s_c);
 int bch2_lru_set(struct btree_trans *, u64, u64, u64 *);
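
This hunk, and the matching ones in quota.h, reflink.h, subvolume.h and xattr.h further down, wrap each compound-literal macro in an extra set of parentheses, standard macro hygiene that makes every expansion a single primary expression. A self-contained sketch of the pattern, with made-up ops:

    #include <stdio.h>

    struct bkey_ops {
            int (*key_invalid)(int);
    };

    static int demo_invalid(int k)
    {
            return k < 0;
    }

    /* parenthesized compound literal: the expansion is one primary expression */
    #define demo_ops ((struct bkey_ops) {           \
            .key_invalid    = demo_invalid,         \
    })

    int main(void)
    {
            struct bkey_ops ops = demo_ops;

            printf("key_invalid(-1) = %d\n", ops.key_invalid(-1));
            printf("key_invalid(7)  = %d\n", demo_ops.key_invalid(7));
            return 0;
    }
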
index 7486920475f0c6b2b909e014dcb10d79d8265570..700f847c395cc36304667e23c695343a53dc9895 100644 (file)
@@ -53,9 +53,8 @@ struct moving_io {
        struct bio_vec          bi_inline_vecs[0];
 };
 
-static void move_free(struct closure *cl)
+static void move_free(struct moving_io *io)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct moving_context *ctxt = io->write.ctxt;
        struct bch_fs *c = ctxt->c;
 
@@ -65,31 +64,30 @@ static void move_free(struct closure *cl)
        kfree(io);
 }
 
-static void move_write_done(struct closure *cl)
+static void move_write_done(struct bch_write_op *op)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       struct moving_io *io = container_of(op, struct moving_io, write.op);
        struct moving_context *ctxt = io->write.ctxt;
 
        if (io->write.op.error)
                ctxt->write_error = true;
 
        atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
-       closure_return_with_destructor(cl, move_free);
+       move_free(io);
+       closure_put(&ctxt->cl);
 }
 
-static void move_write(struct closure *cl)
+static void move_write(struct moving_io *io)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
-
        if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
-               closure_return_with_destructor(cl, move_free);
+               move_free(io);
                return;
        }
 
+       closure_get(&io->write.ctxt->cl);
        atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
 
-       bch2_data_update_read_done(&io->write, io->rbio.pick.crc, cl);
-       continue_at(cl, move_write_done, NULL);
+       bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
 }
 
 static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
@@ -121,7 +119,7 @@ static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *t
 
        while ((io = next_pending_write(ctxt))) {
                list_del(&io->list);
-               closure_call(&io->cl, move_write, NULL, &ctxt->cl);
+               move_write(io);
        }
 }
 
@@ -185,7 +183,7 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
        }
 }
 
-void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
 {
        memset(stats, 0, sizeof(*stats));
        scnprintf(stats->name, sizeof(stats->name), "%s", name);
@@ -263,6 +261,12 @@ static int bch2_move_extent(struct btree_trans *trans,
        if (!percpu_ref_tryget_live(&c->writes))
                return -EROFS;
 
+       /*
+        * Before memory allocations & taking nocow locks in
+        * bch2_data_update_init():
+        */
+       bch2_trans_unlock(trans);
+
        /* write path might have to decompress data: */
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
@@ -298,13 +302,23 @@ static int bch2_move_extent(struct btree_trans *trans,
 
        ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
                                    data_opts, btree_id, k);
-       if (ret)
+       if (ret && ret != -BCH_ERR_unwritten_extent_update)
                goto err_free_pages;
 
        io->write.ctxt = ctxt;
+       io->write.op.end_io = move_write_done;
 
        atomic64_inc(&ctxt->stats->keys_moved);
        atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+
+       if (ret == -BCH_ERR_unwritten_extent_update) {
+               bch2_update_unwritten_extent(trans, &io->write);
+               move_free(io);
+               return 0;
+       }
+
+       BUG_ON(ret);
+
        this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
        this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
        trace_move_extent_read(k.k);
@@ -500,6 +514,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
                 */
                bch2_bkey_buf_reassemble(&sk, c, k);
                k = bkey_i_to_s_c(sk.k);
+               bch2_trans_unlock(&trans);
 
                ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
                                        btree_id, k, data_opts);
@@ -574,6 +589,9 @@ static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       struct bch_backpointer bp;
+       u64 bp_offset = 0;
        int ret;
 
        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
@@ -587,24 +605,53 @@ again:
 
                if (a.v->gen == gen &&
                    a.v->dirty_sectors) {
-                       struct printbuf buf = PRINTBUF;
-
                        if (a.v->data_type == BCH_DATA_btree) {
                                bch2_trans_unlock(trans);
                                if (bch2_btree_interior_updates_flush(c))
                                        goto again;
+                               goto failed_to_evacuate;
                        }
-
-                       prt_str(&buf, "failed to evacuate bucket ");
-                       bch2_bkey_val_to_text(&buf, c, k);
-
-                       bch_err(c, "%s", buf.buf);
-                       printbuf_exit(&buf);
                }
        }
 
        bch2_trans_iter_exit(trans, &iter);
        return ret;
+failed_to_evacuate:
+       bch2_trans_iter_exit(trans, &iter);
+
+       prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
+       bch2_bkey_val_to_text(&buf, c, k);
+
+       while (1) {
+               bch2_trans_begin(trans);
+
+               ret = bch2_get_next_backpointer(trans, bucket, gen,
+                                               &bp_offset, &bp,
+                                               BTREE_ITER_CACHED);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
+               if (ret)
+                       break;
+               if (bp_offset == U64_MAX)
+                       break;
+
+               k = bch2_backpointer_get_key(trans, &iter,
+                                            bucket, bp_offset, bp);
+               ret = bkey_err(k);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
+               if (ret)
+                       break;
+               if (!k.k)
+                       continue;
+               prt_newline(&buf);
+               bch2_bkey_val_to_text(&buf, c, k);
+               bch2_trans_iter_exit(trans, &iter);
+       }
+
+       bch2_print_string_as_lines(KERN_ERR, buf.buf);
+       printbuf_exit(&buf);
+       return 0;
 }
 
 int __bch2_evacuate_bucket(struct moving_context *ctxt,
@@ -956,7 +1003,7 @@ int bch2_data_job(struct bch_fs *c,
 
        switch (op.op) {
        case BCH_DATA_OP_REREPLICATE:
-               bch_move_stats_init(stats, "rereplicate");
+               bch2_move_stats_init(stats, "rereplicate");
                stats->data_type = BCH_DATA_journal;
                ret = bch2_journal_flush_device_pins(&c->journal, -1);
 
@@ -980,7 +1027,7 @@ int bch2_data_job(struct bch_fs *c,
                if (op.migrate.dev >= c->sb.nr_devices)
                        return -EINVAL;
 
-               bch_move_stats_init(stats, "migrate");
+               bch2_move_stats_init(stats, "migrate");
                stats->data_type = BCH_DATA_journal;
                ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
 
@@ -1001,7 +1048,7 @@ int bch2_data_job(struct bch_fs *c,
                ret = bch2_replicas_gc2(c) ?: ret;
                break;
        case BCH_DATA_OP_REWRITE_OLD_NODES:
-               bch_move_stats_init(stats, "rewrite_old_nodes");
+               bch2_move_stats_init(stats, "rewrite_old_nodes");
                ret = bch2_scan_old_btree_nodes(c, stats);
                break;
        default:
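
The move path above is converted from closure_call()/continue_at() chaining to a plain end_io callback (op->end_io = move_write_done) plus an explicit closure_get()/closure_put() on the context around each in-flight write. A rough userspace model of that refcount-plus-callback shape, using C11 atomics; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct write_op;

    struct moving_context {
            atomic_int refs;                /* stands in for ctxt->cl's refcount */
    };

    struct write_op {
            struct moving_context *ctxt;
            void (*end_io)(struct write_op *);  /* like op->end_io = move_write_done */
    };

    static void ctxt_put(struct moving_context *ctxt)
    {
            if (atomic_fetch_sub(&ctxt->refs, 1) == 1)
                    printf("context idle: safe to tear down\n");
    }

    static void move_write_done(struct write_op *op)
    {
            struct moving_context *ctxt = op->ctxt;

            free(op);                       /* like move_free(io) */
            ctxt_put(ctxt);                 /* like closure_put(&ctxt->cl) */
    }

    static void move_write(struct write_op *op)
    {
            atomic_fetch_add(&op->ctxt->refs, 1);   /* closure_get() before I/O */
            op->end_io(op);                 /* real code: async I/O, then end_io */
    }

    int main(void)
    {
            struct moving_context ctxt = { .refs = 1 };
            struct write_op *op = malloc(sizeof(*op));

            if (!op)
                    return 1;
            op->ctxt = &ctxt;
            op->end_io = move_write_done;
            move_write(op);
            ctxt_put(&ctxt);                /* drop the initial reference */
            return 0;
    }
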
index c0fec69bbb6a1d6becca6bf6bdffc8eb32febc7e..b14f679f6904eb80bf1dd4d5079b515b8e2ccb3a 100644 (file)
@@ -60,8 +60,7 @@ int bch2_data_job(struct bch_fs *,
                  struct bch_move_stats *,
                  struct bch_ioctl_data);
 
-inline void bch_move_stats_init(struct bch_move_stats *stats,
-                               char *name);
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
 
 
 #endif /* _BCACHEFS_MOVE_H */
index 044eca879afced62016f63076c6fe47f39eba069..63bc692f550ec9b73e26fbbb908d5d6bedf1afd3 100644 (file)
@@ -102,7 +102,7 @@ static int bch2_copygc(struct bch_fs *c)
        };
        int ret = 0;
 
-       bch_move_stats_init(&move_stats, "copygc");
+       bch2_move_stats_init(&move_stats, "copygc");
 
        for_each_rw_member(ca, c, dev_idx)
                heap_size += ca->mi.nbuckets >> 7;
diff --git a/libbcachefs/nocow_locking.c b/libbcachefs/nocow_locking.c
new file mode 100644 (file)
index 0000000..54e8669
--- /dev/null
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "nocow_locking.h"
+#include "util.h"
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+                             struct bpos bucket, int flags)
+{
+       struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
+       two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+       u64 start_time = local_clock();
+
+       bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+       bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
+}
diff --git a/libbcachefs/nocow_locking.h b/libbcachefs/nocow_locking.h
new file mode 100644 (file)
index 0000000..09ab85a
--- /dev/null
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_NOCOW_LOCKING_H
+#define _BCACHEFS_NOCOW_LOCKING_H
+
+#include "bcachefs_format.h"
+#include "two_state_shared_lock.h"
+
+#include <linux/siphash.h>
+
+#define BUCKET_NOCOW_LOCKS             (1U << 10)
+
+struct bucket_nocow_lock_table {
+       siphash_key_t                   key;
+       two_state_lock_t                l[BUCKET_NOCOW_LOCKS];
+};
+
+#define BUCKET_NOCOW_LOCK_UPDATE       (1 << 0)
+
+static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+                                                 struct bpos bucket)
+{
+       u64 dev_bucket = bucket.inode << 56 | bucket.offset;
+       unsigned h = siphash_1u64(dev_bucket, &t->key);
+
+       return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
+}
+
+static inline bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t,
+                                              struct bpos bucket)
+{
+       two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+       return atomic_long_read(&l->v) != 0;
+}
+
+static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t,
+                                           struct bpos bucket, int flags)
+{
+       two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+       bch2_two_state_unlock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+}
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, struct bpos, int);
+
+static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+                                         struct bpos bucket, int flags)
+{
+       two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+       if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE))
+               __bch2_bucket_nocow_lock(t, bucket, flags);
+}
+
+#endif /* _BCACHEFS_NOCOW_LOCKING_H */
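
A rough userspace model of the scheme in this header: a bucket hashes into a fixed table of BUCKET_NOCOW_LOCKS shared locks, so the table stays bounded and a hash collision only adds contention, never (in this conservative design) a correctness problem. The hash below is an illustrative stand-in for the keyed siphash_1u64(); the lock itself is omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define NLOCKS (1U << 10)               /* BUCKET_NOCOW_LOCKS */

    /* illustrative stand-in for siphash_1u64(dev_bucket, &t->key) */
    static uint64_t mix(uint64_t x)
    {
            x ^= x >> 33;
            x *= 0xff51afd7ed558ccdULL;
            x ^= x >> 33;
            return x;
    }

    static unsigned lock_index(uint64_t dev, uint64_t bucket)
    {
            uint64_t dev_bucket = dev << 56 | bucket;

            return mix(dev_bucket) & (NLOCKS - 1);
    }

    int main(void)
    {
            /* distinct buckets usually land on distinct locks... */
            printf("bucket (0,1) -> lock %u\n", lock_index(0, 1));
            printf("bucket (0,2) -> lock %u\n", lock_index(0, 2));
            /* ...and when two buckets collide they simply share a lock,
               which costs contention but not correctness */
            return 0;
    }
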
index 5b8586ecb37431150a4321a73876f52691f8e768..ad0b13ebc1f9fb62a79995b4ce6217833ecc4394 100644 (file)
@@ -382,6 +382,13 @@ enum opt_type {
          OPT_BOOL(),                                                   \
          BCH2_NO_SB_OPT,                       false,                          \
          NULL,         NULL)                                           \
+       x(nocow,                        u8,                             \
+         OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE,            \
+         OPT_BOOL(),                                                   \
+         BCH_SB_NOCOW,                 false,                          \
+         NULL,         "Nocow mode: Writes will be done in place when possible.\n"\
+                       "Snapshots and reflink will still cause writes to be COW.\n"\
+                       "Implicitly disables data checksumming, compression and encryption")\
        x(fs_size,                      u64,                            \
          OPT_DEVICE,                                                   \
          OPT_UINT(0, S64_MAX),                                         \
index db817273652771e73943a22629122313df9cd79b..7f74c026e9dae910d4751e480b8bf572632240bf 100644 (file)
@@ -364,16 +364,16 @@ int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
 
        memset(&msgs, 0, sizeof(msgs));
 
+       for_each_set_qtype(c, i, q, qtypes) {
+               mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
+               if (!mq[i])
+                       return -ENOMEM;
+       }
+
        for_each_set_qtype(c, i, q, qtypes)
                mutex_lock_nested(&q->lock, i);
 
        for_each_set_qtype(c, i, q, qtypes) {
-               mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);
-               if (!mq[i]) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
                ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
                if (ret)
                        goto err;
@@ -416,18 +416,17 @@ int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
 
        memset(&msgs, 0, sizeof(msgs));
 
+       for_each_set_qtype(c, i, q, qtypes) {
+               src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
+               dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
+               if (!src_q[i] || !dst_q[i])
+                       return -ENOMEM;
+       }
+
        for_each_set_qtype(c, i, q, qtypes)
                mutex_lock_nested(&q->lock, i);
 
        for_each_set_qtype(c, i, q, qtypes) {
-               src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
-               dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);
-
-               if (!src_q[i] || !dst_q[i]) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
                ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
                                             dst_q[i]->c[Q_SPC].v + space,
                                             mode);
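
Both quota hunks make the same change: every genradix_ptr_alloc() is hoisted in front of the mutex_lock_nested() loop, so allocation can use GFP_KERNEL rather than GFP_NOFS and a failure returns before any lock is held, with no unwinding through err. A generic userspace sketch of that allocate-then-lock shape (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    static int update_counters(unsigned n)
    {
            long *v[8];

            if (n > 8)
                    return -1;

            /* allocate everything up front: a failure needs no lock unwinding */
            for (unsigned i = 0; i < n; i++) {
                    v[i] = malloc(sizeof(*v[i]));
                    if (!v[i]) {
                            while (i--)
                                    free(v[i]);
                            return -1;      /* -ENOMEM in the kernel version */
                    }
            }

            pthread_mutex_lock(&q_lock);
            for (unsigned i = 0; i < n; i++)
                    *v[i] = 0;              /* the real work: no allocation here */
            pthread_mutex_unlock(&q_lock);

            for (unsigned i = 0; i < n; i++)
                    free(v[i]);
            return 0;
    }

    int main(void)
    {
            printf("update_counters: %d\n", update_counters(3));
            return 0;
    }
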
index 8c67ae1da7c75806fff2ee4a22182bdd704799aa..59bed1148201c3dd73ea33672ec60af692880588 100644 (file)
@@ -10,10 +10,10 @@ extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
 int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_quota (struct bkey_ops) {                \
+#define bch2_bkey_ops_quota ((struct bkey_ops) {       \
        .key_invalid    = bch2_quota_invalid,           \
        .val_to_text    = bch2_quota_to_text,           \
-}
+})
 
 static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
 {
index 17b289b051f290bbce8fb6053ce20d07375f0218..4df981bd96df18966481ab05f76c68f2b18eea1c 100644 (file)
@@ -189,7 +189,7 @@ static int bch2_rebalance_thread(void *arg)
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();
 
-       bch_move_stats_init(&move_stats, "rebalance");
+       bch2_move_stats_init(&move_stats, "rebalance");
        while (!kthread_wait_freezable(r->enabled)) {
                cond_resched();
 
index ea7810a1797500c826ce1b40b162d7725e15d2b9..fdcd70e8eb1f6d97215dfbb45e869220bc73deb0 100644 (file)
@@ -1251,6 +1251,20 @@ use_clean:
                        goto err;
                bch_verbose(c, "done checking need_discard and freespace btrees");
 
+               if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+                       err = "error creating root snapshot node";
+                       ret = bch2_fs_initialize_subvolumes(c);
+                       if (ret)
+                               goto err;
+               }
+
+               bch_verbose(c, "reading snapshots table");
+               err = "error reading snapshots table";
+               ret = bch2_fs_snapshots_start(c);
+               if (ret)
+                       goto err;
+               bch_verbose(c, "reading snapshots done");
+
                set_bit(BCH_FS_MAY_GO_RW, &c->flags);
 
                bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
@@ -1299,7 +1313,6 @@ use_clean:
                bch_verbose(c, "done checking alloc to lru refs");
                set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
        } else {
-               set_bit(BCH_FS_MAY_GO_RW, &c->flags);
                set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
                set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
                set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
@@ -1309,6 +1322,22 @@ use_clean:
                if (c->opts.norecovery)
                        goto out;
 
+               if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+                       err = "error creating root snapshot node";
+                       ret = bch2_fs_initialize_subvolumes(c);
+                       if (ret)
+                               goto err;
+               }
+
+               bch_verbose(c, "reading snapshots table");
+               err = "error reading snapshots table";
+               ret = bch2_fs_snapshots_start(c);
+               if (ret)
+                       goto err;
+               bch_verbose(c, "reading snapshots done");
+
+               set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+
                bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
                err = "journal replay failed";
                ret = bch2_journal_replay(c);
@@ -1323,22 +1352,6 @@ use_clean:
        if (ret)
                goto err;
 
-       if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
-               bch2_fs_lazy_rw(c);
-
-               err = "error creating root snapshot node";
-               ret = bch2_fs_initialize_subvolumes(c);
-               if (ret)
-                       goto err;
-       }
-
-       bch_verbose(c, "reading snapshots table");
-       err = "error reading snapshots table";
-       ret = bch2_fs_snapshots_start(c);
-       if (ret)
-               goto err;
-       bch_verbose(c, "reading snapshots done");
-
        if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
                /* set bi_subvol on root inode */
                err = "error upgrade root inode for subvolumes";
@@ -1401,7 +1414,7 @@ use_clean:
            le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
                struct bch_move_stats stats;
 
-               bch_move_stats_init(&stats, "recovery");
+               bch2_move_stats_init(&stats, "recovery");
 
                bch_info(c, "scanning for old btree nodes");
                ret = bch2_fs_read_write(c);
@@ -1423,7 +1436,8 @@ out:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);
 
-       if (!c->opts.keep_journal) {
+       if (!c->opts.keep_journal &&
+           test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(c);
        }
@@ -1472,6 +1486,9 @@ int bch2_fs_initialize(struct bch_fs *c)
        mutex_unlock(&c->sb_lock);
 
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
+       set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
+       set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
+       set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
        set_bit(BCH_FS_MAY_GO_RW, &c->flags);
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
 
@@ -1536,8 +1553,7 @@ int bch2_fs_initialize(struct bch_fs *c)
                goto err;
        bch_verbose(c, "reading snapshots done");
 
-       bch2_inode_init(c, &root_inode, 0, 0,
-                       S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
+       bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
        root_inode.bi_inum      = BCACHEFS_ROOT_INO;
        root_inode.bi_subvol    = BCACHEFS_ROOT_SUBVOL;
        bch2_inode_pack(&packed_inode, &root_inode);
index d5c14bb2992d5d7fc4281a207140861a0cdefd1f..94f2b30597665cb1214e9dd2302d56ae8d7093c9 100644 (file)
@@ -255,6 +255,9 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
                if (bkey_cmp(iter->pos, end) >= 0)
                        break;
 
+               if (bkey_extent_is_unwritten(k))
+                       continue;
+
                if (bkey_extent_is_data(k.k))
                        return k;
        }
@@ -378,7 +381,7 @@ s64 bch2_remap_range(struct bch_fs *c,
                                    dst_end.offset - dst_iter.pos.offset));
 
                ret = bch2_extent_update(&trans, dst_inum, &dst_iter,
-                                        new_dst.k, &disk_res, NULL,
+                                        new_dst.k, &disk_res,
                                         new_i_size, i_sectors_delta,
                                         true);
                bch2_disk_reservation_put(c, &disk_res);
index f9848dc3eebbaeb770048d6c375d0a829a0f0d64..ce0012aa99c6a506a76aeb3132ceff05b6106e32 100644 (file)
@@ -8,13 +8,13 @@ void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
 bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-#define bch2_bkey_ops_reflink_p (struct bkey_ops) {            \
+#define bch2_bkey_ops_reflink_p ((struct bkey_ops) {           \
        .key_invalid    = bch2_reflink_p_invalid,               \
        .val_to_text    = bch2_reflink_p_to_text,               \
        .key_merge      = bch2_reflink_p_merge,                 \
        .trans_trigger  = bch2_trans_mark_reflink_p,            \
        .atomic_trigger = bch2_mark_reflink_p,                  \
-}
+})
 
 int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
                           int, struct printbuf *);
@@ -23,13 +23,13 @@ void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
 int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
                              struct bkey_s_c, struct bkey_i *, unsigned);
 
-#define bch2_bkey_ops_reflink_v (struct bkey_ops) {            \
+#define bch2_bkey_ops_reflink_v ((struct bkey_ops) {           \
        .key_invalid    = bch2_reflink_v_invalid,               \
        .val_to_text    = bch2_reflink_v_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .trans_trigger  = bch2_trans_mark_reflink_v,            \
        .atomic_trigger = bch2_mark_extent,                     \
-}
+})
 
 int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
                                      int, struct printbuf *);
@@ -40,11 +40,11 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
                              struct bkey_s_c, struct bkey_i *,
                              unsigned);
 
-#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) {        \
        .key_invalid    = bch2_indirect_inline_data_invalid,    \
        .val_to_text    = bch2_indirect_inline_data_to_text,    \
        .trans_trigger  = bch2_trans_mark_indirect_inline_data, \
-}
+})
 
 static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
 {
index 0535b1d3760edc1cdbc8284c44546f60f1ffb944..f12a35b3dbcf3b170dd66760e5b7944fad7fb88f 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_REPLICAS_TYPES_H
 #define _BCACHEFS_REPLICAS_TYPES_H
 
index 8c98bacca290b8301f421a16707eebf811e32126..1133783477e14424785688da513f82027615ae06 100644 (file)
@@ -158,6 +158,7 @@ static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
 
        for (i = 0; i < 2; i++) {
                int ret = snapshot_live(trans, child[i]);
+
                if (ret < 0)
                        return ret;
 
index 02a636644988a4ba51327c071b20906821cc8f93..c694c1c24483beeebb6f5534be511a21d7a3ba3e 100644 (file)
@@ -9,10 +9,10 @@ void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
                          int rw, struct printbuf *);
 
-#define bch2_bkey_ops_snapshot (struct bkey_ops) {             \
+#define bch2_bkey_ops_snapshot ((struct bkey_ops) {            \
        .key_invalid    = bch2_snapshot_invalid,                \
        .val_to_text    = bch2_snapshot_to_text,                \
-}
+})
 
 int bch2_mark_snapshot(struct btree_trans *, struct bkey_s_c,
                       struct bkey_s_c, unsigned);
@@ -109,10 +109,10 @@ int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c,
                           int rw, struct printbuf *);
 void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_subvolume (struct bkey_ops) {            \
+#define bch2_bkey_ops_subvolume ((struct bkey_ops) {           \
        .key_invalid    = bch2_subvolume_invalid,               \
        .val_to_text    = bch2_subvolume_to_text,               \
-}
+})
 
 int bch2_subvolume_get(struct btree_trans *, unsigned,
                       bool, int, struct bch_subvolume *);
index 8501adaff4c2f3286e1f42755732de93366e5817..3c83e9b9cb7b6b50248a441945aee545d683cf4d 100644 (file)
@@ -88,9 +88,10 @@ static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
 static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
                                         unsigned dev)
 {
-       BUG_ON(bch2_dev_list_has_dev(*devs, dev));
-       BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
-       devs->devs[devs->nr++] = dev;
+       if (!bch2_dev_list_has_dev(*devs, dev)) {
+               BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
+               devs->devs[devs->nr++] = dev;
+       }
 }
 
 static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
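
bch2_dev_list_add_dev() now treats a duplicate device as a no-op instead of a BUG_ON(), i.e. the device list behaves as a small set. The same idempotent add in a standalone sketch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct devs_list {
            unsigned nr;
            unsigned devs[16];
    };

    static bool list_has_dev(const struct devs_list *l, unsigned dev)
    {
            for (unsigned i = 0; i < l->nr; i++)
                    if (l->devs[i] == dev)
                            return true;
            return false;
    }

    static void list_add_dev(struct devs_list *l, unsigned dev)
    {
            if (!list_has_dev(l, dev)) {    /* adding twice is now a no-op */
                    assert(l->nr < 16);
                    l->devs[l->nr++] = dev;
            }
    }

    int main(void)
    {
            struct devs_list l = { 0 };

            list_add_dev(&l, 2);
            list_add_dev(&l, 2);
            printf("nr = %u\n", l.nr);      /* 1, not 2 */
            return 0;
    }
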
index 0f45aef78477326b1f10dd1aeae4fabe385097de..647d018b5ec99e423dde784010cdbf6fd5d57f63 100644 (file)
@@ -90,9 +90,9 @@ static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
        static struct attribute sysfs_##_name =                         \
                { .name = #_name, .mode = _mode }
 
-#define write_attribute(n)     __sysfs_attribute(n, S_IWUSR)
-#define read_attribute(n)      __sysfs_attribute(n, S_IRUGO)
-#define rw_attribute(n)                __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+#define write_attribute(n)     __sysfs_attribute(n, 0200)
+#define read_attribute(n)      __sysfs_attribute(n, 0444)
+#define rw_attribute(n)                __sysfs_attribute(n, 0644)
 
 #define sysfs_printf(file, fmt, ...)                                   \
 do {                                                                   \
@@ -184,7 +184,7 @@ read_attribute(io_latency_stats_read);
 read_attribute(io_latency_stats_write);
 read_attribute(congested);
 
-read_attribute(btree_avg_write_size);
+read_attribute(btree_write_stats);
 
 read_attribute(btree_cache_size);
 read_attribute(compression_stats);
@@ -228,13 +228,13 @@ write_attribute(perf_test);
 
 #define x(_name)                                               \
        static struct attribute sysfs_time_stat_##_name =               \
-               { .name = #_name, .mode = S_IRUGO };
+               { .name = #_name, .mode = 0444 };
        BCH_TIME_STATS()
 #undef x
 
 static struct attribute sysfs_state_rw = {
        .name = "state",
-       .mode = S_IRUGO
+       .mode = 0444,
 };
 
 static size_t bch2_btree_cache_size(struct bch_fs *c)
@@ -250,14 +250,6 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
        return ret;
 }
 
-static size_t bch2_btree_avg_write_size(struct bch_fs *c)
-{
-       u64 nr = atomic64_read(&c->btree_writes_nr);
-       u64 sectors = atomic64_read(&c->btree_writes_sectors);
-
-       return nr ? div64_u64(sectors, nr) : 0;
-}
-
 static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
 {
        long ret = 0;
@@ -396,7 +388,9 @@ SHOW(bch2_fs)
        sysfs_printf(internal_uuid, "%pU",      c->sb.uuid.b);
 
        sysfs_hprint(btree_cache_size,          bch2_btree_cache_size(c));
-       sysfs_hprint(btree_avg_write_size,      bch2_btree_avg_write_size(c));
+
+       if (attr == &sysfs_btree_write_stats)
+               bch2_btree_write_stats_to_text(out, c);
 
        sysfs_printf(btree_gc_periodic, "%u",   (int) c->btree_gc_periodic);
 
@@ -554,7 +548,7 @@ SYSFS_OPS(bch2_fs);
 struct attribute *bch2_fs_files[] = {
        &sysfs_minor,
        &sysfs_btree_cache_size,
-       &sysfs_btree_avg_write_size,
+       &sysfs_btree_write_stats,
 
        &sysfs_promote_whole_extents,
 
@@ -613,12 +607,14 @@ struct attribute *bch2_fs_counters_files[] = {
 SHOW(bch2_fs_internal)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
        return bch2_fs_to_text(out, &c->kobj, attr);
 }
 
 STORE(bch2_fs_internal)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
        return bch2_fs_store(&c->kobj, attr, buf, size);
 }
 SYSFS_OPS(bch2_fs_internal);
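
Throughout this file the symbolic S_IRUGO/S_IWUSR mode constants become plain octal literals, matching current kernel style (checkpatch flags the symbolic forms). The bits are identical; a quick check, spelling out S_IRUGO since userspace headers don't define it:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            /* kernel's S_IRUGO == S_IRUSR | S_IRGRP | S_IROTH */
            printf("read-all: %o\n", S_IRUSR | S_IRGRP | S_IROTH);            /* 444 */
            printf("rw-owner: %o\n", S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);  /* 644 */
            printf("w-owner:  %o\n", S_IWUSR);                                /* 200 */
            return 0;
    }
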
index d058861811189433886cb16b482e6589604f3025..43f974eb9b7e1fdd43b01066c85ee5da30075cbc 100644 (file)
@@ -46,7 +46,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
-               bch_err(c, "update error in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): update error in: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -55,7 +55,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error (first) in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -64,7 +64,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error (second) in test_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
                goto err;
        }
 err:
@@ -92,7 +92,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(&trans, &iter, &k.k_i, 0));
        if (ret) {
-               bch_err(c, "update error in test_delete_written: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 
@@ -103,7 +103,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
                bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(&trans, &iter, 0));
        if (ret) {
-               bch_err(c, "delete error in test_delete_written: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
                goto err;
        }
 err:
@@ -136,7 +136,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -202,7 +202,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_extents: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -269,7 +269,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_slots: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -342,7 +342,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
                ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                        NULL, NULL, 0);
                if (ret) {
-                       bch_err(c, "insert error in test_iterate_slots_extents: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -456,7 +456,7 @@ static int insert_test_extent(struct bch_fs *c,
        ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
                                NULL, NULL, 0);
        if (ret)
-               bch_err(c, "insert error in insert_test_extent: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
        return ret;
 }
 
@@ -555,7 +555,7 @@ static int test_snapshots(struct bch_fs *c, u64 nr)
 
        ret = test_snapshot_filter(c, snapids[0], snapids[1]);
        if (ret) {
-               bch_err(c, "err from test_snapshot_filter: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
                return ret;
        }
 
@@ -567,11 +567,8 @@ static int test_snapshots(struct bch_fs *c, u64 nr)
 static u64 test_rand(void)
 {
        u64 v;
-#if 0
-       v = prandom_u32();
-#else
+
        prandom_bytes(&v, sizeof(v));
-#endif
        return v;
 }
 
@@ -592,7 +589,7 @@ static int rand_insert(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
                if (ret) {
-                       bch_err(c, "error in rand_insert: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -628,7 +625,7 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
                if (ret) {
-                       bch_err(c, "error in rand_insert_multi: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -655,7 +652,7 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
                lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
                ret = bkey_err(k);
                if (ret) {
-                       bch_err(c, "error in rand_lookup: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -678,7 +675,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
        k = bch2_btree_iter_peek(iter);
        ret = bkey_err(k);
        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
+               bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
        if (ret)
                return ret;
 
@@ -708,7 +705,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        rand_mixed_trans(&trans, &iter, &cookie, i, rand));
                if (ret) {
-                       bch_err(c, "update error in rand_mixed: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -754,7 +751,7 @@ static int rand_delete(struct bch_fs *c, u64 nr)
                ret = commit_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
                if (ret) {
-                       bch_err(c, "error in rand_delete: %s", bch2_err_str(ret));
+                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
                        break;
                }
        }
@@ -786,7 +783,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
                        bch2_trans_update(&trans, &iter, &insert.k_i, 0);
                }));
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -805,7 +802,7 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
                                  SPOS(0, 0, U32_MAX), 0, k,
                0);
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -831,7 +828,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
                        bch2_trans_update(&trans, &iter, &u.k_i, 0);
                }));
        if (ret)
-               bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
 
        bch2_trans_exit(&trans);
        return ret;
@@ -845,7 +842,7 @@ static int seq_delete(struct bch_fs *c, u64 nr)
                                      SPOS(0, 0, U32_MAX), SPOS_MAX,
                                      0, NULL);
        if (ret)
-               bch_err(c, "error in seq_delete: %s", bch2_err_str(ret));
+               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
        return ret;
 }
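
The tests now build their error messages from __func__ rather than hard-coding each function's name, so the text can't drift after a rename. The pattern in miniature (log_err is a made-up helper, not from this commit):

    #include <stdio.h>

    #define log_err(fmt, ...) \
            fprintf(stderr, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)

    static int test_delete(void)
    {
            log_err("update error in: %s", "example");
            /* prints "test_delete(): update error in: example" */
            return 0;
    }

    int main(void)
    {
            return test_delete();
    }
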
 
diff --git a/libbcachefs/two_state_shared_lock.c b/libbcachefs/two_state_shared_lock.c
new file mode 100644 (file)
index 0000000..dc508d5
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "two_state_shared_lock.h"
+
+void bch2_two_state_unlock(two_state_lock_t *lock, int s)
+{
+       long i = s ? 1 : -1;
+
+       BUG_ON(atomic_long_read(&lock->v) == 0);
+
+       if (atomic_long_sub_return_release(i, &lock->v) == 0)
+               wake_up_all(&lock->wait);
+}
+
+bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
+{
+       long i = s ? 1 : -1;
+       long v = atomic_long_read(&lock->v), old;
+
+       do {
+               old = v;
+
+               if (i > 0 ? v < 0 : v > 0)
+                       return false;
+       } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
+                                       old, old + i)) != old);
+       return true;
+}
+
+void bch2_two_state_lock(two_state_lock_t *lock, int s)
+{
+       wait_event(lock->wait, bch2_two_state_trylock(lock, s));
+}
diff --git a/libbcachefs/two_state_shared_lock.h b/libbcachefs/two_state_shared_lock.h
new file mode 100644 (file)
index 0000000..1b4f108
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_TWO_STATE_LOCK_H
+#define _BCACHEFS_TWO_STATE_LOCK_H
+
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+/*
+ * Two-state lock - can be taken for add or block - both states are shared,
+ * like read side of rwsem, but conflict with other state:
+ */
+typedef struct {
+       atomic_long_t           v;
+       wait_queue_head_t       wait;
+} two_state_lock_t;
+
+static inline void two_state_lock_init(two_state_lock_t *lock)
+{
+       atomic_long_set(&lock->v, 0);
+       init_waitqueue_head(&lock->wait);
+}
+
+void bch2_two_state_unlock(two_state_lock_t *, int);
+bool bch2_two_state_trylock(two_state_lock_t *, int);
+void bch2_two_state_lock(two_state_lock_t *, int);
+
+#endif /* _BCACHEFS_TWO_STATE_LOCK_H */
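
A compilable userspace model of this lock's invariant: v > 0 means one or more holders in state 1, v < 0 means holders in state 0, the two states exclude each other, and holders within a state share. C11 atomics and a spin-wait stand in for the kernel's wait queue:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { atomic_long v; } two_state_lock_t;

    static bool two_state_trylock(two_state_lock_t *l, int s)
    {
            long i = s ? 1 : -1;
            long v = atomic_load(&l->v);

            do {
                    if (i > 0 ? v < 0 : v > 0)      /* other state held */
                            return false;
            } while (!atomic_compare_exchange_weak(&l->v, &v, v + i));
            return true;
    }

    static void two_state_lock(two_state_lock_t *l, int s)
    {
            while (!two_state_trylock(l, s))
                    sched_yield();          /* kernel version sleeps on lock->wait */
    }

    static void two_state_unlock(two_state_lock_t *l, int s)
    {
            atomic_fetch_sub(&l->v, s ? 1 : -1);
            /* kernel version does wake_up_all() when v reaches 0 */
    }

    int main(void)
    {
            two_state_lock_t l = { 0 };

            two_state_lock(&l, 1);
            printf("second state-1 lock: %d\n", two_state_trylock(&l, 1)); /* 1: shared */
            printf("state-0 trylock:     %d\n", two_state_trylock(&l, 0)); /* 0: excluded */
            two_state_unlock(&l, 1);
            two_state_unlock(&l, 1);
            return 0;
    }
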
index 846e6024a80b377c2eb81b92e2209d2166941123..473c96968121b3c13f6990755b7b25778a786bcd 100644 (file)
 struct closure;
 
 #ifdef CONFIG_BCACHEFS_DEBUG
-
 #define EBUG_ON(cond)          BUG_ON(cond)
-#define atomic_dec_bug(v)      BUG_ON(atomic_dec_return(v) < 0)
-#define atomic_inc_bug(v, i)   BUG_ON(atomic_inc_return(v) <= i)
-#define atomic_sub_bug(i, v)   BUG_ON(atomic_sub_return(i, v) < 0)
-#define atomic_add_bug(i, v)   BUG_ON(atomic_add_return(i, v) < 0)
-#define atomic_long_dec_bug(v)         BUG_ON(atomic_long_dec_return(v) < 0)
-#define atomic_long_sub_bug(i, v)      BUG_ON(atomic_long_sub_return(i, v) < 0)
-#define atomic64_dec_bug(v)    BUG_ON(atomic64_dec_return(v) < 0)
-#define atomic64_inc_bug(v, i) BUG_ON(atomic64_inc_return(v) <= i)
-#define atomic64_sub_bug(i, v) BUG_ON(atomic64_sub_return(i, v) < 0)
-#define atomic64_add_bug(i, v) BUG_ON(atomic64_add_return(i, v) < 0)
-
-#else /* DEBUG */
-
+#else
 #define EBUG_ON(cond)
-#define atomic_dec_bug(v)      atomic_dec(v)
-#define atomic_inc_bug(v, i)   atomic_inc(v)
-#define atomic_sub_bug(i, v)   atomic_sub(i, v)
-#define atomic_add_bug(i, v)   atomic_add(i, v)
-#define atomic_long_dec_bug(v)         atomic_long_dec(v)
-#define atomic_long_sub_bug(i, v)      atomic_long_sub(i, v)
-#define atomic64_dec_bug(v)    atomic64_dec(v)
-#define atomic64_inc_bug(v, i) atomic64_inc(v)
-#define atomic64_sub_bug(i, v) atomic64_sub(i, v)
-#define atomic64_add_bug(i, v) atomic64_add(i, v)
-
 #endif
 
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
index 66d7a1e30350e30875d3e1597675ddf264a9c8e6..03f1b73fc926290a90fe9b8af13274c3b88c1cde 100644 (file)
@@ -9,10 +9,10 @@ extern const struct bch_hash_desc bch2_xattr_hash_desc;
 int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
-#define bch2_bkey_ops_xattr (struct bkey_ops) {                \
+#define bch2_bkey_ops_xattr ((struct bkey_ops) {       \
        .key_invalid    = bch2_xattr_invalid,           \
        .val_to_text    = bch2_xattr_to_text,           \
-}
+})
 
 static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
 {
index 643e3113500b20a5dece4bc6a64eb3a253271d44..aa95db1277716d563961be30a528cde29fa70578 100644 (file)
@@ -52,7 +52,7 @@
  *
  * note: this rounds towards 0.
  */
-inline s64 fast_divpow2(s64 n, u8 d)
+s64 fast_divpow2(s64 n, u8 d)
 {
        return (n + ((n < 0) ? ((1 << d) - 1) : 0)) >> d;
 }
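
This hunk drops inline from fast_divpow2()'s out-of-line definition (a lone C99 inline on an external definition doesn't emit a symbol), but the comment's "rounds towards 0" is worth a worked example: a bare arithmetic shift rounds towards negative infinity, and the bias term corrects that for negative n.

    #include <stdint.h>
    #include <stdio.h>

    /* same body as fast_divpow2(): bias negative n by (2^d - 1) before shifting */
    static int64_t divpow2(int64_t n, uint8_t d)
    {
            return (n + ((n < 0) ? ((1 << d) - 1) : 0)) >> d;
    }

    int main(void)
    {
            printf("divpow2(-7, 1) = %lld\n", (long long) divpow2(-7, 1)); /* -3 */
            printf("-7 >> 1        = %d\n", -7 >> 1);                      /* -4 */
            printf("divpow2(7, 1)  = %lld\n", (long long) divpow2(7, 1));  /*  3 */
            return 0;
    }
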
index df9567c5f5a9270933d605cc02f4c42d2f4b8fcd..0ae56ee1b9ad6bd80bf62fbc3cce25aba46318a6 100644 (file)
@@ -27,3 +27,8 @@ void prt_printf(struct printbuf *out, const char *fmt, ...)
        prt_vprintf(out, fmt, args);
        va_end(args);
 }
+
+void prt_u64(struct printbuf *out, u64 v)
+{
+       prt_printf(out, "%llu", v);
+}
index 39f7ea79fdb17d121b5dfa3f80dffe5bf3bb041b..39a9bd6ecd78fdd966a278bf75bc12665eebef21 100644 (file)
@@ -342,7 +342,11 @@ static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
        return true;
 }
 
-#ifdef CONFIG_LOCK_SPIN_ON_OWNER
+/*
+ * We don't see stable performance with SIX_LOCK_SPIN_ON_OWNER enabled, so it's
+ * off for now:
+ */
+#ifdef SIX_LOCK_SPIN_ON_OWNER
 
 static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
index 991875c5f760b0f052ca521f617b54d22f5d040f..b1f002b99f9fd699864094ccf6850f8a2fa1d596 100644 (file)
@@ -66,6 +66,11 @@ void wake_up(wait_queue_head_t *q)
        __wake_up(q, TASK_NORMAL, 1, NULL);
 }
 
+void wake_up_all(wait_queue_head_t *q)
+{
+       __wake_up(q, TASK_NORMAL, 0, NULL);
+}
+
 static void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
 {
        __wake_up_common(q, mode, nr, 0, NULL);