From 5ac5084cce1deb6315e6ce87e98aebcabe8f2842 Mon Sep 17 00:00:00 2001 From: Jonathan Carter Date: Wed, 1 Sep 2021 16:29:48 +0200 Subject: [PATCH] New upstream release --- .gitignore | 2 + Makefile | 72 +- Makefile.compiler | 74 + bcachefs.c | 55 +- cmd_attr.c | 1 + cmd_debug.c | 81 +- cmd_device.c | 6 + cmd_format.c | 1 + cmd_fusemount.c | 2 + cmd_migrate.c | 10 +- debian/changelog | 8 +- debian/control | 24 +- debian/files | 2 +- debian/rules | 1 + doc/{bcachefs.5.txt => bcachefs.5.rst.tmpl} | 45 +- doc/macro2rst.py | 85 + doc/opts_macro.h | 12 + include/crypto/{sha.h => sha2.h} | 0 include/linux/bit_spinlock.h | 44 +- include/linux/blk_types.h | 33 +- include/linux/blkdev.h | 33 +- include/linux/bsearch.h | 32 + include/linux/bug.h | 12 +- include/linux/closure.h | 65 +- include/linux/freezer.h | 1 + include/linux/jiffies.h | 1 + include/linux/kernel.h | 56 +- include/linux/kobject.h | 3 - include/linux/lockdep.h | 5 + include/linux/math.h | 151 ++ include/linux/mempool.h | 105 +- include/linux/page.h | 5 + include/linux/poison.h | 4 - include/linux/rhashtable.h | 1 + include/linux/sched/debug.h | 0 include/linux/sched/task_stack.h | 0 include/linux/slab.h | 12 +- include/linux/string.h | 1 + include/linux/types.h | 2 + include/linux/wait.h | 1 + include/linux/xattr.h | 4 +- include/linux/xxhash.h | 259 +++ include/trace/events/bcachefs.h | 686 +++++--- libbcachefs.c | 70 +- libbcachefs.h | 1 + libbcachefs/acl.c | 33 +- libbcachefs/acl.h | 2 +- libbcachefs/alloc_background.c | 522 ++---- libbcachefs/alloc_background.h | 11 +- libbcachefs/alloc_foreground.c | 64 +- libbcachefs/alloc_types.h | 12 + libbcachefs/bcachefs.h | 87 +- libbcachefs/bcachefs_format.h | 22 +- libbcachefs/bkey.c | 29 +- libbcachefs/bkey_methods.c | 60 +- libbcachefs/bkey_methods.h | 29 +- libbcachefs/bset.c | 10 +- libbcachefs/bset.h | 2 +- libbcachefs/btree_cache.c | 276 ++- libbcachefs/btree_cache.h | 11 +- libbcachefs/btree_gc.c | 1397 +++++++++------ libbcachefs/btree_gc.h | 14 +- libbcachefs/btree_io.c | 731 ++++++-- libbcachefs/btree_io.h | 66 +- libbcachefs/btree_iter.c | 1015 +++++++---- libbcachefs/btree_iter.h | 84 +- libbcachefs/btree_key_cache.c | 96 +- libbcachefs/btree_types.h | 81 +- libbcachefs/btree_update.h | 43 +- libbcachefs/btree_update_interior.c | 459 +++-- libbcachefs/btree_update_interior.h | 33 +- libbcachefs/btree_update_leaf.c | 614 +++---- libbcachefs/buckets.c | 996 +++++------ libbcachefs/buckets.h | 67 +- libbcachefs/buckets_types.h | 5 + libbcachefs/chardev.c | 52 +- libbcachefs/checksum.c | 107 +- libbcachefs/checksum.h | 2 + libbcachefs/debug.c | 130 +- libbcachefs/debug.h | 4 - libbcachefs/dirent.c | 46 +- libbcachefs/ec.c | 36 +- libbcachefs/ec.h | 3 +- libbcachefs/error.c | 19 +- libbcachefs/error.h | 4 + libbcachefs/extent_update.c | 39 +- libbcachefs/extent_update.h | 4 - libbcachefs/extents.c | 227 +-- libbcachefs/extents.h | 41 +- libbcachefs/fs-common.c | 25 +- libbcachefs/fs-io.c | 240 +-- libbcachefs/fs-io.h | 3 +- libbcachefs/fs-ioctl.c | 64 +- libbcachefs/fs.c | 118 +- libbcachefs/fs.h | 4 + libbcachefs/fsck.c | 1739 +++++++++---------- libbcachefs/fsck.h | 1 - libbcachefs/inode.c | 119 +- libbcachefs/inode.h | 8 +- libbcachefs/io.c | 112 +- libbcachefs/io.h | 11 +- libbcachefs/io_types.h | 3 +- libbcachefs/journal.c | 65 +- libbcachefs/journal.h | 5 +- libbcachefs/journal_io.c | 51 +- libbcachefs/journal_reclaim.c | 31 +- libbcachefs/journal_reclaim.h | 8 +- libbcachefs/journal_seq_blacklist.c | 6 +- libbcachefs/journal_types.h | 3 + libbcachefs/keylist.c | 2 +- 
libbcachefs/migrate.c | 8 +- libbcachefs/move.c | 37 +- libbcachefs/movinggc.c | 39 +- libbcachefs/opts.c | 39 +- libbcachefs/opts.h | 18 +- libbcachefs/quota.c | 9 +- libbcachefs/recovery.c | 157 +- libbcachefs/recovery.h | 7 +- libbcachefs/reflink.c | 114 +- libbcachefs/reflink.h | 27 +- libbcachefs/replicas.c | 24 +- libbcachefs/replicas.h | 1 + libbcachefs/str_hash.h | 35 +- libbcachefs/super-io.c | 74 +- libbcachefs/super.c | 107 +- libbcachefs/super.h | 7 +- libbcachefs/super_types.h | 2 +- libbcachefs/sysfs.c | 57 +- libbcachefs/tests.c | 76 +- libbcachefs/util.c | 8 +- libbcachefs/util.h | 5 +- libbcachefs/varint.c | 73 +- libbcachefs/varint.h | 3 + libbcachefs/xattr.c | 35 +- linux/blkdev.c | 5 +- linux/closure.c | 67 +- linux/mempool.c | 541 ++++++ linux/xxhash.c | 500 ++++++ smoke_test | 11 +- tests/test_basic.py | 4 +- tests/test_fuse.py | 1 + tests/util.py | 15 +- tests/valgrind-suppressions.txt | 8 + tools-util.c | 2 + tools-util.h | 4 +- 145 files changed, 9034 insertions(+), 5357 deletions(-) create mode 100644 Makefile.compiler rename doc/{bcachefs.5.txt => bcachefs.5.rst.tmpl} (87%) create mode 100755 doc/macro2rst.py create mode 100644 doc/opts_macro.h rename include/crypto/{sha.h => sha2.h} (100%) create mode 100644 include/linux/bsearch.h create mode 100644 include/linux/math.h create mode 100644 include/linux/sched/debug.h create mode 100644 include/linux/sched/task_stack.h create mode 100644 include/linux/xxhash.h create mode 100644 linux/mempool.c create mode 100644 linux/xxhash.c create mode 100644 tests/valgrind-suppressions.txt diff --git a/.gitignore b/.gitignore index eb1f1ee..8feb598 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /result bcachefs +bcachefs.5 .* *.o *.d @@ -17,3 +18,4 @@ tests/__pycache__/ mount/target mount.bcachefs +doc/bcachefs.5.rst diff --git a/Makefile b/Makefile index 3fe9604..23e0508 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,9 @@ - PREFIX?=/usr/local PKG_CONFIG?=pkg-config INSTALL=install PYTEST=pytest-3 CFLAGS+=-std=gnu89 -O2 -g -MMD -Wall \ -Wno-pointer-sign \ - -Wno-zero-length-bounds \ - -Wno-stringop-overflow \ -fno-strict-aliasing \ -fno-delete-null-pointer-checks \ -I. -Iinclude -Iraid \ @@ -25,21 +22,15 @@ LDFLAGS+=$(CFLAGS) $(EXTRA_LDFLAGS) VERSION?=$(shell git describe --dirty=+ 2>/dev/null || echo v0.1-nogit) -CC_VERSION=$(shell $(CC) -v 2>&1|grep -E '(gcc|clang) version') +include Makefile.compiler -ifneq (,$(findstring gcc,$(CC_VERSION))) - CFLAGS+=-Wno-unused-but-set-variable -endif - -ifneq (,$(findstring clang,$(CC_VERSION))) - CFLAGS+=-Wno-missing-braces -endif - -ifdef D - CFLAGS+=-Werror - CFLAGS+=-DCONFIG_BCACHEFS_DEBUG=y -endif - CFLAGS+=-DCONFIG_VALGRIND=y +CFLAGS+=$(call cc-disable-warning, unused-but-set-variable) +CFLAGS+=$(call cc-disable-warning, stringop-overflow) +CFLAGS+=$(call cc-disable-warning, zero-length-bounds) +CFLAGS+=$(call cc-disable-warning, missing-braces) +CFLAGS+=$(call cc-disable-warning, zero-length-array) +CFLAGS+=$(call cc-disable-warning, shift-overflow) +CFLAGS+=$(call cc-disable-warning, enum-conversion) PKGCONFIG_LIBS="blkid uuid liburcu libsodium zlib liblz4 libzstd libudev" ifdef BCACHEFS_FUSE @@ -70,8 +61,24 @@ else INITRAMFS_DIR=/etc/initramfs-tools endif +var := $(shell rst2man -V 2>/dev/null) +ifeq ($(.SHELLSTATUS),0) + RST2MAN=rst2man +endif + +var := $(shell rst2man.py -V 2>/dev/null) +ifeq ($(.SHELLSTATUS),0) + RST2MAN=rst2man.py +endif + +undefine var + +ifeq (,$(RST2MAN)) + @echo "WARNING: no RST2MAN found!" 
+endif + .PHONY: all -all: bcachefs +all: bcachefs bcachefs.5 .PHONY: tests tests: tests/test_helper @@ -87,6 +94,14 @@ TAGS: tags: ctags -R . +DOCSRC := opts_macro.h bcachefs.5.rst.tmpl +DOCGENERATED := bcachefs.5 doc/bcachefs.5.rst +DOCDEPS := $(addprefix ./doc/,$(DOCSRC)) +bcachefs.5: $(DOCDEPS) libbcachefs/opts.h + $(CC) doc/opts_macro.h -I libbcachefs -I include -E 2>/dev/null \ + | doc/macro2rst.py + $(RST2MAN) doc/bcachefs.5.rst bcachefs.5 + SRCS=$(shell find . -type f -iname '*.c') DEPS=$(SRCS:.c=.d) -include $(DEPS) @@ -96,6 +111,10 @@ bcachefs: $(filter-out ./tests/%.o, $(OBJS)) MOUNT_SRCS=$(shell find mount/src -type f -iname '*.rs') \ mount/Cargo.toml mount/Cargo.lock mount/build.rs + +debug: CFLAGS+=-Werror -DCONFIG_BCACHEFS_DEBUG=y -DCONFIG_VALGRIND=y +debug: bcachefs + libbcachefs_mount.a: $(MOUNT_SRCS) LIBBCACHEFS_INCLUDE=$(CURDIR) cargo build --manifest-path mount/Cargo.toml --release cp mount/target/release/libbcachefs_mount.a $@ @@ -116,9 +135,6 @@ endif # Rebuild the 'version' command any time the version string changes cmd_version.o : .version -doc/bcachefs.5: doc/bcachefs.5.txt - a2x -f manpage doc/bcachefs.5.txt - .PHONY: install install: INITRAMFS_HOOK=$(INITRAMFS_DIR)/hooks/bcachefs install: INITRAMFS_SCRIPT=$(INITRAMFS_DIR)/scripts/local-premount/bcachefs @@ -135,16 +151,14 @@ install: bcachefs .PHONY: clean clean: - $(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version $(OBJS) $(DEPS) + $(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version $(OBJS) $(DEPS) $(DOCGENERATED) $(RM) -rf mount/target .PHONY: deb deb: all -# --unsigned-source --unsigned-changes --no-pre-clean --build=binary -# --diff-ignore --tar-ignore debuild -us -uc -nc -b -i -I -.PHONE: update-bcachefs-sources +.PHONY: update-bcachefs-sources update-bcachefs-sources: git rm -rf --ignore-unmatch libbcachefs test -d libbcachefs || mkdir libbcachefs @@ -152,6 +166,10 @@ update-bcachefs-sources: git add libbcachefs/*.[ch] cp $(LINUX_DIR)/include/trace/events/bcachefs.h include/trace/events/ git add include/trace/events/bcachefs.h + cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/ + git add include/linux/xxhash.h + cp $(LINUX_DIR)/lib/xxhash.c linux/ + git add linux/xxhash.c cp $(LINUX_DIR)/kernel/locking/six.c linux/ git add linux/six.c cp $(LINUX_DIR)/include/linux/six.h include/linux/ @@ -160,10 +178,12 @@ update-bcachefs-sources: git add include/linux/list_nulls.h cp $(LINUX_DIR)/include/linux/poison.h include/linux/ git add include/linux/poison.h + cp $(LINUX_DIR)/scripts/Makefile.compiler ./ + git add Makefile.compiler $(RM) libbcachefs/*.mod.c git -C $(LINUX_DIR) rev-parse HEAD | tee .bcachefs_revision git add .bcachefs_revision -.PHONE: update-commit-bcachefs-sources +.PHONY: update-commit-bcachefs-sources update-commit-bcachefs-sources: update-bcachefs-sources git commit -m "Update bcachefs sources to $(shell git -C $(LINUX_DIR) show --oneline --no-patch)" diff --git a/Makefile.compiler b/Makefile.compiler new file mode 100644 index 0000000..86ecd2a --- /dev/null +++ b/Makefile.compiler @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# cc-cross-prefix +# Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-) +# Return first where a gcc is found in PATH. +# If no gcc found in PATH with listed prefixes return nothing +# +# Note: '2>/dev/null' is here to force Make to invoke a shell. Otherwise, it +# would try to directly execute the shell builtin 'command'. 
This workaround +# should be kept for a long time since this issue was fixed only after the +# GNU Make 4.2.1 release. +cc-cross-prefix = $(firstword $(foreach c, $(1), \ + $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c)))) + +# output directory for tests below +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ + +# try-run +# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) +# Exit code chooses option. "$$TMP" serves as a temporary file and is +# automatically cleaned up. +try-run = $(shell set -e; \ + TMP=$(TMPOUT)/tmp; \ + mkdir -p $(TMPOUT); \ + trap "rm -rf $(TMPOUT)" EXIT; \ + if ($(1)) >/dev/null 2>&1; \ + then echo "$(2)"; \ + else echo "$(3)"; \ + fi) + +# as-option +# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) + +as-option = $(call try-run,\ + $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2)) + +# as-instr +# Usage: cflags-y += $(call as-instr,instr,option1,option2) + +as-instr = $(call try-run,\ + printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) + +# __cc-option +# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586) +__cc-option = $(call try-run,\ + $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4)) + +# cc-option +# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) + +cc-option = $(call __cc-option, $(CC),\ + $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS),$(1),$(2)) + +# cc-option-yn +# Usage: flag := $(call cc-option-yn,-march=winchip-c6) +cc-option-yn = $(call try-run,\ + $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) + +# cc-disable-warning +# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) +cc-disable-warning = $(call try-run,\ + $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) + +# cc-ifversion +# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) +cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || echo $(4)) + +# ld-option +# Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y) +ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3)) + +# ld-ifversion +# Usage: $(call ld-ifversion, -ge, 22252, y) +ld-ifversion = $(shell [ $(CONFIG_LD_VERSION)0 $(1) $(2)0 ] && echo $(3) || echo $(4)) diff --git a/bcachefs.c b/bcachefs.c index e9ff7d1..239b114 100644 --- a/bcachefs.c +++ b/bcachefs.c @@ -31,55 +31,56 @@ static void usage(void) "usage: bcachefs []\n" "\n" "Superblock commands:\n" - " format Format a new filesystem\n" - " show-super Dump superblock information to stdout\n" + " format Format a new filesystem\n" + " show-super Dump superblock information to stdout\n" "\n" "Repair:\n" - " fsck Check an existing filesystem for errors\n" + " fsck Check an existing filesystem for errors\n" "\n" "Startup/shutdown, assembly of multi device filesystems:\n" #if 0 - " assemble Assemble an existing multi device filesystem\n" - " incremental Incrementally assemble an existing multi device filesystem\n" - " run Start a partially assembled filesystem\n" - " stop Stop a running filesystem\n" + " assemble Assemble an existing multi device filesystem\n" + " incremental Incrementally assemble an existing multi device filesystem\n" + " run Start a partially assembled filesystem\n" + " stop Stop a running filesystem\n" #endif "\n" "Commands for managing a running filesystem:\n" - " fs usage Show disk usage\n" + " fs usage Show disk 
usage\n" "\n" "Commands for managing devices within a running filesystem:\n" - " device add Add a new device to an existing filesystem\n" - " device remove Remove a device from an existing filesystem\n" - " device online Re-add an existing member to a filesystem\n" - " device offline Take a device offline, without removing it\n" - " device evacuate Migrate data off of a specific device\n" - " device set-state Mark a device as failed\n" - " device resize Resize filesystem on a device\n" + " device add Add a new device to an existing filesystem\n" + " device remove Remove a device from an existing filesystem\n" + " device online Re-add an existing member to a filesystem\n" + " device offline Take a device offline, without removing it\n" + " device evacuate Migrate data off of a specific device\n" + " device set-state Mark a device as failed\n" + " device resize Resize filesystem on a device\n" + " device journal-resize Resize journal on a device\n" "\n" "Commands for managing filesystem data:\n" - " data rereplicate Rereplicate degraded data\n" - " data job Kick off low level data jobs\n" + " data rereplicate Rereplicate degraded data\n" + " data job Kick off low level data jobs\n" "\n" "Encryption:\n" - " unlock Unlock an encrypted filesystem prior to running/mounting\n" - " set-passphrase Change passphrase on an existing (unmounted) filesystem\n" - " remove-passphrase Remove passphrase on an existing (unmounted) filesystem\n" + " unlock Unlock an encrypted filesystem prior to running/mounting\n" + " set-passphrase Change passphrase on an existing (unmounted) filesystem\n" + " remove-passphrase Remove passphrase on an existing (unmounted) filesystem\n" "\n" "Migrate:\n" - " migrate Migrate an existing filesystem to bcachefs, in place\n" - " migrate-superblock Add default superblock, after bcachefs migrate\n" + " migrate Migrate an existing filesystem to bcachefs, in place\n" + " migrate-superblock Add default superblock, after bcachefs migrate\n" "\n" "Commands for operating on files in a bcachefs filesystem:\n" - " setattr Set various per file attributes\n" + " setattr Set various per file attributes\n" "Debug:\n" "These commands work on offline, unmounted filesystems\n" - " dump Dump filesystem metadata to a qcow2 image\n" - " list List filesystem metadata in textual form\n" - " list_journal List contents of journal\n" + " dump Dump filesystem metadata to a qcow2 image\n" + " list List filesystem metadata in textual form\n" + " list_journal List contents of journal\n" "\n" "Miscellaneous:\n" - " version Display the version of the invoked bcachefs tool\n"); + " version Display the version of the invoked bcachefs tool\n"); } static char *full_cmd; diff --git a/cmd_attr.c b/cmd_attr.c index aef42f4..736554c 100644 --- a/cmd_attr.c +++ b/cmd_attr.c @@ -108,6 +108,7 @@ int cmd_setattr(int argc, char *argv[]) for (i = 1; i < argc; i++) do_setattr(argv[i], opts); + bch2_opt_strs_free(&opts); return 0; } diff --git a/cmd_debug.c b/cmd_debug.c index 4938ec0..2f56e41 100644 --- a/cmd_debug.c +++ b/cmd_debug.c @@ -100,6 +100,7 @@ static void dump_one_device(struct bch_fs *c, struct bch_dev *ca, int fd) qcow2_write_image(ca->disk_sb.bdev->bd_fd, fd, &data, max_t(unsigned, btree_bytes(c) / 8, block_bytes(c))); + darray_free(data); } int cmd_dump(int argc, char *argv[]) @@ -115,7 +116,7 @@ int cmd_dump(int argc, char *argv[]) opt_set(opts, norecovery, true); opt_set(opts, degraded, true); opt_set(opts, errors, BCH_ON_ERROR_continue); - opt_set(opts, fix_errors, FSCK_OPT_YES); + opt_set(opts, 
fix_errors, FSCK_OPT_NO); while ((opt = getopt(argc, argv, "o:fvh")) != -1) switch (opt) { @@ -195,10 +196,12 @@ static void list_keys(struct bch_fs *c, enum btree_id btree_id, bch2_bkey_val_to_text(&PBUF(buf), c, k); puts(buf); } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); } -static void list_btree_formats(struct bch_fs *c, enum btree_id btree_id, +static void list_btree_formats(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos start, struct bpos end) { struct btree_trans trans; @@ -208,18 +211,20 @@ static void list_btree_formats(struct bch_fs *c, enum btree_id btree_id, bch2_trans_init(&trans, c, 0, 0); - for_each_btree_node(&trans, iter, btree_id, start, 0, b) { + __for_each_btree_node(&trans, iter, btree_id, start, 0, level, 0, b) { if (bkey_cmp(b->key.k.p, end) > 0) break; bch2_btree_node_to_text(&PBUF(buf), c, b); puts(buf); } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); } -static void list_nodes(struct bch_fs *c, enum btree_id btree_id, - struct bpos start, struct bpos end) +static void list_nodes(struct bch_fs *c, enum btree_id btree_id, unsigned level, + struct bpos start, struct bpos end) { struct btree_trans trans; struct btree_iter *iter; @@ -228,7 +233,7 @@ static void list_nodes(struct bch_fs *c, enum btree_id btree_id, bch2_trans_init(&trans, c, 0, 0); - for_each_btree_node(&trans, iter, btree_id, start, 0, b) { + __for_each_btree_node(&trans, iter, btree_id, start, 0, level, 0, b) { if (bkey_cmp(b->key.k.p, end) > 0) break; @@ -236,6 +241,8 @@ static void list_nodes(struct bch_fs *c, enum btree_id btree_id, fputs(buf, stdout); putchar('\n'); } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); } @@ -335,7 +342,7 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b) free(n_ondisk); } -static void list_nodes_ondisk(struct bch_fs *c, enum btree_id btree_id, +static void list_nodes_ondisk(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos start, struct bpos end) { struct btree_trans trans; @@ -345,7 +352,7 @@ static void list_nodes_ondisk(struct bch_fs *c, enum btree_id btree_id, bch2_trans_init(&trans, c, 0, 0); - for_each_btree_node(&trans, iter, btree_id, start, 0, b) { + __for_each_btree_node(&trans, iter, btree_id, start, 0, level, 0, b) { if (bkey_cmp(b->key.k.p, end) > 0) break; @@ -355,10 +362,12 @@ static void list_nodes_ondisk(struct bch_fs *c, enum btree_id btree_id, print_node_ondisk(c, b); } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); } -static void list_nodes_keys(struct bch_fs *c, enum btree_id btree_id, +static void list_nodes_keys(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos start, struct bpos end) { struct btree_trans trans; @@ -371,7 +380,7 @@ static void list_nodes_keys(struct bch_fs *c, enum btree_id btree_id, bch2_trans_init(&trans, c, 0, 0); - for_each_btree_node(&trans, iter, btree_id, start, 0, b) { + __for_each_btree_node(&trans, iter, btree_id, start, 0, level, 0, b) { if (bkey_cmp(b->key.k.p, end) > 0) break; @@ -384,6 +393,8 @@ static void list_nodes_keys(struct bch_fs *c, enum btree_id btree_id, puts(buf); } } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); } @@ -394,22 +405,35 @@ static void list_keys_usage(void) "\n" "Options:\n" " -b (extents|inodes|dirents|xattrs) Btree to list from\n" + " -l level Btree depth to descend to (0 == leaves)\n" " -s inode:offset Start position to list from\n" " -e inode:offset End position\n" " -i inode List keys for a given inode number\n" - " -m 
(keys|formats) List mode\n" + " -m (keys|formats|nodes|nodes_ondisk|nodes_keys)\n" + " List mode\n" " -f Check (fsck) the filesystem first\n" " -v Verbose mode\n" " -h Display this help and exit\n" "Report bugs to "); } +#define LIST_MODES() \ + x(keys) \ + x(formats) \ + x(nodes) \ + x(nodes_ondisk) \ + x(nodes_keys) + +enum list_modes { +#define x(n) LIST_MODE_##n, + LIST_MODES() +#undef x +}; + static const char * const list_modes[] = { - "keys", - "formats", - "nodes", - "nodes_ondisk", - "nodes_keys", +#define x(n) #n, + LIST_MODES() +#undef x NULL }; @@ -419,6 +443,7 @@ int cmd_list(int argc, char *argv[]) enum btree_id btree_id_start = 0; enum btree_id btree_id_end = BTREE_ID_NR; enum btree_id btree_id; + unsigned level; struct bpos start = POS_MIN, end = POS_MAX; u64 inum; int mode = 0, opt; @@ -428,13 +453,17 @@ int cmd_list(int argc, char *argv[]) opt_set(opts, degraded, true); opt_set(opts, errors, BCH_ON_ERROR_continue); - while ((opt = getopt(argc, argv, "b:s:e:i:m:fvh")) != -1) + while ((opt = getopt(argc, argv, "b:l:s:e:i:m:fvh")) != -1) switch (opt) { case 'b': btree_id_start = read_string_list_or_die(optarg, bch2_btree_ids, "btree id"); btree_id_end = btree_id_start + 1; break; + case 'l': + if (kstrtouint(optarg, 10, &level) || level >= BTREE_MAX_DEPTH) + die("invalid level"); + break; case 's': start = bpos_parse(optarg); break; @@ -476,20 +505,20 @@ int cmd_list(int argc, char *argv[]) btree_id < btree_id_end; btree_id++) { switch (mode) { - case 0: + case LIST_MODE_keys: list_keys(c, btree_id, start, end); break; - case 1: - list_btree_formats(c, btree_id, start, end); + case LIST_MODE_formats: + list_btree_formats(c, btree_id, level, start, end); break; - case 2: - list_nodes(c, btree_id, start, end); + case LIST_MODE_nodes: + list_nodes(c, btree_id, level, start, end); break; - case 3: - list_nodes_ondisk(c, btree_id, start, end); + case LIST_MODE_nodes_ondisk: + list_nodes_ondisk(c, btree_id, level, start, end); break; - case 4: - list_nodes_keys(c, btree_id, start, end); + case LIST_MODE_nodes_keys: + list_nodes_keys(c, btree_id, level, start, end); break; default: die("Invalid mode"); diff --git a/cmd_device.c b/cmd_device.c index f9e975a..b18bdd8 100644 --- a/cmd_device.c +++ b/cmd_device.c @@ -331,6 +331,7 @@ static void device_set_state_usage(void) "\n" "Options:\n" " -f, --force Force, if data redundancy will be degraded\n" + " --force-if-data-lost Force, if data will be lost\n" " -o, --offline Set state of an offline device\n" " -h, --help display this help and exit\n" "Report bugs to "); @@ -341,6 +342,7 @@ int cmd_device_set_state(int argc, char *argv[]) { static const struct option longopts[] = { { "force", 0, NULL, 'f' }, + { "force-if-data-lost", 0, NULL, 'F' }, { "offline", 0, NULL, 'o' }, { "help", 0, NULL, 'h' }, { NULL } @@ -355,6 +357,10 @@ int cmd_device_set_state(int argc, char *argv[]) case 'f': flags |= BCH_FORCE_IF_DEGRADED; break; + case 'F': + flags |= BCH_FORCE_IF_DEGRADED; + flags |= BCH_FORCE_IF_LOST; + break; case 'o': offline = true; break; diff --git a/cmd_format.c b/cmd_format.c index b88ffe9..3f96f5d 100644 --- a/cmd_format.c +++ b/cmd_format.c @@ -236,6 +236,7 @@ int cmd_format(int argc, char *argv[]) fs_opts, opts, devices.item, darray_size(devices)); + bch2_opt_strs_free(&fs_opt_strs); if (!quiet) bch2_sb_print(sb, false, 1 << BCH_SB_FIELD_members, HUMAN_READABLE); diff --git a/cmd_fusemount.c b/cmd_fusemount.c index 54bc76c..2b6b2d7 100644 --- a/cmd_fusemount.c +++ b/cmd_fusemount.c @@ -212,6 +212,7 @@ retry: 
bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL); err: + bch2_trans_iter_put(&trans, iter); if (ret == -EINTR) goto retry; @@ -548,6 +549,7 @@ retry: BTREE_INSERT_NOFAIL); err: + bch2_trans_iter_put(&trans, iter); if (ret == -EINTR) goto retry; diff --git a/cmd_migrate.c b/cmd_migrate.c index a0d2742..5126090 100644 --- a/cmd_migrate.c +++ b/cmd_migrate.c @@ -773,10 +773,12 @@ int cmd_migrate(int argc, char *argv[]) if (format_opts.encrypted && !no_passphrase) format_opts.passphrase = read_passphrase_twice("Enter passphrase: "); - return migrate_fs(fs_path, - fs_opt_strs, - fs_opts, - format_opts, force); + int ret = migrate_fs(fs_path, + fs_opt_strs, + fs_opts, + format_opts, force); + bch2_opt_strs_free(&fs_opt_strs); + return ret; } static void migrate_superblock_usage(void) diff --git a/debian/changelog b/debian/changelog index 3bd5898..349a882 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,9 @@ -bcachefs-tools (0.1+git20210404.ce906d66-1) UNRELEASED; urgency=medium +bcachefs-tools (0.1+git20210805.6c42566-1) unstable; urgency=medium * New upstream snapshot - * Update standards version to 4.5.1 - - Currently unreleased due to test failures. + * Update standards version to 4.6.1 + * Add python3-docutils as dependency + * Do not run tests at package build time -- Jonathan Carter Tue, 06 Apr 2021 15:11:27 +0200 diff --git a/debian/control b/debian/control index caf1b0d..4ec9f1d 100644 --- a/debian/control +++ b/debian/control @@ -2,23 +2,25 @@ Source: bcachefs-tools Maintainer: Jonathan Carter Section: utils Priority: optional -Standards-Version: 4.5.1 +Standards-Version: 4.6.1 Rules-Requires-Root: no Build-Depends: debhelper-compat (= 13), pkg-config, + python3-docutils, python3-pytest, - libaio-dev, - libblkid-dev, - libkeyutils-dev, - liblz4-dev, - libscrypt-dev, - libsodium-dev, + libaio-dev, + libfuse3-dev, + libblkid-dev, + libkeyutils-dev, + liblz4-dev, + libscrypt-dev, + libsodium-dev, libudev-dev, - liburcu-dev, - libzstd-dev, - uuid-dev, + liburcu-dev, + libzstd-dev, + uuid-dev, valgrind, - zlib1g-dev + zlib1g-dev Homepage: https://bcachefs.org/ Vcs-Git: https://salsa.debian.org/jcc/bcachefs-tools.git Vcs-Browser: https://salsa.debian.org/jcc/bcachefs-tools diff --git a/debian/files b/debian/files index d1acbd3..00d330d 100644 --- a/debian/files +++ b/debian/files @@ -1 +1 @@ -bcachefs-tools_0.1+git20210404.ce906d66-1_source.buildinfo utils optional +bcachefs-tools_0.1+git20210805.6c42566-1_source.buildinfo utils optional diff --git a/debian/rules b/debian/rules index fbaf6d4..b3ee89d 100755 --- a/debian/rules +++ b/debian/rules @@ -11,3 +11,4 @@ override_dh_auto_install: override_dh_auto_clean: +override_dh_auto_test: diff --git a/doc/bcachefs.5.txt b/doc/bcachefs.5.rst.tmpl similarity index 87% rename from doc/bcachefs.5.txt rename to doc/bcachefs.5.rst.tmpl index 291e2e3..7b5e1ce 100644 --- a/doc/bcachefs.5.txt +++ b/doc/bcachefs.5.rst.tmpl @@ -1,21 +1,23 @@ -BCACHEFS(5) -=========== +======== +bcachefs +======== -NAME ----- -bcachefs - bcachefs overview, user's manual and configuration +-------------------------------------------------- +bcachefs overview, user's manual and configuration +-------------------------------------------------- +:Manual section: 5 DESCRIPTION ----------- Bcachefs is a multi device copy on write filesystem that supports - Checksumming - Compression - Encryption - Reflink - Caching - Replication - Erasure coding (reed-solomon) +- Checksumming +- Compression +- Encryption +- Reflink +- Caching +- Replication +- Erasure 
coding (reed-solomon) And more. This document is intended to be an overview of the various features and use cases. @@ -31,8 +33,9 @@ set on individual files and directories, via the bcachefs setattr command (which internally mostly works via the extended attribute interface, but the setattr command takes care to propagate options to children correctly). - * TODO: include master list of options from opts.h -#include "opts.mdwn" +OPTIONS +------- +OPTIONS_TABLE Device management ----------------- @@ -58,6 +61,8 @@ group. For example, given disks formatted with these labels: +.. code-block:: bash + bcachefs format -g controller1.hdd.hdd1 /dev/sda \ -g controller1.hdd.hdd2 /dev/sdb \ -g controller1.ssd.ssd1 /dev/sdc \ @@ -73,9 +78,9 @@ Data placement, caching The following options control which disks data is written to: - * foreground_target - * background_target - * promote_target +- foreground_target +- background_target +- promote_target The foreground_target option is used to direct writes from applications. The background_target option, if set, will cause data to be moved to that target in @@ -86,7 +91,9 @@ a cached copy of the data being read to that target, if it doesn't exist. Together, these options can be used for writeback caching, like so: - foregroud_target=ssd +.. code-block:: bash + + foreground_target=ssd background_target=hdd promote_target=ssd @@ -97,6 +104,8 @@ per-file options. This is done by setting the device's durability to 0. These options can all be set on individual files or directories. They can also be used to pin a specific file or directory to a specific device or target: +.. code-block:: bash + foreground_target=ssd background_target= promote_target= diff --git a/doc/macro2rst.py b/doc/macro2rst.py new file mode 100755 index 0000000..e80f7ed --- /dev/null +++ b/doc/macro2rst.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +''' +A utility script for generating documentation. + +Preprocessor macro output from opts_macro.h is parsed and combined with +bcachefs.5.rst.tmpl to generate bcachefs.5.rst. 
+ +>=python3.6 +''' + +import sys +import re + +INDENT = ' ' +TEMPLATE = './doc/bcachefs.5.rst.tmpl' +RST_FILE= './doc/bcachefs.5.rst' +SANITIZE_CHARS = [ + '\\\\n', + '\\n', + ' ', + '"', + '\\', + ] + +def sanitize(text): + ''' + Parses opts_macro.h preprocessor output + :param text: text to sanitize + :type text: str + :returns: a list of options + :rtype: list + ''' + + args = [] + reg = re.search('FMT_START_SECTION(.*)FMT_END_SECTION', text, + flags=re.DOTALL) + if not reg: + raise re.error('no text found') + + # decoding would probably be better, but this works + for char in SANITIZE_CHARS: + text = text.replace(char, '') + + text = re.split(r'FMT_END_LINE', text) + + # this seemed easier than getting preprocessor macros to play nice + # with python's builtin csv module + for line in text: + vals = line.split(';') + if not vals: + continue + if len(vals) != 4: + continue + vals = list(map(str.strip, vals)) + name, is_bool, desc, arg_name = vals + + # this macro value from opts.h indicates that no values are passed + if is_bool == 'OPT_BOOL()': + args.append(f'--{name}\n{INDENT}{desc}') + else: + args.append(f'--{name} <{arg_name}>\n{INDENT}{desc}') + if not args: + raise re.error('no args found, likely parsing error') + + return args + + +def main(): + ''' Transform stdin to option list and write templated output to new file ''' + out = '' + + stdin = sys.stdin.read() + opts = sanitize(stdin) + opts = '\n'.join(opts) + + # Insert into template + with open(TEMPLATE, 'r') as in_handle: + in_handle = in_handle.read() + out = in_handle.replace('OPTIONS_TABLE', opts) + with open(RST_FILE, 'w') as out_handle: + out_handle.write(out) + + +if __name__ == '__main__': + main() diff --git a/doc/opts_macro.h b/doc/opts_macro.h new file mode 100644 index 0000000..9172802 --- /dev/null +++ b/doc/opts_macro.h @@ -0,0 +1,12 @@ +#include "../libbcachefs/opts.h" + +/** + * generate tables from definitions in opt.h + */ + +#define NULL (null) + +FMT_START_SECTION +#define x(_name, _shortopt, _type, _in_mem_type, _mode, _sb_opt, _desc , _usage)\ +_name;_in_mem_type;_usage;_desc FMT_END_LINE +BCH_OPTS() FMT_END_SECTION diff --git a/include/crypto/sha.h b/include/crypto/sha2.h similarity index 100% rename from include/crypto/sha.h rename to include/crypto/sha2.h diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h index 0e88820..ed47cc6 100644 --- a/include/linux/bit_spinlock.h +++ b/include/linux/bit_spinlock.h @@ -3,38 +3,40 @@ #include #include -#include -#include +#include -static inline void bit_spin_lock(int bitnum, unsigned long *addr) +static inline void bit_spin_lock(int nr, unsigned long *_addr) { - while (unlikely(test_and_set_bit_lock(bitnum, addr))) { - do { - cpu_relax(); - } while (test_bit(bitnum, addr)); - } -} + u32 mask, *addr = ((u32 *) _addr) + (nr / 32), v; -static inline int bit_spin_trylock(int bitnum, unsigned long *addr) -{ - return !test_and_set_bit_lock(bitnum, addr); -} + nr &= 31; + mask = 1U << nr; -static inline void bit_spin_unlock(int bitnum, unsigned long *addr) -{ - BUG_ON(!test_bit(bitnum, addr)); + while (1) { + v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE); + if (!(v & mask)) + break; - clear_bit_unlock(bitnum, addr); + futex(addr, FUTEX_WAIT|FUTEX_PRIVATE_FLAG, v, NULL, NULL, 0); + } } -static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +static inline void bit_spin_wake(int nr, unsigned long *_addr) { - bit_spin_unlock(bitnum, addr); + u32 *addr = ((u32 *) _addr) + (nr / 32); + + futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, 
INT_MAX, NULL, NULL, 0); } -static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) +static inline void bit_spin_unlock(int nr, unsigned long *_addr) { - return test_bit(bitnum, addr); + u32 mask, *addr = ((u32 *) _addr) + (nr / 32); + + nr &= 31; + mask = 1U << nr; + + __atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE); + futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0); } #endif /* __LINUX_BIT_SPINLOCK_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 42cd003..8aef4bb 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -8,12 +8,43 @@ #include #include #include +#include struct bio_set; struct bio; -struct block_device; typedef void (bio_end_io_t) (struct bio *); +#define BDEVNAME_SIZE 32 + +struct request_queue { + struct backing_dev_info *backing_dev_info; +}; + +struct gendisk { +}; + +struct hd_struct { + struct kobject kobj; +}; + +struct block_device { + struct kobject kobj; + dev_t bd_dev; + char name[BDEVNAME_SIZE]; + struct inode *bd_inode; + struct request_queue queue; + void *bd_holder; + struct gendisk * bd_disk; + struct gendisk __bd_disk; + int bd_fd; + int bd_sync_fd; + + struct backing_dev_info *bd_bdi; + struct backing_dev_info __bd_bdi; +}; + +#define bdev_kobj(_bdev) (&((_bdev)->kobj)) + /* * Block error status values. See block/blk-core:blk_errors for the details. */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 318bcfa..35082ae 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -6,7 +6,7 @@ #include #include -#define BIO_MAX_PAGES 256 +#define BIO_MAX_VECS 256 typedef unsigned fmode_t; @@ -59,36 +59,8 @@ static inline struct inode *file_inode(const struct file *f) return f->f_inode; } -#define BDEVNAME_SIZE 32 - -struct request_queue { - struct backing_dev_info *backing_dev_info; -}; - -struct gendisk { -}; - -struct hd_struct { - struct kobject kobj; -}; - #define part_to_dev(part) (part) -struct block_device { - char name[BDEVNAME_SIZE]; - struct inode *bd_inode; - struct request_queue queue; - void *bd_holder; - struct hd_struct *bd_part; - struct gendisk *bd_disk; - struct gendisk __bd_disk; - int bd_fd; - int bd_sync_fd; - - struct backing_dev_info *bd_bdi; - struct backing_dev_info __bd_bdi; -}; - void generic_make_request(struct bio *); int submit_bio_wait(struct bio *); @@ -111,7 +83,7 @@ sector_t get_capacity(struct gendisk *disk); void blkdev_put(struct block_device *bdev, fmode_t mode); void bdput(struct block_device *bdev); struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); -struct block_device *lookup_bdev(const char *path); +int lookup_bdev(const char *path, dev_t *); struct super_block { void *s_fs_info; @@ -133,6 +105,7 @@ struct super_block { #define DT_LNK 10 #define DT_SOCK 12 #define DT_WHT 14 +#define DT_MAX 16 #endif /* diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h new file mode 100644 index 0000000..e66b711 --- /dev/null +++ b/include/linux/bsearch.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BSEARCH_H +#define _LINUX_BSEARCH_H + +#include + +static __always_inline +void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp) +{ + const char *pivot; + int result; + + while (num > 0) { + pivot = base + (num >> 1) * size; + result = cmp(key, pivot); + + if (result == 0) + return (void *)pivot; + + if (result > 0) { + base = pivot + size; + num--; + } + num >>= 1; + } + + return NULL; +} + +extern void 
*bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp); + +#endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h index d47f5a4..77260f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -29,10 +29,15 @@ __ret_warn_on; \ }) +#define __WARN() \ +do { \ + fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__); \ +} while (0) + #define WARN_ON(cond) ({ \ int __ret_warn_on = unlikely(!!(cond)); \ if (__ret_warn_on) \ - fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__);\ + __WARN(); \ __ret_warn_on; \ }) @@ -42,8 +47,7 @@ int __ret_warn_on = unlikely(!!(cond)); \ if (__ret_warn_on && !__warned) { \ __warned = true; \ - fprintf(stderr, "WARNING at " __FILE__ ":%d: " fmt "\n",\ - __LINE__, ##__VA_ARGS__); \ + __WARN(); \ } \ __ret_warn_on; \ }) @@ -53,7 +57,7 @@ int __ret_warn_on = unlikely(!!(cond)); \ if (__ret_warn_on && !__warned) { \ __warned = true; \ - fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__);\ + __WARN(); \ } \ __ret_warn_on; \ }) diff --git a/include/linux/closure.h b/include/linux/closure.h index a9de6d9..d85ca86 100644 --- a/include/linux/closure.h +++ b/include/linux/closure.h @@ -1,8 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CLOSURE_H #define _LINUX_CLOSURE_H #include +#include #include +#include #include /* @@ -103,6 +106,7 @@ struct closure; struct closure_syncer; typedef void (closure_fn) (struct closure *); +extern struct dentry *bcache_debug; struct closure_waitlist { struct llist_head list; @@ -125,10 +129,10 @@ enum closure_state { * annotate where references are being transferred. */ - CLOSURE_BITS_START = (1U << 27), - CLOSURE_DESTRUCTOR = (1U << 27), - CLOSURE_WAITING = (1U << 29), - CLOSURE_RUNNING = (1U << 31), + CLOSURE_BITS_START = (1U << 26), + CLOSURE_DESTRUCTOR = (1U << 26), + CLOSURE_WAITING = (1U << 28), + CLOSURE_RUNNING = (1U << 30), }; #define CLOSURE_GUARD_MASK \ @@ -156,7 +160,7 @@ struct closure { #define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_ALIVE 0xc054a11e - unsigned magic; + unsigned int magic; struct list_head all; unsigned long ip; unsigned long waiting_on; @@ -232,10 +236,16 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn, static inline void closure_queue(struct closure *cl) { struct workqueue_struct *wq = cl->wq; + /** + * Changes made to closure, work_struct, or a couple of other structs + * may cause work.func not pointing to the right location. + */ + BUILD_BUG_ON(offsetof(struct closure, fn) + != offsetof(struct work_struct, func)); if (wq) { INIT_WORK(&cl->work, cl->work.func); - queue_work(wq, &cl->work); + BUG_ON(!queue_work(wq, &cl->work)); } else cl->fn(cl); } @@ -279,20 +289,16 @@ static inline void closure_init_stack(struct closure *cl) } /** - * closure_wake_up - wake up all closures on a wait list. + * closure_wake_up - wake up all closures on a wait list, + * with memory barrier */ static inline void closure_wake_up(struct closure_waitlist *list) { + /* Memory barrier for the wait list */ smp_mb(); __closure_wake_up(list); } -#define continue_at_noreturn(_cl, _fn, _wq) \ -do { \ - set_closure_fn(_cl, _fn, _wq); \ - closure_sub(_cl, CLOSURE_RUNNING + 1); \ -} while (0) - /** * continue_at - jump to another function with barrier * @@ -300,16 +306,16 @@ do { \ * been dropped with closure_put()), it will resume execution at @fn running out * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly). * - * NOTE: This macro expands to a return in the calling function! 
- * * This is because after calling continue_at() you no longer have a ref on @cl, * and whatever @cl owns may be freed out from under you - a running closure fn * has a ref on its own closure which continue_at() drops. + * + * Note you are expected to immediately return after using this macro. */ #define continue_at(_cl, _fn, _wq) \ do { \ - continue_at_noreturn(_cl, _fn, _wq); \ - return; \ + set_closure_fn(_cl, _fn, _wq); \ + closure_sub(_cl, CLOSURE_RUNNING + 1); \ } while (0) /** @@ -328,32 +334,19 @@ do { \ * Causes @fn to be executed out of @cl, in @wq context (or called directly if * @wq is NULL). * - * NOTE: like continue_at(), this macro expands to a return in the caller! - * * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn, * thus it's not safe to touch anything protected by @cl after a * continue_at_nobarrier(). */ #define continue_at_nobarrier(_cl, _fn, _wq) \ do { \ - closure_set_ip(_cl); \ - if (_wq) { \ - INIT_WORK(&(_cl)->work, (void *) _fn); \ - queue_work((_wq), &(_cl)->work); \ - } else { \ - (_fn)(_cl); \ - } \ - return; \ -} while (0) - -#define closure_return_with_destructor_noreturn(_cl, _destructor) \ -do { \ - set_closure_fn(_cl, _destructor, NULL); \ - closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ + set_closure_fn(_cl, _fn, _wq); \ + closure_queue(_cl); \ } while (0) /** - * closure_return - finish execution of a closure, with destructor + * closure_return_with_destructor - finish execution of a closure, + * with destructor * * Works like closure_return(), except @destructor will be called when all * outstanding refs on @cl have been dropped; @destructor may be used to safely @@ -363,8 +356,8 @@ do { \ */ #define closure_return_with_destructor(_cl, _destructor) \ do { \ - closure_return_with_destructor_noreturn(_cl, _destructor); \ - return; \ + set_closure_fn(_cl, _destructor, NULL); \ + closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ } while (0) /** diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 1af94d5..a29d156 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -4,5 +4,6 @@ #define try_to_freeze() #define set_freezable() #define freezing(task) false +#define freezable_schedule_timeout(_t) schedule_timeout(_t); #endif /* __TOOLS_LINUX_FREEZER_H */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 9b8dd43..fe92826 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -1,6 +1,7 @@ #ifndef _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H +#include #include #include #include diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 4b45306..30451cb 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -11,8 +11,50 @@ #include #include #include +#include -#define IS_ENABLED(opt) 0 +#define __ARG_PLACEHOLDER_1 0, +#define __take_second_arg(__ignored, val, ...) val + +#define __and(x, y) ___and(x, y) +#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) +#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0) + +#define __or(x, y) ___or(x, y) +#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y) +#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y) + +#define __is_defined(x) ___is_defined(x) +#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) +#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) + +/* + * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 + * otherwise. 
For boolean options, this is equivalent to + * IS_ENABLED(CONFIG_FOO). + */ +#define IS_BUILTIN(option) __is_defined(option) + +/* + * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 + * otherwise. + */ +#define IS_MODULE(option) __is_defined(option##_MODULE) + +/* + * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled + * code can call a function defined in code compiled based on CONFIG_FOO. + * This is similar to IS_ENABLED(), but returns false when invoked from + * built-in code when CONFIG_FOO is set to 'm'. + */ +#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \ + __and(IS_MODULE(option), __is_defined(MODULE))) + +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. + */ +#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) #define EXPORT_SYMBOL(sym) #define U8_MAX ((u8)~0U) @@ -37,8 +79,6 @@ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) - #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif @@ -56,16 +96,6 @@ (type *)((char *)__mptr - offsetof(type, member)); }) #endif -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - -#define roundup(x, y) \ -({ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -}) - #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index d524178..c7362d6 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -98,12 +98,9 @@ static inline void kobject_put(struct kobject *kobj) static inline void kobject_del(struct kobject *kobj) { - struct kernfs_node *sd; - if (!kobj) return; - sd = kobj->sd; kobj->state_in_sysfs = 0; #if 0 kobj_kset_leave(kobj); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 1a7f024..3831ef2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -51,5 +51,10 @@ debug_check_no_locks_held(void) { } +static inline int lock_class_is_held(struct lock_class_key *k) +{ + return 0; +} + #endif /* __TOOLS_LINUX_LOCKDEP_H */ diff --git a/include/linux/math.h b/include/linux/math.h new file mode 100644 index 0000000..3cf6726 --- /dev/null +++ b/include/linux/math.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MATH_H +#define _LINUX_MATH_H + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) + +/** + * round_up - round up to next specified power of 2 + * @x: the value to round + * @y: multiple to round up to (must be a power of 2) + * + * Rounds @x up to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding up, use roundup() below. + */ +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) + +/** + * round_down - round down to next specified power of 2 + * @x: the value to round + * @y: multiple to round down to (must be a power of 2) + * + * Rounds @x down to next multiple of @y (which must be a power of 2). + * To perform arbitrary rounding down, use rounddown() below. 
+ */ +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) + +#define DIV_ROUND_UP_ULL(ll, d) \ + DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/** + * roundup - round up to the next specified multiple + * @x: the value to up + * @y: multiple to round up to + * + * Rounds @x up to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_up(). + */ +#define roundup(x, y) ( \ +{ \ + typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +/** + * rounddown - round down to next specified multiple + * @x: the value to round + * @y: multiple to round down to + * + * Rounds @x down to next multiple of @y. If @y will always be a power + * of 2, consider using the faster round_down(). + */ +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) + +/* + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if the dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. + */ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + +#define sector_div(a, b) do_div(a, b) + +/** + * reciprocal_scale - "scale" a value into range [0, ep_ro) + * @val: value + * @ep_ro: right open interval endpoint + * + * Perform a "reciprocal multiplication" in order to "scale" a value into + * range [0, @ep_ro), where the upper interval endpoint is right-open. + * This is useful, e.g. for accessing a index of an array containing + * @ep_ro elements, for example. Think of it as sort of modulus, only that + * the result isn't that of modulo. ;) Note that if initial input is a + * small value, then result will return 0. + * + * Return: a result based on @val in interval [0, @ep_ro). 
+ */ +static inline u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + +u64 int_pow(u64 base, unsigned int exp); +unsigned long int_sqrt(unsigned long); + +#if BITS_PER_LONG < 64 +u32 int_sqrt64(u64 x); +#else +static inline u32 int_sqrt64(u64 x) +{ + return (u32)int_sqrt(x); +} +#endif + +#endif /* _LINUX_MATH_H */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 37d8149..506da24 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -1,11 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * memory buffer pool support */ #ifndef _LINUX_MEMPOOL_H #define _LINUX_MEMPOOL_H +#include #include -#include #include struct kmem_cache; @@ -14,74 +15,98 @@ typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); typedef void (mempool_free_t)(void *element, void *pool_data); typedef struct mempool_s { - size_t elem_size; - void *pool_data; - mempool_alloc_t *alloc; - mempool_free_t *free; + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; } mempool_t; static inline bool mempool_initialized(mempool_t *pool) { - return true; + return pool->elements != NULL; } -extern int mempool_resize(mempool_t *pool, int new_min_nr); +void mempool_exit(mempool_t *pool); +int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id); +int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); -static inline void mempool_free(void *element, mempool_t *pool) -{ - free(element); -} - -static inline void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc -{ - BUG_ON(!pool->elem_size); - return kmalloc(pool->elem_size, gfp_mask); -} +extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); -static inline void mempool_exit(mempool_t *pool) {} +extern int mempool_resize(mempool_t *pool, int new_min_nr); +extern void mempool_destroy(mempool_t *pool); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; +extern void mempool_free(void *element, mempool_t *pool); -static inline void mempool_destroy(mempool_t *pool) -{ - free(pool); -} +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab cache that is passed in through pool_data. + * Note: the slab cache may not have a ctor function. 
+ */ +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); static inline int mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) { - pool->elem_size = 0; - return 0; + return mempool_init(pool, min_nr, mempool_alloc_slab, + mempool_free_slab, (void *) kc); } static inline mempool_t * mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) { - mempool_t *pool = malloc(sizeof(*pool)); - pool->elem_size = 0; - return pool; + return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + (void *) kc); } +/* + * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the + * amount of memory specified by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); +void mempool_kfree(void *element, void *pool_data); + static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) { - pool->elem_size = size; - return 0; + return mempool_init(pool, min_nr, mempool_kmalloc, + mempool_kfree, (void *) size); +} + +static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, + (void *) size); } +/* + * A mempool_alloc_t and mempool_free_t for a simple page allocator that + * allocates pages of the order specified by pool_data + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); +void mempool_free_pages(void *element, void *pool_data); + static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) { - pool->elem_size = PAGE_SIZE << order; - return 0; + return mempool_init(pool, min_nr, mempool_alloc_pages, + mempool_free_pages, (void *)(long)order); } -static inline int mempool_init(mempool_t *pool, int min_nr, - mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, - void *pool_data) +static inline mempool_t *mempool_create_page_pool(int min_nr, int order) { - pool->elem_size = (size_t) pool_data; - pool->pool_data = pool_data; - pool->alloc = alloc_fn; - pool->free = free_fn; - return 0; + return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, + (void *)(long)order); } #endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/page.h b/include/linux/page.h index 310b3ed..e2dda66 100644 --- a/include/linux/page.h +++ b/include/linux/page.h @@ -12,6 +12,11 @@ struct page; #endif +#ifndef PAGE_SHIFT +#define PAGE_SHIFT 12 +#endif + + #define virt_to_page(p) \ ((struct page *) (((unsigned long) (p)) & PAGE_MASK)) #define offset_in_page(p) ((unsigned long) (p) & ~PAGE_MASK) diff --git a/include/linux/poison.h b/include/linux/poison.h index dc8ae5d..aff1c92 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -27,11 +27,7 @@ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ -#ifdef CONFIG_PAGE_POISONING_ZERO -#define PAGE_POISON 0x00 -#else #define PAGE_POISON 0xaa -#endif /********** mm/page_alloc.c ************/ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 6cf8c25..c5e717b 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -395,6 +395,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, rcu_assign_pointer(*bkt, (void *)obj); preempt_enable(); __release(bitlock); + bit_spin_wake(0, (unsigned long *) bkt); } /** diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 0000000..e69de29 diff --git a/include/linux/sched/task_stack.h 
b/include/linux/sched/task_stack.h new file mode 100644 index 0000000..e69de29 diff --git a/include/linux/slab.h b/include/linux/slab.h index 775b7e3..ef86153 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -40,17 +40,19 @@ static inline void *krealloc(void *old, size_t size, gfp_t flags) run_shrinkers(); - new = malloc(size); + new = kmalloc(size, flags); if (!new) return NULL; if (flags & __GFP_ZERO) memset(new, 0, size); - memcpy(new, old, - min(malloc_usable_size(old), - malloc_usable_size(new))); - free(old); + if (old) { + memcpy(new, old, + min(malloc_usable_size(old), + malloc_usable_size(new))); + free(old); + } return new; } diff --git a/include/linux/string.h b/include/linux/string.h index 4806e2c..b5e00a0 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -11,5 +11,6 @@ extern void memzero_explicit(void *, size_t); int match_string(const char * const *, size_t, const char *); #define kstrndup(s, n, gfp) strndup(s, n) +#define kstrdup(s, gfp) strdup(s) #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/types.h b/include/linux/types.h index c9886cb..77f9673 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -76,4 +76,6 @@ typedef __u64 __bitwise __be64; typedef u64 sector_t; +typedef int (*cmp_func_t)(const void *a, const void *b); + #endif /* _TOOLS_LINUX_TYPES_H_ */ diff --git a/include/linux/wait.h b/include/linux/wait.h index c3d9824..d1d33e6 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -90,6 +90,7 @@ do { \ __wait_event(wq, condition); \ } while (0) +#define wait_event_freezable(wq, condition) ({wait_event(wq, condition); 0; }) #define wait_event_killable(wq, condition) ({wait_event(wq, condition); 0; }) #define wait_event_interruptible(wq, condition) ({wait_event(wq, condition); 0; }) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index fbc1e1f..222c72f 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -26,6 +26,7 @@ struct inode; struct dentry; +struct user_namespace; /* * struct xattr_handler: When @name is set, match attributes with exactly that @@ -40,7 +41,8 @@ struct xattr_handler { int (*get)(const struct xattr_handler *, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); - int (*set)(const struct xattr_handler *, struct dentry *dentry, + int (*set)(const struct xattr_handler *, + struct user_namespace *mnt_userns, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); }; diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h new file mode 100644 index 0000000..df42511 --- /dev/null +++ b/include/linux/xxhash.h @@ -0,0 +1,259 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). + * + * You can contact the author at: + * - xxHash homepage: https://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +/* + * Notice extracted from xxHash homepage: + * + * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. + * It also successfully passes all tests from the SMHasher suite. + * + * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 + * Duo @3GHz) + * + * Name Speed Q.Score Author + * xxHash 5.4 GB/s 10 + * CrapWow 3.2 GB/s 2 Andrew + * MumurHash 3a 2.7 GB/s 10 Austin Appleby + * SpookyHash 2.0 GB/s 10 Bob Jenkins + * SBox 1.4 GB/s 9 Bret Mulvey + * Lookup3 1.2 GB/s 9 Bob Jenkins + * SuperFastHash 1.2 GB/s 1 Paul Hsieh + * CityHash64 1.05 GB/s 10 Pike & Alakuijala + * FNV 0.55 GB/s 5 Fowler, Noll, Vo + * CRC32 0.43 GB/s 9 + * MD5-32 0.33 GB/s 10 Ronald L. Rivest + * SHA1-32 0.28 GB/s 10 + * + * Q.Score is a measure of quality of the hash function. + * It depends on successfully passing SMHasher test set. + * 10 is a perfect score. + * + * A 64-bits version, named xxh64 offers much better speed, + * but for 64-bits applications only. + * Name Speed on 64 bits Speed on 32 bits + * xxh64 13.8 GB/s 1.9 GB/s + * xxh32 6.8 GB/s 6.0 GB/s + */ + +#ifndef XXHASH_H +#define XXHASH_H + +#include + +/*-**************************** + * Simple Hash Functions + *****************************/ + +/** + * xxh32() - calculate the 32-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s + * + * Return: The 32-bit hash of the data. + */ +uint32_t xxh32(const void *input, size_t length, uint32_t seed); + +/** + * xxh64() - calculate the 64-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. + * + * Return: The 64-bit hash of the data. + */ +uint64_t xxh64(const void *input, size_t length, uint64_t seed); + +/** + * xxhash() - calculate wordsize hash of the input with a given seed + * @input: The data to hash. + * @length: The length of the data to hash. 
+ * @seed: The seed can be used to alter the result predictably. + * + * If the hash does not need to be comparable between machines with + * different word sizes, this function will call whichever of xxh32() + * or xxh64() is faster. + * + * Return: wordsize hash of the data. + */ + +static inline unsigned long xxhash(const void *input, size_t length, + uint64_t seed) +{ +#if BITS_PER_LONG == 64 + return xxh64(input, length, seed); +#else + return xxh32(input, length, seed); +#endif +} + +/*-**************************** + * Streaming Hash Functions + *****************************/ + +/* + * These definitions are only meant to allow allocation of XXH state + * statically, on stack, or in a struct for example. + * Do not use members directly. + */ + +/** + * struct xxh32_state - private xxh32 state, do not use members directly + */ +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +/** + * struct xxh32_state - private xxh64 state, do not use members directly + */ +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +/** + * xxh32_reset() - reset the xxh32 state to start a new hashing operation + * + * @state: The xxh32 state to reset. + * @seed: Initialize the hash state with this seed. + * + * Call this function on any xxh32_state to prepare for a new hashing operation. + */ +void xxh32_reset(struct xxh32_state *state, uint32_t seed); + +/** + * xxh32_update() - hash the data given and update the xxh32 state + * + * @state: The xxh32 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh32_reset() call xxh32_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh32_update(struct xxh32_state *state, const void *input, size_t length); + +/** + * xxh32_digest() - produce the current xxh32 hash + * + * @state: Produce the current xxh32 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh32_digest(), and + * generate new hashes later on, by calling xxh32_digest() again. + * + * Return: The xxh32 hash stored in the state. + */ +uint32_t xxh32_digest(const struct xxh32_state *state); + +/** + * xxh64_reset() - reset the xxh64 state to start a new hashing operation + * + * @state: The xxh64 state to reset. + * @seed: Initialize the hash state with this seed. + */ +void xxh64_reset(struct xxh64_state *state, uint64_t seed); + +/** + * xxh64_update() - hash the data given and update the xxh64 state + * @state: The xxh64 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh64_reset() call xxh64_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh64_update(struct xxh64_state *state, const void *input, size_t length); + +/** + * xxh64_digest() - produce the current xxh64 hash + * + * @state: Produce the current xxh64 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh64_digest(), and + * generate new hashes later on, by calling xxh64_digest() again. + * + * Return: The xxh64 hash stored in the state. 
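Aside: the kernel-doc above covers both the one-shot helpers (xxh32(), xxh64(), xxhash()) and the streaming reset/update/digest interface. A short sketch of how the streaming interface described here is typically driven, assuming only the declarations from this new header; the hash_chunks() helper, its seed handling and its error handling are illustrative:

	#include <linux/xxhash.h>

	/*
	 * Hash a message delivered in chunks. For the same input bytes this
	 * produces the same value as a single xxh64(data, len, seed) call
	 * on the concatenated buffer.
	 */
	static uint64_t hash_chunks(const void **chunks, const size_t *lens,
				    size_t nr, uint64_t seed)
	{
		struct xxh64_state state;
		size_t i;

		xxh64_reset(&state, seed);

		for (i = 0; i < nr; i++)
			if (xxh64_update(&state, chunks[i], lens[i]))
				return 0; /* illustrative error handling */

		return xxh64_digest(&state);
	}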
+ */ +uint64_t xxh64_digest(const struct xxh64_state *state); + +/*-************************** + * Utils + ***************************/ + +/** + * xxh32_copy_state() - copy the source state into the destination state + * + * @src: The source xxh32 state. + * @dst: The destination xxh32 state. + */ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); + +/** + * xxh64_copy_state() - copy the source state into the destination state + * + * @src: The source xxh64 state. + * @dst: The destination xxh64 state. + */ +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); + +#endif /* XXHASH_H */ diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h index d4cb7a2..a11bb5f 100644 --- a/include/trace/events/bcachefs.h +++ b/include/trace/events/bcachefs.h @@ -49,14 +49,14 @@ DECLARE_EVENT_CLASS(bch_fs, TP_ARGS(c), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; ), - TP_printk("%pU", __entry->uuid) + TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev)) ); DECLARE_EVENT_CLASS(bio, @@ -71,10 +71,10 @@ DECLARE_EVENT_CLASS(bio, ), TP_fast_assign( - __entry->dev = bio->bi_disk ? bio_dev(bio) : 0; + __entry->dev = bio->bi_bdev ? bio_dev(bio) : 0; __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u", @@ -131,7 +131,7 @@ TRACE_EVENT(journal_reclaim_start, btree_key_cache_dirty, btree_key_cache_total), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) __field(u64, min_nr ) __field(u64, prereserved ) __field(u64, prereserved_total ) @@ -142,7 +142,7 @@ TRACE_EVENT(journal_reclaim_start, ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; __entry->min_nr = min_nr; __entry->prereserved = prereserved; __entry->prereserved_total = prereserved_total; @@ -152,8 +152,8 @@ TRACE_EVENT(journal_reclaim_start, __entry->btree_key_cache_total = btree_key_cache_total; ), - TP_printk("%pU min %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu", - __entry->uuid, + TP_printk("%d,%d min %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->min_nr, __entry->prereserved, __entry->prereserved_total, @@ -168,16 +168,18 @@ TRACE_EVENT(journal_reclaim_finish, TP_ARGS(c, nr_flushed), TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, nr_flushed ) + __field(dev_t, dev ) + __field(u64, nr_flushed ) ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->nr_flushed = nr_flushed; + __entry->dev = c->dev; + __entry->nr_flushed = nr_flushed; ), - TP_printk("%pU flushed %llu", __entry->uuid, __entry->nr_flushed) + TP_printk("%d%d flushed %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nr_flushed) ); /* bset.c: */ @@ -194,7 +196,7 @@ DECLARE_EVENT_CLASS(btree_node, TP_ARGS(c, b), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) __field(u8, level ) __field(u8, id ) __field(u64, inode ) @@ -202,15 +204,16 @@ DECLARE_EVENT_CLASS(btree_node, ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; __entry->level = b->c.level; __entry->id = b->c.btree_id; __entry->inode = b->key.k.p.inode; __entry->offset = b->key.k.p.offset; ), - 
TP_printk("%pU %u id %u %llu:%llu", - __entry->uuid, __entry->level, __entry->id, + TP_printk("%d,%d %u id %u %llu:%llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->level, __entry->id, __entry->inode, __entry->offset) ); @@ -254,32 +257,17 @@ DEFINE_EVENT(btree_node, btree_node_reap, TP_ARGS(c, b) ); -DECLARE_EVENT_CLASS(btree_node_cannibalize_lock, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - ), - - TP_printk("%pU", __entry->uuid) -); - -DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail, +DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock_fail, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock, +DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize, +DEFINE_EVENT(bch_fs, btree_node_cannibalize, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); @@ -294,18 +282,19 @@ TRACE_EVENT(btree_reserve_get_fail, TP_ARGS(c, required, cl), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) __field(size_t, required ) __field(struct closure *, cl ) ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; __entry->required = required; __entry->cl = cl; ), - TP_printk("%pU required %zu by %p", __entry->uuid, + TP_printk("%d,%d required %zu by %p", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->required, __entry->cl) ); @@ -353,28 +342,6 @@ DEFINE_EVENT(btree_node, btree_set_root, /* Garbage collection */ -DEFINE_EVENT(btree_node, btree_gc_coalesce, - TP_PROTO(struct bch_fs *c, struct btree *b), - TP_ARGS(c, b) -); - -TRACE_EVENT(btree_gc_coalesce_fail, - TP_PROTO(struct bch_fs *c, int reason), - TP_ARGS(c, reason), - - TP_STRUCT__entry( - __field(u8, reason ) - __array(char, uuid, 16 ) - ), - - TP_fast_assign( - __entry->reason = reason; - memcpy(__entry->uuid, c->disk_sb.sb->user_uuid.b, 16); - ), - - TP_printk("%pU: %u", __entry->uuid, __entry->reason) -); - DEFINE_EVENT(btree_node, btree_gc_rewrite_node, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) @@ -395,16 +362,6 @@ DEFINE_EVENT(bch_fs, gc_end, TP_ARGS(c) ); -DEFINE_EVENT(bch_fs, gc_coalesce_start, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DEFINE_EVENT(bch_fs, gc_coalesce_end, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - DEFINE_EVENT(bch_fs, gc_cannot_inc_gens, TP_PROTO(struct bch_fs *c), TP_ARGS(c) @@ -412,24 +369,27 @@ DEFINE_EVENT(bch_fs, gc_cannot_inc_gens, /* Allocator */ -TRACE_EVENT(alloc_batch, - TP_PROTO(struct bch_dev *ca, size_t free, size_t total), - TP_ARGS(ca, free, total), +TRACE_EVENT(alloc_scan, + TP_PROTO(struct bch_dev *ca, u64 found, u64 inc_gen, u64 inc_gen_skipped), + TP_ARGS(ca, found, inc_gen, inc_gen_skipped), TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(size_t, free ) - __field(size_t, total ) + __field(dev_t, dev ) + __field(u64, found ) + __field(u64, inc_gen ) + __field(u64, inc_gen_skipped ) ), TP_fast_assign( - memcpy(__entry->uuid, ca->uuid.b, 16); - __entry->free = free; - __entry->total = total; + __entry->dev = ca->disk_sb.bdev->bd_dev; + __entry->found = found; + __entry->inc_gen = inc_gen; + __entry->inc_gen_skipped = inc_gen_skipped; ), - TP_printk("%pU free %zu total %zu", - __entry->uuid, __entry->free, __entry->total) + TP_printk("%d,%d found %llu inc_gen %llu inc_gen_skipped %llu", + MAJOR(__entry->dev), 
MINOR(__entry->dev), + __entry->found, __entry->inc_gen, __entry->inc_gen_skipped) ); TRACE_EVENT(invalidate, @@ -449,13 +409,10 @@ TRACE_EVENT(invalidate, ), TP_printk("invalidated %u sectors at %d,%d sector=%llu", - __entry->sectors, MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->offset) -); - -DEFINE_EVENT(bch_fs, rescale_prios, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) + __entry->sectors, + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->offset) ); DECLARE_EVENT_CLASS(bucket_alloc, @@ -463,16 +420,18 @@ DECLARE_EVENT_CLASS(bucket_alloc, TP_ARGS(ca, reserve), TP_STRUCT__entry( - __array(char, uuid, 16) - __field(enum alloc_reserve, reserve ) + __field(dev_t, dev ) + __field(enum alloc_reserve, reserve ) ), TP_fast_assign( - memcpy(__entry->uuid, ca->uuid.b, 16); - __entry->reserve = reserve; + __entry->dev = ca->disk_sb.bdev->bd_dev; + __entry->reserve = reserve; ), - TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve) + TP_printk("%d,%d reserve %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->reserve) ); DEFINE_EVENT(bucket_alloc, bucket_alloc, @@ -513,19 +472,20 @@ TRACE_EVENT(move_data, TP_ARGS(c, sectors_moved, keys_moved), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) __field(u64, sectors_moved ) __field(u64, keys_moved ) ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; __entry->sectors_moved = sectors_moved; __entry->keys_moved = keys_moved; ), - TP_printk("%pU sectors_moved %llu keys_moved %llu", - __entry->uuid, __entry->sectors_moved, __entry->keys_moved) + TP_printk("%d,%d sectors_moved %llu keys_moved %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->sectors_moved, __entry->keys_moved) ); TRACE_EVENT(copygc, @@ -537,7 +497,7 @@ TRACE_EVENT(copygc, buckets_moved, buckets_not_moved), TP_STRUCT__entry( - __array(char, uuid, 16 ) + __field(dev_t, dev ) __field(u64, sectors_moved ) __field(u64, sectors_not_moved ) __field(u64, buckets_moved ) @@ -545,17 +505,102 @@ TRACE_EVENT(copygc, ), TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->dev = c->dev; __entry->sectors_moved = sectors_moved; __entry->sectors_not_moved = sectors_not_moved; __entry->buckets_moved = buckets_moved; __entry->buckets_not_moved = buckets_moved; ), - TP_printk("%pU sectors moved %llu remain %llu buckets moved %llu remain %llu", - __entry->uuid, - __entry->sectors_moved, __entry->sectors_not_moved, - __entry->buckets_moved, __entry->buckets_not_moved) + TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->sectors_moved, __entry->sectors_not_moved, + __entry->buckets_moved, __entry->buckets_not_moved) +); + +TRACE_EVENT(copygc_wait, + TP_PROTO(struct bch_fs *c, + u64 wait_amount, u64 until), + TP_ARGS(c, wait_amount, until), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(u64, wait_amount ) + __field(u64, until ) + ), + + TP_fast_assign( + __entry->dev = c->dev; + __entry->wait_amount = wait_amount; + __entry->until = until; + ), + + TP_printk("%d,%u waiting for %llu sectors until %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->wait_amount, __entry->until) +); + +TRACE_EVENT(trans_get_iter, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *got_pos, + unsigned got_locks, + unsigned got_uptodate, + struct bpos *src_pos, + unsigned src_locks, + unsigned src_uptodate), + TP_ARGS(trans_ip, caller_ip, btree_id, + got_pos, 
got_locks, got_uptodate, + src_pos, src_locks, src_uptodate), + + TP_STRUCT__entry( + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(u8, btree_id ) + __field(u64, got_pos_inode ) + __field(u64, got_pos_offset ) + __field(u32, got_pos_snapshot ) + __field(u8, got_locks ) + __field(u8, got_uptodate ) + __field(u64, src_pos_inode ) + __field(u64, src_pos_offset ) + __field(u32, src_pos_snapshot ) + __field(u8, src_locks ) + __field(u8, src_uptodate ) + ), + + TP_fast_assign( + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->btree_id = btree_id; + __entry->got_pos_inode = got_pos->inode; + __entry->got_pos_offset = got_pos->offset; + __entry->got_pos_snapshot = got_pos->snapshot; + __entry->got_locks = got_locks; + __entry->got_uptodate = got_uptodate; + __entry->src_pos_inode = src_pos->inode; + __entry->src_pos_offset = src_pos->offset; + __entry->src_pos_snapshot = src_pos->snapshot; + __entry->src_locks = src_locks; + __entry->src_uptodate = src_uptodate; + ), + + TP_printk("%ps %pS btree %u got %llu:%llu:%u l %u u %u " + "src %llu:%llu:%u l %u u %u", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip, + __entry->btree_id, + __entry->got_pos_inode, + __entry->got_pos_offset, + __entry->got_pos_snapshot, + __entry->got_locks, + __entry->got_uptodate, + __entry->src_pos_inode, + __entry->src_pos_offset, + __entry->src_pos_snapshot, + __entry->src_locks, + __entry->src_uptodate) ); TRACE_EVENT(transaction_restart_ip, @@ -576,182 +621,409 @@ TRACE_EVENT(transaction_restart_ip, ); DECLARE_EVENT_CLASS(transaction_restart, - TP_PROTO(unsigned long ip), - TP_ARGS(ip), + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip), TP_STRUCT__entry( - __field(unsigned long, ip ) + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) ), TP_fast_assign( - __entry->ip = ip; + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; ), - TP_printk("%ps", (void *) __entry->ip) + TP_printk("%ps %pS", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_blocked_journal_reclaim, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) ); -DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) +DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_restart_fault_inject, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_traverse_all, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip), + TP_ARGS(trans_ip, caller_ip) +); + +DECLARE_EVENT_CLASS(transaction_restart_iter, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, 
caller_ip, btree_id, pos), + + TP_STRUCT__entry( + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(u8, btree_id ) + __field(u64, pos_inode ) + __field(u64, pos_offset ) + __field(u32, pos_snapshot ) + ), + + TP_fast_assign( + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->btree_id = btree_id; + __entry->pos_inode = pos->inode; + __entry->pos_offset = pos->offset; + __entry->pos_snapshot = pos->snapshot; + ), + + TP_printk("%ps %pS btree %u pos %llu:%llu:%u", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip, + __entry->btree_id, + __entry->pos_inode, + __entry->pos_offset, + __entry->pos_snapshot) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_mark, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_upgrade, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_iter_upgrade, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_ip, caller_ip, btree_id, pos) +); + +TRACE_EVENT(iter_traverse, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + bool key_cache, + enum btree_id btree_id, + struct bpos *pos, + int ret), + TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos, ret), + + TP_STRUCT__entry( + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(u8, key_cache ) + __field(u8, btree_id ) + __field(u64, pos_inode ) + __field(u64, pos_offset ) + __field(u32, pos_snapshot ) + __field(s32, ret ) + ), + + TP_fast_assign( + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->key_cache = key_cache; + __entry->btree_id = btree_id; + __entry->pos_inode = pos->inode; + __entry->pos_offset = pos->offset; + __entry->pos_snapshot = pos->snapshot; + __entry->ret = ret; + ), + + TP_printk("%ps %pS key cache %u btree %u %llu:%llu:%u ret %i", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip, + __entry->key_cache, + __entry->btree_id, + __entry->pos_inode, + __entry->pos_offset, + __entry->pos_snapshot, + __entry->ret) +); + +TRACE_EVENT(iter_set_search_pos, + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *old_pos, + struct bpos *new_pos, + unsigned good_level), + TP_ARGS(trans_ip, caller_ip, btree_id, 
old_pos, new_pos, good_level), + + TP_STRUCT__entry( + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(u8, btree_id ) + __field(u64, old_pos_inode ) + __field(u64, old_pos_offset ) + __field(u32, old_pos_snapshot ) + __field(u64, new_pos_inode ) + __field(u64, new_pos_offset ) + __field(u32, new_pos_snapshot ) + __field(u8, good_level ) + ), + + TP_fast_assign( + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->btree_id = btree_id; + __entry->old_pos_inode = old_pos->inode; + __entry->old_pos_offset = old_pos->offset; + __entry->old_pos_snapshot = old_pos->snapshot; + __entry->new_pos_inode = new_pos->inode; + __entry->new_pos_offset = new_pos->offset; + __entry->new_pos_snapshot = new_pos->snapshot; + __entry->good_level = good_level; + ), + + TP_printk("%ps %pS btree %u old pos %llu:%llu:%u new pos %llu:%llu:%u l %u", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip, + __entry->btree_id, + __entry->old_pos_inode, + __entry->old_pos_offset, + __entry->old_pos_snapshot, + __entry->new_pos_inode, + __entry->new_pos_offset, + __entry->new_pos_snapshot, + __entry->good_level) ); TRACE_EVENT(trans_restart_would_deadlock, TP_PROTO(unsigned long trans_ip, unsigned long caller_ip, + bool in_traverse_all, unsigned reason, enum btree_id have_btree_id, unsigned have_iter_type, + struct bpos *have_pos, enum btree_id want_btree_id, - unsigned want_iter_type), - TP_ARGS(trans_ip, caller_ip, reason, - have_btree_id, have_iter_type, - want_btree_id, want_iter_type), + unsigned want_iter_type, + struct bpos *want_pos), + TP_ARGS(trans_ip, caller_ip, in_traverse_all, reason, + have_btree_id, have_iter_type, have_pos, + want_btree_id, want_iter_type, want_pos), TP_STRUCT__entry( __field(unsigned long, trans_ip ) __field(unsigned long, caller_ip ) + __field(u8, in_traverse_all ) __field(u8, reason ) __field(u8, have_btree_id ) __field(u8, have_iter_type ) __field(u8, want_btree_id ) __field(u8, want_iter_type ) + + __field(u64, have_pos_inode ) + __field(u64, have_pos_offset ) + __field(u32, have_pos_snapshot) + __field(u32, want_pos_snapshot) + __field(u64, want_pos_inode ) + __field(u64, want_pos_offset ) ), TP_fast_assign( __entry->trans_ip = trans_ip; __entry->caller_ip = caller_ip; + __entry->in_traverse_all = in_traverse_all; __entry->reason = reason; __entry->have_btree_id = have_btree_id; __entry->have_iter_type = have_iter_type; __entry->want_btree_id = want_btree_id; __entry->want_iter_type = want_iter_type; + + __entry->have_pos_inode = have_pos->inode; + __entry->have_pos_offset = have_pos->offset; + __entry->have_pos_snapshot = have_pos->snapshot; + + __entry->want_pos_inode = want_pos->inode; + __entry->want_pos_offset = want_pos->offset; + __entry->want_pos_snapshot = want_pos->snapshot; ), - TP_printk("%ps %pS because %u have %u:%u want %u:%u", + TP_printk("%ps %pS traverse_all %u because %u have %u:%u %llu:%llu:%u want %u:%u %llu:%llu:%u", (void *) __entry->trans_ip, (void *) __entry->caller_ip, + __entry->in_traverse_all, __entry->reason, __entry->have_btree_id, __entry->have_iter_type, + __entry->have_pos_inode, + __entry->have_pos_offset, + __entry->have_pos_snapshot, __entry->want_btree_id, - __entry->want_iter_type) -); - -TRACE_EVENT(trans_restart_iters_realloced, - TP_PROTO(unsigned long ip, unsigned nr), - TP_ARGS(ip, nr), - - TP_STRUCT__entry( - __field(unsigned long, ip ) - __field(unsigned, nr ) - ), - - TP_fast_assign( - __entry->ip = ip; - __entry->nr = nr; - ), - - TP_printk("%ps nr %u", (void *) __entry->ip, 
__entry->nr) + __entry->want_iter_type, + __entry->want_pos_inode, + __entry->want_pos_offset, + __entry->want_pos_snapshot) ); TRACE_EVENT(trans_restart_mem_realloced, - TP_PROTO(unsigned long ip, unsigned long bytes), - TP_ARGS(ip, bytes), + TP_PROTO(unsigned long trans_ip, unsigned long caller_ip, + unsigned long bytes), + TP_ARGS(trans_ip, caller_ip, bytes), TP_STRUCT__entry( - __field(unsigned long, ip ) - __field(unsigned long, bytes ) + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(unsigned long, bytes ) ), TP_fast_assign( - __entry->ip = ip; - __entry->bytes = bytes; + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->bytes = bytes; ), - TP_printk("%ps bytes %lu", (void *) __entry->ip, __entry->bytes) -); - -DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_fault_inject, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_mark, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_upgrade, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) -); - -DEFINE_EVENT(transaction_restart, trans_restart_traverse, - TP_PROTO(unsigned long ip), - TP_ARGS(ip) + TP_printk("%ps %pS bytes %lu", + (void *) __entry->trans_ip, + (void *) __entry->caller_ip, + __entry->bytes) ); DECLARE_EVENT_CLASS(node_lock_fail, - TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq), - TP_ARGS(level, iter_seq, node, node_seq), + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + bool key_cache, + enum btree_id btree_id, + struct bpos *pos, + unsigned level, u32 iter_seq, unsigned node, u32 node_seq), + TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos, + level, iter_seq, node, node_seq), TP_STRUCT__entry( - __field(u32, level) - __field(u32, iter_seq) - __field(u32, node) - __field(u32, node_seq) + __field(unsigned long, trans_ip ) + __field(unsigned long, caller_ip ) + __field(u8, key_cache ) + __field(u8, btree_id ) + __field(u64, pos_inode ) + __field(u64, pos_offset ) + __field(u32, pos_snapshot ) + __field(u32, level ) + __field(u32, iter_seq ) + __field(u32, node ) + __field(u32, node_seq ) ), TP_fast_assign( - __entry->level = level; - __entry->iter_seq = iter_seq; - __entry->node = node; - __entry->node_seq = node_seq; + __entry->trans_ip = trans_ip; + __entry->caller_ip = caller_ip; + __entry->key_cache = key_cache; + __entry->btree_id = btree_id; + __entry->pos_inode = pos->inode; + __entry->pos_offset = pos->offset; + __entry->pos_snapshot = pos->snapshot; + __entry->level = level; + __entry->iter_seq = iter_seq; + __entry->node = node; + __entry->node_seq = node_seq; ), - TP_printk("level %u iter seq %u node %u node seq %u", + TP_printk("%ps %pS key cache %u btree %u pos %llu:%llu:%u level %u iter seq %u node %u node seq %u", + (void *) 
__entry->trans_ip, + (void *) __entry->caller_ip, + __entry->key_cache, + __entry->btree_id, + __entry->pos_inode, + __entry->pos_offset, + __entry->pos_snapshot, __entry->level, __entry->iter_seq, __entry->node, __entry->node_seq) ); DEFINE_EVENT(node_lock_fail, node_upgrade_fail, - TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq), - TP_ARGS(level, iter_seq, node, node_seq) + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + bool key_cache, + enum btree_id btree_id, + struct bpos *pos, + unsigned level, u32 iter_seq, unsigned node, u32 node_seq), + TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos, + level, iter_seq, node, node_seq) ); DEFINE_EVENT(node_lock_fail, node_relock_fail, - TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq), - TP_ARGS(level, iter_seq, node, node_seq) + TP_PROTO(unsigned long trans_ip, + unsigned long caller_ip, + bool key_cache, + enum btree_id btree_id, + struct bpos *pos, + unsigned level, u32 iter_seq, unsigned node, u32 node_seq), + TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos, + level, iter_seq, node, node_seq) ); #endif /* _TRACE_BCACHE_H */ diff --git a/libbcachefs.c b/libbcachefs.c index e5dcfd8..34246dc 100644 --- a/libbcachefs.c +++ b/libbcachefs.c @@ -38,7 +38,7 @@ static u64 min_size(unsigned bucket_size) static void init_layout(struct bch_sb_layout *l, unsigned block_size, unsigned sb_size, - u64 start, u64 end) + u64 sb_start, u64 sb_end) { unsigned i; @@ -51,14 +51,14 @@ static void init_layout(struct bch_sb_layout *l, /* Create two superblocks in the allowed range: */ for (i = 0; i < l->nr_superblocks; i++) { - if (start != BCH_SB_SECTOR) - start = round_up(start, block_size); + if (sb_start != BCH_SB_SECTOR) + sb_start = round_up(sb_start, block_size); - l->sb_offset[i] = cpu_to_le64(start); - start += sb_size; + l->sb_offset[i] = cpu_to_le64(sb_start); + sb_start += sb_size; } - if (start >= end) + if (sb_start >= sb_end) die("insufficient space for superblocks"); } @@ -292,6 +292,21 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs, opts.superblock_size, i->sb_offset, i->sb_end); + /* + * Also create a backup superblock at the end of the disk: + * + * If we're not creating a superblock at the default offset, it + * means we're being run from the migrate tool and we could be + * overwriting existing data if we write to the end of the disk: + */ + if (i->sb_offset == BCH_SB_SECTOR) { + struct bch_sb_layout *l = &sb.sb->layout; + u64 backup_sb = i->size - (1 << l->sb_max_size_bits); + + backup_sb = rounddown(backup_sb, i->bucket_size); + l->sb_offset[l->nr_superblocks++] = cpu_to_le64(backup_sb); + } + if (i->sb_offset == BCH_SB_SECTOR) { /* Zero start of disk */ static const char zeroes[BCH_SB_SECTOR << 9]; @@ -1006,6 +1021,16 @@ int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd) /* option parsing */ +void bch2_opt_strs_free(struct bch_opt_strs *opts) +{ + unsigned i; + + for (i = 0; i < bch2_opts_nr; i++) { + free(opts->by_id[i]); + opts->by_id[i] = NULL; + } +} + struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[], unsigned opt_types) { @@ -1038,9 +1063,8 @@ struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[], optid = bch2_opt_lookup(optstr); if (optid < 0 || !(bch2_opt_table[optid].mode & opt_types)) { - free(optstr); i++; - continue; + goto next; } if (!valstr && @@ -1052,13 +1076,15 @@ struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[], if (!valstr) valstr = "1"; - opts.by_id[optid] = valstr; + opts.by_id[optid] 
= strdup(valstr); *argc -= nr_args; memmove(&argv[i], &argv[i + nr_args], sizeof(char *) * (*argc - i)); argv[*argc] = NULL; +next: + free(optstr); } return opts; @@ -1089,22 +1115,17 @@ struct bch_opts bch2_parse_opts(struct bch_opt_strs strs) return opts; } +#define newline(c) \ + do { \ + printf("\n"); \ + c = 0; \ + } while(0) void bch2_opts_usage(unsigned opt_types) { const struct bch_option *opt; unsigned i, c = 0, helpcol = 30; - void tabalign() { - while (c < helpcol) { - putchar(' '); - c++; - } - } - void newline() { - printf("\n"); - c = 0; - } for (opt = bch2_opt_table; opt < bch2_opt_table + bch2_opts_nr; @@ -1135,21 +1156,24 @@ void bch2_opts_usage(unsigned opt_types) const char *l = opt->help; if (c >= helpcol) - newline(); + newline(c); while (1) { const char *n = strchrnul(l, '\n'); - tabalign(); + while (c < helpcol) { + putchar(' '); + c++; + } printf("%.*s", (int) (n - l), l); - newline(); + newline(c); if (!*n) break; l = n + 1; } } else { - newline(); + newline(c); } } } diff --git a/libbcachefs.h b/libbcachefs.h index 45d2f87..7cdbf69 100644 --- a/libbcachefs.h +++ b/libbcachefs.h @@ -25,6 +25,7 @@ struct { }; }; +void bch2_opt_strs_free(struct bch_opt_strs *); struct bch_opt_strs bch2_cmdline_opts_get(int *, char *[], unsigned); struct bch_opts bch2_parse_opts(struct bch_opt_strs); void bch2_opts_usage(unsigned); diff --git a/libbcachefs/acl.c b/libbcachefs/acl.c index 0f2d743..eb907e5 100644 --- a/libbcachefs/acl.c +++ b/libbcachefs/acl.c @@ -221,6 +221,8 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type) struct btree_iter *iter; struct bkey_s_c_xattr xattr; struct posix_acl *acl = NULL; + struct bkey_s_c k; + int ret; bch2_trans_init(&trans, c, 0, 0); retry: @@ -239,7 +241,14 @@ retry: goto out; } - xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter)); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) { + acl = ERR_PTR(ret); + goto out; + } + + xattr = bkey_s_c_to_xattr(k); acl = bch2_acl_from_disk(xattr_val(xattr.v), le16_to_cpu(xattr.v->x_val_len)); @@ -281,7 +290,8 @@ int bch2_set_acl_trans(struct btree_trans *trans, return ret == -ENOENT ? 0 : ret; } -int bch2_set_acl(struct inode *vinode, struct posix_acl *_acl, int type) +int bch2_set_acl(struct user_namespace *mnt_userns, + struct inode *vinode, struct posix_acl *_acl, int type) { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; @@ -308,7 +318,7 @@ retry: mode = inode_u.bi_mode; if (type == ACL_TYPE_ACCESS) { - ret = posix_acl_update_mode(&inode->v, &mode, &acl); + ret = posix_acl_update_mode(mnt_userns, &inode->v, &mode, &acl); if (ret) goto btree_err; } @@ -324,8 +334,7 @@ retry: ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?: bch2_trans_commit(&trans, NULL, - &inode->ei_journal_seq, - BTREE_INSERT_NOUNLOCK); + &inode->ei_journal_seq, 0); btree_err: bch2_trans_iter_put(&trans, inode_iter); @@ -355,6 +364,7 @@ int bch2_acl_chmod(struct btree_trans *trans, struct bkey_s_c_xattr xattr; struct bkey_i_xattr *new; struct posix_acl *acl; + struct bkey_s_c k; int ret; iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc, @@ -365,11 +375,15 @@ int bch2_acl_chmod(struct btree_trans *trans, if (ret) return ret == -ENOENT ? 
0 : ret; - xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter)); + k = bch2_btree_iter_peek_slot(iter); + xattr = bkey_s_c_to_xattr(k); + if (ret) + goto err; + acl = bch2_acl_from_disk(xattr_val(xattr.v), le16_to_cpu(xattr.v->x_val_len)); ret = PTR_ERR_OR_ZERO(acl); - if (ret || !acl) + if (IS_ERR_OR_NULL(acl)) goto err; ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode); @@ -383,12 +397,13 @@ int bch2_acl_chmod(struct btree_trans *trans, } new->k.p = iter->pos; - bch2_trans_update(trans, iter, &new->k_i, 0); + ret = bch2_trans_update(trans, iter, &new->k_i, 0); *new_acl = acl; acl = NULL; err: bch2_trans_iter_put(trans, iter); - kfree(acl); + if (!IS_ERR_OR_NULL(acl)) + kfree(acl); return ret; } diff --git a/libbcachefs/acl.h b/libbcachefs/acl.h index ba210c2..25fc54d 100644 --- a/libbcachefs/acl.h +++ b/libbcachefs/acl.h @@ -32,7 +32,7 @@ int bch2_set_acl_trans(struct btree_trans *, struct bch_inode_unpacked *, const struct bch_hash_info *, struct posix_acl *, int); -int bch2_set_acl(struct inode *, struct posix_acl *, int); +int bch2_set_acl(struct user_namespace *, struct inode *, struct posix_acl *, int); int bch2_acl_chmod(struct btree_trans *, struct bch_inode_unpacked *, umode_t, struct posix_acl **); diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c index 48971fc..886861a 100644 --- a/libbcachefs/alloc_background.c +++ b/libbcachefs/alloc_background.c @@ -25,44 +25,19 @@ #include #include +const char * const bch2_allocator_states[] = { +#define x(n) #n, + ALLOC_THREAD_STATES() +#undef x + NULL +}; + static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = { #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8, BCH_ALLOC_FIELDS_V1() #undef x }; -/* Ratelimiting/PD controllers */ - -static void pd_controllers_update(struct work_struct *work) -{ - struct bch_fs *c = container_of(to_delayed_work(work), - struct bch_fs, - pd_controllers_update); - struct bch_dev *ca; - s64 free = 0, fragmented = 0; - unsigned i; - - for_each_member_device(ca, c, i) { - struct bch_dev_usage stats = bch2_dev_usage_read(ca); - - free += bucket_to_sector(ca, - __dev_buckets_free(ca, stats)) << 9; - /* - * Bytes of internal fragmentation, which can be - * reclaimed by copy GC - */ - fragmented += max_t(s64, 0, (bucket_to_sector(ca, - stats.d[BCH_DATA_user].buckets + - stats.d[BCH_DATA_cached].buckets) - - (stats.d[BCH_DATA_user].sectors + - stats.d[BCH_DATA_cached].sectors)) << 9); - } - - bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1); - schedule_delayed_work(&c->pd_controllers_update, - c->pd_controllers_update_seconds * HZ); -} - /* Persistent alloc info: */ static inline u64 alloc_field_v1_get(const struct bch_alloc *a, @@ -155,7 +130,7 @@ static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out, #define x(_name, _bits) \ if (fieldnr < a.v->nr_fields) { \ - ret = bch2_varint_decode(in, end, &v); \ + ret = bch2_varint_decode_fast(in, end, &v); \ if (ret < 0) \ return ret; \ in += ret; \ @@ -191,7 +166,7 @@ static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst, nr_fields++; \ \ if (src._name) { \ - out += bch2_varint_encode(out, src._name); \ + out += bch2_varint_encode_fast(out, src._name); \ \ last_nonzero_field = out; \ last_nonzero_fieldnr = nr_fields; \ @@ -234,7 +209,7 @@ void bch2_alloc_pack(struct bch_fs *c, bch2_alloc_pack_v2(dst, src); } -static unsigned bch_alloc_val_u64s(const struct bch_alloc *a) +static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a) { unsigned i, bytes = offsetof(struct bch_alloc, data); @@ -254,7 +229,7 
@@ const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k) return "invalid device"; /* allow for unknown fields */ - if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v)) + if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) return "incorrect value size"; return NULL; @@ -279,23 +254,21 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, { struct bkey_alloc_unpacked u = bch2_alloc_unpack(k); - pr_buf(out, "gen %u oldest_gen %u data_type %u", - u.gen, u.oldest_gen, u.data_type); -#define x(_name, ...) pr_buf(out, #_name " %llu ", (u64) u._name); + pr_buf(out, "gen %u oldest_gen %u data_type %s", + u.gen, u.oldest_gen, bch2_data_types[u.data_type]); +#define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name); BCH_ALLOC_FIELDS_V2() #undef x } -static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id, - unsigned level, struct bkey_s_c k) +static int bch2_alloc_read_fn(struct bch_fs *c, struct bkey_s_c k) { struct bch_dev *ca; struct bucket *g; struct bkey_alloc_unpacked u; - if (level || - (k.k->type != KEY_TYPE_alloc && - k.k->type != KEY_TYPE_alloc_v2)) + if (k.k->type != KEY_TYPE_alloc && + k.k->type != KEY_TYPE_alloc_v2) return 0; ca = bch_dev_bkey_exists(c, k.k->p.inode); @@ -314,15 +287,13 @@ static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id, return 0; } -int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys) +int bch2_alloc_read(struct bch_fs *c) { int ret; down_read(&c->gc_lock); - ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc, - NULL, bch2_alloc_read_fn); + ret = bch2_btree_and_journal_walk(c, BTREE_ID_alloc, bch2_alloc_read_fn); up_read(&c->gc_lock); - if (ret) { bch_err(c, "error reading alloc info: %i", ret); return ret; @@ -369,9 +340,9 @@ retry: return 0; bch2_alloc_pack(c, &a, new_u); - bch2_trans_update(trans, iter, &a.k, - BTREE_TRIGGER_NORUN); - ret = bch2_trans_commit(trans, NULL, NULL, + ret = bch2_trans_update(trans, iter, &a.k, + BTREE_TRIGGER_NORUN) ?: + bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL|flags); err: if (ret == -EINTR) @@ -400,10 +371,10 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags) ret = bch2_alloc_write_key(&trans, iter, flags); if (ret) { - percpu_ref_put(&ca->io_ref); + percpu_ref_put(&ca->ref); goto err; } - bch2_btree_iter_next_slot(iter); + bch2_btree_iter_advance(iter); } } err: @@ -467,60 +438,6 @@ out: * commands to the newly free buckets, then puts them on the various freelists. */ -/** - * wait_buckets_available - wait on reclaimable buckets - * - * If there aren't enough available buckets to fill up free_inc, wait until - * there are. 
- */ -static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca) -{ - unsigned long gc_count = c->gc_count; - s64 available; - unsigned i; - int ret = 0; - - ca->allocator_state = ALLOCATOR_BLOCKED; - closure_wake_up(&c->freelist_wait); - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) { - ret = 1; - break; - } - - if (gc_count != c->gc_count) - ca->inc_gen_really_needs_gc = 0; - - available = dev_buckets_available(ca); - available -= ca->inc_gen_really_needs_gc; - - spin_lock(&c->freelist_lock); - for (i = 0; i < RESERVE_NR; i++) - available -= fifo_used(&ca->free[i]); - spin_unlock(&c->freelist_lock); - - available = max(available, 0LL); - - if (available > fifo_free(&ca->free_inc) || - (available && - !fifo_full(&ca->free[RESERVE_MOVINGGC]))) - break; - - up_read(&c->gc_lock); - schedule(); - try_to_freeze(); - down_read(&c->gc_lock); - } - - __set_current_state(TASK_RUNNING); - ca->allocator_state = ALLOCATOR_RUNNING; - closure_wake_up(&c->freelist_wait); - - return ret; -} - static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b, struct bucket_mark m) { @@ -538,11 +455,8 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b, gc_gen = bucket_gc_gen(bucket(ca, b)); - if (gc_gen >= BUCKET_GC_GEN_MAX / 2) - ca->inc_gen_needs_gc++; - - if (gc_gen >= BUCKET_GC_GEN_MAX) - ca->inc_gen_really_needs_gc++; + ca->inc_gen_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX / 2; + ca->inc_gen_really_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX; return gc_gen < BUCKET_GC_GEN_MAX; } @@ -619,6 +533,8 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca) struct bucket_mark m = READ_ONCE(g->mark); unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk); + cond_resched(); + if (!bch2_can_invalidate_bucket(ca, b, m)) continue; @@ -635,8 +551,6 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca) .key = key, }; } - - cond_resched(); } if (e.nr) @@ -729,6 +643,7 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca) size_t i, nr = 0; ca->inc_gen_needs_gc = 0; + ca->inc_gen_really_needs_gc = 0; switch (ca->mi.replacement) { case BCH_CACHE_REPLACEMENT_lru: @@ -750,25 +665,6 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca) return nr; } -static inline long next_alloc_bucket(struct bch_dev *ca) -{ - struct alloc_heap_entry e, *top = ca->alloc_heap.data; - - while (ca->alloc_heap.used) { - if (top->nr) { - size_t b = top->bucket; - - top->bucket++; - top->nr--; - return b; - } - - heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL); - } - - return -1; -} - /* * returns sequence number of most recent journal entry that updated this * bucket: @@ -791,17 +687,57 @@ static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m) } } -static int bch2_invalidate_one_bucket2(struct btree_trans *trans, - struct bch_dev *ca, - struct btree_iter *iter, - u64 *journal_seq, unsigned flags) +static int bucket_invalidate_btree(struct btree_trans *trans, + struct bch_dev *ca, u64 b) { struct bch_fs *c = trans->c; - struct bkey_alloc_buf a; + struct bkey_alloc_buf *a; struct bkey_alloc_unpacked u; struct bucket *g; struct bucket_mark m; - bool invalidating_cached_data; + struct btree_iter *iter = + bch2_trans_get_iter(trans, BTREE_ID_alloc, + POS(ca->dev_idx, b), + BTREE_ITER_CACHED| + BTREE_ITER_CACHED_NOFILL| + BTREE_ITER_INTENT); + int ret; + + a = bch2_trans_kmalloc(trans, sizeof(*a)); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + goto err; + + ret 
= bch2_btree_iter_traverse(iter); + if (ret) + goto err; + + percpu_down_read(&c->mark_lock); + g = bucket(ca, b); + m = READ_ONCE(g->mark); + u = alloc_mem_to_key(iter, g, m); + percpu_up_read(&c->mark_lock); + + u.gen++; + u.data_type = 0; + u.dirty_sectors = 0; + u.cached_sectors = 0; + u.read_time = atomic64_read(&c->io_clock[READ].now); + u.write_time = atomic64_read(&c->io_clock[WRITE].now); + + bch2_alloc_pack(c, a, u); + ret = bch2_trans_update(trans, iter, &a->k, + BTREE_TRIGGER_BUCKET_INVALIDATE); +err: + bch2_trans_iter_put(trans, iter); + return ret; +} + +static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca, + u64 *journal_seq, unsigned flags) +{ + struct bucket *g; + struct bucket_mark m; size_t b; int ret = 0; @@ -816,7 +752,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans, BUG_ON(m.dirty_sectors); - bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0); + bch2_mark_alloc_bucket(c, ca, b, true); spin_lock(&c->freelist_lock); verify_not_on_freelist(c, ca, b); @@ -847,48 +783,12 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans, goto out; } - bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b)); -retry: - ret = bch2_btree_iter_traverse(iter); - if (ret) - return ret; - - percpu_down_read(&c->mark_lock); - g = bucket(ca, iter->pos.offset); - m = READ_ONCE(g->mark); - u = alloc_mem_to_key(iter, g, m); - - percpu_up_read(&c->mark_lock); - - invalidating_cached_data = u.cached_sectors != 0; - - u.gen++; - u.data_type = 0; - u.dirty_sectors = 0; - u.cached_sectors = 0; - u.read_time = atomic64_read(&c->io_clock[READ].now); - u.write_time = atomic64_read(&c->io_clock[WRITE].now); - - bch2_alloc_pack(c, &a, u); - bch2_trans_update(trans, iter, &a.k, - BTREE_TRIGGER_BUCKET_INVALIDATE); - - /* - * XXX: - * when using deferred btree updates, we have journal reclaim doing - * btree updates and thus requiring the allocator to make forward - * progress, and here the allocator is requiring space in the journal - - * so we need a journal pre-reservation: - */ - ret = bch2_trans_commit(trans, NULL, - invalidating_cached_data ? 
journal_seq : NULL, - BTREE_INSERT_NOUNLOCK| - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_NOFAIL| - BTREE_INSERT_JOURNAL_RESERVED| - flags); - if (ret == -EINTR) - goto retry; + ret = bch2_trans_do(c, NULL, journal_seq, + BTREE_INSERT_NOCHECK_RW| + BTREE_INSERT_NOFAIL| + BTREE_INSERT_JOURNAL_RESERVED| + flags, + bucket_invalidate_btree(&trans, ca, b)); out: if (!ret) { /* remove from alloc_heap: */ @@ -913,8 +813,7 @@ out: percpu_down_read(&c->mark_lock); spin_lock(&c->freelist_lock); - bch2_mark_alloc_bucket(c, ca, b, false, - gc_pos_alloc(c, NULL), 0); + bch2_mark_alloc_bucket(c, ca, b, false); BUG_ON(!fifo_pop_back(&ca->free_inc, b2)); BUG_ON(b != b2); @@ -931,29 +830,28 @@ out: */ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca) { - struct btree_trans trans; - struct btree_iter *iter; u64 journal_seq = 0; int ret = 0; - bch2_trans_init(&trans, c, 0, 0); - iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, - POS(ca->dev_idx, 0), - BTREE_ITER_CACHED| - BTREE_ITER_CACHED_NOFILL| - BTREE_ITER_INTENT); - /* Only use nowait if we've already invalidated at least one bucket: */ while (!ret && !fifo_full(&ca->free_inc) && - ca->alloc_heap.used) - ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq, - BTREE_INSERT_GC_LOCK_HELD| + ca->alloc_heap.used) { + if (kthread_should_stop()) { + ret = 1; + break; + } + + ret = bch2_invalidate_one_bucket(c, ca, &journal_seq, (!fifo_empty(&ca->free_inc) ? BTREE_INSERT_NOWAIT : 0)); - - bch2_trans_iter_put(&trans, iter); - bch2_trans_exit(&trans); + /* + * We only want to batch up invalidates when they're going to + * require flushing the journal: + */ + if (!journal_seq) + break; + } /* If we used NOWAIT, don't return the error: */ if (!fifo_empty(&ca->free_inc)) @@ -973,83 +871,72 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca) return 0; } -static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket) +static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state) +{ + if (ca->allocator_state != new_state) { + ca->allocator_state = new_state; + closure_wake_up(&ca->fs->freelist_wait); + } +} + +static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) { unsigned i; int ret = 0; - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - - spin_lock(&c->freelist_lock); - for (i = 0; i < RESERVE_NR; i++) { - - /* - * Don't strand buckets on the copygc freelist until - * after recovery is finished: - */ - if (!test_bit(BCH_FS_STARTED, &c->flags) && - i == RESERVE_MOVINGGC) - continue; - - if (fifo_push(&ca->free[i], bucket)) { - fifo_pop(&ca->free_inc, bucket); - - closure_wake_up(&c->freelist_wait); - ca->allocator_state = ALLOCATOR_RUNNING; - - spin_unlock(&c->freelist_lock); - goto out; - } - } - - if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) { - ca->allocator_state = ALLOCATOR_BLOCKED_FULL; - closure_wake_up(&c->freelist_wait); - } - - spin_unlock(&c->freelist_lock); + spin_lock(&c->freelist_lock); + for (i = 0; i < RESERVE_NR; i++) { + /* + * Don't strand buckets on the copygc freelist until + * after recovery is finished: + */ + if (i == RESERVE_MOVINGGC && + !test_bit(BCH_FS_STARTED, &c->flags)) + continue; - if ((current->flags & PF_KTHREAD) && - kthread_should_stop()) { + if (fifo_push(&ca->free[i], b)) { + fifo_pop(&ca->free_inc, b); ret = 1; break; } - - schedule(); - try_to_freeze(); } -out: - __set_current_state(TASK_RUNNING); + spin_unlock(&c->freelist_lock); + + ca->allocator_state = ret + ? 
ALLOCATOR_running + : ALLOCATOR_blocked_full; + closure_wake_up(&c->freelist_wait); return ret; } -/* - * Pulls buckets off free_inc, discards them (if enabled), then adds them to - * freelists, waiting until there's room if necessary: - */ -static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca) +static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) { - while (!fifo_empty(&ca->free_inc)) { - size_t bucket = fifo_peek(&ca->free_inc); - - if (ca->mi.discard && - blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev))) - blkdev_issue_discard(ca->disk_sb.bdev, - bucket_to_sector(ca, bucket), - ca->mi.bucket_size, GFP_NOIO, 0); - - if (push_invalidated_bucket(c, ca, bucket)) - return 1; - } + if (ca->mi.discard && + blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev))) + blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b), + ca->mi.bucket_size, GFP_NOFS, 0); +} - return 0; +static bool allocator_thread_running(struct bch_dev *ca) +{ + unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw && + test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) + ? ALLOCATOR_running + : ALLOCATOR_stopped; + alloc_thread_set_state(ca, state); + return state == ALLOCATOR_running; } -static inline bool allocator_thread_running(struct bch_dev *ca) +static int buckets_available(struct bch_dev *ca, unsigned long gc_count) { - return ca->mi.state == BCH_MEMBER_STATE_rw && - test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags); + s64 available = dev_buckets_reclaimable(ca) - + (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0); + bool ret = available > 0; + + alloc_thread_set_state(ca, ret + ? ALLOCATOR_running + : ALLOCATOR_blocked); + return ret; } /** @@ -1064,61 +951,29 @@ static int bch2_allocator_thread(void *arg) { struct bch_dev *ca = arg; struct bch_fs *c = ca->fs; + unsigned long gc_count = c->gc_count; size_t nr; int ret; set_freezable(); while (1) { - if (!allocator_thread_running(ca)) { - ca->allocator_state = ALLOCATOR_STOPPED; - if (kthread_wait_freezable(allocator_thread_running(ca))) - break; - } - - ca->allocator_state = ALLOCATOR_RUNNING; - - cond_resched(); - if (kthread_should_stop()) - break; - - pr_debug("discarding %zu invalidated buckets", - fifo_used(&ca->free_inc)); - - ret = discard_invalidated_buckets(c, ca); + ret = kthread_wait_freezable(allocator_thread_running(ca)); if (ret) goto stop; - down_read(&c->gc_lock); - - ret = bch2_invalidate_buckets(c, ca); - if (ret) { - up_read(&c->gc_lock); - goto stop; - } - - if (!fifo_empty(&ca->free_inc)) { - up_read(&c->gc_lock); - continue; - } - - pr_debug("free_inc now empty"); + while (!ca->alloc_heap.used) { + cond_resched(); - do { - /* - * Find some buckets that we can invalidate, either - * they're completely unused, or only contain clean data - * that's been written back to the backing device or - * another cache tier - */ - - pr_debug("scanning for reclaimable buckets"); + ret = kthread_wait_freezable(buckets_available(ca, gc_count)); + if (ret) + goto stop; + gc_count = c->gc_count; nr = find_reclaimable_buckets(c, ca); - pr_debug("found %zu buckets", nr); - - trace_alloc_batch(ca, nr, ca->alloc_heap.size); + trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc, + ca->inc_gen_really_needs_gc); if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) || ca->inc_gen_really_needs_gc) && @@ -1126,38 +981,24 @@ static int bch2_allocator_thread(void *arg) atomic_inc(&c->kick_gc); wake_up_process(c->gc_thread); } + } - /* - * If we found any buckets, we have to invalidate them - * before we scan for 
more - but if we didn't find very - * many we may want to wait on more buckets being - * available so we don't spin: - */ - if (!nr || - (nr < ALLOC_SCAN_BATCH(ca) && - !fifo_empty(&ca->free[RESERVE_NONE]))) { - ret = wait_buckets_available(c, ca); - if (ret) { - up_read(&c->gc_lock); - goto stop; - } - } - } while (!nr); + ret = bch2_invalidate_buckets(c, ca); + if (ret) + goto stop; - up_read(&c->gc_lock); + while (!fifo_empty(&ca->free_inc)) { + u64 b = fifo_peek(&ca->free_inc); - pr_debug("%zu buckets to invalidate", nr); + discard_one_bucket(c, ca, b); - /* - * alloc_heap is now full of newly-invalidated buckets: next, - * write out the new bucket gens: - */ + ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b)); + if (ret) + goto stop; + } } - stop: - pr_debug("alloc thread stopping (ret %i)", ret); - ca->allocator_state = ALLOCATOR_STOPPED; - closure_wake_up(&c->freelist_wait); + alloc_thread_set_state(ca, ALLOCATOR_stopped); return 0; } @@ -1166,7 +1007,7 @@ stop: void bch2_recalc_capacity(struct bch_fs *c) { struct bch_dev *ca; - u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0; + u64 capacity = 0, reserved_sectors = 0, gc_reserve; unsigned bucket_size_max = 0; unsigned long ra_pages = 0; unsigned i, j; @@ -1209,8 +1050,6 @@ void bch2_recalc_capacity(struct bch_fs *c) dev_reserve *= ca->mi.bucket_size; - copygc_threshold += dev_reserve; - capacity += bucket_to_sector(ca, ca->mi.nbuckets - ca->mi.first_bucket); @@ -1228,7 +1067,6 @@ void bch2_recalc_capacity(struct bch_fs *c) reserved_sectors = min(reserved_sectors, capacity); - c->copygc_threshold = copygc_threshold; c->capacity = capacity - reserved_sectors; c->bucket_size_max = bucket_size_max; @@ -1339,7 +1177,7 @@ void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca) { if (ca->alloc_thread) closure_wait_event(&c->freelist_wait, - ca->allocator_state != ALLOCATOR_RUNNING); + ca->allocator_state != ALLOCATOR_running); } /* stop allocator thread: */ @@ -1393,7 +1231,23 @@ int bch2_dev_allocator_start(struct bch_dev *ca) void bch2_fs_allocator_background_init(struct bch_fs *c) { spin_lock_init(&c->freelist_lock); +} + +void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct open_bucket *ob; + + for (ob = c->open_buckets; + ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); + ob++) { + spin_lock(&ob->lock); + if (ob->valid && !ob->on_partial_list) { + pr_buf(out, "%zu ref %u type %s\n", + ob - c->open_buckets, + atomic_read(&ob->pin), + bch2_data_types[ob->type]); + } + spin_unlock(&ob->lock); + } - c->pd_controllers_update_seconds = 5; - INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update); } diff --git a/libbcachefs/alloc_background.h b/libbcachefs/alloc_background.h index 6fededc..a4f6bf5 100644 --- a/libbcachefs/alloc_background.h +++ b/libbcachefs/alloc_background.h @@ -6,6 +6,8 @@ #include "alloc_types.h" #include "debug.h" +extern const char * const bch2_allocator_states[]; + struct bkey_alloc_unpacked { u64 bucket; u8 dev; @@ -89,8 +91,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); .val_to_text = bch2_alloc_to_text, \ } -struct journal_keys; -int bch2_alloc_read(struct bch_fs *, struct journal_keys *); +int bch2_alloc_read(struct bch_fs *); static inline void bch2_wake_allocator(struct bch_dev *ca) { @@ -98,10 +99,8 @@ static inline void bch2_wake_allocator(struct bch_dev *ca) rcu_read_lock(); p = rcu_dereference(ca->alloc_thread); - if (p) { + if (p) wake_up_process(p); - ca->allocator_state = 
ALLOCATOR_RUNNING; - } rcu_read_unlock(); } @@ -133,4 +132,6 @@ int bch2_dev_allocator_start(struct bch_dev *); int bch2_alloc_write(struct bch_fs *, unsigned); void bch2_fs_allocator_background_init(struct bch_fs *); +void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *); + #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */ diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c index 8f0b94f..412fed4 100644 --- a/libbcachefs/alloc_foreground.c +++ b/libbcachefs/alloc_foreground.c @@ -1,57 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Primary bucket allocation code - * * Copyright 2012 Google, Inc. * - * Allocation in bcache is done in terms of buckets: - * - * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in - * btree pointers - they must match for the pointer to be considered valid. - * - * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a - * bucket simply by incrementing its gen. - * - * The gens (along with the priorities; it's really the gens are important but - * the code is named as if it's the priorities) are written in an arbitrary list - * of buckets on disk, with a pointer to them in the journal header. - * - * When we invalidate a bucket, we have to write its new gen to disk and wait - * for that write to complete before we use it - otherwise after a crash we - * could have pointers that appeared to be good but pointed to data that had - * been overwritten. - * - * Since the gens and priorities are all stored contiguously on disk, we can - * batch this up: We fill up the free_inc list with freshly invalidated buckets, - * call prio_write(), and when prio_write() finishes we pull buckets off the - * free_inc list and optionally discard them. - * - * free_inc isn't the only freelist - if it was, we'd often have to sleep while - * priorities and gens were being written before we could allocate. c->free is a - * smaller freelist, and buckets on that list are always ready to be used. - * - * If we've got discards enabled, that happens when a bucket moves from the - * free_inc list to the free list. - * - * It's important to ensure that gens don't wrap around - with respect to - * either the oldest gen in the btree or the gen on disk. This is quite - * difficult to do in practice, but we explicitly guard against it anyways - if - * a bucket is in danger of wrapping around we simply skip invalidating it that - * time around, and we garbage collect or rewrite the priorities sooner than we - * would have otherwise. + * Foreground allocator code: allocate buckets from freelist, and allocate in + * sector granularity from writepoints. * * bch2_bucket_alloc() allocates a single bucket from a specific device. * * bch2_bucket_alloc_set() allocates one or more buckets from different devices * in a given filesystem. - * - * invalidate_buckets() drives all the processes described above. It's called - * from bch2_bucket_alloc() and a few other places that need to make sure free - * buckets are ready. - * - * invalidate_buckets_(lru|fifo)() find buckets that are available to be - * invalidated, and then invalidate them and stick them on the free_inc list - - * in either lru or fifo order. 
*/ #include "bcachefs.h" @@ -98,8 +55,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) percpu_down_read(&c->mark_lock); spin_lock(&ob->lock); - bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), - false, gc_pos_alloc(c, ob), 0); + bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false); ob->valid = false; ob->type = 0; @@ -109,7 +65,9 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) spin_lock(&c->freelist_lock); ob->freelist = c->open_buckets_freelist; c->open_buckets_freelist = ob - c->open_buckets; + c->open_buckets_nr_free++; + ca->nr_open_buckets--; spin_unlock(&c->freelist_lock); closure_wake_up(&c->open_buckets_wait); @@ -316,6 +274,7 @@ out: c->blocked_allocate = 0; } + ca->nr_open_buckets++; spin_unlock(&c->freelist_lock); bch2_wake_allocator(ca); @@ -351,7 +310,7 @@ void bch2_dev_stripe_increment(struct bch_dev *ca, struct dev_stripe_state *stripe) { u64 *v = stripe->next_alloc + ca->dev_idx; - u64 free_space = dev_buckets_free(ca); + u64 free_space = dev_buckets_available(ca); u64 free_space_inv = free_space ? div64_u64(1ULL << 48, free_space) : 1ULL << 48; @@ -680,11 +639,14 @@ static struct write_point *__writepoint_find(struct hlist_head *head, { struct write_point *wp; + rcu_read_lock(); hlist_for_each_entry_rcu(wp, head, node) if (wp->write_point == write_point) - return wp; - - return NULL; + goto out; + wp = NULL; +out: + rcu_read_unlock(); + return wp; } static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor) diff --git a/libbcachefs/alloc_types.h b/libbcachefs/alloc_types.h index be164d6..4a1cd8b 100644 --- a/libbcachefs/alloc_types.h +++ b/libbcachefs/alloc_types.h @@ -10,6 +10,18 @@ struct ec_bucket_buf; +#define ALLOC_THREAD_STATES() \ + x(stopped) \ + x(running) \ + x(blocked) \ + x(blocked_full) + +enum allocator_states { +#define x(n) ALLOCATOR_##n, + ALLOC_THREAD_STATES() +#undef x +}; + enum alloc_reserve { RESERVE_BTREE_MOVINGGC = -2, RESERVE_BTREE = -1, diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h index 549cded..051aba6 100644 --- a/libbcachefs/bcachefs.h +++ b/libbcachefs/bcachefs.h @@ -259,7 +259,14 @@ do { \ BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \ "Disables rewriting of btree nodes during mark and sweep")\ BCH_DEBUG_PARAM(btree_shrinker_disabled, \ - "Disables the shrinker callback for the btree node cache") + "Disables the shrinker callback for the btree node cache")\ + BCH_DEBUG_PARAM(verify_btree_ondisk, \ + "Reread btree nodes at various points to verify the " \ + "mergesort in the read path against modifications " \ + "done in memory") \ + BCH_DEBUG_PARAM(verify_all_btree_replicas, \ + "When reading btree nodes, read all replicas and " \ + "compare them") /* Parameters that should only be compiled in in debug mode: */ #define BCH_DEBUG_PARAMS_DEBUG() \ @@ -273,10 +280,6 @@ do { \ "information) when iterating over keys") \ BCH_DEBUG_PARAM(debug_check_btree_accounting, \ "Verify btree accounting for keys within a node") \ - BCH_DEBUG_PARAM(verify_btree_ondisk, \ - "Reread btree nodes at various points to verify the " \ - "mergesort in the read path against modifications " \ - "done in memory") \ BCH_DEBUG_PARAM(journal_seq_verify, \ "Store the journal sequence number in the version " \ "number of every btree key, and verify that btree " \ @@ -379,7 +382,6 @@ enum gc_phase { GC_PHASE_BTREE_reflink, GC_PHASE_PENDING_DELETE, - GC_PHASE_ALLOC, }; struct gc_pos { @@ -388,6 +390,14 @@ struct gc_pos { unsigned level; }; +struct reflink_gc { + u64 offset; + 
u32 size; + u32 refcount; +}; + +typedef GENRADIX(struct reflink_gc) reflink_gc_table; + struct io_count { u64 sectors[2][BCH_DATA_NR]; }; @@ -447,6 +457,7 @@ struct bch_dev { */ alloc_fifo free[RESERVE_NR]; alloc_fifo free_inc; + unsigned nr_open_buckets; open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT]; open_bucket_idx_t open_buckets_partial_nr; @@ -456,16 +467,7 @@ struct bch_dev { size_t inc_gen_needs_gc; size_t inc_gen_really_needs_gc; - /* - * XXX: this should be an enum for allocator state, so as to include - * error state - */ - enum { - ALLOCATOR_STOPPED, - ALLOCATOR_RUNNING, - ALLOCATOR_BLOCKED, - ALLOCATOR_BLOCKED_FULL, - } allocator_state; + enum allocator_states allocator_state; alloc_heap alloc_heap; @@ -494,10 +496,13 @@ enum { BCH_FS_ALLOCATOR_RUNNING, BCH_FS_ALLOCATOR_STOPPING, BCH_FS_INITIAL_GC_DONE, + BCH_FS_INITIAL_GC_UNFIXED, + BCH_FS_TOPOLOGY_REPAIR_DONE, BCH_FS_BTREE_INTERIOR_REPLAY_DONE, BCH_FS_FSCK_DONE, BCH_FS_STARTED, BCH_FS_RW, + BCH_FS_WAS_RW, /* shutdown: */ BCH_FS_STOPPING, @@ -506,7 +511,9 @@ enum { /* errors: */ BCH_FS_ERROR, + BCH_FS_TOPOLOGY_ERROR, BCH_FS_ERRORS_FIXED, + BCH_FS_ERRORS_NOT_FIXED, /* misc: */ BCH_FS_NEED_ANOTHER_GC, @@ -554,6 +561,8 @@ struct btree_iter_buf { struct btree_iter *iter; }; +#define REPLICAS_DELTA_LIST_MAX (1U << 16) + struct bch_fs { struct closure cl; @@ -567,6 +576,7 @@ struct bch_fs { int minor; struct device *chardev; struct super_block *vfs_sb; + dev_t dev; char name[40]; /* ro/rw, add/remove/resize devices: */ @@ -581,6 +591,7 @@ struct bch_fs { struct bch_replicas_cpu replicas; struct bch_replicas_cpu replicas_gc; struct mutex replicas_gc_lock; + mempool_t replicas_delta_pool; struct journal_entry_res btree_root_journal_res; struct journal_entry_res replicas_journal_res; @@ -607,11 +618,13 @@ struct bch_fs { u64 time_base_lo; u32 time_base_hi; - u32 time_precision; + unsigned time_units_per_sec; + unsigned nsec_per_time_unit; u64 features; u64 compat; } sb; + struct bch_sb_handle disk_sb; unsigned short block_bits; /* ilog2(block_size) */ @@ -623,6 +636,7 @@ struct bch_fs { /* BTREE CACHE */ struct bio_set btree_bio; + struct workqueue_struct *io_complete_wq; struct btree_root btree_roots[BTREE_ID_NR]; struct mutex btree_root_lock; @@ -653,20 +667,19 @@ struct bch_fs { struct mutex btree_trans_lock; struct list_head btree_trans_list; mempool_t btree_iters_pool; + mempool_t btree_trans_mem_pool; struct btree_iter_buf __percpu *btree_iters_bufs; struct srcu_struct btree_trans_barrier; struct btree_key_cache btree_key_cache; - struct workqueue_struct *wq; + struct workqueue_struct *btree_update_wq; + struct workqueue_struct *btree_io_complete_wq; /* copygc needs its own workqueue for index updates.. */ struct workqueue_struct *copygc_wq; /* ALLOCATION */ - struct delayed_work pd_controllers_update; - unsigned pd_controllers_update_seconds; - struct bch_devs_mask rw_devs[BCH_DATA_NR]; u64 capacity; /* sectors */ @@ -726,6 +739,9 @@ struct bch_fs { atomic_t kick_gc; unsigned long gc_count; + enum btree_id gc_gens_btree; + struct bpos gc_gens_pos; + /* * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos] * has been marked by GC. 
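/*
 * Editorial sketch, not part of the patch: the ALLOC_THREAD_STATES()
 * x-macro added in alloc_types.h above is expanded once to build
 * enum allocator_states, and alloc_background.h now declares a matching
 * bch2_allocator_states[] name table.  That table's definition is not in
 * these hunks, so the second expansion below is an assumption about how
 * such a table is typically generated; the x-macro pattern itself is
 * standard C and compiles as shown.
 */
#include <stdio.h>

#define ALLOC_THREAD_STATES()	\
	x(stopped)		\
	x(running)		\
	x(blocked)		\
	x(blocked_full)

enum allocator_states {
#define x(n)	ALLOCATOR_##n,
	ALLOC_THREAD_STATES()
#undef x
	ALLOCATOR_STATE_NR,	/* count, added here for the sketch only */
};

/* hypothetical stand-in for the bch2_allocator_states[] declared above */
static const char * const allocator_state_names[] = {
#define x(n)	#n,
	ALLOC_THREAD_STATES()
#undef x
};

int main(void)
{
	enum allocator_states s = ALLOCATOR_blocked_full;

	/* prints "allocator state 3: blocked_full" */
	printf("allocator state %u: %s\n", s, allocator_state_names[s]);
	return 0;
}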
@@ -772,9 +788,8 @@ struct bch_fs { /* COPYGC */ struct task_struct *copygc_thread; copygc_heap copygc_heap; - struct bch_pd_controller copygc_pd; struct write_point copygc_write_point; - u64 copygc_threshold; + s64 copygc_wait; /* STRIPES: */ GENRADIX(struct stripe) stripes[2]; @@ -799,6 +814,9 @@ struct bch_fs { /* REFLINK */ u64 reflink_hint; + reflink_gc_table reflink_gc_table; + size_t reflink_gc_nr; + size_t reflink_gc_idx; /* VFS IO PATH - fs-io.c */ struct bio_set writepage_bioset; @@ -808,8 +826,6 @@ struct bch_fs { atomic64_t btree_writes_nr; atomic64_t btree_writes_sectors; - struct bio_list btree_write_error_list; - struct work_struct btree_write_error_work; spinlock_t btree_write_error_lock; /* ERRORS */ @@ -823,11 +839,9 @@ struct bch_fs { /* DEBUG JUNK */ struct dentry *debug; struct btree_debug btree_debug[BTREE_ID_NR]; -#ifdef CONFIG_BCACHEFS_DEBUG struct btree *verify_data; struct btree_node *verify_ondisk; struct mutex verify_lock; -#endif u64 *unused_inode_hints; unsigned inode_shard_bits; @@ -877,19 +891,22 @@ static inline unsigned block_bytes(const struct bch_fs *c) return c->opts.block_size << 9; } -static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, u64 time) +static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, s64 time) { - return ns_to_timespec64(time * c->sb.time_precision + c->sb.time_base_lo); + struct timespec64 t; + s32 rem; + + time += c->sb.time_base_lo; + + t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem); + t.tv_nsec = rem * c->sb.nsec_per_time_unit; + return t; } static inline s64 timespec_to_bch2_time(struct bch_fs *c, struct timespec64 ts) { - s64 ns = timespec64_to_ns(&ts) - c->sb.time_base_lo; - - if (c->sb.time_precision == 1) - return ns; - - return div_s64(ns, c->sb.time_precision); + return (ts.tv_sec * c->sb.time_units_per_sec + + (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo; } static inline s64 bch2_current_time(struct bch_fs *c) diff --git a/libbcachefs/bcachefs_format.h b/libbcachefs/bcachefs_format.h index ead7268..98779e4 100644 --- a/libbcachefs/bcachefs_format.h +++ b/libbcachefs/bcachefs_format.h @@ -148,7 +148,8 @@ static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot) } #define POS_MIN SPOS(0, 0, 0) -#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX) +#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0) +#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX) #define POS(_inode, _offset) SPOS(_inode, _offset, 0) /* Empty placeholder struct, for container_of() */ @@ -1209,7 +1210,8 @@ enum bcachefs_metadata_version { bcachefs_metadata_version_inode_btree_change = 11, bcachefs_metadata_version_snapshot = 12, bcachefs_metadata_version_inode_backpointers = 13, - bcachefs_metadata_version_max = 14, + bcachefs_metadata_version_btree_ptr_sectors_written = 14, + bcachefs_metadata_version_max = 15, }; #define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1) @@ -1313,12 +1315,10 @@ LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59); LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60); LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61); +LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62); -/* bit 61 was reflink option */ LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63); -/* 61-64 unused */ - LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4); LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8); 
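/*
 * Editorial sketch, not part of the patch: each LE64_BITMASK() line in this
 * header declares a getter and a SET_ accessor for a bit range [lo, hi)
 * inside a 64-bit little-endian flags word of the superblock (e.g. the new
 * BCH_SB_SHARD_INUMS option occupies bits [28, 29) of flags[3]).  The
 * helpers below are a simplified stand-in, not the macro's exact expansion:
 * the real accessors also convert the on-disk __le64 value with
 * le64_to_cpu()/cpu_to_le64(), which is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t flags_get_bits(uint64_t flags, unsigned lo, unsigned hi)
{
	/* extract bits [lo, hi); avoid an undefined shift by 64 when hi == 64 */
	uint64_t mask = hi < 64 ? (1ULL << hi) - 1 : ~0ULL;

	return (flags & mask) >> lo;
}

static inline uint64_t flags_set_bits(uint64_t flags, unsigned lo, unsigned hi,
				      uint64_t v)
{
	uint64_t mask = (hi < 64 ? (1ULL << hi) - 1 : ~0ULL) & ~((1ULL << lo) - 1);

	return (flags & ~mask) | ((v << lo) & mask);
}

int main(void)
{
	uint64_t flags = 0;

	/* a 1-bit option at bits [28, 29), like BCH_SB_SHARD_INUMS */
	flags = flags_set_bits(flags, 28, 29, 1);
	printf("bit 28 = %llu\n",
	       (unsigned long long) flags_get_bits(flags, 28, 29));
	return 0;
}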
LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9); @@ -1346,6 +1346,8 @@ LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64); LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16); LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28); +LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29); +LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30); /* * Features: @@ -1457,7 +1459,8 @@ enum bch_csum_type { BCH_CSUM_CHACHA20_POLY1305_128 = 4, BCH_CSUM_CRC32C = 5, BCH_CSUM_CRC64 = 6, - BCH_CSUM_NR = 7, + BCH_CSUM_XXHASH = 7, + BCH_CSUM_NR = 8, }; static const unsigned bch_crc_bytes[] = { @@ -1466,6 +1469,7 @@ static const unsigned bch_crc_bytes[] = { [BCH_CSUM_CRC32C] = 4, [BCH_CSUM_CRC64_NONZERO] = 8, [BCH_CSUM_CRC64] = 8, + [BCH_CSUM_XXHASH] = 8, [BCH_CSUM_CHACHA20_POLY1305_80] = 10, [BCH_CSUM_CHACHA20_POLY1305_128] = 16, }; @@ -1484,7 +1488,8 @@ static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type) #define BCH_CSUM_OPTS() \ x(none, 0) \ x(crc32c, 1) \ - x(crc64, 2) + x(crc64, 2) \ + x(xxhash, 3) enum bch_csum_opts { #define x(t, n) BCH_CSUM_OPT_##t = n, @@ -1737,6 +1742,9 @@ LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5); LE32_BITMASK(BSET_SEPARATE_WHITEOUTS, struct bset, flags, 5, 6); +/* Sector offset within the btree node: */ +LE32_BITMASK(BSET_OFFSET, struct bset, flags, 16, 32); + struct btree_node { struct bch_csum csum; __le64 magic; diff --git a/libbcachefs/bkey.c b/libbcachefs/bkey.c index 3af5606..946dd27 100644 --- a/libbcachefs/bkey.c +++ b/libbcachefs/bkey.c @@ -443,8 +443,15 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out, struct bpos orig = in; #endif bool exact = true; + unsigned i; - out->_data[0] = 0; + /* + * bch2_bkey_pack_key() will write to all of f->key_u64s, minus the 3 + * byte header, but pack_pos() won't if the len/version fields are big + * enough - we need to make sure to zero them out: + */ + for (i = 0; i < f->key_u64s; i++) + out->_data[i] = 0; if (unlikely(in.snapshot < le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) { @@ -613,22 +620,22 @@ const char *bch2_bkey_format_validate(struct bkey_format *f) if (f->nr_fields != BKEY_NR_FIELDS) return "incorrect number of fields"; + /* + * Verify that the packed format can't represent fields larger than the + * unpacked format: + */ for (i = 0; i < f->nr_fields; i++) { unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i]; - u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1)); + u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1)); + u64 packed_max = f->bits_per_field[i] + ? 
~((~0ULL << 1) << (f->bits_per_field[i] - 1)) + : 0; u64 field_offset = le64_to_cpu(f->field_offset[i]); - if (f->bits_per_field[i] > unpacked_bits) + if (packed_max + field_offset < packed_max || + packed_max + field_offset > unpacked_max) return "field too large"; - if ((f->bits_per_field[i] == unpacked_bits) && field_offset) - return "offset + bits overflow"; - - if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) & - unpacked_mask) < - field_offset) - return "offset + bits overflow"; - bits += f->bits_per_field[i]; } diff --git a/libbcachefs/bkey_methods.c b/libbcachefs/bkey_methods.c index 6fe95b8..f8adbf4 100644 --- a/libbcachefs/bkey_methods.c +++ b/libbcachefs/bkey_methods.c @@ -84,7 +84,7 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c, .val_to_text = key_type_inline_data_to_text, \ } -static const struct bkey_ops bch2_bkey_ops[] = { +const struct bkey_ops bch2_bkey_ops[] = { #define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name, BCH_BKEY_TYPES() #undef x @@ -98,12 +98,51 @@ const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k) return bch2_bkey_ops[k.k->type].key_invalid(c, k); } +static unsigned bch2_key_types_allowed[] = { + [BKEY_TYPE_extents] = + (1U << KEY_TYPE_error)| + (1U << KEY_TYPE_cookie)| + (1U << KEY_TYPE_extent)| + (1U << KEY_TYPE_reservation)| + (1U << KEY_TYPE_reflink_p)| + (1U << KEY_TYPE_inline_data), + [BKEY_TYPE_inodes] = + (1U << KEY_TYPE_inode)| + (1U << KEY_TYPE_inode_generation), + [BKEY_TYPE_dirents] = + (1U << KEY_TYPE_hash_whiteout)| + (1U << KEY_TYPE_dirent), + [BKEY_TYPE_xattrs] = + (1U << KEY_TYPE_cookie)| + (1U << KEY_TYPE_hash_whiteout)| + (1U << KEY_TYPE_xattr), + [BKEY_TYPE_alloc] = + (1U << KEY_TYPE_alloc)| + (1U << KEY_TYPE_alloc_v2), + [BKEY_TYPE_quotas] = + (1U << KEY_TYPE_quota), + [BKEY_TYPE_stripes] = + (1U << KEY_TYPE_stripe), + [BKEY_TYPE_reflink] = + (1U << KEY_TYPE_reflink_v)| + (1U << KEY_TYPE_indirect_inline_data), + [BKEY_TYPE_btree] = + (1U << KEY_TYPE_btree_ptr)| + (1U << KEY_TYPE_btree_ptr_v2), +}; + const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, enum btree_node_type type) { + unsigned key_types_allowed = (1U << KEY_TYPE_deleted)| + bch2_key_types_allowed[type] ; + if (k.k->u64s < BKEY_U64s) return "u64s too small"; + if (!(key_types_allowed & (1U << k.k->type))) + return "invalid key type for this btree"; + if (type == BKEY_TYPE_btree && bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) return "value too big"; @@ -230,7 +269,7 @@ void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c, { bch2_bkey_to_text(out, k.k); - if (k.k) { + if (bkey_val_bytes(k.k)) { pr_buf(out, ": "); bch2_val_to_text(out, c, k); } @@ -253,24 +292,11 @@ bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k) : false; } -enum merge_result bch2_bkey_merge(struct bch_fs *c, - struct bkey_s l, struct bkey_s r) +bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) { const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type]; - enum merge_result ret; - - if (bch2_key_merging_disabled || - !ops->key_merge || - l.k->type != r.k->type || - bversion_cmp(l.k->version, r.k->version) || - bpos_cmp(l.k->p, bkey_start_pos(r.k))) - return BCH_MERGE_NOMERGE; - - ret = ops->key_merge(c, l, r); - if (ret != BCH_MERGE_NOMERGE) - l.k->needs_whiteout |= r.k->needs_whiteout; - return ret; + return bch2_bkey_maybe_mergable(l.k, r.k) && ops->key_merge(c, l, r); } static const struct old_bkey_type { diff --git a/libbcachefs/bkey_methods.h 
b/libbcachefs/bkey_methods.h index bfa6f11..3012035 100644 --- a/libbcachefs/bkey_methods.h +++ b/libbcachefs/bkey_methods.h @@ -11,17 +11,6 @@ enum btree_node_type; extern const char * const bch2_bkey_types[]; -enum merge_result { - BCH_MERGE_NOMERGE, - - /* - * The keys were mergeable, but would have overflowed size - so instead - * l was changed to the maximum size, and both keys were modified: - */ - BCH_MERGE_PARTIAL, - BCH_MERGE_MERGE, -}; - struct bkey_ops { /* Returns reason for being invalid if invalid, else NULL: */ const char * (*key_invalid)(const struct bch_fs *, @@ -30,13 +19,14 @@ struct bkey_ops { struct bkey_s_c); void (*swab)(struct bkey_s); bool (*key_normalize)(struct bch_fs *, struct bkey_s); - enum merge_result (*key_merge)(struct bch_fs *, - struct bkey_s, struct bkey_s); + bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c); void (*compat)(enum btree_id id, unsigned version, unsigned big_endian, int write, struct bkey_s); }; +extern const struct bkey_ops bch2_bkey_ops[]; + const char *bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c); const char *__bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type); @@ -57,8 +47,17 @@ void bch2_bkey_swab_val(struct bkey_s); bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s); -enum merge_result bch2_bkey_merge(struct bch_fs *, - struct bkey_s, struct bkey_s); +static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r) +{ + return l->type == r->type && + !bversion_cmp(l->version, r->version) && + !bpos_cmp(l->p, bkey_start_pos(r)) && + (u64) l->size + r->size <= KEY_SIZE_MAX && + bch2_bkey_ops[l->type].key_merge && + !bch2_key_merging_disabled; +} + +bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int); diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c index f92a757..0eb85ac 100644 --- a/libbcachefs/bset.c +++ b/libbcachefs/bset.c @@ -1194,12 +1194,10 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b, static inline void prefetch_four_cachelines(void *p) { #ifdef CONFIG_X86_64 - asm(".intel_syntax noprefix;" - "prefetcht0 [%0 - 127 + 64 * 0];" - "prefetcht0 [%0 - 127 + 64 * 1];" - "prefetcht0 [%0 - 127 + 64 * 2];" - "prefetcht0 [%0 - 127 + 64 * 3];" - ".att_syntax prefix;" + asm("prefetcht0 (-127 + 64 * 0)(%0);" + "prefetcht0 (-127 + 64 * 1)(%0);" + "prefetcht0 (-127 + 64 * 2)(%0);" + "prefetcht0 (-127 + 64 * 3)(%0);" : : "r" (p + 127)); #else diff --git a/libbcachefs/bset.h b/libbcachefs/bset.h index 506da4e..e42f866 100644 --- a/libbcachefs/bset.h +++ b/libbcachefs/bset.h @@ -188,7 +188,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree * gets to the second cacheline. 
*/ -#define BSET_CACHELINE 128 +#define BSET_CACHELINE 256 static inline size_t btree_keys_cachelines(const struct btree *b) { diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c index 1abc50f..cd0c500 100644 --- a/libbcachefs/btree_cache.c +++ b/libbcachefs/btree_cache.c @@ -13,6 +13,8 @@ #include #include +struct lock_class_key bch2_btree_node_lock_key; + void bch2_recalc_btree_reserve(struct bch_fs *c) { unsigned i, reserve = 16; @@ -33,21 +35,21 @@ static inline unsigned btree_cache_can_free(struct btree_cache *bc) return max_t(int, 0, bc->used - bc->reserve); } -static void __btree_node_data_free(struct bch_fs *c, struct btree *b) +static void btree_node_data_free(struct bch_fs *c, struct btree *b) { + struct btree_cache *bc = &c->btree_cache; + EBUG_ON(btree_node_write_in_flight(b)); kvpfree(b->data, btree_bytes(c)); b->data = NULL; +#ifdef __KERNEL__ vfree(b->aux_data); +#else + munmap(b->aux_data, btree_aux_data_bytes(b)); +#endif b->aux_data = NULL; -} -static void btree_node_data_free(struct bch_fs *c, struct btree *b) -{ - struct btree_cache *bc = &c->btree_cache; - - __btree_node_data_free(c, b); bc->used--; list_move(&b->list, &bc->freed); } @@ -75,8 +77,13 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) b->data = kvpmalloc(btree_bytes(c), gfp); if (!b->data) return -ENOMEM; - +#ifdef __KERNEL__ b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp); +#else + b->aux_data = mmap(NULL, btree_aux_data_bytes(b), + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); +#endif if (!b->aux_data) { kvpfree(b->data, btree_bytes(c)); b->data = NULL; @@ -93,14 +100,14 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c) return NULL; bkey_btree_ptr_init(&b->key); - six_lock_init(&b->c.lock); + __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key); INIT_LIST_HEAD(&b->list); INIT_LIST_HEAD(&b->write_blocked); b->byte_order = ilog2(btree_bytes(c)); return b; } -static struct btree *btree_node_mem_alloc(struct bch_fs *c) +struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) { struct btree_cache *bc = &c->btree_cache; struct btree *b = __btree_node_mem_alloc(c); @@ -179,6 +186,17 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush) int ret = 0; lockdep_assert_held(&bc->lock); +wait_on_io: + if (b->flags & ((1U << BTREE_NODE_dirty)| + (1U << BTREE_NODE_read_in_flight)| + (1U << BTREE_NODE_write_in_flight))) { + if (!flush) + return -ENOMEM; + + /* XXX: waiting on IO with btree cache lock held */ + bch2_btree_node_wait_on_read(b); + bch2_btree_node_wait_on_write(b); + } if (!six_trylock_intent(&b->c.lock)) return -ENOMEM; @@ -186,25 +204,26 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush) if (!six_trylock_write(&b->c.lock)) goto out_unlock_intent; + /* recheck under lock */ + if (b->flags & ((1U << BTREE_NODE_read_in_flight)| + (1U << BTREE_NODE_write_in_flight))) { + if (!flush) + goto out_unlock; + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; + } + if (btree_node_noevict(b)) goto out_unlock; if (!btree_node_may_write(b)) goto out_unlock; - if (btree_node_dirty(b) && - test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags)) - goto out_unlock; - - if (btree_node_dirty(b) || - btree_node_write_in_flight(b) || - btree_node_read_in_flight(b)) { - if (!flush) + if (btree_node_dirty(b)) { + if (!flush || + test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags)) goto out_unlock; - - wait_on_bit_io(&b->flags, 
BTREE_NODE_read_in_flight, - TASK_UNINTERRUPTIBLE); - /* * Using the underscore version because we don't want to compact * bsets after the write, since this node is about to be evicted @@ -214,10 +233,11 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush) if (bch2_verify_btree_ondisk) bch2_btree_node_write(c, b, SIX_LOCK_intent); else - __bch2_btree_node_write(c, b, SIX_LOCK_read); + __bch2_btree_node_write(c, b, false); - /* wait for any in flight btree write */ - btree_node_wait_on_io(b); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; } out: if (b->hash_val && !ret) @@ -360,12 +380,10 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c) flags = memalloc_nofs_save(); mutex_lock(&bc->lock); -#ifdef CONFIG_BCACHEFS_DEBUG if (c->verify_data) list_move(&c->verify_data->list, &bc->live); kvpfree(c->verify_ondisk, btree_bytes(c)); -#endif for (i = 0; i < BTREE_ID_NR; i++) if (c->btree_roots[i].b) @@ -419,31 +437,15 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) bch2_recalc_btree_reserve(c); for (i = 0; i < bc->reserve; i++) - if (!btree_node_mem_alloc(c)) { + if (!__bch2_btree_node_mem_alloc(c)) { ret = -ENOMEM; goto out; } list_splice_init(&bc->live, &bc->freeable); -#ifdef CONFIG_BCACHEFS_DEBUG mutex_init(&c->verify_lock); - c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL); - if (!c->verify_ondisk) { - ret = -ENOMEM; - goto out; - } - - c->verify_data = btree_node_mem_alloc(c); - if (!c->verify_data) { - ret = -ENOMEM; - goto out; - } - - list_del_init(&c->verify_data->list); -#endif - bc->shrink.count_objects = bch2_btree_cache_count; bc->shrink.scan_objects = bch2_btree_cache_scan; bc->shrink.seeks = 4; @@ -585,6 +587,7 @@ got_node: } BUG_ON(btree_node_hashed(b)); + BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_write_in_flight(b)); out: b->flags = 0; @@ -594,6 +597,7 @@ out: b->sib_u64s[1] = 0; b->whiteout_u64s = 0; bch2_btree_keys_init(b); + set_btree_node_accessed(b); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc], start_time); @@ -637,14 +641,17 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, { struct btree_cache *bc = &c->btree_cache; struct btree *b; + u32 seq; BUG_ON(level + 1 >= BTREE_MAX_DEPTH); /* * Parent node must be locked, else we could read in a btree node that's * been freed: */ - if (iter && !bch2_btree_node_relock(iter, level + 1)) + if (iter && !bch2_btree_node_relock(iter, level + 1)) { + btree_trans_restart(iter->trans); return ERR_PTR(-EINTR); + } b = bch2_btree_node_mem_alloc(c); if (IS_ERR(b)) @@ -666,25 +673,32 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, return NULL; } - /* - * Unlock before doing IO: - * - * XXX: ideally should be dropping all btree node locks here - */ - if (iter && btree_node_read_locked(iter, level + 1)) - btree_node_unlock(iter, level + 1); - - bch2_btree_node_read(c, b, sync); + set_btree_node_read_in_flight(b); six_unlock_write(&b->c.lock); + seq = b->c.lock.state.seq; + six_unlock_intent(&b->c.lock); - if (!sync) { - six_unlock_intent(&b->c.lock); + /* Unlock before doing IO: */ + if (iter && sync) + bch2_trans_unlock(iter->trans); + + bch2_btree_node_read(c, b, sync); + + if (!sync) return NULL; + + if (iter && + (!bch2_trans_relock(iter->trans) || + !bch2_btree_iter_relock_intent(iter))) { + BUG_ON(!iter->trans->restarted); + return ERR_PTR(-EINTR); } - if (lock_type == SIX_LOCK_read) - six_lock_downgrade(&b->c.lock); + if (!six_relock_type(&b->c.lock, lock_type, seq)) { + 
btree_trans_restart(iter->trans); + return ERR_PTR(-EINTR); + } return b; } @@ -697,6 +711,40 @@ static int lock_node_check_fn(struct six_lock *lock, void *p) return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1; } +static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) +{ + char buf1[200], buf2[100], buf3[100]; + + if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) + return; + + bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&b->key)); + bch2_bpos_to_text(&PBUF(buf2), b->data->min_key); + bch2_bpos_to_text(&PBUF(buf3), b->data->max_key); + + bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n" + "btree %s level %u\n" + "ptr: %s\n" + "header: btree %s level %llu\n" + "min %s max %s\n", + bch2_btree_ids[b->c.btree_id], b->c.level, + buf1, + bch2_btree_ids[BTREE_NODE_ID(b->data)], + BTREE_NODE_LEVEL(b->data), + buf2, buf3); +} + +static inline void btree_check_header(struct bch_fs *c, struct btree *b) +{ + if (b->c.btree_id != BTREE_NODE_ID(b->data) || + b->c.level != BTREE_NODE_LEVEL(b->data) || + bpos_cmp(b->data->max_key, b->key.k.p) || + (b->key.k.type == KEY_TYPE_btree_ptr_v2 && + bpos_cmp(b->data->min_key, + bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) + btree_bad_header(c, b); +} + /** * bch_btree_node_get - find a btree node in the cache and lock it, reading it * in from disk if necessary. @@ -706,20 +754,23 @@ static int lock_node_check_fn(struct six_lock *lock, void *p) * The btree node will have either a read or a write lock held, depending on * the @write parameter. */ -struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter, +struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *iter, const struct bkey_i *k, unsigned level, enum six_lock_type lock_type, unsigned long trace_ip) { + struct bch_fs *c = trans->c; struct btree_cache *bc = &c->btree_cache; struct btree *b; struct bset_tree *t; EBUG_ON(level >= BTREE_MAX_DEPTH); - b = btree_node_mem_ptr(k); - if (b) - goto lock_node; + if (c->opts.btree_node_mem_ptr_optimization) { + b = btree_node_mem_ptr(k); + if (b) + goto lock_node; + } retry: b = btree_cache_find(bc, k); if (unlikely(!b)) { @@ -772,7 +823,7 @@ lock_node: if (!btree_node_lock(b, k->k.p, level, iter, lock_type, lock_node_check_fn, (void *) k, trace_ip)) { - if (b->hash_val != btree_ptr_hash_val(k)) + if (!trans->restarted) goto retry; return ERR_PTR(-EINTR); } @@ -784,14 +835,37 @@ lock_node: if (bch2_btree_node_relock(iter, level + 1)) goto retry; - trace_trans_restart_btree_node_reused(iter->trans->ip); + trace_trans_restart_btree_node_reused(trans->ip, + trace_ip, + iter->btree_id, + &iter->real_pos); + btree_trans_restart(trans); return ERR_PTR(-EINTR); } } - /* XXX: waiting on IO with btree locks held: */ - wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, - TASK_UNINTERRUPTIBLE); + if (unlikely(btree_node_read_in_flight(b))) { + u32 seq = b->c.lock.state.seq; + + six_unlock_type(&b->c.lock, lock_type); + bch2_trans_unlock(trans); + + bch2_btree_node_wait_on_read(b); + + /* + * should_be_locked is not set on this iterator yet, so we need + * to relock it specifically: + */ + if (iter && + (!bch2_trans_relock(trans) || + !bch2_btree_iter_relock_intent(iter))) { + BUG_ON(!trans->restarted); + return ERR_PTR(-EINTR); + } + + if (!six_relock_type(&b->c.lock, lock_type, seq)) + goto retry; + } prefetch(b->aux_data); @@ -814,10 +888,7 @@ lock_node: EBUG_ON(b->c.btree_id != iter->btree_id); EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); - EBUG_ON(bpos_cmp(b->data->max_key, k->k.p)); - 
EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 && - bpos_cmp(b->data->min_key, - bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)); + btree_check_header(c, b); return b; } @@ -835,9 +906,11 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c, EBUG_ON(level >= BTREE_MAX_DEPTH); - b = btree_node_mem_ptr(k); - if (b) - goto lock_node; + if (c->opts.btree_node_mem_ptr_optimization) { + b = btree_node_mem_ptr(k); + if (b) + goto lock_node; + } retry: b = btree_cache_find(bc, k); if (unlikely(!b)) { @@ -872,8 +945,7 @@ lock_node: } /* XXX: waiting on IO with btree locks held: */ - wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, - TASK_UNINTERRUPTIBLE); + __bch2_btree_node_wait_on_read(b); prefetch(b->aux_data); @@ -897,18 +969,15 @@ lock_node: EBUG_ON(b->c.btree_id != btree_id); EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); - EBUG_ON(bpos_cmp(b->data->max_key, k->k.p)); - EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 && - bpos_cmp(b->data->min_key, - bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)); + btree_check_header(c, b); out: bch2_btree_cache_cannibalize_unlock(c); return b; } -void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter, - const struct bkey_i *k, - enum btree_id btree_id, unsigned level) +int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter, + const struct bkey_i *k, + enum btree_id btree_id, unsigned level) { struct btree_cache *bc = &c->btree_cache; struct btree *b; @@ -918,9 +987,48 @@ void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter, b = btree_cache_find(bc, k); if (b) + return 0; + + b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false); + return PTR_ERR_OR_ZERO(b); +} + +void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k) +{ + struct btree_cache *bc = &c->btree_cache; + struct btree *b; + + b = btree_cache_find(bc, k); + if (!b) return; +wait_on_io: + /* not allowed to wait on io with btree locks held: */ - bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false); + /* XXX we're called from btree_gc which will be holding other btree + * nodes locked + * */ + __bch2_btree_node_wait_on_read(b); + __bch2_btree_node_wait_on_write(b); + + six_lock_intent(&b->c.lock, NULL, NULL); + six_lock_write(&b->c.lock, NULL, NULL); + + if (btree_node_dirty(b)) { + __bch2_btree_node_write(c, b, false); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; + } + + BUG_ON(btree_node_dirty(b)); + + mutex_lock(&bc->lock); + btree_node_data_free(c, b); + bch2_btree_node_hash_remove(bc, b); + mutex_unlock(&bc->lock); + + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); } void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, diff --git a/libbcachefs/btree_cache.h b/libbcachefs/btree_cache.h index 4791c3b..5032293 100644 --- a/libbcachefs/btree_cache.h +++ b/libbcachefs/btree_cache.h @@ -5,6 +5,8 @@ #include "bcachefs.h" #include "btree_types.h" +extern struct lock_class_key bch2_btree_node_lock_key; + struct btree_iter; void bch2_recalc_btree_reserve(struct bch_fs *); @@ -17,17 +19,20 @@ int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *, void bch2_btree_cache_cannibalize_unlock(struct bch_fs *); int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *); +struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *); struct btree *bch2_btree_node_mem_alloc(struct bch_fs *); -struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *, +struct btree *bch2_btree_node_get(struct 
btree_trans *, struct btree_iter *, const struct bkey_i *, unsigned, enum six_lock_type, unsigned long); struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *, enum btree_id, unsigned, bool); -void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *, - const struct bkey_i *, enum btree_id, unsigned); +int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *, + const struct bkey_i *, enum btree_id, unsigned); + +void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *); void bch2_fs_btree_cache_exit(struct bch_fs *); int bch2_fs_btree_cache_init(struct bch_fs *); diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c index 268e007..3dd1094 100644 --- a/libbcachefs/btree_gc.c +++ b/libbcachefs/btree_gc.c @@ -23,6 +23,7 @@ #include "keylist.h" #include "move.h" #include "recovery.h" +#include "reflink.h" #include "replicas.h" #include "super-io.h" @@ -35,6 +36,9 @@ #include #include +#define DROP_THIS_NODE 10 +#define DROP_PREV_NODE 11 + static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos) { preempt_disable(); @@ -66,8 +70,6 @@ static int bch2_gc_check_topology(struct bch_fs *c, ? node_start : bpos_successor(prev->k->k.p); char buf1[200], buf2[200]; - bool update_min = false; - bool update_max = false; int ret = 0; if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) { @@ -81,83 +83,407 @@ static int bch2_gc_check_topology(struct bch_fs *c, bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k)); } - if (fsck_err_on(bpos_cmp(expected_start, bp->v.min_key), c, + if (bpos_cmp(expected_start, bp->v.min_key)) { + bch2_topology_error(c); + + if (__fsck_err(c, + FSCK_CAN_FIX| + FSCK_CAN_IGNORE| + FSCK_NO_RATELIMIT, + "btree node with incorrect min_key at btree %s level %u:\n" + " prev %s\n" + " cur %s", + bch2_btree_ids[b->c.btree_id], b->c.level, + buf1, + (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) && + !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) { + bch_info(c, "Halting mark and sweep to start topology repair pass"); + return FSCK_ERR_START_TOPOLOGY_REPAIR; + } else { + set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); + } + } + } + + if (is_last && bpos_cmp(cur.k->k.p, node_end)) { + bch2_topology_error(c); + + if (__fsck_err(c, + FSCK_CAN_FIX| + FSCK_CAN_IGNORE| + FSCK_NO_RATELIMIT, + "btree node with incorrect max_key at btree %s level %u:\n" + " %s\n" + " expected %s", + bch2_btree_ids[b->c.btree_id], b->c.level, + (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1), + (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) && + !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) { + bch_info(c, "Halting mark and sweep to start topology repair pass"); + return FSCK_ERR_START_TOPOLOGY_REPAIR; + } else { + set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); + } + } + + bch2_bkey_buf_copy(prev, c, cur.k); +fsck_err: + return ret; +} + +static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst) +{ + switch (b->key.k.type) { + case KEY_TYPE_btree_ptr: { + struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key); + + dst->k.p = src->k.p; + dst->v.mem_ptr = 0; + dst->v.seq = b->data->keys.seq; + dst->v.sectors_written = 0; + dst->v.flags = 0; + dst->v.min_key = b->data->min_key; + set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k)); + memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k)); + break; + } + case KEY_TYPE_btree_ptr_v2: + bkey_copy(&dst->k_i, &b->key); + break; + default: + BUG(); + } +} + +static int set_node_min(struct bch_fs *c, 
struct btree *b, struct bpos new_min) +{ + struct bkey_i_btree_ptr_v2 *new; + int ret; + + new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL); + if (!new) + return -ENOMEM; + + btree_ptr_to_v2(b, new); + b->data->min_key = new_min; + new->v.min_key = new_min; + SET_BTREE_PTR_RANGE_UPDATED(&new->v, true); + + ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i); + if (ret) { + kfree(new); + return ret; + } + + bch2_btree_node_drop_keys_outside_node(b); + + return 0; +} + +static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max) +{ + struct bkey_i_btree_ptr_v2 *new; + int ret; + + ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p); + if (ret) + return ret; + + new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL); + if (!new) + return -ENOMEM; + + btree_ptr_to_v2(b, new); + b->data->max_key = new_max; + new->k.p = new_max; + SET_BTREE_PTR_RANGE_UPDATED(&new->v, true); + + ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i); + if (ret) { + kfree(new); + return ret; + } + + bch2_btree_node_drop_keys_outside_node(b); + + mutex_lock(&c->btree_cache.lock); + bch2_btree_node_hash_remove(&c->btree_cache, b); + + bkey_copy(&b->key, &new->k_i); + ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); + BUG_ON(ret); + mutex_unlock(&c->btree_cache.lock); + return 0; +} + +static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, + struct btree *prev, struct btree *cur) +{ + struct bpos expected_start = !prev + ? b->data->min_key + : bpos_successor(prev->key.k.p); + char buf1[200], buf2[200]; + int ret = 0; + + if (!prev) { + struct printbuf out = PBUF(buf1); + pr_buf(&out, "start of node: "); + bch2_bpos_to_text(&out, b->data->min_key); + } else { + bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key)); + } + + bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key)); + + if (prev && + bpos_cmp(expected_start, cur->data->min_key) > 0 && + BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { + /* cur overwrites prev: */ + + if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key, + cur->data->min_key) >= 0, c, + "btree node overwritten by next node at btree %s level %u:\n" + " node %s\n" + " next %s", + bch2_btree_ids[b->c.btree_id], b->c.level, + buf1, buf2)) + return DROP_PREV_NODE; + + if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p, + bpos_predecessor(cur->data->min_key)), c, + "btree node with incorrect max_key at btree %s level %u:\n" + " node %s\n" + " next %s", + bch2_btree_ids[b->c.btree_id], b->c.level, + buf1, buf2)) + ret = set_node_max(c, prev, + bpos_predecessor(cur->data->min_key)); + } else { + /* prev overwrites cur: */ + + if (mustfix_fsck_err_on(bpos_cmp(expected_start, + cur->data->max_key) >= 0, c, + "btree node overwritten by prev node at btree %s level %u:\n" + " prev %s\n" + " node %s", + bch2_btree_ids[b->c.btree_id], b->c.level, + buf1, buf2)) + return DROP_THIS_NODE; + + if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c, "btree node with incorrect min_key at btree %s level %u:\n" " prev %s\n" - " cur %s", + " node %s", bch2_btree_ids[b->c.btree_id], b->c.level, - buf1, - (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2))) - update_min = true; + buf1, buf2)) + ret = set_node_min(c, cur, expected_start); } +fsck_err: + return ret; +} - if (fsck_err_on(is_last && - bpos_cmp(cur.k->k.p, node_end), c, +static int btree_repair_node_end(struct bch_fs *c, struct btree *b, + struct btree 
*child) +{ + char buf1[200], buf2[200]; + int ret = 0; + + if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c, "btree node with incorrect max_key at btree %s level %u:\n" " %s\n" " expected %s", bch2_btree_ids[b->c.btree_id], b->c.level, - (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1), - (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2))) - update_max = true; + (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1), + (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) { + ret = set_node_max(c, child, b->key.k.p); + if (ret) + return ret; + } +fsck_err: + return ret; +} - bch2_bkey_buf_copy(prev, c, cur.k); +static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b) +{ + struct btree_and_journal_iter iter; + struct bkey_s_c k; + struct bkey_buf prev_k, cur_k; + struct btree *prev = NULL, *cur = NULL; + bool have_child, dropped_children = false; + char buf[200]; + int ret = 0; - if (update_min || update_max) { - struct bkey_i *new; - struct bkey_i_btree_ptr_v2 *bp = NULL; - struct btree *n; + if (!b->c.level) + return 0; +again: + prev = NULL; + have_child = dropped_children = false; + bch2_bkey_buf_init(&prev_k); + bch2_bkey_buf_init(&cur_k); + bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); + + while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { + BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); + BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); - if (update_max) { + bch2_btree_and_journal_iter_advance(&iter); + bch2_bkey_buf_reassemble(&cur_k, c, k); + + cur = bch2_btree_node_get_noiter(c, cur_k.k, + b->c.btree_id, b->c.level - 1, + false); + ret = PTR_ERR_OR_ZERO(cur); + + if (mustfix_fsck_err_on(ret == -EIO, c, + "Unreadable btree node at btree %s level %u:\n" + " %s", + bch2_btree_ids[b->c.btree_id], + b->c.level - 1, + (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) { + bch2_btree_node_evict(c, cur_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, - b->c.level, cur.k->k.p); + b->c.level, cur_k.k->k.p); if (ret) - return ret; + break; + continue; } - new = kmalloc(bkey_bytes(&cur.k->k), GFP_KERNEL); - if (!new) { - bch_err(c, "%s: error allocating new key", __func__); - return -ENOMEM; + if (ret) { + bch_err(c, "%s: error %i getting btree node", + __func__, ret); + break; + } + + ret = btree_repair_node_boundaries(c, b, prev, cur); + + if (ret == DROP_THIS_NODE) { + six_unlock_read(&cur->c.lock); + bch2_btree_node_evict(c, cur_k.k); + ret = bch2_journal_key_delete(c, b->c.btree_id, + b->c.level, cur_k.k->k.p); + if (ret) + break; + continue; } - bkey_copy(new, cur.k); + if (prev) + six_unlock_read(&prev->c.lock); + prev = NULL; + + if (ret == DROP_PREV_NODE) { + bch2_btree_node_evict(c, prev_k.k); + ret = bch2_journal_key_delete(c, b->c.btree_id, + b->c.level, prev_k.k->k.p); + if (ret) + break; + + bch2_btree_and_journal_iter_exit(&iter); + bch2_bkey_buf_exit(&prev_k, c); + bch2_bkey_buf_exit(&cur_k, c); + goto again; + } else if (ret) + break; + + prev = cur; + cur = NULL; + bch2_bkey_buf_copy(&prev_k, c, cur_k.k); + } + + if (!ret && !IS_ERR_OR_NULL(prev)) { + BUG_ON(cur); + ret = btree_repair_node_end(c, b, prev); + } + + if (!IS_ERR_OR_NULL(prev)) + six_unlock_read(&prev->c.lock); + prev = NULL; + if (!IS_ERR_OR_NULL(cur)) + six_unlock_read(&cur->c.lock); + cur = NULL; - if (new->k.type == KEY_TYPE_btree_ptr_v2) - bp = bkey_i_to_btree_ptr_v2(new); + if (ret) + goto err; - if (update_min) - bp->v.min_key = expected_start; - if (update_max) - new->k.p = node_end; - if 
(bp) - SET_BTREE_PTR_RANGE_UPDATED(&bp->v, true); + bch2_btree_and_journal_iter_exit(&iter); + bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); + + while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { + bch2_bkey_buf_reassemble(&cur_k, c, k); + bch2_btree_and_journal_iter_advance(&iter); + + cur = bch2_btree_node_get_noiter(c, cur_k.k, + b->c.btree_id, b->c.level - 1, + false); + ret = PTR_ERR_OR_ZERO(cur); - ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level, new); if (ret) { - kfree(new); - return ret; + bch_err(c, "%s: error %i getting btree node", + __func__, ret); + goto err; } - n = bch2_btree_node_get_noiter(c, cur.k, b->c.btree_id, - b->c.level - 1, true); - if (n) { - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, n); - - bkey_copy(&n->key, new); - if (update_min) - n->data->min_key = expected_start; - if (update_max) - n->data->max_key = node_end; - - ret = __bch2_btree_node_hash_insert(&c->btree_cache, n); - BUG_ON(ret); - mutex_unlock(&c->btree_cache.lock); - six_unlock_read(&n->c.lock); + ret = bch2_btree_repair_topology_recurse(c, cur); + six_unlock_read(&cur->c.lock); + cur = NULL; + + if (ret == DROP_THIS_NODE) { + bch2_btree_node_evict(c, cur_k.k); + ret = bch2_journal_key_delete(c, b->c.btree_id, + b->c.level, cur_k.k->k.p); + dropped_children = true; } + + if (ret) + goto err; + + have_child = true; } + + if (mustfix_fsck_err_on(!have_child, c, + "empty interior btree node at btree %s level %u\n" + " %s", + bch2_btree_ids[b->c.btree_id], + b->c.level, + (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf))) + ret = DROP_THIS_NODE; +err: fsck_err: + if (!IS_ERR_OR_NULL(prev)) + six_unlock_read(&prev->c.lock); + if (!IS_ERR_OR_NULL(cur)) + six_unlock_read(&cur->c.lock); + + bch2_btree_and_journal_iter_exit(&iter); + bch2_bkey_buf_exit(&prev_k, c); + bch2_bkey_buf_exit(&cur_k, c); + + if (!ret && dropped_children) + goto again; + + return ret; +} + +static int bch2_repair_topology(struct bch_fs *c) +{ + struct btree *b; + unsigned i; + int ret = 0; + + for (i = 0; i < BTREE_ID_NR && !ret; i++) { + b = c->btree_roots[i].b; + if (btree_node_fake(b)) + continue; + + six_lock_read(&b->c.lock, NULL, NULL); + ret = bch2_btree_repair_topology_recurse(c, b); + six_unlock_read(&b->c.lock); + + if (ret == DROP_THIS_NODE) { + bch_err(c, "empty btree root - repair unimplemented"); + ret = FSCK_ERR_EXIT; + } + } + return ret; } @@ -169,19 +495,39 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, const union bch_extent_entry *entry; struct extent_ptr_decoded p = { 0 }; bool do_update = false; + char buf[200]; int ret = 0; bkey_for_each_ptr_decode(k->k, ptrs, p, entry) { struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev); struct bucket *g = PTR_BUCKET(ca, &p.ptr, true); struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false); + enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr); + + if (fsck_err_on(g->mark.data_type && + g->mark.data_type != data_type, c, + "bucket %u:%zu different types of data in same bucket: %s, %s\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_types[g->mark.data_type], + bch2_data_types[data_type], + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { + if (data_type == BCH_DATA_btree) { + g2->_mark.data_type = g->_mark.data_type = data_type; + set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); + } else { + do_update = true; + } + } if (fsck_err_on(!g->gen_valid, c, - "bucket %u:%zu data type %s ptr gen %u missing in alloc btree", 
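/*
 * Editorial sketch, not part of the patch: the fsck checks above compare
 * 8-bit bucket generation numbers, which wrap around.  Assuming gen_cmp()
 * follows the usual wrap-safe idiom (the signed difference of the two u8
 * values, as in bcache), "ptr gen in the future" and "stale dirty ptr" are
 * simply the two signs of that difference:
 */
#include <stdint.h>
#include <stdio.h>

static inline int gen_cmp(uint8_t a, uint8_t b)
{
	/* > 0: a is newer than b, < 0: a is older, 0: equal — wrap-safe */
	return (int8_t) (a - b);
}

int main(void)
{
	/*
	 * 1 vs 255: a plain comparison says 1 < 255, but with wraparound
	 * gen 1 is two increments *after* gen 255:
	 */
	printf("gen_cmp(1, 255) = %d\n", gen_cmp(1, 255));	/* 2  -> newer */
	printf("gen_cmp(250, 3) = %d\n", gen_cmp(250, 3));	/* -9 -> stale */
	return 0;
}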
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" + "while marking %s", p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen)) { - if (p.ptr.cached) { + p.ptr.gen, + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { + if (!p.ptr.cached) { g2->_mark.gen = g->_mark.gen = p.ptr.gen; g2->gen_valid = g->gen_valid = true; set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); @@ -191,11 +537,13 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, } if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c, - "bucket %u:%zu data type %s ptr gen in the future: %u > %u", + "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" + "while marking %s", p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen, g->mark.gen)) { - if (p.ptr.cached) { + p.ptr.gen, g->mark.gen, + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { + if (!p.ptr.cached) { g2->_mark.gen = g->_mark.gen = p.ptr.gen; g2->gen_valid = g->gen_valid = true; g2->_mark.data_type = 0; @@ -210,23 +558,29 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->mark.gen) < 0, c, - "bucket %u:%zu data type %s stale dirty ptr: %u < %u", + "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" + "while marking %s", p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen, g->mark.gen)) + p.ptr.gen, g->mark.gen, + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) do_update = true; if (p.has_ec) { struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx); if (fsck_err_on(!m || !m->alive, c, - "pointer to nonexistent stripe %llu", - (u64) p.ec.idx)) + "pointer to nonexistent stripe %llu\n" + "while marking %s", + (u64) p.ec.idx, + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) do_update = true; if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c, - "pointer does not match stripe %llu", - (u64) p.ec.idx)) + "pointer does not match stripe %llu\n" + "while marking %s", + (u64) p.ec.idx, + (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) do_update = true; } } @@ -250,39 +604,57 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, bkey_reassemble(new, *k); - bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({ - struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - struct bucket *g = PTR_BUCKET(ca, ptr, true); - - (ptr->cached && - (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) || - (!ptr->cached && - gen_cmp(ptr->gen, g->mark.gen) < 0); - })); + if (level) { + /* + * We don't want to drop btree node pointers - if the + * btree node isn't there anymore, the read path will + * sort it out: + */ + ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); + bkey_for_each_ptr(ptrs, ptr) { + struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); + struct bucket *g = PTR_BUCKET(ca, ptr, true); + + ptr->gen = g->mark.gen; + } + } else { + bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({ + struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); + struct bucket *g = PTR_BUCKET(ca, ptr, true); + enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr); + + (ptr->cached && + (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) || + (!ptr->cached && + gen_cmp(ptr->gen, g->mark.gen) < 0) || + (g->mark.data_type && + g->mark.data_type != data_type); + })); again: - ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); - bkey_extent_entry_for_each(ptrs, entry) { - if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) { - 
struct stripe *m = genradix_ptr(&c->stripes[true], - entry->stripe_ptr.idx); - union bch_extent_entry *next_ptr; - - bkey_extent_entry_for_each_from(ptrs, next_ptr, entry) - if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr) - goto found; - next_ptr = NULL; + ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); + bkey_extent_entry_for_each(ptrs, entry) { + if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) { + struct stripe *m = genradix_ptr(&c->stripes[true], + entry->stripe_ptr.idx); + union bch_extent_entry *next_ptr; + + bkey_extent_entry_for_each_from(ptrs, next_ptr, entry) + if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr) + goto found; + next_ptr = NULL; found: - if (!next_ptr) { - bch_err(c, "aieee, found stripe ptr with no data ptr"); - continue; - } - - if (!m || !m->alive || - !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block], - &next_ptr->ptr, - m->sectors)) { - bch2_bkey_extent_entry_drop(new, entry); - goto again; + if (!next_ptr) { + bch_err(c, "aieee, found stripe ptr with no data ptr"); + continue; + } + + if (!m || !m->alive || + !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block], + &next_ptr->ptr, + m->sectors)) { + bch2_bkey_extent_entry_drop(new, entry); + goto again; + } } } } @@ -301,40 +673,44 @@ fsck_err: static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id, unsigned level, bool is_root, - struct bkey_s_c k, + struct bkey_s_c *k, u8 *max_stale, bool initial) { - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + struct bkey_ptrs_c ptrs; const struct bch_extent_ptr *ptr; unsigned flags = + BTREE_TRIGGER_INSERT| BTREE_TRIGGER_GC| (initial ? BTREE_TRIGGER_NOATOMIC : 0); int ret = 0; if (initial) { BUG_ON(bch2_journal_seq_verify && - k.k->version.lo > journal_cur_seq(&c->journal)); + k->k->version.lo > journal_cur_seq(&c->journal)); + + ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k); + if (ret) + goto err; - if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c, + if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c, "key version number higher than recorded: %llu > %llu", - k.k->version.lo, + k->k->version.lo, atomic64_read(&c->key_version))) - atomic64_set(&c->key_version, k.k->version.lo); + atomic64_set(&c->key_version, k->k->version.lo); if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || - fsck_err_on(!bch2_bkey_replicas_marked(c, k), c, + fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c, "superblock not marked as containing replicas (type %u)", - k.k->type)) { - ret = bch2_mark_bkey_replicas(c, k); + k->k->type)) { + ret = bch2_mark_bkey_replicas(c, *k); if (ret) { bch_err(c, "error marking bkey replicas: %i", ret); goto err; } } - - ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, &k); } + ptrs = bch2_bkey_ptrs_c(*k); bkey_for_each_ptr(ptrs, ptr) { struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); struct bucket *g = PTR_BUCKET(ca, ptr, true); @@ -345,7 +721,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id, *max_stale = max(*max_stale, ptr_stale(ca, ptr)); } - bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags); + bch2_mark_key(c, *k, flags); fsck_err: err: if (ret) @@ -374,7 +750,7 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) { ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false, - k, max_stale, initial); + &k, max_stale, initial); if (ret) break; @@ -396,12 +772,13 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree 
*b, u8 *max_stale, } static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, - bool initial) + bool initial, bool metadata_only) { struct btree_trans trans; struct btree_iter *iter; struct btree *b; - unsigned depth = bch2_expensive_debug_checks ? 0 + unsigned depth = metadata_only ? 1 + : bch2_expensive_debug_checks ? 0 : !btree_node_type_needs_gc(btree_id) ? 1 : 0; u8 max_stale = 0; @@ -423,13 +800,13 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, if (!initial) { if (max_stale > 64) - bch2_btree_node_rewrite(c, iter, + bch2_btree_node_rewrite(&trans, iter, b->data->keys.seq, BTREE_INSERT_NOWAIT| BTREE_INSERT_GC_LOCK_HELD); else if (!bch2_btree_gc_rewrite_disabled && (bch2_btree_gc_always_rewrite || max_stale > 16)) - bch2_btree_node_rewrite(c, iter, + bch2_btree_node_rewrite(&trans, iter, b->data->keys.seq, BTREE_INSERT_NOWAIT| BTREE_INSERT_GC_LOCK_HELD); @@ -445,10 +822,12 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, mutex_lock(&c->btree_root_lock); b = c->btree_roots[btree_id].b; - if (!btree_node_fake(b)) + if (!btree_node_fake(b)) { + struct bkey_s_c k = bkey_i_to_s_c(&b->key); + ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true, - bkey_i_to_s_c(&b->key), - &max_stale, initial); + &k, &max_stale, initial); + } gc_pos_set(c, gc_pos_btree_root(b->c.btree_id)); mutex_unlock(&c->btree_root_lock); @@ -462,6 +841,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, struct bkey_s_c k; struct bkey_buf cur, prev; u8 max_stale = 0; + char buf[200]; int ret = 0; bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); @@ -474,10 +854,10 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false, - k, &max_stale, true); + &k, &max_stale, true); if (ret) { bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret); - break; + goto fsck_err; } if (b->c.level) { @@ -490,7 +870,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, &prev, cur, !bch2_btree_and_journal_iter_peek(&iter).k); if (ret) - break; + goto fsck_err; } else { bch2_btree_and_journal_iter_advance(&iter); } @@ -511,18 +891,30 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, false); ret = PTR_ERR_OR_ZERO(child); - if (fsck_err_on(ret == -EIO, c, - "unreadable btree node")) { - ret = bch2_journal_key_delete(c, b->c.btree_id, - b->c.level, cur.k->k.p); - if (ret) - return ret; - - set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); - continue; - } - - if (ret) { + if (ret == -EIO) { + bch2_topology_error(c); + + if (__fsck_err(c, + FSCK_CAN_FIX| + FSCK_CAN_IGNORE| + FSCK_NO_RATELIMIT, + "Unreadable btree node at btree %s level %u:\n" + " %s", + bch2_btree_ids[b->c.btree_id], + b->c.level - 1, + (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) && + !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) { + ret = FSCK_ERR_START_TOPOLOGY_REPAIR; + bch_info(c, "Halting mark and sweep to start topology repair pass"); + goto fsck_err; + } else { + /* Continue marking when opted to not + * fix the error: */ + ret = 0; + set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); + continue; + } + } else if (ret) { bch_err(c, "%s: error %i getting btree node", __func__, ret); break; @@ -544,11 +936,13 @@ fsck_err: } static int bch2_gc_btree_init(struct bch_fs *c, - enum btree_id btree_id) + enum btree_id btree_id, + bool metadata_only) { struct btree *b; - unsigned target_depth = 
bch2_expensive_debug_checks ? 0 - : !btree_node_type_needs_gc(btree_id) ? 1 + unsigned target_depth = metadata_only ? 1 + : bch2_expensive_debug_checks ? 0 + : !btree_node_type_needs_gc(btree_id) ? 1 : 0; u8 max_stale = 0; char buf[100]; @@ -560,29 +954,35 @@ static int bch2_gc_btree_init(struct bch_fs *c, return 0; six_lock_read(&b->c.lock, NULL, NULL); - if (fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c, + if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c, "btree root with incorrect min_key: %s", (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) { - BUG(); + bch_err(c, "repair unimplemented"); + ret = FSCK_ERR_EXIT; + goto fsck_err; } - if (fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c, + if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c, "btree root with incorrect max_key: %s", (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) { - BUG(); + bch_err(c, "repair unimplemented"); + ret = FSCK_ERR_EXIT; + goto fsck_err; } if (b->c.level >= target_depth) ret = bch2_gc_btree_init_recurse(c, b, target_depth); - if (!ret) + if (!ret) { + struct bkey_s_c k = bkey_i_to_s_c(&b->key); + ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true, - bkey_i_to_s_c(&b->key), - &max_stale, true); + &k, &max_stale, true); + } fsck_err: six_unlock_read(&b->c.lock); - if (ret) + if (ret < 0) bch_err(c, "%s: ret %i", __func__, ret); return ret; } @@ -593,27 +993,24 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r) (int) btree_id_to_gc_phase(r); } -static int bch2_gc_btrees(struct bch_fs *c, bool initial) +static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only) { enum btree_id ids[BTREE_ID_NR]; unsigned i; + int ret = 0; for (i = 0; i < BTREE_ID_NR; i++) ids[i] = i; bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp); - for (i = 0; i < BTREE_ID_NR; i++) { - enum btree_id id = ids[i]; - int ret = initial - ? bch2_gc_btree_init(c, id) - : bch2_gc_btree(c, id, initial); - if (ret) { - bch_err(c, "%s: ret %i", __func__, ret); - return ret; - } - } + for (i = 0; i < BTREE_ID_NR && !ret; i++) + ret = initial + ? 
bch2_gc_btree_init(c, ids[i], metadata_only) + : bch2_gc_btree(c, ids[i], initial, metadata_only); - return 0; + if (ret < 0) + bch_err(c, "%s: ret %i", __func__, ret); + return ret; } static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca, @@ -700,59 +1097,12 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c) for_each_pending_btree_node_free(c, as, d) if (d->index_update_done) bch2_mark_key(c, bkey_i_to_s_c(&d->key), - 0, 0, NULL, 0, - BTREE_TRIGGER_GC); + BTREE_TRIGGER_INSERT|BTREE_TRIGGER_GC); mutex_unlock(&c->btree_interior_update_lock); } #endif -static void bch2_mark_allocator_buckets(struct bch_fs *c) -{ - struct bch_dev *ca; - struct open_bucket *ob; - size_t i, j, iter; - unsigned ci; - - percpu_down_read(&c->mark_lock); - - spin_lock(&c->freelist_lock); - gc_pos_set(c, gc_pos_alloc(c, NULL)); - - for_each_member_device(ca, c, ci) { - fifo_for_each_entry(i, &ca->free_inc, iter) - bch2_mark_alloc_bucket(c, ca, i, true, - gc_pos_alloc(c, NULL), - BTREE_TRIGGER_GC); - - - - for (j = 0; j < RESERVE_NR; j++) - fifo_for_each_entry(i, &ca->free[j], iter) - bch2_mark_alloc_bucket(c, ca, i, true, - gc_pos_alloc(c, NULL), - BTREE_TRIGGER_GC); - } - - spin_unlock(&c->freelist_lock); - - for (ob = c->open_buckets; - ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); - ob++) { - spin_lock(&ob->lock); - if (ob->valid) { - gc_pos_set(c, gc_pos_alloc(c, ob)); - ca = bch_dev_bkey_exists(c, ob->ptr.dev); - bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true, - gc_pos_alloc(c, ob), - BTREE_TRIGGER_GC); - } - spin_unlock(&ob->lock); - } - - percpu_up_read(&c->mark_lock); -} - static void bch2_gc_free(struct bch_fs *c) { struct bch_dev *ca; @@ -775,10 +1125,10 @@ static void bch2_gc_free(struct bch_fs *c) } static int bch2_gc_done(struct bch_fs *c, - bool initial) + bool initial, bool metadata_only) { - struct bch_dev *ca; - bool verify = (!initial || + struct bch_dev *ca = NULL; + bool verify = !metadata_only && (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info))); unsigned i, dev; int ret = 0; @@ -805,7 +1155,7 @@ static int bch2_gc_done(struct bch_fs *c, if (dst->b[b].mark._f != src->b[b].mark._f) { \ if (verify) \ fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \ - ": got %u, should be %u", i, b, \ + ": got %u, should be %u", dev, b, \ dst->b[b].mark.gen, \ bch2_data_types[dst->b[b].mark.data_type],\ dst->b[b].mark._f, src->b[b].mark._f); \ @@ -813,11 +1163,11 @@ static int bch2_gc_done(struct bch_fs *c, set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \ } #define copy_dev_field(_f, _msg, ...) \ - copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__) + copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__) #define copy_fs_field(_f, _msg, ...) 
\ copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__) - { + if (!metadata_only) { struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0); struct stripe *dst, *src; @@ -857,7 +1207,6 @@ static int bch2_gc_done(struct bch_fs *c, for (b = 0; b < src->nbuckets; b++) { copy_bucket_field(gen); copy_bucket_field(data_type); - copy_bucket_field(owned_by_allocator); copy_bucket_field(stripe); copy_bucket_field(dirty_sectors); copy_bucket_field(cached_sectors); @@ -890,20 +1239,28 @@ static int bch2_gc_done(struct bch_fs *c, copy_fs_field(hidden, "hidden"); copy_fs_field(btree, "btree"); - copy_fs_field(data, "data"); - copy_fs_field(cached, "cached"); - copy_fs_field(reserved, "reserved"); - copy_fs_field(nr_inodes,"nr_inodes"); - for (i = 0; i < BCH_REPLICAS_MAX; i++) - copy_fs_field(persistent_reserved[i], - "persistent_reserved[%i]", i); + if (!metadata_only) { + copy_fs_field(data, "data"); + copy_fs_field(cached, "cached"); + copy_fs_field(reserved, "reserved"); + copy_fs_field(nr_inodes,"nr_inodes"); + + for (i = 0; i < BCH_REPLICAS_MAX; i++) + copy_fs_field(persistent_reserved[i], + "persistent_reserved[%i]", i); + } for (i = 0; i < c->replicas.nr; i++) { struct bch_replicas_entry *e = cpu_replicas_entry(&c->replicas, i); char buf[80]; + if (metadata_only && + (e->data_type == BCH_DATA_user || + e->data_type == BCH_DATA_cached)) + continue; + bch2_replicas_entry_to_text(&PBUF(buf), e); copy_fs_field(replicas[i], "%s", buf); @@ -916,14 +1273,17 @@ static int bch2_gc_done(struct bch_fs *c, #undef copy_stripe_field #undef copy_field fsck_err: + if (ca) + percpu_ref_put(&ca->ref); if (ret) bch_err(c, "%s: ret %i", __func__, ret); return ret; } -static int bch2_gc_start(struct bch_fs *c) +static int bch2_gc_start(struct bch_fs *c, + bool metadata_only) { - struct bch_dev *ca; + struct bch_dev *ca = NULL; unsigned i; int ret; @@ -985,6 +1345,11 @@ static int bch2_gc_start(struct bch_fs *c) d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen; d->gen_valid = s->gen_valid; + + if (metadata_only && + (s->mark.data_type == BCH_DATA_user || + s->mark.data_type == BCH_DATA_cached)) + d->_mark = s->mark; } }; @@ -993,6 +1358,201 @@ static int bch2_gc_start(struct bch_fs *c) return 0; } +static int bch2_gc_reflink_done_initial_fn(struct bch_fs *c, struct bkey_s_c k) +{ + struct reflink_gc *r; + const __le64 *refcount = bkey_refcount_c(k); + char buf[200]; + int ret = 0; + + if (!refcount) + return 0; + + r = genradix_ptr(&c->reflink_gc_table, c->reflink_gc_idx++); + if (!r) + return -ENOMEM; + + if (!r || + r->offset != k.k->p.offset || + r->size != k.k->size) { + bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); + return -EINVAL; + } + + if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c, + "reflink key has wrong refcount:\n" + " %s\n" + " should be %u", + (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf), + r->refcount)) { + struct bkey_i *new; + + new = kmalloc(bkey_bytes(k.k), GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto fsck_err; + } + + bkey_reassemble(new, k); + + if (!r->refcount) { + new->k.type = KEY_TYPE_deleted; + new->k.size = 0; + } else { + *bkey_refcount(new) = cpu_to_le64(r->refcount); + } + + ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new); + if (ret) + kfree(new); + } +fsck_err: + return ret; +} + +static int bch2_gc_reflink_done(struct bch_fs *c, bool initial, + bool metadata_only) +{ + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + struct reflink_gc *r; + size_t idx = 0; + char buf[200]; + int ret = 0; + 
+ if (metadata_only) + return 0; + + if (initial) { + c->reflink_gc_idx = 0; + + ret = bch2_btree_and_journal_walk(c, BTREE_ID_reflink, + bch2_gc_reflink_done_initial_fn); + goto out; + } + + bch2_trans_init(&trans, c, 0, 0); + + for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_PREFETCH, k, ret) { + const __le64 *refcount = bkey_refcount_c(k); + + if (!refcount) + continue; + + r = genradix_ptr(&c->reflink_gc_table, idx); + if (!r || + r->offset != k.k->p.offset || + r->size != k.k->size) { + bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); + ret = -EINVAL; + break; + } + + if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c, + "reflink key has wrong refcount:\n" + " %s\n" + " should be %u", + (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf), + r->refcount)) { + struct bkey_i *new; + + new = kmalloc(bkey_bytes(k.k), GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + break; + } + + bkey_reassemble(new, k); + + if (!r->refcount) + new->k.type = KEY_TYPE_deleted; + else + *bkey_refcount(new) = cpu_to_le64(r->refcount); + + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + __bch2_btree_insert(&trans, BTREE_ID_reflink, new)); + kfree(new); + + if (ret) + break; + } + } +fsck_err: + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); +out: + genradix_free(&c->reflink_gc_table); + c->reflink_gc_nr = 0; + return ret; +} + +static int bch2_gc_reflink_start_initial_fn(struct bch_fs *c, struct bkey_s_c k) +{ + + struct reflink_gc *r; + const __le64 *refcount = bkey_refcount_c(k); + + if (!refcount) + return 0; + + r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++, + GFP_KERNEL); + if (!r) + return -ENOMEM; + + r->offset = k.k->p.offset; + r->size = k.k->size; + r->refcount = 0; + return 0; +} + +static int bch2_gc_reflink_start(struct bch_fs *c, bool initial, + bool metadata_only) +{ + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + struct reflink_gc *r; + int ret; + + if (metadata_only) + return 0; + + genradix_free(&c->reflink_gc_table); + c->reflink_gc_nr = 0; + + if (initial) + return bch2_btree_and_journal_walk(c, BTREE_ID_reflink, + bch2_gc_reflink_start_initial_fn); + + bch2_trans_init(&trans, c, 0, 0); + + for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_PREFETCH, k, ret) { + const __le64 *refcount = bkey_refcount_c(k); + + if (!refcount) + continue; + + r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++, + GFP_KERNEL); + if (!r) { + ret = -ENOMEM; + break; + } + + r->offset = k.k->p.offset; + r->size = k.k->size; + r->refcount = 0; + } + bch2_trans_iter_put(&trans, iter); + + bch2_trans_exit(&trans); + return 0; +} + /** * bch2_gc - walk _all_ references to buckets, and recompute them: * @@ -1011,7 +1571,7 @@ static int bch2_gc_start(struct bch_fs *c) * move around - if references move backwards in the ordering GC * uses, GC could skip past them */ -int bch2_gc(struct bch_fs *c, bool initial) +int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only) { struct bch_dev *ca; u64 start_time = local_clock(); @@ -1027,21 +1587,43 @@ int bch2_gc(struct bch_fs *c, bool initial) closure_wait_event(&c->btree_interior_update_wait, !bch2_btree_interior_updates_nr_pending(c)); again: - ret = bch2_gc_start(c); + ret = bch2_gc_start(c, metadata_only) ?: + bch2_gc_reflink_start(c, initial, metadata_only); if (ret) goto out; bch2_mark_superblocks(c); - ret = bch2_gc_btrees(c, initial); + if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags) && + 
!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) && + c->opts.fix_errors != FSCK_OPT_NO) { + bch_info(c, "starting topology repair pass"); + ret = bch2_repair_topology(c); + if (ret) + goto out; + bch_info(c, "topology repair pass done"); + + set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags); + } + + ret = bch2_gc_btrees(c, initial, metadata_only); + + if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR && + !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) && + !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { + set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); + ret = 0; + } + + if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR) + ret = FSCK_ERR_EXIT; + if (ret) goto out; #if 0 bch2_mark_pending_btree_node_frees(c); #endif - bch2_mark_allocator_buckets(c); - c->gc_count++; if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) || @@ -1071,7 +1653,8 @@ out: bch2_journal_block(&c->journal); percpu_down_write(&c->mark_lock); - ret = bch2_gc_done(c, initial); + ret = bch2_gc_reflink_done(c, initial, metadata_only) ?: + bch2_gc_done(c, initial, metadata_only); bch2_journal_unblock(&c->journal); } else { @@ -1142,7 +1725,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id) struct btree_iter *iter; struct bkey_s_c k; struct bkey_buf sk; - int ret = 0; + int ret = 0, commit_err = 0; bch2_bkey_buf_init(&sk); bch2_trans_init(&trans, c, 0, 0); @@ -1154,20 +1737,21 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id) while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k))) { - if (gc_btree_gens_key(c, k)) { + c->gc_gens_pos = iter->pos; + + if (gc_btree_gens_key(c, k) && !commit_err) { bch2_bkey_buf_reassemble(&sk, c, k); bch2_extent_normalize(c, bkey_i_to_s(sk.k)); - bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k)); - bch2_trans_update(&trans, iter, sk.k, 0); - - ret = bch2_trans_commit(&trans, NULL, NULL, - BTREE_INSERT_NOFAIL); - if (ret == -EINTR) + commit_err = + bch2_trans_update(&trans, iter, sk.k, 0) ?: + bch2_trans_commit(&trans, NULL, NULL, + BTREE_INSERT_NOWAIT| + BTREE_INSERT_NOFAIL); + if (commit_err == -EINTR) { + commit_err = 0; continue; - if (ret) { - break; } } @@ -1206,7 +1790,9 @@ int bch2_gc_gens(struct bch_fs *c) } for (i = 0; i < BTREE_ID_NR; i++) - if (btree_node_type_needs_gc(i)) { + if ((1 << i) & BTREE_ID_HAS_PTRS) { + c->gc_gens_btree = i; + c->gc_gens_pos = POS_MIN; ret = bch2_gc_btree_gens(c, i); if (ret) { bch_err(c, "error recalculating oldest_gen: %i", ret); @@ -1223,352 +1809,15 @@ int bch2_gc_gens(struct bch_fs *c) up_read(&ca->bucket_lock); } + c->gc_gens_btree = 0; + c->gc_gens_pos = POS_MIN; + c->gc_count++; err: up_read(&c->gc_lock); return ret; } -/* Btree coalescing */ - -static void recalc_packed_keys(struct btree *b) -{ - struct bset *i = btree_bset_first(b); - struct bkey_packed *k; - - memset(&b->nr, 0, sizeof(b->nr)); - - BUG_ON(b->nsets != 1); - - vstruct_for_each(i, k) - btree_keys_account_key_add(&b->nr, 0, k); -} - -static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter, - struct btree *old_nodes[GC_MERGE_NODES]) -{ - struct btree *parent = btree_node_parent(iter, old_nodes[0]); - unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0; - unsigned blocks = btree_blocks(c) * 2 / 3; - struct btree *new_nodes[GC_MERGE_NODES]; - struct btree_update *as; - struct keylist keylist; - struct bkey_format_state format_state; - struct bkey_format new_format; - - memset(new_nodes, 0, sizeof(new_nodes)); - bch2_keylist_init(&keylist, NULL); - - /* Count keys that are not deleted */ - for (i = 0; i < GC_MERGE_NODES && 
old_nodes[i]; i++) - u64s += old_nodes[i]->nr.live_u64s; - - nr_old_nodes = nr_new_nodes = i; - - /* Check if all keys in @old_nodes could fit in one fewer node */ - if (nr_old_nodes <= 1 || - __vstruct_blocks(struct btree_node, c->block_bits, - DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks) - return; - - /* Find a format that all keys in @old_nodes can pack into */ - bch2_bkey_format_init(&format_state); - - /* - * XXX: this won't correctly take it account the new min/max keys: - */ - for (i = 0; i < nr_old_nodes; i++) - __bch2_btree_calc_format(&format_state, old_nodes[i]); - - new_format = bch2_bkey_format_done(&format_state); - - /* Check if repacking would make any nodes too big to fit */ - for (i = 0; i < nr_old_nodes; i++) - if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) { - trace_btree_gc_coalesce_fail(c, - BTREE_GC_COALESCE_FAIL_FORMAT_FITS); - return; - } - - if (bch2_keylist_realloc(&keylist, NULL, 0, - BKEY_BTREE_PTR_U64s_MAX * nr_old_nodes)) { - trace_btree_gc_coalesce_fail(c, - BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC); - return; - } - - as = bch2_btree_update_start(iter, old_nodes[0]->c.level, - btree_update_reserve_required(c, parent) + nr_old_nodes, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_USE_RESERVE); - if (IS_ERR(as)) { - trace_btree_gc_coalesce_fail(c, - BTREE_GC_COALESCE_FAIL_RESERVE_GET); - bch2_keylist_free(&keylist, NULL); - return; - } - - trace_btree_gc_coalesce(c, old_nodes[0]); - - for (i = 0; i < nr_old_nodes; i++) - bch2_btree_interior_update_will_free_node(as, old_nodes[i]); - - /* Repack everything with @new_format and sort down to one bset */ - for (i = 0; i < nr_old_nodes; i++) - new_nodes[i] = - __bch2_btree_node_alloc_replacement(as, old_nodes[i], - new_format); - - /* - * Conceptually we concatenate the nodes together and slice them - * up at different boundaries. 
- */ - for (i = nr_new_nodes - 1; i > 0; --i) { - struct btree *n1 = new_nodes[i]; - struct btree *n2 = new_nodes[i - 1]; - - struct bset *s1 = btree_bset_first(n1); - struct bset *s2 = btree_bset_first(n2); - struct bkey_packed *k, *last = NULL; - - /* Calculate how many keys from @n2 we could fit inside @n1 */ - u64s = 0; - - for (k = s2->start; - k < vstruct_last(s2) && - vstruct_blocks_plus(n1->data, c->block_bits, - u64s + k->u64s) <= blocks; - k = bkey_next(k)) { - last = k; - u64s += k->u64s; - } - - if (u64s == le16_to_cpu(s2->u64s)) { - /* n2 fits entirely in n1 */ - n1->key.k.p = n1->data->max_key = n2->data->max_key; - - memcpy_u64s(vstruct_last(s1), - s2->start, - le16_to_cpu(s2->u64s)); - le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s)); - - set_btree_bset_end(n1, n1->set); - - six_unlock_write(&n2->c.lock); - bch2_btree_node_free_never_inserted(c, n2); - six_unlock_intent(&n2->c.lock); - - memmove(new_nodes + i - 1, - new_nodes + i, - sizeof(new_nodes[0]) * (nr_new_nodes - i)); - new_nodes[--nr_new_nodes] = NULL; - } else if (u64s) { - /* move part of n2 into n1 */ - n1->key.k.p = n1->data->max_key = - bkey_unpack_pos(n1, last); - - n2->data->min_key = bpos_successor(n1->data->max_key); - - memcpy_u64s(vstruct_last(s1), - s2->start, u64s); - le16_add_cpu(&s1->u64s, u64s); - - memmove(s2->start, - vstruct_idx(s2, u64s), - (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64)); - s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s); - - set_btree_bset_end(n1, n1->set); - set_btree_bset_end(n2, n2->set); - } - } - - for (i = 0; i < nr_new_nodes; i++) { - struct btree *n = new_nodes[i]; - - recalc_packed_keys(n); - btree_node_reset_sib_u64s(n); - - bch2_btree_build_aux_trees(n); - - bch2_btree_update_add_new_node(as, n); - six_unlock_write(&n->c.lock); - - bch2_btree_node_write(c, n, SIX_LOCK_intent); - } - - /* - * The keys for the old nodes get deleted. We don't want to insert keys - * that compare equal to the keys for the new nodes we'll also be - * inserting - we can't because keys on a keylist must be strictly - * greater than the previous keys, and we also don't need to since the - * key for the new node will serve the same purpose (overwriting the key - * for the old node). 
- */ - for (i = 0; i < nr_old_nodes; i++) { - struct bkey_i delete; - unsigned j; - - for (j = 0; j < nr_new_nodes; j++) - if (!bpos_cmp(old_nodes[i]->key.k.p, - new_nodes[j]->key.k.p)) - goto next; - - bkey_init(&delete.k); - delete.k.p = old_nodes[i]->key.k.p; - bch2_keylist_add_in_order(&keylist, &delete); -next: - i = i; - } - - /* - * Keys for the new nodes get inserted: bch2_btree_insert_keys() only - * does the lookup once and thus expects the keys to be in sorted order - * so we have to make sure the new keys are correctly ordered with - * respect to the deleted keys added in the previous loop - */ - for (i = 0; i < nr_new_nodes; i++) - bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key); - - /* Insert the newly coalesced nodes */ - bch2_btree_insert_node(as, parent, iter, &keylist, 0); - - BUG_ON(!bch2_keylist_empty(&keylist)); - - BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]); - - bch2_btree_iter_node_replace(iter, new_nodes[0]); - - for (i = 0; i < nr_new_nodes; i++) - bch2_btree_update_get_open_buckets(as, new_nodes[i]); - - /* Free the old nodes and update our sliding window */ - for (i = 0; i < nr_old_nodes; i++) { - bch2_btree_node_free_inmem(c, old_nodes[i], iter); - - /* - * the index update might have triggered a split, in which case - * the nodes we coalesced - the new nodes we just created - - * might not be sibling nodes anymore - don't add them to the - * sliding window (except the first): - */ - if (!i) { - old_nodes[i] = new_nodes[i]; - } else { - old_nodes[i] = NULL; - } - } - - for (i = 0; i < nr_new_nodes; i++) - six_unlock_intent(&new_nodes[i]->c.lock); - - bch2_btree_update_done(as); - bch2_keylist_free(&keylist, NULL); -} - -static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) -{ - struct btree_trans trans; - struct btree_iter *iter; - struct btree *b; - bool kthread = (current->flags & PF_KTHREAD) != 0; - unsigned i; - int ret = 0; - - /* Sliding window of adjacent btree nodes */ - struct btree *merge[GC_MERGE_NODES]; - u32 lock_seq[GC_MERGE_NODES]; - - bch2_trans_init(&trans, c, 0, 0); - - /* - * XXX: We don't have a good way of positively matching on sibling nodes - * that have the same parent - this code works by handling the cases - * where they might not have the same parent, and is thus fragile. Ugh. - * - * Perhaps redo this to use multiple linked iterators? 
- */ - memset(merge, 0, sizeof(merge)); - - __for_each_btree_node(&trans, iter, btree_id, POS_MIN, - BTREE_MAX_DEPTH, 0, - BTREE_ITER_PREFETCH, b) { - memmove(merge + 1, merge, - sizeof(merge) - sizeof(merge[0])); - memmove(lock_seq + 1, lock_seq, - sizeof(lock_seq) - sizeof(lock_seq[0])); - - merge[0] = b; - - for (i = 1; i < GC_MERGE_NODES; i++) { - if (!merge[i] || - !six_relock_intent(&merge[i]->c.lock, lock_seq[i])) - break; - - if (merge[i]->c.level != merge[0]->c.level) { - six_unlock_intent(&merge[i]->c.lock); - break; - } - } - memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0])); - - bch2_coalesce_nodes(c, iter, merge); - - for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) { - lock_seq[i] = merge[i]->c.lock.state.seq; - six_unlock_intent(&merge[i]->c.lock); - } - - lock_seq[0] = merge[0]->c.lock.state.seq; - - if (kthread && kthread_should_stop()) { - ret = -ESHUTDOWN; - break; - } - - bch2_trans_cond_resched(&trans); - - /* - * If the parent node wasn't relocked, it might have been split - * and the nodes in our sliding window might not have the same - * parent anymore - blow away the sliding window: - */ - if (btree_iter_node(iter, iter->level + 1) && - !btree_node_intent_locked(iter, iter->level + 1)) - memset(merge + 1, 0, - (GC_MERGE_NODES - 1) * sizeof(merge[0])); - } - bch2_trans_iter_put(&trans, iter); - - return bch2_trans_exit(&trans) ?: ret; -} - -/** - * bch_coalesce - coalesce adjacent nodes with low occupancy - */ -void bch2_coalesce(struct bch_fs *c) -{ - enum btree_id id; - - down_read(&c->gc_lock); - trace_gc_coalesce_start(c); - - for (id = 0; id < BTREE_ID_NR; id++) { - int ret = c->btree_roots[id].b - ? bch2_coalesce_btree(c, id) - : 0; - - if (ret) { - if (ret != -ESHUTDOWN) - bch_err(c, "btree coalescing failed: %d", ret); - return; - } - } - - trace_gc_coalesce_end(c); - up_read(&c->gc_lock); -} - static int bch2_gc_thread(void *arg) { struct bch_fs *c = arg; diff --git a/libbcachefs/btree_gc.h b/libbcachefs/btree_gc.h index b1362a9..59dfb06 100644 --- a/libbcachefs/btree_gc.h +++ b/libbcachefs/btree_gc.h @@ -4,9 +4,7 @@ #include "btree_types.h" -void bch2_coalesce(struct bch_fs *); - -int bch2_gc(struct bch_fs *, bool); +int bch2_gc(struct bch_fs *, bool, bool); int bch2_gc_gens(struct bch_fs *); void bch2_gc_thread_stop(struct bch_fs *); int bch2_gc_thread_start(struct bch_fs *); @@ -89,15 +87,7 @@ static inline struct gc_pos gc_pos_btree_node(struct btree *b) */ static inline struct gc_pos gc_pos_btree_root(enum btree_id id) { - return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH); -} - -static inline struct gc_pos gc_pos_alloc(struct bch_fs *c, struct open_bucket *ob) -{ - return (struct gc_pos) { - .phase = GC_PHASE_ALLOC, - .pos = POS(ob ? 
ob - c->open_buckets : 0, 0), - }; + return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH); } static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos) diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c index ec1290f..40fa011 100644 --- a/libbcachefs/btree_io.c +++ b/libbcachefs/btree_io.c @@ -22,6 +22,51 @@ #include #include +void bch2_btree_node_io_unlock(struct btree *b) +{ + EBUG_ON(!btree_node_write_in_flight(b)); + + clear_btree_node_write_in_flight_inner(b); + clear_btree_node_write_in_flight(b); + wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); +} + +void bch2_btree_node_io_lock(struct btree *b) +{ + BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key)); + + wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight, + TASK_UNINTERRUPTIBLE); +} + +void __bch2_btree_node_wait_on_read(struct btree *b) +{ + wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, + TASK_UNINTERRUPTIBLE); +} + +void __bch2_btree_node_wait_on_write(struct btree *b) +{ + wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, + TASK_UNINTERRUPTIBLE); +} + +void bch2_btree_node_wait_on_read(struct btree *b) +{ + BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key)); + + wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, + TASK_UNINTERRUPTIBLE); +} + +void bch2_btree_node_wait_on_write(struct btree *b) +{ + BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key)); + + wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, + TASK_UNINTERRUPTIBLE); +} + static void verify_no_dups(struct btree *b, struct bkey_packed *start, struct bkey_packed *end) @@ -241,7 +286,6 @@ bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b, } static void btree_node_sort(struct bch_fs *c, struct btree *b, - struct btree_iter *iter, unsigned start_idx, unsigned end_idx, bool filter_whiteouts) @@ -377,8 +421,7 @@ void bch2_btree_sort_into(struct bch_fs *c, * We're about to add another bset to the btree node, so if there's currently * too many bsets - sort some of them together: */ -static bool btree_node_compact(struct bch_fs *c, struct btree *b, - struct btree_iter *iter) +static bool btree_node_compact(struct bch_fs *c, struct btree *b) { unsigned unwritten_idx; bool ret = false; @@ -390,13 +433,13 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b, break; if (b->nsets - unwritten_idx > 1) { - btree_node_sort(c, b, iter, unwritten_idx, + btree_node_sort(c, b, unwritten_idx, b->nsets, false); ret = true; } if (unwritten_idx > 1) { - btree_node_sort(c, b, iter, 0, unwritten_idx, false); + btree_node_sort(c, b, 0, unwritten_idx, false); ret = true; } @@ -422,16 +465,37 @@ void bch2_btree_build_aux_trees(struct btree *b) * * Returns true if we sorted (i.e. 
invalidated iterators */ -void bch2_btree_init_next(struct bch_fs *c, struct btree *b, - struct btree_iter *iter) +void bch2_btree_init_next(struct btree_trans *trans, + struct btree_iter *iter, + struct btree *b) { + struct bch_fs *c = trans->c; struct btree_node_entry *bne; - bool did_sort; + bool reinit_iter = false; EBUG_ON(!(b->c.lock.state.seq & 1)); EBUG_ON(iter && iter->l[b->c.level].b != b); + BUG_ON(bset_written(b, bset(b, &b->set[1]))); + + if (b->nsets == MAX_BSETS && + !btree_node_write_in_flight(b)) { + unsigned log_u64s[] = { + ilog2(bset_u64s(&b->set[0])), + ilog2(bset_u64s(&b->set[1])), + ilog2(bset_u64s(&b->set[2])), + }; + + if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) { + bch2_btree_node_write(c, b, SIX_LOCK_write); + reinit_iter = true; + } + } + + if (b->nsets == MAX_BSETS && + btree_node_compact(c, b)) + reinit_iter = true; - did_sort = btree_node_compact(c, b, iter); + BUG_ON(b->nsets >= MAX_BSETS); bne = want_new_bset(c, b); if (bne) @@ -439,7 +503,7 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b, bch2_btree_build_aux_trees(b); - if (iter && did_sort) + if (iter && reinit_iter) bch2_btree_iter_reinit_node(iter, b); } @@ -505,7 +569,7 @@ enum btree_validate_ret { \ switch (write) { \ case READ: \ - bch_err(c, "%s", _buf2); \ + bch_err(c, "%s", _buf2); \ \ switch (type) { \ case BTREE_ERR_FIXABLE: \ @@ -542,9 +606,58 @@ out: \ #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false) +/* + * When btree topology repair changes the start or end of a node, that might + * mean we have to drop keys that are no longer inside the node: + */ +void bch2_btree_node_drop_keys_outside_node(struct btree *b) +{ + struct bset_tree *t; + struct bkey_s_c k; + struct bkey unpacked; + struct btree_node_iter iter; + + for_each_bset(b, t) { + struct bset *i = bset(b, t); + struct bkey_packed *k; + + for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) + if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0) + break; + + if (k != i->start) { + unsigned shift = (u64 *) k - (u64 *) i->start; + + memmove_u64s_down(i->start, k, + (u64 *) vstruct_end(i) - (u64 *) k); + i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift); + set_btree_bset_end(b, t); + bch2_bset_set_no_aux_tree(b, t); + } + + for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) + if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0) + break; + + if (k != vstruct_last(i)) { + i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start); + set_btree_bset_end(b, t); + bch2_bset_set_no_aux_tree(b, t); + } + } + + bch2_btree_build_aux_trees(b); + + for_each_btree_node_key_unpack(b, k, &iter, &unpacked) { + BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); + BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + } +} + static int validate_bset(struct bch_fs *c, struct bch_dev *ca, struct btree *b, struct bset *i, - unsigned sectors, int write, bool have_retry) + unsigned offset, unsigned sectors, + int write, bool have_retry) { unsigned version = le16_to_cpu(i->version); const char *err; @@ -582,18 +695,23 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, BTREE_ERR_FATAL, c, ca, b, i, "BSET_SEPARATE_WHITEOUTS no longer supported"); - if (btree_err_on(b->written + sectors > c->opts.btree_node_size, + if (btree_err_on(offset + sectors > c->opts.btree_node_size, BTREE_ERR_FIXABLE, c, ca, b, i, "bset past end of btree node")) { i->u64s = 0; return 0; } - btree_err_on(b->written && !i->u64s, + btree_err_on(offset && !i->u64s, BTREE_ERR_FIXABLE, c, ca, b, i, "empty bset"); - if 
(!b->written) { + btree_err_on(BSET_OFFSET(i) && + BSET_OFFSET(i) != offset, + BTREE_ERR_WANT_RETRY, c, ca, b, i, + "bset at wrong sector offset"); + + if (!offset) { struct btree_node *bn = container_of(i, struct btree_node, keys); /* These indicate that we read the wrong btree node: */ @@ -664,6 +782,8 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, { unsigned version = le16_to_cpu(i->version); struct bkey_packed *k, *prev = NULL; + bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && + BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); int ret = 0; for (k = i->start; @@ -697,7 +817,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, u = __bkey_disassemble(b, k, &tmp); invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?: - bch2_bkey_in_btree_node(b, u.s_c) ?: + (!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?: (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL); if (invalid) { char buf[160]; @@ -754,7 +874,11 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, struct bch_extent_ptr *ptr; struct bset *i; bool used_mempool, blacklisted; + bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && + BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); unsigned u64s; + unsigned blacklisted_written, nonblacklisted_written = 0; + unsigned ptr_written = btree_ptr_sectors_written(&b->key); int ret, retry_read = 0, write = READ; b->version_ondisk = U16_MAX; @@ -785,7 +909,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, b->data->keys.seq, bp->seq); } - while (b->written < c->opts.btree_node_size) { + while (b->written < (ptr_written ?: c->opts.btree_node_size)) { unsigned sectors, whiteout_u64s = 0; struct nonce nonce; struct bch_csum csum; @@ -841,7 +965,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, b->version_ondisk = min(b->version_ondisk, le16_to_cpu(i->version)); - ret = validate_bset(c, ca, b, i, sectors, + ret = validate_bset(c, ca, b, i, b->written, sectors, READ, have_retry); if (ret) goto fsck_err; @@ -865,6 +989,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, btree_err_on(blacklisted && first, BTREE_ERR_FIXABLE, c, ca, b, i, "first btree node bset has blacklisted journal seq"); + + btree_err_on(blacklisted && ptr_written, + BTREE_ERR_FIXABLE, c, ca, b, i, + "found blacklisted bset in btree node with sectors_written"); if (blacklisted && !first) continue; @@ -874,14 +1002,38 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, sort_iter_add(iter, vstruct_idx(i, whiteout_u64s), vstruct_last(i)); + + nonblacklisted_written = b->written; } - for (bne = write_block(b); - bset_byte_offset(b, bne) < btree_bytes(c); - bne = (void *) bne + block_bytes(c)) - btree_err_on(bne->keys.seq == b->data->keys.seq, + if (ptr_written) { + btree_err_on(b->written < ptr_written, BTREE_ERR_WANT_RETRY, c, ca, b, NULL, - "found bset signature after last bset"); + "btree node data missing: expected %u sectors, found %u", + ptr_written, b->written); + } else { + for (bne = write_block(b); + bset_byte_offset(b, bne) < btree_bytes(c); + bne = (void *) bne + block_bytes(c)) + btree_err_on(bne->keys.seq == b->data->keys.seq && + !bch2_journal_seq_is_blacklisted(c, + le64_to_cpu(bne->keys.journal_seq), + true), + BTREE_ERR_WANT_RETRY, c, ca, b, NULL, + "found bset signature after last bset"); + + /* + * Blacklisted bsets are those that were written after the most recent + * (flush) journal write. 
Since there wasn't a flush, they may not have + * made it to all devices - which means we shouldn't write new bsets + * after them, as that could leave a gap and then reads from that device + * wouldn't find all the bsets in that btree node - which means it's + * important that we start writing new bsets after the most recent _non_ + * blacklisted bset: + */ + blacklisted_written = b->written; + b->written = nonblacklisted_written; + } sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool); sorted->keys.u64s = 0; @@ -901,6 +1053,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, btree_bounce_free(c, btree_bytes(c), used_mempool, sorted); + if (updated_range) + bch2_btree_node_drop_keys_outside_node(b); + i = &b->data->keys; for (k = i->start; k != vstruct_last(i);) { struct bkey tmp; @@ -946,6 +1101,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, if (ca->mi.state != BCH_MEMBER_STATE_rw) set_btree_node_need_rewrite(b); } + + if (!ptr_written) + set_btree_node_need_rewrite(b); out: mempool_free(iter, &c->fill_iter); return retry_read; @@ -964,12 +1122,13 @@ static void btree_node_read_work(struct work_struct *work) struct btree_read_bio *rb = container_of(work, struct btree_read_bio, work); struct bch_fs *c = rb->c; + struct btree *b = rb->b; struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev); - struct btree *b = rb->bio.bi_private; struct bio *bio = &rb->bio; struct bch_io_failures failed = { .nr = 0 }; char buf[200]; struct printbuf out; + bool saw_error = false; bool can_retry; goto start; @@ -1007,6 +1166,8 @@ start: !bch2_btree_node_read_done(c, ca, b, can_retry)) break; + saw_error = true; + if (!can_retry) { set_btree_node_read_error(b); break; @@ -1016,6 +1177,10 @@ start: bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], rb->start_time); bio_put(&rb->bio); + + if (saw_error && !btree_node_read_error(b)) + bch2_btree_node_rewrite_async(c, b); + clear_btree_node_read_in_flight(b); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); } @@ -1031,7 +1196,264 @@ static void btree_node_read_endio(struct bio *bio) bch2_latency_acct(ca, rb->start_time, READ); } - queue_work(system_unbound_wq, &rb->work); + queue_work(c->io_complete_wq, &rb->work); +} + +struct btree_node_read_all { + struct closure cl; + struct bch_fs *c; + struct btree *b; + unsigned nr; + void *buf[BCH_REPLICAS_MAX]; + struct bio *bio[BCH_REPLICAS_MAX]; + int err[BCH_REPLICAS_MAX]; +}; + +static unsigned btree_node_sectors_written(struct bch_fs *c, void *data) +{ + struct btree_node *bn = data; + struct btree_node_entry *bne; + unsigned offset = 0; + + if (le64_to_cpu(bn->magic) != bset_magic(c)) + return 0; + + while (offset < c->opts.btree_node_size) { + if (!offset) { + offset += vstruct_sectors(bn, c->block_bits); + } else { + bne = data + (offset << 9); + if (bne->keys.seq != bn->keys.seq) + break; + offset += vstruct_sectors(bne, c->block_bits); + } + } + + return offset; +} + +static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data) +{ + struct btree_node *bn = data; + struct btree_node_entry *bne; + + if (!offset) + return false; + + while (offset < c->opts.btree_node_size) { + bne = data + (offset << 9); + if (bne->keys.seq == bn->keys.seq) + return true; + offset++; + } + + return false; + return offset; +} + +static void btree_node_read_all_replicas_done(struct closure *cl) +{ + struct btree_node_read_all *ra = + container_of(cl, struct btree_node_read_all, cl); + struct bch_fs *c = ra->c; + struct btree *b = 
ra->b; + bool dump_bset_maps = false; + bool have_retry = false; + int ret = 0, best = -1, write = READ; + unsigned i, written, written2; + __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2 + ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0; + + for (i = 0; i < ra->nr; i++) { + struct btree_node *bn = ra->buf[i]; + + if (ra->err[i]) + continue; + + if (le64_to_cpu(bn->magic) != bset_magic(c) || + (seq && seq != bn->keys.seq)) + continue; + + if (best < 0) { + best = i; + written = btree_node_sectors_written(c, bn); + continue; + } + + written2 = btree_node_sectors_written(c, ra->buf[i]); + if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL, + "btree node sectors written mismatch: %u != %u", + written, written2) || + btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]), + BTREE_ERR_FIXABLE, c, NULL, b, NULL, + "found bset signature after last bset") || + btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9), + BTREE_ERR_FIXABLE, c, NULL, b, NULL, + "btree node replicas content mismatch")) + dump_bset_maps = true; + + if (written2 > written) { + written = written2; + best = i; + } + } +fsck_err: + if (dump_bset_maps) { + for (i = 0; i < ra->nr; i++) { + char buf[200]; + struct printbuf out = PBUF(buf); + struct btree_node *bn = ra->buf[i]; + struct btree_node_entry *bne = NULL; + unsigned offset = 0, sectors; + bool gap = false; + + if (ra->err[i]) + continue; + + while (offset < c->opts.btree_node_size) { + if (!offset) { + sectors = vstruct_sectors(bn, c->block_bits); + } else { + bne = ra->buf[i] + (offset << 9); + if (bne->keys.seq != bn->keys.seq) + break; + sectors = vstruct_sectors(bne, c->block_bits); + } + + pr_buf(&out, " %u-%u", offset, offset + sectors); + if (bne && bch2_journal_seq_is_blacklisted(c, + le64_to_cpu(bne->keys.journal_seq), false)) + pr_buf(&out, "*"); + offset += sectors; + } + + while (offset < c->opts.btree_node_size) { + bne = ra->buf[i] + (offset << 9); + if (bne->keys.seq == bn->keys.seq) { + if (!gap) + pr_buf(&out, " GAP"); + gap = true; + + sectors = vstruct_sectors(bne, c->block_bits); + pr_buf(&out, " %u-%u", offset, offset + sectors); + if (bch2_journal_seq_is_blacklisted(c, + le64_to_cpu(bne->keys.journal_seq), false)) + pr_buf(&out, "*"); + } + offset++; + } + + bch_err(c, "replica %u:%s", i, buf); + } + } + + if (best >= 0) { + memcpy(b->data, ra->buf[best], btree_bytes(c)); + ret = bch2_btree_node_read_done(c, NULL, b, false); + } else { + ret = -1; + } + + if (ret) + set_btree_node_read_error(b); + + for (i = 0; i < ra->nr; i++) { + mempool_free(ra->buf[i], &c->btree_bounce_pool); + bio_put(ra->bio[i]); + } + + closure_debug_destroy(&ra->cl); + kfree(ra); + + clear_btree_node_read_in_flight(b); + wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); +} + +static void btree_node_read_all_replicas_endio(struct bio *bio) +{ + struct btree_read_bio *rb = + container_of(bio, struct btree_read_bio, bio); + struct bch_fs *c = rb->c; + struct btree_node_read_all *ra = rb->ra; + + if (rb->have_ioref) { + struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev); + bch2_latency_acct(ca, rb->start_time, READ); + } + + ra->err[rb->idx] = bio->bi_status; + closure_put(&ra->cl); +} + +/* + * XXX This allocates multiple times from the same mempools, and can deadlock + * under sufficient memory pressure (but is only a debug path) + */ +static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync) +{ + struct bkey_s_c k = bkey_i_to_s_c(&b->key); + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const 
union bch_extent_entry *entry; + struct extent_ptr_decoded pick; + struct btree_node_read_all *ra; + unsigned i; + + ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; + + closure_init(&ra->cl, NULL); + ra->c = c; + ra->b = b; + ra->nr = bch2_bkey_nr_ptrs(k); + + for (i = 0; i < ra->nr; i++) { + ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS); + ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i], + btree_bytes(c)), + &c->btree_bio); + } + + i = 0; + bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) { + struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev); + struct btree_read_bio *rb = + container_of(ra->bio[i], struct btree_read_bio, bio); + rb->c = c; + rb->b = b; + rb->ra = ra; + rb->start_time = local_clock(); + rb->have_ioref = bch2_dev_get_ioref(ca, READ); + rb->idx = i; + rb->pick = pick; + rb->bio.bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META; + rb->bio.bi_iter.bi_sector = pick.ptr.offset; + rb->bio.bi_end_io = btree_node_read_all_replicas_endio; + bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c)); + + if (rb->have_ioref) { + this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], + bio_sectors(&rb->bio)); + bio_set_dev(&rb->bio, ca->disk_sb.bdev); + + closure_get(&ra->cl); + submit_bio(&rb->bio); + } else { + ra->err[i] = BLK_STS_REMOVED; + } + + i++; + } + + if (sync) { + closure_sync(&ra->cl); + btree_node_read_all_replicas_done(&ra->cl); + } else { + continue_at(&ra->cl, btree_node_read_all_replicas_done, + c->io_complete_wq); + } + + return 0; } void bch2_btree_node_read(struct bch_fs *c, struct btree *b, @@ -1041,14 +1463,21 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b, struct btree_read_bio *rb; struct bch_dev *ca; struct bio *bio; + char buf[200]; int ret; + btree_pos_to_text(&PBUF(buf), c, b); trace_btree_read(c, b); + if (bch2_verify_all_btree_replicas && + !btree_node_read_all_replicas(c, b, sync)) + return; + ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick); if (bch2_fs_fatal_err_on(ret <= 0, c, - "btree node read error: no device to read from")) { + "btree node read error: no device to read from\n" + " at %s", buf)) { set_btree_node_read_error(b); return; } @@ -1060,6 +1489,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b, &c->btree_bio); rb = container_of(bio, struct btree_read_bio, bio); rb->c = c; + rb->b = b; + rb->ra = NULL; rb->start_time = local_clock(); rb->have_ioref = bch2_dev_get_ioref(ca, READ); rb->pick = pick; @@ -1067,11 +1498,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b, bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META; bio->bi_iter.bi_sector = pick.ptr.offset; bio->bi_end_io = btree_node_read_endio; - bio->bi_private = b; bch2_bio_map(bio, b->data, btree_bytes(c)); - set_btree_node_read_in_flight(b); - if (rb->have_ioref) { this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], bio_sectors(bio)); @@ -1080,7 +1508,6 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b, if (sync) { submit_bio_wait(bio); - bio->bi_private = b; btree_node_read_work(&rb->work); } else { submit_bio(bio); @@ -1091,8 +1518,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b, if (sync) btree_node_read_work(&rb->work); else - queue_work(system_unbound_wq, &rb->work); - + queue_work(c->io_complete_wq, &rb->work); } } @@ -1118,6 +1544,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id, bkey_copy(&b->key, k); BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id)); + set_btree_node_read_in_flight(b); + bch2_btree_node_read(c, 
b, true); if (btree_node_read_error(b)) { @@ -1161,83 +1589,50 @@ void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, static void btree_node_write_done(struct bch_fs *c, struct btree *b) { struct btree_write *w = btree_prev_write(b); + unsigned long old, new, v; bch2_btree_complete_write(c, b, w); - btree_node_io_unlock(b); -} - -static void bch2_btree_node_write_error(struct bch_fs *c, - struct btree_write_bio *wbio) -{ - struct btree *b = wbio->wbio.bio.bi_private; - struct bkey_buf k; - struct bch_extent_ptr *ptr; - struct btree_trans trans; - struct btree_iter *iter; - int ret; - - bch2_bkey_buf_init(&k); - bch2_trans_init(&trans, c, 0, 0); - - iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p, - BTREE_MAX_DEPTH, b->c.level, 0); -retry: - ret = bch2_btree_iter_traverse(iter); - if (ret) - goto err; - /* has node been freed? */ - if (iter->l[b->c.level].b != b) { - /* node has been freed: */ - BUG_ON(!btree_node_dying(b)); - goto out; - } - - BUG_ON(!btree_node_hashed(b)); - - bch2_bkey_buf_copy(&k, c, &b->key); + v = READ_ONCE(b->flags); + do { + old = new = v; - bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr, - bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev)); + if (old & (1U << BTREE_NODE_need_write)) + goto do_write; - if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k))) - goto err; + new &= ~(1U << BTREE_NODE_write_in_flight); + new &= ~(1U << BTREE_NODE_write_in_flight_inner); + } while ((v = cmpxchg(&b->flags, old, new)) != old); - ret = bch2_btree_node_update_key(c, iter, b, k.k); - if (ret == -EINTR) - goto retry; - if (ret) - goto err; -out: - bch2_trans_iter_put(&trans, iter); - bch2_trans_exit(&trans); - bch2_bkey_buf_exit(&k, c); - bio_put(&wbio->wbio.bio); - btree_node_write_done(c, b); + wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); return; -err: - set_btree_node_noevict(b); - bch2_fs_fatal_error(c, "fatal error writing btree node"); - goto out; -} -void bch2_btree_write_error_work(struct work_struct *work) -{ - struct bch_fs *c = container_of(work, struct bch_fs, - btree_write_error_work); - struct bio *bio; +do_write: + six_lock_read(&b->c.lock, NULL, NULL); + v = READ_ONCE(b->flags); + do { + old = new = v; - while (1) { - spin_lock_irq(&c->btree_write_error_lock); - bio = bio_list_pop(&c->btree_write_error_list); - spin_unlock_irq(&c->btree_write_error_lock); + if ((old & (1U << BTREE_NODE_dirty)) && + (old & (1U << BTREE_NODE_need_write)) && + !(old & (1U << BTREE_NODE_never_write)) && + btree_node_may_write(b)) { + new &= ~(1U << BTREE_NODE_dirty); + new &= ~(1U << BTREE_NODE_need_write); + new |= (1U << BTREE_NODE_write_in_flight); + new |= (1U << BTREE_NODE_write_in_flight_inner); + new |= (1U << BTREE_NODE_just_written); + new ^= (1U << BTREE_NODE_write_idx); + } else { + new &= ~(1U << BTREE_NODE_write_in_flight); + new &= ~(1U << BTREE_NODE_write_in_flight_inner); + } + } while ((v = cmpxchg(&b->flags, old, new)) != old); - if (!bio) - break; + if (new & (1U << BTREE_NODE_write_in_flight)) + __bch2_btree_node_write(c, b, true); - bch2_btree_node_write_error(c, - container_of(bio, struct btree_write_bio, wbio.bio)); - } + six_unlock_read(&b->c.lock); } static void btree_node_write_work(struct work_struct *work) @@ -1246,25 +1641,39 @@ static void btree_node_write_work(struct work_struct *work) container_of(work, struct btree_write_bio, work); struct bch_fs *c = wbio->wbio.c; struct btree *b = wbio->wbio.bio.bi_private; + struct bch_extent_ptr *ptr; + int ret; btree_bounce_free(c, - wbio->bytes, + wbio->data_bytes, 
wbio->wbio.used_mempool, wbio->data); - if (wbio->wbio.failed.nr) { - unsigned long flags; + bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr, + bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev)); - spin_lock_irqsave(&c->btree_write_error_lock, flags); - bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio); - spin_unlock_irqrestore(&c->btree_write_error_lock, flags); + if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) + goto err; - queue_work(c->wq, &c->btree_write_error_work); - return; - } + if (wbio->wbio.first_btree_write) { + if (wbio->wbio.failed.nr) { + } + } else { + ret = bch2_trans_do(c, NULL, NULL, 0, + bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key, + !wbio->wbio.failed.nr)); + if (ret) + goto err; + } +out: bio_put(&wbio->wbio.bio); btree_node_write_done(c, b); + return; +err: + set_btree_node_noevict(b); + bch2_fs_fatal_error(c, "fatal error writing btree node"); + goto out; } static void btree_node_write_endio(struct bio *bio) @@ -1272,7 +1681,9 @@ static void btree_node_write_endio(struct bio *bio) struct bch_write_bio *wbio = to_wbio(bio); struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL; struct bch_write_bio *orig = parent ?: wbio; + struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio); struct bch_fs *c = wbio->c; + struct btree *b = wbio->bio.bi_private; struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev); unsigned long flags; @@ -1293,13 +1704,13 @@ static void btree_node_write_endio(struct bio *bio) if (parent) { bio_put(bio); bio_endio(&parent->bio); - } else { - struct btree_write_bio *wb = - container_of(orig, struct btree_write_bio, wbio); - - INIT_WORK(&wb->work, btree_node_write_work); - queue_work(system_unbound_wq, &wb->work); + return; } + + clear_btree_node_write_in_flight_inner(b); + wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); + INIT_WORK(&wb->work, btree_node_write_work); + queue_work(c->btree_io_complete_wq, &wb->work); } static int validate_bset_for_write(struct bch_fs *c, struct btree *b, @@ -1312,7 +1723,7 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, return -1; ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?: - validate_bset(c, NULL, b, i, sectors, WRITE, false); + validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false); if (ret) { bch2_inconsistent_error(c); dump_stack(); @@ -1321,16 +1732,27 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, return ret; } -void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, - enum six_lock_type lock_type_held) +static void btree_write_submit(struct work_struct *work) +{ + struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work); + struct bch_extent_ptr *ptr; + __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; + + bkey_copy(&tmp.k, &wbio->key); + + bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr) + ptr->offset += wbio->sector_offset; + + bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k); +} + +void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started) { struct btree_write_bio *wbio; struct bset_tree *t; struct bset *i; struct btree_node *bn = NULL; struct btree_node_entry *bne = NULL; - struct bkey_buf k; - struct bch_extent_ptr *ptr; struct sort_iter sort_iter; struct nonce nonce; unsigned bytes_to_write, sectors_to_write, bytes, u64s; @@ -1340,7 +1762,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool validate_before_checksum = false; void *data; 
- bch2_bkey_buf_init(&k); + if (already_started) + goto do_write; if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags)) return; @@ -1364,18 +1787,19 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, if (old & (1 << BTREE_NODE_never_write)) return; - if (old & (1 << BTREE_NODE_write_in_flight)) { - btree_node_wait_on_io(b); - continue; - } + BUG_ON(old & (1 << BTREE_NODE_write_in_flight)); new &= ~(1 << BTREE_NODE_dirty); new &= ~(1 << BTREE_NODE_need_write); new |= (1 << BTREE_NODE_write_in_flight); + new |= (1 << BTREE_NODE_write_in_flight_inner); new |= (1 << BTREE_NODE_just_written); new ^= (1 << BTREE_NODE_write_idx); } while (cmpxchg_acquire(&b->flags, old, new) != old); + if (new & (1U << BTREE_NODE_need_write)) + return; +do_write: atomic_dec(&c->btree_cache.dirty); BUG_ON(btree_node_fake(b)); @@ -1415,6 +1839,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */ bytes += 8; + /* buffer must be a multiple of the block size */ + bytes = round_up(bytes, block_bytes(c)); + data = btree_bounce_alloc(c, bytes, &used_mempool); if (!b->written) { @@ -1459,6 +1886,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, i->version = c->sb.version < bcachefs_metadata_version_new_versioning ? cpu_to_le16(BCH_BSET_VERSION_OLD) : cpu_to_le16(c->sb.version); + SET_BSET_OFFSET(i, b->written); SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c)); if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))) @@ -1517,45 +1945,42 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, struct btree_write_bio, wbio.bio); wbio_init(&wbio->wbio.bio); wbio->data = data; - wbio->bytes = bytes; + wbio->data_bytes = bytes; + wbio->sector_offset = b->written; + wbio->wbio.c = c; wbio->wbio.used_mempool = used_mempool; + wbio->wbio.first_btree_write = !b->written; wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META; wbio->wbio.bio.bi_end_io = btree_node_write_endio; wbio->wbio.bio.bi_private = b; bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9); - /* - * If we're appending to a leaf node, we don't technically need FUA - - * this write just needs to be persisted before the next journal write, - * which will be marked FLUSH|FUA. - * - * Similarly if we're writing a new btree root - the pointer is going to - * be in the next journal entry. - * - * But if we're writing a new btree node (that isn't a root) or - * appending to a non leaf btree node, we need either FUA or a flush - * when we write the parent with the new pointer. FUA is cheaper than a - * flush, and writes appending to leaf nodes aren't blocking anything so - * just make all btree node writes FUA to keep things sane. 
- */ + bkey_copy(&wbio->key, &b->key); - bch2_bkey_buf_copy(&k, c, &b->key); + b->written += sectors_to_write; - bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr) - ptr->offset += b->written; + if (wbio->wbio.first_btree_write && + b->key.k.type == KEY_TYPE_btree_ptr_v2) + bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written = + cpu_to_le16(b->written); - b->written += sectors_to_write; + if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2) + bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written = + cpu_to_le16(b->written); atomic64_inc(&c->btree_writes_nr); atomic64_add(sectors_to_write, &c->btree_writes_sectors); - /* XXX: submitting IO with btree locks held: */ - bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k); - bch2_bkey_buf_exit(&k, c); + INIT_WORK(&wbio->work, btree_write_submit); + queue_work(c->io_complete_wq, &wbio->work); return; err: set_btree_node_noevict(b); + if (!b->written && + b->key.k.type == KEY_TYPE_btree_ptr_v2) + bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written = + cpu_to_le16(sectors_to_write); b->written += sectors_to_write; nowrite: btree_bounce_free(c, bytes, used_mempool, data); @@ -1592,7 +2017,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) * single bset: */ if (b->nsets > 1) { - btree_node_sort(c, b, NULL, 0, b->nsets, true); + btree_node_sort(c, b, 0, b->nsets, true); invalidated_iter = true; } else { invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL); @@ -1622,13 +2047,12 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) * Use this one if the node is intent locked: */ void bch2_btree_node_write(struct bch_fs *c, struct btree *b, - enum six_lock_type lock_type_held) + enum six_lock_type lock_type_held) { - BUG_ON(lock_type_held == SIX_LOCK_write); - if (lock_type_held == SIX_LOCK_intent || - six_lock_tryupgrade(&b->c.lock)) { - __bch2_btree_node_write(c, b, SIX_LOCK_intent); + (lock_type_held == SIX_LOCK_read && + six_lock_tryupgrade(&b->c.lock))) { + __bch2_btree_node_write(c, b, false); /* don't cycle lock unnecessarily: */ if (btree_node_just_written(b) && @@ -1640,7 +2064,10 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b, if (lock_type_held == SIX_LOCK_read) six_lock_downgrade(&b->c.lock); } else { - __bch2_btree_node_write(c, b, SIX_LOCK_read); + __bch2_btree_node_write(c, b, false); + if (lock_type_held == SIX_LOCK_write && + btree_node_just_written(b)) + bch2_btree_post_write_cleanup(c, b); } } diff --git a/libbcachefs/btree_io.h b/libbcachefs/btree_io.h index 9c14cd3..7fdcf87 100644 --- a/libbcachefs/btree_io.h +++ b/libbcachefs/btree_io.h @@ -13,6 +13,7 @@ struct bch_fs; struct btree_write; struct btree; struct btree_iter; +struct btree_node_read_all; static inline bool btree_node_dirty(struct btree *b) { @@ -31,10 +32,20 @@ static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b) atomic_dec(&c->btree_cache.dirty); } +static inline unsigned btree_ptr_sectors_written(struct bkey_i *k) +{ + return k->k.type == KEY_TYPE_btree_ptr_v2 + ? 
le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written) + : 0; +} + struct btree_read_bio { struct bch_fs *c; + struct btree *b; + struct btree_node_read_all *ra; u64 start_time; unsigned have_ioref:1; + unsigned idx:7; struct extent_ptr_decoded pick; struct work_struct work; struct bio bio; @@ -42,29 +53,19 @@ struct btree_read_bio { struct btree_write_bio { struct work_struct work; + __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX); void *data; - unsigned bytes; + unsigned data_bytes; + unsigned sector_offset; struct bch_write_bio wbio; }; -static inline void btree_node_io_unlock(struct btree *b) -{ - EBUG_ON(!btree_node_write_in_flight(b)); - clear_btree_node_write_in_flight(b); - wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); -} - -static inline void btree_node_io_lock(struct btree *b) -{ - wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight, - TASK_UNINTERRUPTIBLE); -} - -static inline void btree_node_wait_on_io(struct btree *b) -{ - wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, - TASK_UNINTERRUPTIBLE); -} +void bch2_btree_node_io_unlock(struct btree *); +void bch2_btree_node_io_lock(struct btree *); +void __bch2_btree_node_wait_on_read(struct btree *); +void __bch2_btree_node_wait_on_write(struct btree *); +void bch2_btree_node_wait_on_read(struct btree *); +void bch2_btree_node_wait_on_write(struct btree *); static inline bool btree_node_may_write(struct btree *b) { @@ -130,9 +131,11 @@ static inline void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offse void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *); +void bch2_btree_node_drop_keys_outside_node(struct btree *); + void bch2_btree_build_aux_trees(struct btree *); -void bch2_btree_init_next(struct bch_fs *, struct btree *, - struct btree_iter *); +void bch2_btree_init_next(struct btree_trans *, struct btree_iter *, + struct btree *); int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *, struct btree *, bool); @@ -142,10 +145,8 @@ int bch2_btree_root_read(struct bch_fs *, enum btree_id, void bch2_btree_complete_write(struct bch_fs *, struct btree *, struct btree_write *); -void bch2_btree_write_error_work(struct work_struct *); -void __bch2_btree_node_write(struct bch_fs *, struct btree *, - enum six_lock_type); +void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool); bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *); void bch2_btree_node_write(struct bch_fs *, struct btree *, @@ -154,18 +155,11 @@ void bch2_btree_node_write(struct bch_fs *, struct btree *, static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b, enum six_lock_type lock_held) { - while (b->written && - btree_node_need_write(b) && - btree_node_may_write(b)) { - if (!btree_node_write_in_flight(b)) { - bch2_btree_node_write(c, b, lock_held); - break; - } - - six_unlock_type(&b->c.lock, lock_held); - btree_node_wait_on_io(b); - btree_node_lock_type(c, b, lock_held); - } + if (b->written && + btree_node_need_write(b) && + btree_node_may_write(b) && + !btree_node_write_in_flight(b)) + bch2_btree_node_write(c, b, lock_held); } #define bch2_btree_node_write_cond(_c, _b, cond) \ diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index 425c9ad..fe710d1 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -18,6 +18,20 @@ #include static void btree_iter_set_search_pos(struct btree_iter *, struct bpos); +static void btree_trans_sort_iters(struct btree_trans *); +static void btree_iter_check_sort(struct btree_trans *, struct 
btree_iter *); +static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long); +static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *, + struct btree_iter *); +static void btree_iter_copy(struct btree_iter *, struct btree_iter *); + +static inline int btree_iter_cmp(const struct btree_iter *l, + const struct btree_iter *r) +{ + return cmp_int(l->btree_id, r->btree_id) ?: + -cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?: + bkey_cmp(l->real_pos, r->real_pos); +} static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p) { @@ -170,8 +184,8 @@ success: return true; } -static inline bool btree_iter_get_locks(struct btree_iter *iter, - bool upgrade, bool trace) +static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade, + unsigned long trace_ip) { unsigned l = iter->level; int fail_idx = -1; @@ -183,17 +197,18 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, if (!(upgrade ? bch2_btree_node_upgrade(iter, l) : bch2_btree_node_relock(iter, l))) { - if (trace) - (upgrade - ? trace_node_upgrade_fail - : trace_node_relock_fail)(l, iter->l[l].lock_seq, - is_btree_node(iter, l) - ? 0 - : (unsigned long) iter->l[l].b, - is_btree_node(iter, l) - ? iter->l[l].b->c.lock.state.seq - : 0); - + (upgrade + ? trace_node_upgrade_fail + : trace_node_relock_fail)(iter->trans->ip, trace_ip, + btree_iter_type(iter) == BTREE_ITER_CACHED, + iter->btree_id, &iter->real_pos, + l, iter->l[l].lock_seq, + is_btree_node(iter, l) + ? 0 + : (unsigned long) iter->l[l].b, + is_btree_node(iter, l) + ? iter->l[l].b->c.lock.state.seq + : 0); fail_idx = l; btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); } @@ -260,13 +275,8 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, */ if (type == SIX_LOCK_intent && linked->nodes_locked != linked->nodes_intent_locked) { - linked->locks_want = max_t(unsigned, - linked->locks_want, - __fls(linked->nodes_locked) + 1); - if (!btree_iter_get_locks(linked, true, false)) { - deadlock_iter = linked; - reason = 1; - } + deadlock_iter = linked; + reason = 1; } if (linked->btree_id != iter->btree_id) { @@ -295,14 +305,8 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, * we're about to lock, it must have the ancestors locked too: */ if (level > __fls(linked->nodes_locked)) { - linked->locks_want = - max(level + 1, max_t(unsigned, - linked->locks_want, - iter->locks_want)); - if (!btree_iter_get_locks(linked, true, false)) { - deadlock_iter = linked; - reason = 5; - } + deadlock_iter = linked; + reason = 5; } /* Must lock btree nodes in key order: */ @@ -311,27 +315,20 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, btree_iter_type(linked))) <= 0) { deadlock_iter = linked; reason = 7; - } - - /* - * Recheck if this is a node we already have locked - since one - * of the get_locks() calls might've successfully - * upgraded/relocked it: - */ - if (linked->l[level].b == b && - btree_node_locked_type(linked, level) >= type) { - six_lock_increment(&b->c.lock, type); - return true; + BUG_ON(trans->in_traverse_all); } } if (unlikely(deadlock_iter)) { - trace_trans_restart_would_deadlock(iter->trans->ip, ip, - reason, + trace_trans_restart_would_deadlock(trans->ip, ip, + trans->in_traverse_all, reason, deadlock_iter->btree_id, btree_iter_type(deadlock_iter), + &deadlock_iter->real_pos, iter->btree_id, - btree_iter_type(iter)); + btree_iter_type(iter), + &pos); + btree_trans_restart(trans); return false; } @@ -369,7 +366,7 @@ static void 
bch2_btree_iter_verify_locks(struct btree_iter *iter) return; } - for (l = 0; is_btree_node(iter, l); l++) { + for (l = 0; btree_iter_node(iter, l); l++) { if (iter->uptodate >= BTREE_ITER_NEED_RELOCK && !btree_node_locked(iter, l)) continue; @@ -390,10 +387,44 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans) static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {} #endif +/* + * Only for btree_cache.c - only relocks intent locks + */ +bool bch2_btree_iter_relock_intent(struct btree_iter *iter) +{ + unsigned l; + + for (l = iter->level; + l < iter->locks_want && btree_iter_node(iter, l); + l++) { + if (!bch2_btree_node_relock(iter, l)) { + trace_node_relock_fail(iter->trans->ip, _RET_IP_, + btree_iter_type(iter) == BTREE_ITER_CACHED, + iter->btree_id, &iter->real_pos, + l, iter->l[l].lock_seq, + is_btree_node(iter, l) + ? 0 + : (unsigned long) iter->l[l].b, + is_btree_node(iter, l) + ? iter->l[l].b->c.lock.state.seq + : 0); + btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); + btree_trans_restart(iter->trans); + return false; + } + } + + return true; +} + __flatten -bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace) +bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip) { - return btree_iter_get_locks(iter, false, trace); + bool ret = btree_iter_get_locks(iter, false, trace_ip); + + if (!ret) + btree_trans_restart(iter->trans); + return ret; } bool __bch2_btree_iter_upgrade(struct btree_iter *iter, @@ -405,22 +436,39 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter, iter->locks_want = new_locks_want; - if (btree_iter_get_locks(iter, true, true)) + if (btree_iter_get_locks(iter, true, _THIS_IP_)) return true; /* - * Ancestor nodes must be locked before child nodes, so set locks_want - * on iterators that might lock ancestors before us to avoid getting - * -EINTR later: + * XXX: this is ugly - we'd prefer to not be mucking with other + * iterators in the btree_trans here. + * + * On failure to upgrade the iterator, setting iter->locks_want and + * calling get_locks() is sufficient to make bch2_btree_iter_traverse() + * get the locks we want on transaction restart. + * + * But if this iterator was a clone, on transaction restart what we did + * to this iterator isn't going to be preserved. + * + * Possibly we could add an iterator field for the parent iterator when + * an iterator is a copy - for now, we'll just upgrade any other + * iterators with the same btree id. + * + * The code below used to be needed to ensure ancestor nodes get locked + * before interior nodes - now that's handled by + * bch2_btree_iter_traverse_all(). 
*/ trans_for_each_iter(iter->trans, linked) if (linked != iter && + btree_iter_type(linked) == btree_iter_type(iter) && linked->btree_id == iter->btree_id && linked->locks_want < new_locks_want) { linked->locks_want = new_locks_want; - btree_iter_get_locks(linked, true, false); + btree_iter_get_locks(linked, true, _THIS_IP_); } + if (iter->should_be_locked) + btree_trans_restart(iter->trans); return false; } @@ -459,14 +507,27 @@ void bch2_trans_downgrade(struct btree_trans *trans) /* Btree transaction locking: */ +static inline bool btree_iter_should_be_locked(struct btree_iter *iter) +{ + return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) || + iter->should_be_locked; +} + bool bch2_trans_relock(struct btree_trans *trans) { struct btree_iter *iter; + if (unlikely(trans->restarted)) + return false; + trans_for_each_iter(trans, iter) - if (btree_iter_keep(trans, iter) && - !bch2_btree_iter_relock(iter, true)) + if (btree_iter_should_be_locked(iter) && + !bch2_btree_iter_relock(iter, _RET_IP_)) { + trace_trans_restart_relock(trans->ip, _RET_IP_, + iter->btree_id, &iter->real_pos); + BUG_ON(!trans->restarted); return false; + } return true; } @@ -476,6 +537,8 @@ void bch2_trans_unlock(struct btree_trans *trans) trans_for_each_iter(trans, iter) __bch2_btree_iter_unlock(iter); + + BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key)); } /* Btree iterator: */ @@ -590,6 +653,8 @@ err: static void bch2_btree_iter_verify(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + struct bch_fs *c = trans->c; enum btree_iter_type type = btree_iter_type(iter); unsigned i; @@ -608,10 +673,16 @@ static void bch2_btree_iter_verify(struct btree_iter *iter) (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && !btree_type_has_snapshots(iter->btree_id)); - bch2_btree_iter_verify_locks(iter); + for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) { + if (!iter->l[i].b) { + BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i); + break; + } - for (i = 0; i < BTREE_MAX_DEPTH; i++) bch2_btree_iter_verify_level(iter, i); + } + + bch2_btree_iter_verify_locks(iter); } static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) @@ -832,7 +903,14 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter, ret = bkey_disassemble(l->b, k, u); - if (bch2_debug_check_bkeys) + /* + * XXX: bch2_btree_bset_insert_key() generates invalid keys when we + * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key + * being overwritten but doesn't change k->size. But this is ok, because + * those keys are never written out, we just have to avoid a spurious + * assertion here: + */ + if (bch2_debug_check_bkeys && !bkey_deleted(ret.k)) bch2_bkey_debugcheck(iter->trans->c, l->b, ret); return ret; @@ -840,10 +918,9 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter, /* peek_all() doesn't skip deleted keys */ static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter, - struct btree_iter_level *l, - struct bkey *u) + struct btree_iter_level *l) { - return __btree_iter_unpack(iter, l, u, + return __btree_iter_unpack(iter, l, &iter->k, bch2_btree_node_iter_peek_all(&l->iter, l->b)); } @@ -1029,11 +1106,12 @@ static int lock_root_check_fn(struct six_lock *lock, void *p) return b == *rootp ? 
0 : -1; } -static inline int btree_iter_lock_root(struct btree_iter *iter, +static inline int btree_iter_lock_root(struct btree_trans *trans, + struct btree_iter *iter, unsigned depth_want, unsigned long trace_ip) { - struct bch_fs *c = iter->trans->c; + struct bch_fs *c = trans->c; struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b; enum six_lock_type lock_type; unsigned i; @@ -1058,11 +1136,14 @@ static inline int btree_iter_lock_root(struct btree_iter *iter, } lock_type = __btree_lock_want(iter, iter->level); - if (unlikely(!btree_node_lock(b, POS_MAX, iter->level, + if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level, iter, lock_type, lock_root_check_fn, rootp, - trace_ip))) - return -EINTR; + trace_ip))) { + if (trans->restarted) + return -EINTR; + continue; + } if (likely(b == READ_ONCE(*rootp) && b->c.level == iter->level && @@ -1083,7 +1164,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter, } noinline -static void btree_iter_prefetch(struct btree_iter *iter) +static int btree_iter_prefetch(struct btree_iter *iter) { struct bch_fs *c = iter->trans->c; struct btree_iter_level *l = &iter->l[iter->level]; @@ -1094,10 +1175,11 @@ static void btree_iter_prefetch(struct btree_iter *iter) ? (iter->level > 1 ? 0 : 2) : (iter->level > 1 ? 1 : 16); bool was_locked = btree_node_locked(iter, iter->level); + int ret = 0; bch2_bkey_buf_init(&tmp); - while (nr) { + while (nr && !ret) { if (!bch2_btree_node_relock(iter, iter->level)) break; @@ -1107,14 +1189,15 @@ static void btree_iter_prefetch(struct btree_iter *iter) break; bch2_bkey_buf_unpack(&tmp, c, l->b, k); - bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id, - iter->level - 1); + ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id, + iter->level - 1); } if (!was_locked) btree_node_unlock(iter, iter->level); bch2_bkey_buf_exit(&tmp, c); + return ret; } static noinline void btree_node_mem_ptr_set(struct btree_iter *iter, @@ -1138,10 +1221,11 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter, btree_node_unlock(iter, plevel); } -static __always_inline int btree_iter_down(struct btree_iter *iter, +static __always_inline int btree_iter_down(struct btree_trans *trans, + struct btree_iter *iter, unsigned long trace_ip) { - struct bch_fs *c = iter->trans->c; + struct bch_fs *c = trans->c; struct btree_iter_level *l = &iter->l[iter->level]; struct btree *b; unsigned level = iter->level - 1; @@ -1155,7 +1239,7 @@ static __always_inline int btree_iter_down(struct btree_iter *iter, bch2_bkey_buf_unpack(&tmp, c, l->b, bch2_btree_node_iter_peek(&l->iter, l->b)); - b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip); + b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip); ret = PTR_ERR_OR_ZERO(b); if (unlikely(ret)) goto err; @@ -1168,9 +1252,13 @@ static __always_inline int btree_iter_down(struct btree_iter *iter, btree_node_mem_ptr_set(iter, level + 1, b); if (iter->flags & BTREE_ITER_PREFETCH) - btree_iter_prefetch(iter); + ret = btree_iter_prefetch(iter); + if (btree_node_read_locked(iter, level + 1)) + btree_node_unlock(iter, level + 1); iter->level = level; + + bch2_btree_iter_verify_locks(iter); err: bch2_bkey_buf_exit(&tmp, c); return ret; @@ -1178,28 +1266,36 @@ err: static int btree_iter_traverse_one(struct btree_iter *, unsigned long); -static int __btree_iter_traverse_all(struct btree_trans *trans, int ret) +static int __btree_iter_traverse_all(struct btree_trans *trans, int ret, + unsigned long trace_ip) { struct bch_fs *c = trans->c; 
struct btree_iter *iter; - u8 sorted[BTREE_ITER_MAX]; - unsigned i, nr_sorted = 0; + int i; if (trans->in_traverse_all) return -EINTR; trans->in_traverse_all = true; retry_all: - nr_sorted = 0; + trans->restarted = false; trans_for_each_iter(trans, iter) - sorted[nr_sorted++] = iter->idx; + iter->should_be_locked = false; -#define btree_iter_cmp_by_idx(_l, _r) \ - btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r]) + btree_trans_sort_iters(trans); + + for (i = trans->nr_sorted - 2; i >= 0; --i) { + struct btree_iter *iter1 = trans->iters + trans->sorted[i]; + struct btree_iter *iter2 = trans->iters + trans->sorted[i + 1]; + + if (iter1->btree_id == iter2->btree_id && + iter1->locks_want < iter2->locks_want) + __bch2_btree_iter_upgrade(iter1, iter2->locks_want); + else if (!iter1->locks_want && iter2->locks_want) + __bch2_btree_iter_upgrade(iter1, 1); + } - bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx); -#undef btree_iter_cmp_by_idx bch2_trans_unlock(trans); cond_resched(); @@ -1222,39 +1318,30 @@ retry_all: BUG_ON(ret && ret != -EINTR); /* Now, redo traversals in correct order: */ - for (i = 0; i < nr_sorted; i++) { - unsigned idx = sorted[i]; - - /* - * sucessfully traversing one iterator can cause another to be - * unlinked, in btree_key_cache_fill() - */ - if (!(trans->iters_linked & (1ULL << idx))) - continue; + trans_for_each_iter_inorder(trans, iter) { + EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx))); - ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_); + ret = btree_iter_traverse_one(iter, _THIS_IP_); if (ret) goto retry_all; + + EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx))); } - if (hweight64(trans->iters_live) > 1) - ret = -EINTR; - else - trans_for_each_iter(trans, iter) - if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) { - ret = -EINTR; - break; - } + trans_for_each_iter(trans, iter) + BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK); out: bch2_btree_cache_cannibalize_unlock(c); trans->in_traverse_all = false; + + trace_trans_traverse_all(trans->ip, trace_ip); return ret; } -int bch2_btree_iter_traverse_all(struct btree_trans *trans) +static int bch2_btree_iter_traverse_all(struct btree_trans *trans) { - return __btree_iter_traverse_all(trans, 0); + return __btree_iter_traverse_all(trans, 0, _RET_IP_); } static inline bool btree_iter_good_node(struct btree_iter *iter, @@ -1298,27 +1385,40 @@ static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter, static int btree_iter_traverse_one(struct btree_iter *iter, unsigned long trace_ip) { - unsigned depth_want = iter->level; + struct btree_trans *trans = iter->trans; + unsigned l, depth_want = iter->level; + int ret = 0; /* - * if we need interior nodes locked, call btree_iter_relock() to make - * sure we walk back up enough that we lock them: + * Ensure we obey iter->should_be_locked: if it's set, we can't unlock + * and re-traverse the iterator without a transaction restart: */ - if (iter->uptodate == BTREE_ITER_NEED_RELOCK || - iter->locks_want > 1) - bch2_btree_iter_relock(iter, false); - - if (btree_iter_type(iter) == BTREE_ITER_CACHED) - return bch2_btree_iter_traverse_cached(iter); + if (iter->should_be_locked) { + ret = bch2_btree_iter_relock(iter, trace_ip) ? 
0 : -EINTR; + goto out; + } - if (iter->uptodate < BTREE_ITER_NEED_RELOCK) - return 0; + if (btree_iter_type(iter) == BTREE_ITER_CACHED) { + ret = bch2_btree_iter_traverse_cached(iter); + goto out; + } if (unlikely(iter->level >= BTREE_MAX_DEPTH)) - return 0; + goto out; iter->level = btree_iter_up_until_good_node(iter, 0); + /* If we need intent locks, take them too: */ + for (l = iter->level + 1; + l < iter->locks_want && btree_iter_node(iter, l); + l++) + if (!bch2_btree_node_relock(iter, l)) + while (iter->level <= l) { + btree_node_unlock(iter, iter->level); + iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP; + iter->level++; + } + /* * Note: iter->nodes[iter->level] may be temporarily NULL here - that * would indicate to other code that we got to the end of the btree, @@ -1326,13 +1426,20 @@ static int btree_iter_traverse_one(struct btree_iter *iter, * btree_iter_lock_root() comes next and that it can't fail */ while (iter->level > depth_want) { - int ret = btree_iter_node(iter, iter->level) - ? btree_iter_down(iter, trace_ip) - : btree_iter_lock_root(iter, depth_want, trace_ip); + ret = btree_iter_node(iter, iter->level) + ? btree_iter_down(trans, iter, trace_ip) + : btree_iter_lock_root(trans, iter, depth_want, trace_ip); if (unlikely(ret)) { - if (ret == 1) - return 0; + if (ret == 1) { + /* + * Got to the end of the btree (in + * BTREE_ITER_NODES mode) + */ + ret = 0; + goto out; + } + __bch2_btree_iter_unlock(iter); iter->level = depth_want; if (ret == -EIO) { @@ -1343,14 +1450,18 @@ static int btree_iter_traverse_one(struct btree_iter *iter, iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN; } - return ret; + goto out; } } iter->uptodate = BTREE_ITER_NEED_PEEK; - +out: + BUG_ON((ret == -EINTR) != !!trans->restarted); + trace_iter_traverse(trans->ip, trace_ip, + btree_iter_type(iter) == BTREE_ITER_CACHED, + iter->btree_id, &iter->real_pos, ret); bch2_btree_iter_verify(iter); - return 0; + return ret; } static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) @@ -1360,8 +1471,10 @@ static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) ret = bch2_trans_cond_resched(trans) ?: btree_iter_traverse_one(iter, _RET_IP_); - if (unlikely(ret)) - ret = __btree_iter_traverse_all(trans, ret); + if (unlikely(ret) && hweight64(trans->iters_linked) == 1) { + ret = __btree_iter_traverse_all(trans, ret, _RET_IP_); + BUG_ON(ret == -EINTR); + } return ret; } @@ -1385,9 +1498,16 @@ btree_iter_traverse(struct btree_iter *iter) int __must_check bch2_btree_iter_traverse(struct btree_iter *iter) { + int ret; + btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); - return btree_iter_traverse(iter); + ret = btree_iter_traverse(iter); + if (ret) + return ret; + + iter->should_be_locked = true; + return 0; } /* Iterate across nodes (leaf and interior nodes) */ @@ -1413,6 +1533,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) iter->pos = iter->real_pos = b->key.k.p; bch2_btree_iter_verify(iter); + iter->should_be_locked = true; return b; } @@ -1469,6 +1590,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) iter->pos = iter->real_pos = b->key.k.p; bch2_btree_iter_verify(iter); + iter->should_be_locked = true; return b; } @@ -1477,17 +1599,25 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos) { +#ifdef CONFIG_BCACHEFS_DEBUG + struct bpos old_pos = iter->real_pos; +#endif int cmp = bpos_cmp(new_pos, iter->real_pos); unsigned l 
= iter->level; + EBUG_ON(iter->trans->restarted); + if (!cmp) goto out; iter->real_pos = new_pos; + iter->should_be_locked = false; + + btree_iter_check_sort(iter->trans, iter); if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) { btree_node_unlock(iter, 0); - iter->l[0].b = BTREE_ITER_NO_NODE_UP; + iter->l[0].b = BTREE_ITER_NO_NODE_CACHED; btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); return; } @@ -1516,12 +1646,17 @@ out: btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); bch2_btree_iter_verify(iter); +#ifdef CONFIG_BCACHEFS_DEBUG + trace_iter_set_search_pos(iter->trans->ip, _RET_IP_, + iter->btree_id, + &old_pos, &new_pos, l); +#endif } inline bool bch2_btree_iter_advance(struct btree_iter *iter) { struct bpos pos = iter->k.p; - bool ret = bpos_cmp(pos, POS_MAX) != 0; + bool ret = bpos_cmp(pos, SPOS_MAX) != 0; if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) pos = bkey_successor(iter, pos); @@ -1532,7 +1667,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter) inline bool bch2_btree_iter_rewind(struct btree_iter *iter) { struct bpos pos = bkey_start_pos(&iter->k); - bool ret = bpos_cmp(pos, POS_MIN) != 0; + bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_cmp(pos, POS_MIN) + : bkey_cmp(pos, POS_MIN)) != 0; if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) pos = bkey_predecessor(iter, pos); @@ -1543,7 +1680,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter) static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter) { struct bpos next_pos = iter->l[0].b->key.k.p; - bool ret = bpos_cmp(next_pos, POS_MAX) != 0; + bool ret = bpos_cmp(next_pos, SPOS_MAX) != 0; /* * Typically, we don't want to modify iter->pos here, since that @@ -1553,7 +1690,7 @@ static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter) if (ret) btree_iter_set_search_pos(iter, bpos_successor(next_pos)); else - bch2_btree_iter_set_pos(iter, POS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); return ret; } @@ -1571,15 +1708,18 @@ static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter) return ret; } -static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans, - enum btree_id btree_id, struct bpos pos) +static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter, + struct bpos pos) { struct btree_insert_entry *i; - trans_for_each_update2(trans, i) - if ((cmp_int(btree_id, i->iter->btree_id) ?: - bkey_cmp(pos, i->k->k.p)) <= 0) { - if (btree_id == i->iter->btree_id) + if (!(iter->flags & BTREE_ITER_WITH_UPDATES)) + return NULL; + + trans_for_each_update(iter->trans, i) + if ((cmp_int(iter->btree_id, i->iter->btree_id) ?: + bkey_cmp(pos, i->k->k.p)) <= 0) { + if (iter->btree_id == i->iter->btree_id) return i->k; break; } @@ -1587,19 +1727,22 @@ static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans, return NULL; } -static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates) +/** + * bch2_btree_iter_peek: returns first key greater than or equal to iterator's + * current position + */ +struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) { struct bpos search_key = btree_iter_search_key(iter); - struct bkey_i *next_update = with_updates - ? 
btree_trans_peek_updates(iter->trans, iter->btree_id, search_key) - : NULL; + struct bkey_i *next_update; struct bkey_s_c k; int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); - +start: + next_update = btree_trans_peek_updates(iter, search_key); btree_iter_set_search_pos(iter, search_key); while (1) { @@ -1610,14 +1753,15 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool wi k = btree_iter_level_peek(iter, &iter->l[0]); if (next_update && - bpos_cmp(next_update->k.p, iter->real_pos) <= 0) + bpos_cmp(next_update->k.p, iter->real_pos) <= 0) { + iter->k = next_update->k; k = bkey_i_to_s_c(next_update); + } if (likely(k.k)) { if (bkey_deleted(k.k)) { - btree_iter_set_search_pos(iter, - bkey_successor(iter, k.k->p)); - continue; + search_key = bkey_successor(iter, k.k->p); + goto start; } break; @@ -1631,23 +1775,17 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool wi * iter->pos should be mononotically increasing, and always be equal to * the key we just returned - except extents can straddle iter->pos: */ - if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) + if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) + iter->pos = k.k->p; + else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) iter->pos = bkey_start_pos(k.k); bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(iter); + iter->should_be_locked = true; return k; } -/** - * bch2_btree_iter_peek: returns first key greater than or equal to iterator's - * current position - */ -struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) -{ - return __btree_iter_peek(iter, false); -} - /** * bch2_btree_iter_next: returns first key greater than iterator's current * position @@ -1660,19 +1798,6 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) return bch2_btree_iter_peek(iter); } -struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter) -{ - return __btree_iter_peek(iter, true); -} - -struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter) -{ - if (!bch2_btree_iter_advance(iter)) - return bkey_s_c_null; - - return bch2_btree_iter_peek_with_updates(iter); -} - /** * bch2_btree_iter_peek_prev: returns first key less than or equal to * iterator's current position @@ -1684,6 +1809,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); + EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES); bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); @@ -1700,7 +1826,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) if (!k.k || ((iter->flags & BTREE_ITER_IS_EXTENTS) ? 
bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0 - : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)) + : bkey_cmp(k.k->p, iter->pos) > 0)) k = btree_iter_level_prev(iter, l); if (likely(k.k)) @@ -1720,6 +1846,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) out: bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(iter); + iter->should_be_locked = true; return k; no_key: /* @@ -1744,81 +1871,113 @@ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) return bch2_btree_iter_peek_prev(iter); } -static inline struct bkey_s_c -__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) { + struct bpos search_key; struct bkey_s_c k; - struct bpos pos, next_start; + int ret; + + EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS && + btree_iter_type(iter) != BTREE_ITER_CACHED); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); - /* keys & holes can't span inode numbers: */ - if (iter->pos.offset == KEY_OFFSET_MAX) { + /* extents can't span inode numbers: */ + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + unlikely(iter->pos.offset == KEY_OFFSET_MAX)) { if (iter->pos.inode == KEY_INODE_MAX) return bkey_s_c_null; - bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos)); + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); } - pos = iter->pos; - k = bch2_btree_iter_peek(iter); - iter->pos = pos; - - if (bkey_err(k)) - return k; - - if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) - return k; - - next_start = k.k ? bkey_start_pos(k.k) : POS_MAX; - - bkey_init(&iter->k); - iter->k.p = iter->pos; - bch2_key_resize(&iter->k, - min_t(u64, KEY_SIZE_MAX, - (next_start.inode == iter->pos.inode - ? next_start.offset - : KEY_OFFSET_MAX) - - iter->pos.offset)); - - EBUG_ON(!iter->k.size); + search_key = btree_iter_search_key(iter); + btree_iter_set_search_pos(iter, search_key); - bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(iter); + ret = btree_iter_traverse(iter); + if (unlikely(ret)) + return bkey_s_c_err(ret); - return (struct bkey_s_c) { &iter->k, NULL }; -} + if (btree_iter_type(iter) == BTREE_ITER_CACHED || + !(iter->flags & BTREE_ITER_IS_EXTENTS)) { + struct bkey_i *next_update; + struct bkey_cached *ck; -struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) -{ - struct btree_iter_level *l = &iter->l[0]; - struct bkey_s_c k; - int ret; + switch (btree_iter_type(iter)) { + case BTREE_ITER_KEYS: + k = btree_iter_level_peek_all(iter, &iter->l[0]); + EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0); + break; + case BTREE_ITER_CACHED: + ck = (void *) iter->l[0].b; + EBUG_ON(iter->btree_id != ck->key.btree_id || + bkey_cmp(iter->pos, ck->key.pos)); + BUG_ON(!ck->valid); - EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); - bch2_btree_iter_verify(iter); - bch2_btree_iter_verify_entry_exit(iter); + k = bkey_i_to_s_c(ck->k); + break; + case BTREE_ITER_NODES: + BUG(); + } - btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); + next_update = btree_trans_peek_updates(iter, search_key); + if (next_update && + (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) { + iter->k = next_update->k; + k = bkey_i_to_s_c(next_update); + } + } else { + if ((iter->flags & BTREE_ITER_INTENT)) { + struct btree_iter *child = + btree_iter_child_alloc(iter, _THIS_IP_); - if (iter->flags & BTREE_ITER_IS_EXTENTS) - return __bch2_btree_iter_peek_slot_extents(iter); + btree_iter_copy(child, iter); + k = 
bch2_btree_iter_peek(child); - ret = btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + if (k.k && !bkey_err(k)) + iter->k = child->k; + } else { + struct bpos pos = iter->pos; - k = btree_iter_level_peek_all(iter, l, &iter->k); + k = bch2_btree_iter_peek(iter); + iter->pos = pos; + } - EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0); + if (unlikely(bkey_err(k))) + return k; + } - if (!k.k || bkey_cmp(iter->pos, k.k->p)) { - /* hole */ - bkey_init(&iter->k); - iter->k.p = iter->pos; - k = (struct bkey_s_c) { &iter->k, NULL }; + if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) { + if (!k.k || + ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS) + ? bpos_cmp(iter->pos, k.k->p) + : bkey_cmp(iter->pos, k.k->p))) { + bkey_init(&iter->k); + iter->k.p = iter->pos; + k = (struct bkey_s_c) { &iter->k, NULL }; + } + } else { + struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX; + + if (bkey_cmp(iter->pos, next) < 0) { + bkey_init(&iter->k); + iter->k.p = iter->pos; + bch2_key_resize(&iter->k, + min_t(u64, KEY_SIZE_MAX, + (next.inode == iter->pos.inode + ? next.offset + : KEY_OFFSET_MAX) - + iter->pos.offset)); + + k = (struct bkey_s_c) { &iter->k, NULL }; + EBUG_ON(!k.k->size); + } } bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(iter); + iter->should_be_locked = true; + return k; } @@ -1838,27 +1997,6 @@ struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) return bch2_btree_iter_peek_slot(iter); } -struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter) -{ - struct bkey_cached *ck; - int ret; - - EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED); - bch2_btree_iter_verify(iter); - - ret = btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); - - ck = (void *) iter->l[0].b; - - EBUG_ON(iter->btree_id != ck->key.btree_id || - bkey_cmp(iter->pos, ck->key.pos)); - BUG_ON(!ck->valid); - - return bkey_i_to_s_c(ck->k); -} - static inline void bch2_btree_iter_init(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree_id) { @@ -1868,6 +2006,7 @@ static inline void bch2_btree_iter_init(struct btree_trans *trans, iter->trans = trans; iter->uptodate = BTREE_ITER_NEED_TRAVERSE; iter->btree_id = btree_id; + iter->real_pos = POS_MIN; iter->level = 0; iter->min_depth = 0; iter->locks_want = 0; @@ -1881,9 +2020,186 @@ static inline void bch2_btree_iter_init(struct btree_trans *trans, /* new transactional stuff: */ +static inline void btree_iter_verify_sorted_ref(struct btree_trans *trans, + struct btree_iter *iter) +{ + EBUG_ON(iter->sorted_idx >= trans->nr_sorted); + EBUG_ON(trans->sorted[iter->sorted_idx] != iter->idx); + EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx))); +} + +static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + unsigned i; + + for (i = 0; i < trans->nr_sorted; i++) + btree_iter_verify_sorted_ref(trans, trans->iters + trans->sorted[i]); +#endif +} + +static inline void btree_trans_verify_sorted(struct btree_trans *trans) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + struct btree_iter *iter, *prev = NULL; + + trans_for_each_iter_inorder(trans, iter) + BUG_ON(prev && btree_iter_cmp(prev, iter) > 0); +#endif +} + +static inline void btree_iter_swap(struct btree_trans *trans, + struct btree_iter *l, struct btree_iter *r) +{ + swap(l->sorted_idx, r->sorted_idx); + swap(trans->sorted[l->sorted_idx], + trans->sorted[r->sorted_idx]); + + btree_iter_verify_sorted_ref(trans, l); + btree_iter_verify_sorted_ref(trans, r); +} + +static 
void btree_trans_sort_iters(struct btree_trans *trans) +{ + bool swapped = false; + int i, l = 0, r = trans->nr_sorted; + + while (1) { + for (i = l; i + 1 < r; i++) { + if (btree_iter_cmp(trans->iters + trans->sorted[i], + trans->iters + trans->sorted[i + 1]) > 0) { + swap(trans->sorted[i], trans->sorted[i + 1]); + trans->iters[trans->sorted[i]].sorted_idx = i; + trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1; + swapped = true; + } + } + + if (!swapped) + break; + + r--; + swapped = false; + + for (i = r - 2; i >= l; --i) { + if (btree_iter_cmp(trans->iters + trans->sorted[i], + trans->iters + trans->sorted[i + 1]) > 0) { + swap(trans->sorted[i], + trans->sorted[i + 1]); + trans->iters[trans->sorted[i]].sorted_idx = i; + trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1; + swapped = true; + } + } + + if (!swapped) + break; + + l++; + swapped = false; + } + + btree_trans_verify_sorted_refs(trans); + btree_trans_verify_sorted(trans); +} + +static void btree_iter_check_sort(struct btree_trans *trans, struct btree_iter *iter) +{ + struct btree_iter *n; + + EBUG_ON(iter->sorted_idx == U8_MAX); + + n = next_btree_iter(trans, iter); + if (n && btree_iter_cmp(iter, n) > 0) { + do { + btree_iter_swap(trans, iter, n); + n = next_btree_iter(trans, iter); + } while (n && btree_iter_cmp(iter, n) > 0); + + return; + } + + n = prev_btree_iter(trans, iter); + if (n && btree_iter_cmp(n, iter) > 0) { + do { + btree_iter_swap(trans, n, iter); + n = prev_btree_iter(trans, iter); + } while (n && btree_iter_cmp(n, iter) > 0); + } + + btree_trans_verify_sorted(trans); +} + +static inline void btree_iter_list_remove(struct btree_trans *trans, + struct btree_iter *iter) +{ + unsigned i; + + EBUG_ON(iter->sorted_idx >= trans->nr_sorted); + + array_remove_item(trans->sorted, trans->nr_sorted, iter->sorted_idx); + + for (i = iter->sorted_idx; i < trans->nr_sorted; i++) + trans->iters[trans->sorted[i]].sorted_idx = i; + + iter->sorted_idx = U8_MAX; + + btree_trans_verify_sorted_refs(trans); +} + +static inline void btree_iter_list_add(struct btree_trans *trans, + struct btree_iter *pos, + struct btree_iter *iter) +{ + unsigned i; + + btree_trans_verify_sorted_refs(trans); + + iter->sorted_idx = pos ? 
pos->sorted_idx : trans->nr_sorted; + + array_insert_item(trans->sorted, trans->nr_sorted, iter->sorted_idx, iter->idx); + + for (i = iter->sorted_idx; i < trans->nr_sorted; i++) + trans->iters[trans->sorted[i]].sorted_idx = i; + + btree_trans_verify_sorted_refs(trans); +} + +static void btree_iter_child_free(struct btree_iter *iter) +{ + struct btree_iter *child = btree_iter_child(iter); + + if (child) { + bch2_trans_iter_free(iter->trans, child); + iter->child_idx = U8_MAX; + } +} + +static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter, + unsigned long ip) +{ + struct btree_trans *trans = iter->trans; + struct btree_iter *child = btree_iter_child(iter); + + if (!child) { + child = btree_trans_iter_alloc(trans, iter); + child->ip_allocated = ip; + iter->child_idx = child->idx; + + trans->iters_live |= 1ULL << child->idx; + trans->iters_touched |= 1ULL << child->idx; + } + + return child; +} + static inline void __bch2_trans_iter_free(struct btree_trans *trans, unsigned idx) { + btree_iter_child_free(&trans->iters[idx]); + + btree_iter_list_remove(trans, &trans->iters[idx]); + __bch2_btree_iter_unlock(&trans->iters[idx]); trans->iters_linked &= ~(1ULL << idx); trans->iters_live &= ~(1ULL << idx); @@ -1930,10 +2246,12 @@ static void btree_trans_iter_alloc_fail(struct btree_trans *trans) struct btree_insert_entry *i; char buf[100]; - trans_for_each_iter(trans, iter) + btree_trans_sort_iters(trans); + + trans_for_each_iter_inorder(trans, iter) printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n", bch2_btree_ids[iter->btree_id], - (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf), + (bch2_bpos_to_text(&PBUF(buf), iter->real_pos), buf), btree_iter_live(trans, iter) ? " live" : "", (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "", iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? 
" keep" : "", @@ -1949,8 +2267,10 @@ static void btree_trans_iter_alloc_fail(struct btree_trans *trans) panic("trans iter oveflow\n"); } -static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans) +static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans, + struct btree_iter *pos) { + struct btree_iter *iter; unsigned idx; if (unlikely(trans->iters_linked == @@ -1958,21 +2278,30 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans) btree_trans_iter_alloc_fail(trans); idx = __ffs64(~trans->iters_linked); - + iter = &trans->iters[idx]; + + iter->trans = trans; + iter->idx = idx; + iter->child_idx = U8_MAX; + iter->sorted_idx = U8_MAX; + iter->flags = 0; + iter->nodes_locked = 0; + iter->nodes_intent_locked = 0; trans->iters_linked |= 1ULL << idx; - trans->iters[idx].idx = idx; - trans->iters[idx].flags = 0; - return &trans->iters[idx]; + + btree_iter_list_add(trans, pos, iter); + return iter; } -static inline void btree_iter_copy(struct btree_iter *dst, - struct btree_iter *src) +static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src) { - unsigned i, idx = dst->idx; + unsigned i; - *dst = *src; - dst->idx = idx; - dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT; + __bch2_btree_iter_unlock(dst); + btree_iter_child_free(dst); + + memcpy(&dst->flags, &src->flags, + sizeof(struct btree_iter) - offsetof(struct btree_iter, flags)); for (i = 0; i < BTREE_MAX_DEPTH; i++) if (btree_node_locked(dst, i)) @@ -1981,6 +2310,8 @@ static inline void btree_iter_copy(struct btree_iter *dst, dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT; dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT; + + btree_iter_check_sort(dst->trans, dst); } struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, @@ -1990,6 +2321,15 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, unsigned flags) { struct btree_iter *iter, *best = NULL; + struct bpos real_pos, pos_min = POS_MIN; + + EBUG_ON(trans->restarted); + + if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES && + btree_node_type_is_extents(btree_id) && + !(flags & BTREE_ITER_NOT_EXTENTS) && + !(flags & BTREE_ITER_ALL_SNAPSHOTS)) + flags |= BTREE_ITER_IS_EXTENTS; if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES && !btree_type_has_snapshots(btree_id)) @@ -1999,6 +2339,12 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, pos.snapshot = btree_type_has_snapshots(btree_id) ? 
U32_MAX : 0; + real_pos = pos; + + if ((flags & BTREE_ITER_IS_EXTENTS) && + bkey_cmp(pos, POS_MAX)) + real_pos = bpos_nosnap_successor(pos); + trans_for_each_iter(trans, iter) { if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE)) continue; @@ -2006,19 +2352,23 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, if (iter->btree_id != btree_id) continue; - if (best && - bkey_cmp(bpos_diff(best->real_pos, pos), - bpos_diff(iter->real_pos, pos)) > 0) - continue; + if (best) { + int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos), + bpos_diff(iter->real_pos, real_pos)); + + if (cmp < 0 || + ((cmp == 0 && btree_iter_keep(trans, iter)))) + continue; + } best = iter; } if (!best) { - iter = btree_trans_iter_alloc(trans); + iter = btree_trans_iter_alloc(trans, NULL); bch2_btree_iter_init(trans, iter, btree_id); } else if (btree_iter_keep(trans, best)) { - iter = btree_trans_iter_alloc(trans); + iter = btree_trans_iter_alloc(trans, best); btree_iter_copy(iter, best); } else { iter = best; @@ -2027,38 +2377,45 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, trans->iters_live |= 1ULL << iter->idx; trans->iters_touched |= 1ULL << iter->idx; - if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES && - btree_node_type_is_extents(btree_id) && - !(flags & BTREE_ITER_NOT_EXTENTS) && - !(flags & BTREE_ITER_ALL_SNAPSHOTS)) - flags |= BTREE_ITER_IS_EXTENTS; - iter->flags = flags; iter->snapshot = pos.snapshot; - locks_want = min(locks_want, BTREE_MAX_DEPTH); + /* + * If the iterator has locks_want greater than requested, we explicitly + * do not downgrade it here - on transaction restart because btree node + * split needs to upgrade locks, we might be putting/getting the + * iterator again. Downgrading iterators only happens via an explicit + * bch2_trans_downgrade(). + */ + locks_want = min(locks_want, BTREE_MAX_DEPTH); if (locks_want > iter->locks_want) { iter->locks_want = locks_want; - btree_iter_get_locks(iter, true, false); - } else if (locks_want < iter->locks_want) { - __bch2_btree_iter_downgrade(iter, locks_want); + btree_iter_get_locks(iter, true, _THIS_IP_); } - while (iter->level < depth) { + while (iter->level != depth) { btree_node_unlock(iter, iter->level); iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT; - iter->level++; + iter->uptodate = BTREE_ITER_NEED_TRAVERSE; + if (iter->level < depth) + iter->level++; + else + iter->level--; } - while (iter->level > depth) - iter->l[--iter->level].b = BTREE_ITER_NO_NODE_INIT; - iter->min_depth = depth; bch2_btree_iter_set_pos(iter, pos); - btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); + btree_iter_set_search_pos(iter, real_pos); + + trace_trans_get_iter(_RET_IP_, trans->ip, + btree_id, + &real_pos, locks_want, iter->uptodate, + best ? &best->real_pos : &pos_min, + best ? best->locks_want : U8_MAX, + best ? 
best->uptodate : U8_MAX); return iter; } @@ -2092,7 +2449,7 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans, { struct btree_iter *iter; - iter = btree_trans_iter_alloc(trans); + iter = btree_trans_iter_alloc(trans, src); btree_iter_copy(iter, src); trans->iters_live |= 1ULL << iter->idx; @@ -2105,39 +2462,41 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans, return iter; } -static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size) +void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size) { - if (size > trans->mem_bytes) { + size_t new_top = trans->mem_top + size; + void *p; + + if (new_top > trans->mem_bytes) { size_t old_bytes = trans->mem_bytes; - size_t new_bytes = roundup_pow_of_two(size); - void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS); + size_t new_bytes = roundup_pow_of_two(new_top); + void *new_mem; + + WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); + + new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS); + if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) { + new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL); + new_bytes = BTREE_TRANS_MEM_MAX; + kfree(trans->mem); + } if (!new_mem) - return -ENOMEM; + return ERR_PTR(-ENOMEM); trans->mem = new_mem; trans->mem_bytes = new_bytes; if (old_bytes) { - trace_trans_restart_mem_realloced(trans->ip, new_bytes); - return -EINTR; + trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes); + btree_trans_restart(trans); + return ERR_PTR(-EINTR); } } - return 0; -} - -void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size) -{ - void *p; - int ret; - - ret = bch2_trans_preload_mem(trans, trans->mem_top + size); - if (ret) - return ERR_PTR(ret); - p = trans->mem + trans->mem_top; trans->mem_top += size; + memset(p, 0, size); return p; } @@ -2155,7 +2514,15 @@ inline void bch2_trans_unlink_iters(struct btree_trans *trans) } } -void bch2_trans_reset(struct btree_trans *trans, unsigned flags) +/** + * bch2_trans_begin() - reset a transaction after an interrupted attempt + * @trans: transaction to reset + * + * While iterating over nodes or updating nodes an attempt to lock a btree + * node may return EINTR when the trylock fails. When this occurs + * bch2_trans_begin() should be called and the transaction retried. 
+ */ +void bch2_trans_begin(struct btree_trans *trans) { struct btree_iter *iter; @@ -2163,12 +2530,15 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags) iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT| BTREE_ITER_SET_POS_AFTER_COMMIT); + /* + * XXX: we shouldn't be doing this if the transaction was restarted, but + * currently we still overflow transaction iterators if we do that + * */ bch2_trans_unlink_iters(trans); - trans->iters_touched &= trans->iters_live; + trans->extra_journal_res = 0; trans->nr_updates = 0; - trans->nr_updates2 = 0; trans->mem_top = 0; trans->hooks = NULL; @@ -2182,17 +2552,19 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags) (void *) &trans->fs_usage_deltas->memset_start); } - if (!(flags & TRANS_RESET_NOUNLOCK)) - bch2_trans_cond_resched(trans); + bch2_trans_cond_resched(trans); - if (!(flags & TRANS_RESET_NOTRAVERSE)) + if (trans->restarted) bch2_btree_iter_traverse_all(trans); + + trans->restarted = false; } static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c) { size_t iters_bytes = sizeof(struct btree_iter) * BTREE_ITER_MAX; size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX; + size_t sorted_bytes = sizeof(u8) * BTREE_ITER_MAX; void *p = NULL; BUG_ON(trans->used_mempool); @@ -2205,12 +2577,13 @@ static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c) trans->iters = p; p += iters_bytes; trans->updates = p; p += updates_bytes; - trans->updates2 = p; p += updates_bytes; + trans->sorted = p; p += sorted_bytes; } void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned expected_nr_iters, size_t expected_mem_bytes) + __acquires(&c->btree_trans_barrier) { memset(trans, 0, sizeof(*trans)); trans->c = c; @@ -2225,6 +2598,11 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, if (expected_mem_bytes) { trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes); trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL); + + if (!unlikely(trans->mem)) { + trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL); + trans->mem_bytes = BTREE_TRANS_MEM_MAX; + } } trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); @@ -2238,12 +2616,20 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, } int bch2_trans_exit(struct btree_trans *trans) + __releases(&c->btree_trans_barrier) { struct bch_fs *c = trans->c; bch2_trans_unlock(trans); #ifdef CONFIG_BCACHEFS_DEBUG + if (trans->iters_live) { + struct btree_iter *iter; + + trans_for_each_iter(trans, iter) + btree_iter_child_free(iter); + } + if (trans->iters_live) { struct btree_iter *iter; @@ -2266,8 +2652,19 @@ int bch2_trans_exit(struct btree_trans *trans) bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres); - kfree(trans->fs_usage_deltas); - kfree(trans->mem); + if (trans->fs_usage_deltas) { + if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) == + REPLICAS_DELTA_LIST_MAX) + mempool_free(trans->fs_usage_deltas, + &trans->c->replicas_delta_pool); + else + kfree(trans->fs_usage_deltas); + } + + if (trans->mem_bytes == BTREE_TRANS_MEM_MAX) + mempool_free(trans->mem, &trans->c->btree_trans_mem_pool); + else + kfree(trans->mem); #ifdef __KERNEL__ /* @@ -2275,6 +2672,7 @@ int bch2_trans_exit(struct btree_trans *trans) */ trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters); #endif + if (trans->iters) mempool_free(trans->iters, &trans->c->btree_iters_pool); @@ -2368,6 +2766,7 @@ void bch2_btree_trans_to_text(struct 
printbuf *out, struct bch_fs *c) void bch2_fs_btree_iter_exit(struct bch_fs *c) { + mempool_exit(&c->btree_trans_mem_pool); mempool_exit(&c->btree_iters_pool); cleanup_srcu_struct(&c->btree_trans_barrier); } @@ -2381,7 +2780,9 @@ int bch2_fs_btree_iter_init(struct bch_fs *c) return init_srcu_struct(&c->btree_trans_barrier) ?: mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, + sizeof(u8) * nr + sizeof(struct btree_iter) * nr + - sizeof(struct btree_insert_entry) * nr + - sizeof(struct btree_insert_entry) * nr); + sizeof(struct btree_insert_entry) * nr) ?: + mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1, + BTREE_TRANS_MEM_MAX); } diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h index 07d9b6d..39124e6 100644 --- a/libbcachefs/btree_iter.h +++ b/libbcachefs/btree_iter.h @@ -71,6 +71,30 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx) (_iter); \ _iter = __trans_next_iter((_trans), (_iter)->idx + 1)) +static inline struct btree_iter *next_btree_iter(struct btree_trans *trans, struct btree_iter *iter) +{ + unsigned idx = iter ? iter->sorted_idx + 1 : 0; + + EBUG_ON(idx > trans->nr_sorted); + + return idx < trans->nr_sorted + ? trans->iters + trans->sorted[idx] + : NULL; +} + +static inline struct btree_iter *prev_btree_iter(struct btree_trans *trans, struct btree_iter *iter) +{ + EBUG_ON(iter->sorted_idx >= trans->nr_sorted); + return iter->sorted_idx + ? trans->iters + trans->sorted[iter->sorted_idx - 1] + : NULL; +} + +#define trans_for_each_iter_inorder(_trans, _iter) \ + for (_iter = next_btree_iter(trans, NULL); \ + (_iter); \ + _iter = next_btree_iter((_trans), (_iter))) + static inline bool __iter_has_node(const struct btree_iter *iter, const struct btree *b) { @@ -111,10 +135,20 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *, struct btree_node_iter *, struct bkey_packed *, unsigned, unsigned); -bool bch2_btree_iter_relock(struct btree_iter *, bool); +bool bch2_btree_iter_relock_intent(struct btree_iter *); +bool bch2_btree_iter_relock(struct btree_iter *, unsigned long); + bool bch2_trans_relock(struct btree_trans *); void bch2_trans_unlock(struct btree_trans *); +__always_inline +static inline int btree_trans_restart(struct btree_trans *trans) +{ + trans->restarted = true; + bch2_trans_unlock(trans); + return -EINTR; +} + bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned); static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter, @@ -131,7 +165,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned); static inline void bch2_btree_iter_downgrade(struct btree_iter *iter) { - unsigned new_locks_want = (iter->flags & BTREE_ITER_INTENT ? 
1 : 0); + unsigned new_locks_want = iter->level + !!(iter->flags & BTREE_ITER_INTENT); if (iter->locks_want > new_locks_want) __bch2_btree_iter_downgrade(iter, new_locks_want); @@ -146,17 +180,12 @@ void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *); int __must_check bch2_btree_iter_traverse(struct btree_iter *); -int bch2_btree_iter_traverse_all(struct btree_trans *); - struct btree *bch2_btree_iter_peek_node(struct btree_iter *); struct btree *bch2_btree_iter_next_node(struct btree_iter *); struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *); struct bkey_s_c bch2_btree_iter_next(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *); - struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *); struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *); @@ -164,8 +193,6 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *); struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *); struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *); - bool bch2_btree_iter_advance(struct btree_iter *); bool bch2_btree_iter_rewind(struct btree_iter *); @@ -179,15 +206,23 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos iter->k.p.offset = iter->pos.offset = new_pos.offset; iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot; iter->k.size = 0; + iter->should_be_locked = false; +} + +static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter) +{ + BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS)); + iter->pos = bkey_start_pos(&iter->k); +} + +static inline struct btree_iter *idx_to_btree_iter(struct btree_trans *trans, unsigned idx) +{ + return idx != U8_MAX ? trans->iters + idx : NULL; } -/* Sort order for locking btree iterators: */ -static inline int btree_iter_lock_cmp(const struct btree_iter *l, - const struct btree_iter *r) +static inline struct btree_iter *btree_iter_child(struct btree_iter *iter) { - return cmp_int(l->btree_id, r->btree_id) ?: - -cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?: - bkey_cmp(l->pos, r->pos); + return idx_to_btree_iter(iter->trans, iter->child_idx); } /* @@ -221,12 +256,9 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans) static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, unsigned flags) { - if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_CACHED) - return bch2_btree_iter_peek_cached(iter); - else - return flags & BTREE_ITER_SLOTS - ? bch2_btree_iter_peek_slot(iter) - : bch2_btree_iter_peek(iter); + return flags & BTREE_ITER_SLOTS + ? 
bch2_btree_iter_peek_slot(iter) + : bch2_btree_iter_peek(iter); } static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter, @@ -310,15 +342,7 @@ static inline void set_btree_iter_dontneed(struct btree_trans *trans, struct btr trans->iters_touched &= ~(1ULL << iter->idx); } -#define TRANS_RESET_NOTRAVERSE (1 << 0) -#define TRANS_RESET_NOUNLOCK (1 << 1) - -void bch2_trans_reset(struct btree_trans *, unsigned); - -static inline void bch2_trans_begin(struct btree_trans *trans) -{ - return bch2_trans_reset(trans, 0); -} +void bch2_trans_begin(struct btree_trans *); void *bch2_trans_kmalloc(struct btree_trans *, size_t); void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t); diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c index 0d3c0a4..e327ef3 100644 --- a/libbcachefs/btree_key_cache.c +++ b/libbcachefs/btree_key_cache.c @@ -214,12 +214,18 @@ static int btree_key_cache_fill(struct btree_trans *trans, if (!bch2_btree_node_relock(ck_iter, 0)) { trace_transaction_restart_ip(trans->ip, _THIS_IP_); - ret = -EINTR; + ret = btree_trans_restart(trans); goto err; } - if (k.k->u64s > ck->u64s) { - new_u64s = roundup_pow_of_two(k.k->u64s); + /* + * bch2_varint_decode can read past the end of the buffer by at + * most 7 bytes (it won't be used): + */ + new_u64s = k.k->u64s + 1; + + if (new_u64s > ck->u64s) { + new_u64s = roundup_pow_of_two(new_u64s); new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS); if (!new_k) { ret = -ENOMEM; @@ -227,6 +233,10 @@ static int btree_key_cache_fill(struct btree_trans *trans, } } + /* + * XXX: not allowed to be holding read locks when we take a write lock, + * currently + */ bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter); if (new_k) { kfree(ck->k); @@ -264,7 +274,9 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter) BUG_ON(iter->level); - if (btree_node_locked(iter, 0)) { + iter->l[1].b = NULL; + + if (bch2_btree_node_relock(iter, 0)) { ck = (void *) iter->l[0].b; goto fill; } @@ -291,10 +303,8 @@ retry: if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want, bkey_cached_check_fn, iter, _THIS_IP_)) { - if (ck->key.btree_id != iter->btree_id || - bpos_cmp(ck->key.pos, iter->pos)) { + if (!trans->restarted) goto retry; - } trace_transaction_restart_ip(trans->ip, _THIS_IP_); ret = -EINTR; @@ -314,10 +324,10 @@ retry: iter->l[0].b = (void *) ck; fill: if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) { - if (!btree_node_intent_locked(iter, 0)) - bch2_btree_iter_upgrade(iter, 1); - if (!btree_node_intent_locked(iter, 0)) { + if (!iter->locks_want && + !!__bch2_btree_iter_upgrade(iter, 1)) { trace_transaction_restart_ip(trans->ip, _THIS_IP_); + BUG_ON(!trans->restarted); ret = -EINTR; goto err; } @@ -332,13 +342,14 @@ fill: iter->uptodate = BTREE_ITER_NEED_PEEK; - if (!(iter->flags & BTREE_ITER_INTENT)) - bch2_btree_iter_downgrade(iter); - else if (!iter->locks_want) { - if (!__bch2_btree_iter_upgrade(iter, 1)) - ret = -EINTR; + if ((iter->flags & BTREE_ITER_INTENT) && + !bch2_btree_iter_upgrade(iter, 1)) { + BUG_ON(!trans->restarted); + ret = -EINTR; } + BUG_ON(!ret && !btree_node_locked(iter, 0)); + return ret; err: if (ret != -EINTR) { @@ -369,10 +380,9 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, BTREE_ITER_CACHED_NOFILL| BTREE_ITER_CACHED_NOCREATE| BTREE_ITER_INTENT); -retry: ret = bch2_btree_iter_traverse(c_iter); if (ret) - goto err; + goto out; ck = (void *) c_iter->l[0].b; if (!ck || @@ -385,25 +395,27 @@ retry: goto evict; } + /* + * Since 
journal reclaim depends on us making progress here, and the + * allocator/copygc depend on journal reclaim making progress, we need + * to be using alloc reserves: + * */ ret = bch2_btree_iter_traverse(b_iter) ?: - bch2_trans_update(trans, b_iter, ck->k, BTREE_TRIGGER_NORUN) ?: + bch2_trans_update(trans, b_iter, ck->k, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE| + BTREE_TRIGGER_NORUN) ?: bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOUNLOCK| BTREE_INSERT_NOCHECK_RW| BTREE_INSERT_NOFAIL| + BTREE_INSERT_USE_RESERVE| (ck->journal.seq == journal_last_seq(j) ? BTREE_INSERT_JOURNAL_RESERVED : 0)| commit_flags); -err: - if (ret == -EINTR) - goto retry; - - if (ret == -EAGAIN) - goto out; - if (ret) { - bch2_fs_fatal_err_on(!bch2_journal_error(j), c, + bch2_fs_fatal_err_on(ret != -EINTR && + ret != -EAGAIN && + !bch2_journal_error(j), c, "error flushing key cache: %i", ret); goto out; } @@ -451,7 +463,6 @@ int bch2_btree_key_cache_journal_flush(struct journal *j, struct bkey_cached *ck = container_of(pin, struct bkey_cached, journal); struct bkey_cached_key key; - struct btree_trans trans; int ret = 0; int srcu_idx = srcu_read_lock(&c->btree_trans_barrier); @@ -466,10 +477,9 @@ int bch2_btree_key_cache_journal_flush(struct journal *j, } six_unlock_read(&ck->c.lock); - bch2_trans_init(&trans, c, 0, 0); - ret = btree_key_cache_flush_pos(&trans, key, seq, - BTREE_INSERT_JOURNAL_RECLAIM, false); - bch2_trans_exit(&trans); + ret = bch2_trans_do(c, NULL, NULL, 0, + btree_key_cache_flush_pos(&trans, key, seq, + BTREE_INSERT_JOURNAL_RECLAIM, false)); unlock: srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); @@ -633,8 +643,10 @@ static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink, struct bch_fs *c = container_of(shrink, struct bch_fs, btree_key_cache.shrink); struct btree_key_cache *bc = &c->btree_key_cache; + long nr = atomic_long_read(&bc->nr_keys) - + atomic_long_read(&bc->nr_dirty); - return atomic_long_read(&bc->nr_keys); + return max(0L, nr); } void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) @@ -670,7 +682,9 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) kmem_cache_free(bch2_key_cache, ck); } - BUG_ON(atomic_long_read(&bc->nr_dirty) && !bch2_journal_error(&c->journal)); + BUG_ON(atomic_long_read(&bc->nr_dirty) && + !bch2_journal_error(&c->journal) && + test_bit(BCH_FS_WAS_RW, &c->flags)); BUG_ON(atomic_long_read(&bc->nr_keys)); mutex_unlock(&bc->lock); @@ -689,20 +703,16 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *c) { int ret; - c->shrink.seeks = 1; - c->shrink.count_objects = bch2_btree_key_cache_count; - c->shrink.scan_objects = bch2_btree_key_cache_scan; - - ret = register_shrinker(&c->shrink); - if (ret) - return ret; - ret = rhashtable_init(&c->table, &bch2_btree_key_cache_params); if (ret) return ret; c->table_init_done = true; - return 0; + + c->shrink.seeks = 1; + c->shrink.count_objects = bch2_btree_key_cache_count; + c->shrink.scan_objects = bch2_btree_key_cache_scan; + return register_shrinker(&c->shrink); } void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c) diff --git a/libbcachefs/btree_types.h b/libbcachefs/btree_types.h index 39e93da..a1e5deb 100644 --- a/libbcachefs/btree_types.h +++ b/libbcachefs/btree_types.h @@ -209,12 +209,13 @@ enum btree_iter_type { * @pos or the first key strictly greater than @pos */ #define BTREE_ITER_IS_EXTENTS (1 << 6) -#define BTREE_ITER_ERROR (1 << 7) -#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8) -#define BTREE_ITER_CACHED_NOFILL (1 << 9) -#define 
BTREE_ITER_CACHED_NOCREATE (1 << 10) -#define BTREE_ITER_NOT_EXTENTS (1 << 11) -#define BTREE_ITER_ALL_SNAPSHOTS (1 << 12) +#define BTREE_ITER_NOT_EXTENTS (1 << 7) +#define BTREE_ITER_ERROR (1 << 8) +#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 9) +#define BTREE_ITER_CACHED_NOFILL (1 << 10) +#define BTREE_ITER_CACHED_NOCREATE (1 << 11) +#define BTREE_ITER_WITH_UPDATES (1 << 12) +#define BTREE_ITER_ALL_SNAPSHOTS (1 << 13) enum btree_iter_uptodate { BTREE_ITER_UPTODATE = 0, @@ -230,6 +231,7 @@ enum btree_iter_uptodate { #define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5) #define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6) #define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7) +#define BTREE_ITER_NO_NODE_CACHED ((struct btree *) 8) /* * @pos - iterator's current position @@ -240,18 +242,30 @@ enum btree_iter_uptodate { */ struct btree_iter { struct btree_trans *trans; - struct bpos pos; - /* what we're searching for/what the iterator actually points to: */ - struct bpos real_pos; - struct bpos pos_after_commit; + unsigned long ip_allocated; + + u8 idx; + u8 child_idx; + u8 sorted_idx; + + /* btree_iter_copy starts here: */ + u16 flags; + /* When we're filtering by snapshot, the snapshot ID we're looking for: */ unsigned snapshot; - u16 flags; - u8 idx; + struct bpos pos; + struct bpos real_pos; + struct bpos pos_after_commit; enum btree_id btree_id:4; - enum btree_iter_uptodate uptodate:4; + enum btree_iter_uptodate uptodate:3; + /* + * True if we've returned a key (and thus are expected to keep it + * locked), false after set_pos - for avoiding spurious transaction + * restarts in bch2_trans_relock(): + */ + bool should_be_locked:1; unsigned level:4, min_depth:4, locks_want:4, @@ -269,7 +283,6 @@ struct btree_iter { * bch2_btree_iter_next_slot() can correctly advance pos. 
*/ struct bkey k; - unsigned long ip_allocated; }; static inline enum btree_iter_type @@ -328,12 +341,11 @@ struct bkey_cached { }; struct btree_insert_entry { - unsigned trigger_flags; + unsigned flags; u8 bkey_type; enum btree_id btree_id:8; u8 level; unsigned trans_triggers_run:1; - unsigned is_extent:1; struct bkey_i *k; struct btree_iter *iter; }; @@ -352,6 +364,8 @@ struct btree_trans_commit_hook { struct btree_trans_commit_hook *next; }; +#define BTREE_TRANS_MEM_MAX (1U << 14) + struct btree_trans { struct bch_fs *c; #ifdef CONFIG_BCACHEFS_DEBUG @@ -366,11 +380,17 @@ struct btree_trans { unsigned long ip; int srcu_idx; + u8 nr_sorted; u8 nr_updates; - u8 nr_updates2; - unsigned used_mempool:1; - unsigned error:1; - unsigned in_traverse_all:1; + bool used_mempool:1; + bool error:1; + bool in_traverse_all:1; + bool restarted:1; + /* + * For when bch2_trans_update notices we'll be splitting a compressed + * extent: + */ + unsigned extra_journal_res; u64 iters_linked; u64 iters_live; @@ -380,9 +400,9 @@ struct btree_trans { unsigned mem_bytes; void *mem; + u8 *sorted; struct btree_iter *iters; struct btree_insert_entry *updates; - struct btree_insert_entry *updates2; /* update path: */ struct btree_trans_commit_hook *hooks; @@ -419,6 +439,7 @@ enum btree_flags { BTREE_NODE_write_idx, BTREE_NODE_accessed, BTREE_NODE_write_in_flight, + BTREE_NODE_write_in_flight_inner, BTREE_NODE_just_written, BTREE_NODE_dying, BTREE_NODE_fake, @@ -433,6 +454,7 @@ BTREE_FLAG(noevict); BTREE_FLAG(write_idx); BTREE_FLAG(accessed); BTREE_FLAG(write_in_flight); +BTREE_FLAG(write_in_flight_inner); BTREE_FLAG(just_written); BTREE_FLAG(dying); BTREE_FLAG(fake); @@ -614,33 +636,43 @@ static inline bool btree_iter_is_extents(struct btree_iter *iter) (1U << BTREE_ID_dirents)| \ (1U << BTREE_ID_xattrs)) +#define BTREE_ID_HAS_PTRS \ + ((1U << BTREE_ID_extents)| \ + (1U << BTREE_ID_reflink)) + static inline bool btree_type_has_snapshots(enum btree_id id) { return (1 << id) & BTREE_ID_HAS_SNAPSHOTS; } -enum btree_trigger_flags { +enum btree_update_flags { + __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE, + __BTREE_TRIGGER_NORUN, /* Don't run triggers at all */ __BTREE_TRIGGER_INSERT, __BTREE_TRIGGER_OVERWRITE, - __BTREE_TRIGGER_OVERWRITE_SPLIT, __BTREE_TRIGGER_GC, __BTREE_TRIGGER_BUCKET_INVALIDATE, __BTREE_TRIGGER_NOATOMIC, }; +#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) + #define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN) #define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT) #define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE) -#define BTREE_TRIGGER_OVERWRITE_SPLIT (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT) #define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC) #define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE) #define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC) +#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \ + ((1U << KEY_TYPE_stripe)| \ + (1U << KEY_TYPE_inode)) + static inline bool btree_node_type_needs_gc(enum btree_node_type type) { return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type); @@ -665,7 +697,6 @@ enum btree_insert_ret { BTREE_INSERT_OK, /* leaf node needs to be split */ BTREE_INSERT_BTREE_NODE_FULL, - BTREE_INSERT_ENOSPC, BTREE_INSERT_NEED_MARK_REPLICAS, BTREE_INSERT_NEED_JOURNAL_RES, BTREE_INSERT_NEED_JOURNAL_RECLAIM, diff --git a/libbcachefs/btree_update.h b/libbcachefs/btree_update.h index 4ce12ae..217b52e 100644 --- a/libbcachefs/btree_update.h +++ b/libbcachefs/btree_update.h @@ -8,14 +8,13 @@ struct bch_fs; 
struct btree; -void bch2_btree_node_lock_for_insert(struct bch_fs *, struct btree *, - struct btree_iter *); +void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *, + struct btree *); bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *, struct btree_node_iter *, struct bkey_i *); void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64); enum btree_insert_flags { - __BTREE_INSERT_NOUNLOCK, __BTREE_INSERT_NOFAIL, __BTREE_INSERT_NOCHECK_RW, __BTREE_INSERT_LAZY_RW, @@ -29,11 +28,6 @@ enum btree_insert_flags { __BCH_HASH_SET_MUST_REPLACE, }; -/* - * Don't drop locks _after_ successfully updating btree: - */ -#define BTREE_INSERT_NOUNLOCK (1 << __BTREE_INSERT_NOUNLOCK) - /* Don't check for -ENOSPC: */ #define BTREE_INSERT_NOFAIL (1 << __BTREE_INSERT_NOFAIL) @@ -70,13 +64,16 @@ int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id, int bch2_btree_delete_range(struct bch_fs *, enum btree_id, struct bpos, struct bpos, u64 *); -int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *, +int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, __le64, unsigned); -int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *, - struct btree *, struct bkey_i *); +void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *); +int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *, + struct btree *, struct bkey_i *, bool); +int bch2_btree_node_update_key_get_iter(struct btree_trans *, + struct btree *, struct bkey_i *, bool); int bch2_trans_update(struct btree_trans *, struct btree_iter *, - struct bkey_i *, enum btree_trigger_flags); + struct bkey_i *, enum btree_update_flags); void bch2_trans_commit_hook(struct btree_trans *, struct btree_trans_commit_hook *); int __bch2_trans_commit(struct btree_trans *); @@ -103,21 +100,22 @@ static inline int bch2_trans_commit(struct btree_trans *trans, return __bch2_trans_commit(trans); } -#define __bch2_trans_do(_trans, _disk_res, _journal_seq, _flags, _do) \ +#define lockrestart_do(_trans, _do) \ ({ \ int _ret; \ \ - while (1) { \ - _ret = (_do) ?: bch2_trans_commit(_trans, (_disk_res), \ - (_journal_seq), (_flags)); \ - if (_ret != -EINTR) \ - break; \ - bch2_trans_reset(_trans, 0); \ - } \ + do { \ + bch2_trans_begin(_trans); \ + _ret = (_do); \ + } while (_ret == -EINTR); \ \ _ret; \ }) +#define __bch2_trans_do(_trans, _disk_res, _journal_seq, _flags, _do) \ + lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\ + (_journal_seq), (_flags))) + #define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do) \ ({ \ struct btree_trans trans; \ @@ -136,9 +134,4 @@ static inline int bch2_trans_commit(struct btree_trans *trans, (_i) < (_trans)->updates + (_trans)->nr_updates; \ (_i)++) -#define trans_for_each_update2(_trans, _i) \ - for ((_i) = (_trans)->updates2; \ - (_i) < (_trans)->updates2 + (_trans)->nr_updates2; \ - (_i)++) - #endif /* _BCACHEFS_BTREE_UPDATE_H */ diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index 0014470..c8c3382 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -22,6 +22,10 @@ #include #include +static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *, + struct btree_iter *, struct btree *, + struct keylist *, unsigned); + /* Debug code: */ /* @@ -242,11 +246,7 @@ retry: goto retry; } - if (c->sb.features & (1ULL << BCH_FEATURE_btree_ptr_v2)) - bkey_btree_ptr_v2_init(&tmp.k); - else - 
bkey_btree_ptr_init(&tmp.k); - + bkey_btree_ptr_v2_init(&tmp.k); bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size); bch2_open_bucket_get(c, wp, &ob); @@ -286,6 +286,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev memset(&b->nr, 0, sizeof(b->nr)); b->data->magic = cpu_to_le64(bset_magic(c)); + memset(&b->data->_ptr, 0, sizeof(b->data->_ptr)); b->data->flags = 0; SET_BTREE_NODE_ID(b->data, as->btree_id); SET_BTREE_NODE_LEVEL(b->data, level); @@ -366,7 +367,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level) struct btree *b = bch2_btree_node_alloc(as, level); btree_set_min(b, POS_MIN); - btree_set_max(b, POS_MAX); + btree_set_max(b, SPOS_MAX); b->data->format = bch2_btree_calc_format(b); btree_node_set_format(b, b->data->format); @@ -510,7 +511,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans, ret = bch2_trans_mark_key(trans, bkey_s_c_null, bkey_i_to_s_c(k), - 0, 0, BTREE_TRIGGER_INSERT); + BTREE_TRIGGER_INSERT); if (ret) return ret; } @@ -519,7 +520,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans, ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(k), bkey_s_c_null, - 0, 0, BTREE_TRIGGER_OVERWRITE); + BTREE_TRIGGER_OVERWRITE); if (ret) return ret; } @@ -549,6 +550,23 @@ static void btree_update_nodes_written(struct btree_update *as) BUG_ON(!journal_pin_active(&as->journal)); + /* + * Wait for any in flight writes to finish before we free the old nodes + * on disk: + */ + for (i = 0; i < as->nr_old_nodes; i++) { + struct btree *old = as->old_nodes[i]; + __le64 seq; + + six_lock_read(&old->c.lock, NULL, NULL); + seq = old->data ? old->data->keys.seq : 0; + six_unlock_read(&old->c.lock); + + if (seq == as->old_nodes_seq[i]) + wait_on_bit_io(&old->flags, BTREE_NODE_write_in_flight_inner, + TASK_UNINTERRUPTIBLE); + } + /* * We did an update to a parent node where the pointers we added pointed * to child nodes that weren't written yet: now, the child nodes have @@ -887,6 +905,10 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as, btree_update_drop_new_node(c, b); btree_update_will_delete_key(as, &b->key); + + as->old_nodes[as->nr_old_nodes] = b; + as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq; + as->nr_old_nodes++; } void bch2_btree_update_done(struct btree_update *as) @@ -899,7 +921,8 @@ void bch2_btree_update_done(struct btree_update *as) bch2_btree_reserve_put(as); - continue_at(&as->cl, btree_update_set_nodes_written, system_freezable_wq); + continue_at(&as->cl, btree_update_set_nodes_written, + as->c->btree_interior_update_worker); } struct btree_update * @@ -915,34 +938,28 @@ bch2_btree_update_start(struct btree_iter *iter, unsigned level, int journal_flags = 0; int ret = 0; + BUG_ON(!iter->should_be_locked); + if (flags & BTREE_INSERT_JOURNAL_RESERVED) journal_flags |= JOURNAL_RES_GET_RESERVED; closure_init_stack(&cl); retry: - /* - * This check isn't necessary for correctness - it's just to potentially - * prevent us from doing a lot of work that'll end up being wasted: - */ - ret = bch2_journal_error(&c->journal); - if (ret) - return ERR_PTR(ret); /* * XXX: figure out how far we might need to split, * instead of locking/reserving all the way to the root: */ if (!bch2_btree_iter_upgrade(iter, U8_MAX)) { - trace_trans_restart_iter_upgrade(trans->ip); + trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_, + iter->btree_id, + &iter->real_pos); return ERR_PTR(-EINTR); } if (flags & BTREE_INSERT_GC_LOCK_HELD) 
lockdep_assert_held(&c->gc_lock); else if (!down_read_trylock(&c->gc_lock)) { - if (flags & BTREE_INSERT_NOUNLOCK) - return ERR_PTR(-EINTR); - bch2_trans_unlock(trans); down_read(&c->gc_lock); if (!bch2_trans_relock(trans)) { @@ -965,29 +982,41 @@ retry: bch2_keylist_init(&as->new_keys, as->_new_keys); bch2_keylist_init(&as->parent_keys, as->inline_keys); + mutex_lock(&c->btree_interior_update_lock); + list_add_tail(&as->list, &c->btree_interior_update_list); + mutex_unlock(&c->btree_interior_update_lock); + + /* + * We don't want to allocate if we're in an error state, that can cause + * deadlock on emergency shutdown due to open buckets getting stuck in + * the btree_reserve_cache after allocator shutdown has cleared it out. + * This check needs to come after adding us to the btree_interior_update + * list but before calling bch2_btree_reserve_get, to synchronize with + * __bch2_fs_read_only(). + */ + ret = bch2_journal_error(&c->journal); + if (ret) + goto err; + ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, BTREE_UPDATE_JOURNAL_RES, journal_flags|JOURNAL_RES_GET_NONBLOCK); if (ret == -EAGAIN) { - /* - * this would be cleaner if bch2_journal_preres_get() took a - * closure argument - */ - if (flags & BTREE_INSERT_NOUNLOCK) { - ret = -EINTR; - goto err; - } - bch2_trans_unlock(trans); - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) - goto err; + if (flags & BTREE_INSERT_JOURNAL_RECLAIM) { + bch2_btree_update_free(as); + btree_trans_restart(trans); + return ERR_PTR(ret); + } ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, BTREE_UPDATE_JOURNAL_RES, journal_flags); - if (ret) + if (ret) { + trace_trans_restart_journal_preres_get(trans->ip, _RET_IP_); goto err; + } if (!bch2_trans_relock(trans)) { ret = -EINTR; @@ -1002,8 +1031,7 @@ retry: if (ret) goto err; - ret = bch2_btree_reserve_get(as, nr_nodes, flags, - !(flags & BTREE_INSERT_NOUNLOCK) ? 
&cl : NULL); + ret = bch2_btree_reserve_get(as, nr_nodes, flags, &cl); if (ret) goto err; @@ -1011,17 +1039,11 @@ retry: atomic64_read(&c->journal.seq), &as->journal, NULL); - mutex_lock(&c->btree_interior_update_lock); - list_add_tail(&as->list, &c->btree_interior_update_list); - mutex_unlock(&c->btree_interior_update_lock); - return as; err: bch2_btree_update_free(as); if (ret == -EAGAIN) { - BUG_ON(flags & BTREE_INSERT_NOUNLOCK); - bch2_trans_unlock(trans); closure_sync(&cl); ret = -EINTR; @@ -1113,6 +1135,9 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b struct bkey_packed *k; const char *invalid; + BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 && + !btree_ptr_sectors_written(insert)); + invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?: bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert)); if (invalid) { @@ -1141,6 +1166,27 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b set_btree_node_need_write(b); } +static void +__bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b, + struct btree_iter *iter, struct keylist *keys, + struct btree_node_iter node_iter) +{ + struct bkey_i *insert = bch2_keylist_front(keys); + struct bkey_packed *k; + + BUG_ON(btree_node_type(b) != BKEY_TYPE_btree); + + while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) && + (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0)) + ; + + while (!bch2_keylist_empty(keys)) { + bch2_insert_fixup_btree_ptr(as, b, iter, + bch2_keylist_front(keys), &node_iter); + bch2_keylist_pop_front(keys); + } +} + /* * Move keys from n1 (original replacement node, now lower node) to n2 (higher * node) @@ -1271,16 +1317,9 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b, struct bkey_packed *src, *dst, *n; struct bset *i; - BUG_ON(btree_node_type(b) != BKEY_TYPE_btree); - bch2_btree_node_iter_init(&node_iter, b, &k->k.p); - while (!bch2_keylist_empty(keys)) { - k = bch2_keylist_front(keys); - - bch2_insert_fixup_btree_ptr(as, b, iter, k, &node_iter); - bch2_keylist_pop_front(keys); - } + __bch2_btree_insert_keys_interior(as, b, iter, keys, node_iter); /* * We can't tolerate whiteouts here - with whiteouts there can be @@ -1310,8 +1349,9 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b, btree_node_interior_verify(as->c, b); } -static void btree_split(struct btree_update *as, struct btree *b, - struct btree_iter *iter, struct keylist *keys, +static void btree_split(struct btree_update *as, + struct btree_trans *trans, struct btree_iter *iter, + struct btree *b, struct keylist *keys, unsigned flags) { struct bch_fs *c = as->c; @@ -1340,6 +1380,7 @@ static void btree_split(struct btree_update *as, struct btree *b, six_unlock_write(&n2->c.lock); six_unlock_write(&n1->c.lock); + bch2_btree_node_write(c, n1, SIX_LOCK_intent); bch2_btree_node_write(c, n2, SIX_LOCK_intent); /* @@ -1367,17 +1408,17 @@ static void btree_split(struct btree_update *as, struct btree *b, bch2_btree_build_aux_trees(n1); six_unlock_write(&n1->c.lock); + bch2_btree_node_write(c, n1, SIX_LOCK_intent); + if (parent) bch2_keylist_add(&as->parent_keys, &n1->key); } - bch2_btree_node_write(c, n1, SIX_LOCK_intent); - /* New nodes all written, now make them visible: */ if (parent) { /* Split a non root node */ - bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags); + bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags); } else if (n3) { bch2_btree_set_root(as, n3, iter); } 
else { @@ -1415,7 +1456,7 @@ static void btree_split(struct btree_update *as, struct btree *b, six_unlock_intent(&n2->c.lock); six_unlock_intent(&n1->c.lock); - bch2_btree_trans_verify_locks(iter->trans); + bch2_btree_trans_verify_locks(trans); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_split], start_time); @@ -1426,24 +1467,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b, struct btree_iter *iter, struct keylist *keys) { struct btree_iter *linked; - struct btree_node_iter node_iter; - struct bkey_i *insert = bch2_keylist_front(keys); - struct bkey_packed *k; - - /* Don't screw up @iter's position: */ - node_iter = iter->l[b->c.level].iter; - - /* - * btree_split(), btree_gc_coalesce() will insert keys before - * the iterator's current position - they know the keys go in - * the node the iterator points to: - */ - while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) && - (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0)) - ; - for_each_keylist_key(keys, insert) - bch2_insert_fixup_btree_ptr(as, b, iter, insert, &node_iter); + __bch2_btree_insert_keys_interior(as, b, iter, keys, iter->l[b->c.level].iter); btree_update_updated_node(as, b); @@ -1465,9 +1490,10 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b, * If a split occurred, this function will return early. This can only happen * for leaf nodes -- inserts into interior nodes have to be atomic. */ -void bch2_btree_insert_node(struct btree_update *as, struct btree *b, - struct btree_iter *iter, struct keylist *keys, - unsigned flags) +static void bch2_btree_insert_node(struct btree_update *as, + struct btree_trans *trans, struct btree_iter *iter, + struct btree *b, struct keylist *keys, + unsigned flags) { struct bch_fs *c = as->c; int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s); @@ -1480,7 +1506,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b, BUG_ON(!as || as->b); bch2_verify_keylist_sorted(keys); - bch2_btree_node_lock_for_insert(c, b, iter); + bch2_btree_node_lock_for_insert(trans, iter, b); if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) { bch2_btree_node_unlock_write(b, iter); @@ -1508,12 +1534,14 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b, btree_node_interior_verify(c, b); return; split: - btree_split(as, b, iter, keys, flags); + btree_split(as, trans, iter, b, keys, flags); } -int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter, +int bch2_btree_split_leaf(struct btree_trans *trans, + struct btree_iter *iter, unsigned flags) { + struct bch_fs *c = trans->c; struct btree *b = iter_l(iter)->b; struct btree_update *as; unsigned l; @@ -1524,22 +1552,22 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter, if (IS_ERR(as)) return PTR_ERR(as); - btree_split(as, b, iter, NULL, flags); + btree_split(as, trans, iter, b, NULL, flags); bch2_btree_update_done(as); for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++) - ret = bch2_foreground_maybe_merge(c, iter, l, flags); + ret = bch2_foreground_maybe_merge(trans, iter, l, flags); return ret; } -int __bch2_foreground_maybe_merge(struct bch_fs *c, +int __bch2_foreground_maybe_merge(struct btree_trans *trans, struct btree_iter *iter, unsigned level, unsigned flags, enum btree_node_sibling sib) { - struct btree_trans *trans = iter->trans; + struct bch_fs *c = trans->c; struct btree_iter *sib_iter = NULL; struct btree_update *as; struct bkey_format_state new_s; @@ -1550,18 +1578,18 @@ int 
__bch2_foreground_maybe_merge(struct bch_fs *c, size_t sib_u64s; int ret = 0, ret2 = 0; - BUG_ON(!btree_node_locked(iter, level)); retry: ret = bch2_btree_iter_traverse(iter); if (ret) - goto err; + return ret; + BUG_ON(!iter->should_be_locked); BUG_ON(!btree_node_locked(iter, level)); b = iter->l[level].b; if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) || - (sib == btree_next_sib && !bpos_cmp(b->data->max_key, POS_MAX))) { + (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) { b->sib_u64s[sib] = U16_MAX; goto out; } @@ -1593,7 +1621,20 @@ retry: next = m; } - BUG_ON(bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)); + if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) { + char buf1[100], buf2[100]; + + bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key); + bch2_bpos_to_text(&PBUF(buf2), next->data->min_key); + bch_err(c, + "btree topology error in btree merge:\n" + " prev ends at %s\n" + " next starts at %s", + buf1, buf2); + bch2_topology_error(c); + ret = -EIO; + goto err; + } bch2_bkey_format_init(&new_s); bch2_bkey_format_add_pos(&new_s, prev->data->min_key); @@ -1648,14 +1689,14 @@ retry: bch2_btree_build_aux_trees(n); six_unlock_write(&n->c.lock); + bch2_btree_node_write(c, n, SIX_LOCK_intent); + bkey_init(&delete.k); delete.k.p = prev->key.k.p; bch2_keylist_add(&as->parent_keys, &delete); bch2_keylist_add(&as->parent_keys, &n->key); - bch2_btree_node_write(c, n, SIX_LOCK_intent); - - bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags); + bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags); bch2_btree_update_get_open_buckets(as, n); @@ -1695,22 +1736,17 @@ err: if (ret == -EINTR && bch2_trans_relock(trans)) goto retry; - if (ret == -EINTR && !(flags & BTREE_INSERT_NOUNLOCK)) { - ret2 = ret; - ret = bch2_btree_iter_traverse_all(trans); - if (!ret) - goto retry; - } - goto out; } /** * bch_btree_node_rewrite - Rewrite/move a btree node */ -int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter, +int bch2_btree_node_rewrite(struct btree_trans *trans, + struct btree_iter *iter, __le64 seq, unsigned flags) { + struct bch_fs *c = trans->c; struct btree *b, *n, *parent; struct btree_update *as; int ret; @@ -1753,7 +1789,8 @@ retry: if (parent) { bch2_keylist_add(&as->parent_keys, &n->key); - bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags); + bch2_btree_insert_node(as, trans, iter, parent, + &as->parent_keys, flags); } else { bch2_btree_set_root(as, n, iter); } @@ -1772,71 +1809,161 @@ out: return ret; } -static void __bch2_btree_node_update_key(struct bch_fs *c, - struct btree_update *as, - struct btree_iter *iter, - struct btree *b, struct btree *new_hash, - struct bkey_i *new_key) +struct async_btree_rewrite { + struct bch_fs *c; + struct work_struct work; + enum btree_id btree_id; + unsigned level; + struct bpos pos; + __le64 seq; +}; + +void async_btree_node_rewrite_work(struct work_struct *work) +{ + struct async_btree_rewrite *a = + container_of(work, struct async_btree_rewrite, work); + struct bch_fs *c = a->c; + struct btree_trans trans; + struct btree_iter *iter; + + bch2_trans_init(&trans, c, 0, 0); + iter = bch2_trans_get_node_iter(&trans, a->btree_id, a->pos, + BTREE_MAX_DEPTH, a->level, 0); + bch2_btree_node_rewrite(&trans, iter, a->seq, 0); + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); + percpu_ref_put(&c->writes); + kfree(a); +} + +void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) +{ + struct 
async_btree_rewrite *a; + + if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags)) + return; + + if (!percpu_ref_tryget(&c->writes)) + return; + + a = kmalloc(sizeof(*a), GFP_NOFS); + if (!a) { + percpu_ref_put(&c->writes); + return; + } + + a->c = c; + a->btree_id = b->c.btree_id; + a->level = b->c.level; + a->pos = b->key.k.p; + a->seq = b->data->keys.seq; + + INIT_WORK(&a->work, async_btree_node_rewrite_work); + queue_work(c->btree_interior_update_worker, &a->work); +} + +static int __bch2_btree_node_update_key(struct btree_trans *trans, + struct btree_iter *iter, + struct btree *b, struct btree *new_hash, + struct bkey_i *new_key, + bool skip_triggers) { + struct bch_fs *c = trans->c; + struct btree_iter *iter2 = NULL; struct btree *parent; + u64 journal_entries[BKEY_BTREE_PTR_U64s_MAX]; int ret; - btree_update_will_delete_key(as, &b->key); - btree_update_will_add_key(as, new_key); + if (!skip_triggers) { + ret = bch2_trans_mark_key(trans, + bkey_s_c_null, + bkey_i_to_s_c(new_key), + BTREE_TRIGGER_INSERT); + if (ret) + return ret; + + ret = bch2_trans_mark_key(trans, + bkey_i_to_s_c(&b->key), + bkey_s_c_null, + BTREE_TRIGGER_OVERWRITE); + if (ret) + return ret; + } + + if (new_hash) { + bkey_copy(&new_hash->key, new_key); + ret = bch2_btree_node_hash_insert(&c->btree_cache, + new_hash, b->c.level, b->c.btree_id); + BUG_ON(ret); + } parent = btree_node_parent(iter, b); if (parent) { - if (new_hash) { - bkey_copy(&new_hash->key, new_key); - ret = bch2_btree_node_hash_insert(&c->btree_cache, - new_hash, b->c.level, b->c.btree_id); - BUG_ON(ret); - } - - bch2_keylist_add(&as->parent_keys, new_key); - bch2_btree_insert_node(as, parent, iter, &as->parent_keys, 0); + iter2 = bch2_trans_copy_iter(trans, iter); - if (new_hash) { - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, new_hash); + BUG_ON(iter2->level != b->c.level); + BUG_ON(bpos_cmp(iter2->pos, new_key->k.p)); - bch2_btree_node_hash_remove(&c->btree_cache, b); + btree_node_unlock(iter2, iter2->level); + iter2->l[iter2->level].b = BTREE_ITER_NO_NODE_UP; + iter2->level++; - bkey_copy(&b->key, new_key); - ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); - BUG_ON(ret); - mutex_unlock(&c->btree_cache.lock); - } else { - bkey_copy(&b->key, new_key); - } + ret = bch2_btree_iter_traverse(iter2) ?: + bch2_trans_update(trans, iter2, new_key, BTREE_TRIGGER_NORUN); + if (ret) + goto err; } else { BUG_ON(btree_node_root(c, b) != b); - bch2_btree_node_lock_write(b, iter); - bkey_copy(&b->key, new_key); + trans->extra_journal_entries = (void *) &journal_entries[0]; + trans->extra_journal_entry_u64s = + journal_entry_set((void *) &journal_entries[0], + BCH_JSET_ENTRY_btree_root, + b->c.btree_id, b->c.level, + new_key, new_key->k.u64s); + } - if (btree_ptr_hash_val(&b->key) != b->hash_val) { - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + ret = bch2_trans_commit(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_NOCHECK_RW| + BTREE_INSERT_JOURNAL_RECLAIM| + BTREE_INSERT_JOURNAL_RESERVED); + if (ret) + goto err; - ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); - BUG_ON(ret); - mutex_unlock(&c->btree_cache.lock); - } + bch2_btree_node_lock_write(b, iter); - btree_update_updated_root(as, b); - bch2_btree_node_unlock_write(b, iter); + if (new_hash) { + mutex_lock(&c->btree_cache.lock); + bch2_btree_node_hash_remove(&c->btree_cache, new_hash); + bch2_btree_node_hash_remove(&c->btree_cache, b); + + bkey_copy(&b->key, new_key); + ret = 
__bch2_btree_node_hash_insert(&c->btree_cache, b); + BUG_ON(ret); + mutex_unlock(&c->btree_cache.lock); + } else { + bkey_copy(&b->key, new_key); } - bch2_btree_update_done(as); + bch2_btree_node_unlock_write(b, iter); +out: + bch2_trans_iter_put(trans, iter2); + return ret; +err: + if (new_hash) { + mutex_lock(&c->btree_cache.lock); + bch2_btree_node_hash_remove(&c->btree_cache, b); + mutex_unlock(&c->btree_cache.lock); + } + goto out; } -int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter, - struct btree *b, - struct bkey_i *new_key) +int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, + struct btree *b, struct bkey_i *new_key, + bool skip_triggers) { - struct btree *parent = btree_node_parent(iter, b); - struct btree_update *as = NULL; + struct bch_fs *c = trans->c; struct btree *new_hash = NULL; struct closure cl; int ret = 0; @@ -1850,27 +1977,18 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter, if (btree_ptr_hash_val(new_key) != b->hash_val) { ret = bch2_btree_cache_cannibalize_lock(c, &cl); if (ret) { - bch2_trans_unlock(iter->trans); + bch2_trans_unlock(trans); closure_sync(&cl); - if (!bch2_trans_relock(iter->trans)) + if (!bch2_trans_relock(trans)) return -EINTR; } new_hash = bch2_btree_node_mem_alloc(c); } - as = bch2_btree_update_start(iter, b->c.level, - parent ? btree_update_reserve_required(c, parent) : 0, - BTREE_INSERT_NOFAIL); - if (IS_ERR(as)) { - ret = PTR_ERR(as); - goto err; - } - - __bch2_btree_node_update_key(c, as, iter, b, new_hash, new_key); + ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, + new_key, skip_triggers); - bch2_btree_iter_downgrade(iter); -err: if (new_hash) { mutex_lock(&c->btree_cache.lock); list_move(&new_hash->list, &c->btree_cache.freeable); @@ -1884,6 +2002,35 @@ err: return ret; } +int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, + struct btree *b, struct bkey_i *new_key, + bool skip_triggers) +{ + struct btree_iter *iter; + int ret; + + iter = bch2_trans_get_node_iter(trans, b->c.btree_id, b->key.k.p, + BTREE_MAX_DEPTH, b->c.level, + BTREE_ITER_INTENT); + ret = bch2_btree_iter_traverse(iter); + if (ret) + goto out; + + /* has node been freed? 
*/ + if (iter->l[b->c.level].b != b) { + /* node has been freed: */ + BUG_ON(!btree_node_dying(b)); + goto out; + } + + BUG_ON(!btree_node_hashed(b)); + + ret = bch2_btree_node_update_key(trans, iter, b, new_key, skip_triggers); +out: + bch2_trans_iter_put(trans, iter); + return ret; +} + /* Init code: */ /* @@ -1919,7 +2066,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) b->c.btree_id = id; bkey_btree_ptr_init(&b->key); - b->key.k.p = POS_MAX; + b->key.k.p = SPOS_MAX; *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id; bch2_bset_init_first(b, &b->data->keys); @@ -1927,7 +2074,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) b->data->flags = 0; btree_set_min(b, POS_MIN); - btree_set_max(b, POS_MAX); + btree_set_max(b, SPOS_MAX); b->data->format = bch2_btree_calc_format(b); btree_node_set_format(b, b->data->format); diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h index f2925b0..e88e737 100644 --- a/libbcachefs/btree_update_interior.h +++ b/libbcachefs/btree_update_interior.h @@ -92,6 +92,10 @@ struct btree_update { struct btree *new_nodes[BTREE_UPDATE_NODES_MAX]; unsigned nr_new_nodes; + struct btree *old_nodes[BTREE_UPDATE_NODES_MAX]; + __le64 old_nodes_seq[BTREE_UPDATE_NODES_MAX]; + unsigned nr_old_nodes; + open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX * BCH_REPLICAS_MAX]; open_bucket_idx_t nr_open_buckets; @@ -127,15 +131,12 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *, struct btree *); void bch2_btree_update_add_new_node(struct btree_update *, struct btree *); -void bch2_btree_insert_node(struct btree_update *, struct btree *, - struct btree_iter *, struct keylist *, - unsigned); -int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned); +int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned); -int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *, +int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *, unsigned, unsigned, enum btree_node_sibling); -static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c, +static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans, struct btree_iter *iter, unsigned level, unsigned flags, enum btree_node_sibling sib) @@ -149,20 +150,20 @@ static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c, return 0; b = iter->l[level].b; - if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold) + if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold) return 0; - return __bch2_foreground_maybe_merge(c, iter, level, flags, sib); + return __bch2_foreground_maybe_merge(trans, iter, level, flags, sib); } -static inline int bch2_foreground_maybe_merge(struct bch_fs *c, - struct btree_iter *iter, - unsigned level, - unsigned flags) +static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, + struct btree_iter *iter, + unsigned level, + unsigned flags) { - return bch2_foreground_maybe_merge_sibling(c, iter, level, flags, + return bch2_foreground_maybe_merge_sibling(trans, iter, level, flags, btree_prev_sib) ?: - bch2_foreground_maybe_merge_sibling(c, iter, level, flags, + bch2_foreground_maybe_merge_sibling(trans, iter, level, flags, btree_next_sib); } @@ -256,13 +257,15 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, return remaining; } +#define BTREE_WRITE_SET_U64s_BITS 9 + static inline unsigned btree_write_set_buffer(struct btree *b) { /* * Could buffer up larger 
amounts of keys for btrees with larger keys, * pending benchmarking: */ - return 4 << 10; + return 8 << BTREE_WRITE_SET_U64s_BITS; } static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, diff --git a/libbcachefs/btree_update_leaf.c b/libbcachefs/btree_update_leaf.c index e258cf8..7e9909e 100644 --- a/libbcachefs/btree_update_leaf.c +++ b/libbcachefs/btree_update_leaf.c @@ -32,13 +32,16 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l, static inline bool same_leaf_as_prev(struct btree_trans *trans, struct btree_insert_entry *i) { - return i != trans->updates2 && + return i != trans->updates && iter_l(i[0].iter)->b == iter_l(i[-1].iter)->b; } -inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b, - struct btree_iter *iter) +inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans, + struct btree_iter *iter, + struct btree *b) { + struct bch_fs *c = trans->c; + bch2_btree_node_lock_write(b, iter); if (btree_iter_type(iter) == BTREE_ITER_CACHED) @@ -53,7 +56,7 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b, * a new bset to insert into: */ if (want_new_bset(c, b)) - bch2_btree_init_next(c, b, iter); + bch2_btree_init_next(trans, iter, b); } /* Inserting into a given leaf node (last stage of insert): */ @@ -222,25 +225,14 @@ static bool btree_insert_key_leaf(struct btree_trans *trans, static inline void btree_insert_entry_checks(struct btree_trans *trans, struct btree_insert_entry *i) { - struct bch_fs *c = trans->c; - - if (bch2_debug_check_bkeys) { - const char *invalid = bch2_bkey_invalid(c, - bkey_i_to_s_c(i->k), i->bkey_type); - if (invalid) { - char buf[200]; - - bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k)); - panic("invalid bkey %s on insert: %s\n", buf, invalid); - } - } - BUG_ON(!i->is_extent && bpos_cmp(i->k->k.p, i->iter->real_pos)); + BUG_ON(bpos_cmp(i->k->k.p, i->iter->real_pos)); BUG_ON(i->level != i->iter->level); BUG_ON(i->btree_id != i->iter->btree_id); } static noinline int -bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s) +bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s, + unsigned long trace_ip) { struct bch_fs *c = trans->c; int ret; @@ -253,7 +245,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s) return ret; if (!bch2_trans_relock(trans)) { - trace_trans_restart_journal_preres_get(trans->ip); + trace_trans_restart_journal_preres_get(trans->ip, trace_ip); return -EINTR; } @@ -305,6 +297,12 @@ btree_key_can_insert_cached(struct btree_trans *trans, !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM)) return BTREE_INSERT_NEED_JOURNAL_RECLAIM; + /* + * bch2_varint_decode can read past the end of the buffer by at most 7 + * bytes (it won't be used): + */ + u64s += 1; + if (u64s <= ck->u64s) return BTREE_INSERT_OK; @@ -319,8 +317,7 @@ btree_key_can_insert_cached(struct btree_trans *trans, } static inline void do_btree_insert_one(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_i *insert) + struct btree_insert_entry *i) { struct bch_fs *c = trans->c; struct journal *j = &c->journal; @@ -329,31 +326,28 @@ static inline void do_btree_insert_one(struct btree_trans *trans, EBUG_ON(trans->journal_res.ref != !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)); - insert->k.needs_whiteout = false; + i->k->k.needs_whiteout = false; - did_work = (btree_iter_type(iter) != BTREE_ITER_CACHED) - ? 
btree_insert_key_leaf(trans, iter, insert) - : bch2_btree_insert_key_cached(trans, iter, insert); + did_work = (btree_iter_type(i->iter) != BTREE_ITER_CACHED) + ? btree_insert_key_leaf(trans, i->iter, i->k) + : bch2_btree_insert_key_cached(trans, i->iter, i->k); if (!did_work) return; if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) { bch2_journal_add_keys(j, &trans->journal_res, - iter->btree_id, insert); + i->btree_id, + i->level, + i->k); bch2_journal_set_has_inode(j, &trans->journal_res, - insert->k.p.inode); + i->k->k.p.inode); if (trans->journal_seq) *trans->journal_seq = trans->journal_res.seq; } } -static noinline void bch2_btree_iter_unlock_noinline(struct btree_iter *iter) -{ - __bch2_btree_iter_unlock(iter); -} - static noinline void bch2_trans_mark_gc(struct btree_trans *trans) { struct bch_fs *c = trans->c; @@ -366,14 +360,15 @@ static noinline void bch2_trans_mark_gc(struct btree_trans *trans) BUG_ON(btree_iter_type(i->iter) == BTREE_ITER_CACHED); if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b))) - bch2_mark_update(trans, i->iter, i->k, NULL, - i->trigger_flags|BTREE_TRIGGER_GC); + bch2_mark_update(trans, i->iter, i->k, + i->flags|BTREE_TRIGGER_GC); } } static inline int bch2_trans_commit_write_locked(struct btree_trans *trans, - struct btree_insert_entry **stopped_at) + struct btree_insert_entry **stopped_at, + unsigned long trace_ip) { struct bch_fs *c = trans->c; struct btree_insert_entry *i; @@ -383,7 +378,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, int ret; if (race_fault()) { - trace_trans_restart_fault_inject(trans->ip); + trace_trans_restart_fault_inject(trans->ip, trace_ip); + trans->restarted = true; return -EINTR; } @@ -403,7 +399,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, h = h->next; } - trans_for_each_update2(trans, i) { + trans_for_each_update(trans, i) { /* Multiple inserts might go to same leaf: */ if (!same_leaf_as_prev(trans, i)) u64s = 0; @@ -461,17 +457,17 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) { if (bch2_journal_seq_verify) - trans_for_each_update2(trans, i) + trans_for_each_update(trans, i) i->k->k.version.lo = trans->journal_res.seq; else if (bch2_inject_invalid_keys) - trans_for_each_update2(trans, i) + trans_for_each_update(trans, i) i->k->k.version = MAX_VERSION; } trans_for_each_update(trans, i) if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) bch2_mark_update(trans, i->iter, i->k, - NULL, i->trigger_flags); + i->flags); if (marking && trans->fs_usage_deltas) bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas); @@ -479,8 +475,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, if (unlikely(c->gc_pos.phase)) bch2_trans_mark_gc(trans); - trans_for_each_update2(trans, i) - do_btree_insert_one(trans, i->iter, i->k); + trans_for_each_update(trans, i) + do_btree_insert_one(trans, i); err: if (marking) { percpu_up_read(&c->mark_lock); @@ -507,7 +503,7 @@ static noinline int maybe_do_btree_merge(struct btree_trans *trans, struct btree BUG_ON(iter->level); - trans_for_each_update2(trans, i) { + trans_for_each_update(trans, i) { if (iter_l(i->iter)->b != b) continue; @@ -520,24 +516,26 @@ static noinline int maybe_do_btree_merge(struct btree_trans *trans, struct btree u64s_delta -= !bkey_deleted(old.k) ? old.k->u64s : 0; } - return u64s_delta <= 0 - ? 
(bch2_foreground_maybe_merge(trans->c, iter, iter->level, - trans->flags & ~BTREE_INSERT_NOUNLOCK) ?: -EINTR) - : 0; + if (u64s_delta > 0) + return 0; + + return bch2_foreground_maybe_merge(trans, iter, + iter->level, trans->flags); } /* * Get journal reservation, take write locks, and attempt to do btree update(s): */ static inline int do_bch2_trans_commit(struct btree_trans *trans, - struct btree_insert_entry **stopped_at) + struct btree_insert_entry **stopped_at, + unsigned long trace_ip) { struct bch_fs *c = trans->c; struct btree_insert_entry *i; struct btree_iter *iter; int ret; - trans_for_each_update2(trans, i) { + trans_for_each_update(trans, i) { struct btree *b; BUG_ON(!btree_node_intent_locked(i->iter, i->level)); @@ -554,7 +552,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, } } - trans_for_each_update2(trans, i) + trans_for_each_update(trans, i) BUG_ON(!btree_node_intent_locked(i->iter, i->level)); ret = bch2_journal_preres_get(&c->journal, @@ -564,7 +562,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, ? JOURNAL_RES_GET_RESERVED : 0)); if (unlikely(ret == -EAGAIN)) ret = bch2_trans_journal_preres_get_cold(trans, - trans->journal_preres_u64s); + trans->journal_preres_u64s, trace_ip); if (unlikely(ret)) return ret; @@ -579,32 +577,38 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, * or anything else that might call bch2_trans_relock(), since that * would just retake the read locks: */ - trans_for_each_iter(trans, iter) { - if (iter->nodes_locked != iter->nodes_intent_locked) { - if (btree_iter_keep(trans, iter)) { - if (!bch2_btree_iter_upgrade(iter, 1)) { - trace_trans_restart_upgrade(trans->ip); - return -EINTR; - } - } else { - bch2_btree_iter_unlock_noinline(iter); - } + trans_for_each_iter(trans, iter) + if (iter->nodes_locked != iter->nodes_intent_locked && + !bch2_btree_iter_upgrade(iter, 1)) { + trace_trans_restart_upgrade(trans->ip, trace_ip, + iter->btree_id, + &iter->real_pos); + trans->restarted = true; + return -EINTR; } - } - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - trans_for_each_update2(trans, i) - btree_insert_entry_checks(trans, i); + trans_for_each_update(trans, i) { + const char *invalid = bch2_bkey_invalid(c, + bkey_i_to_s_c(i->k), i->bkey_type); + if (invalid) { + char buf[200]; + + bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k)); + bch_err(c, "invalid bkey %s on insert: %s\n", buf, invalid); + bch2_fatal_error(c); + } + btree_insert_entry_checks(trans, i); + } bch2_btree_trans_verify_locks(trans); - trans_for_each_update2(trans, i) + trans_for_each_update(trans, i) if (!same_leaf_as_prev(trans, i)) - bch2_btree_node_lock_for_insert(c, - iter_l(i->iter)->b, i->iter); + bch2_btree_node_lock_for_insert(trans, i->iter, + iter_l(i->iter)->b); - ret = bch2_trans_commit_write_locked(trans, stopped_at); + ret = bch2_trans_commit_write_locked(trans, stopped_at, trace_ip); - trans_for_each_update2(trans, i) + trans_for_each_update(trans, i) if (!same_leaf_as_prev(trans, i)) bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b, i->iter); @@ -629,117 +633,79 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, static int journal_reclaim_wait_done(struct bch_fs *c) { - int ret; - - ret = bch2_journal_error(&c->journal); - if (ret) - return ret; - - ret = !bch2_btree_key_cache_must_wait(c); - if (ret) - return ret; - - if (mutex_trylock(&c->journal.reclaim_lock)) { - ret = bch2_journal_reclaim(&c->journal); - mutex_unlock(&c->journal.reclaim_lock); - } + int ret = 
bch2_journal_error(&c->journal) ?: + !bch2_btree_key_cache_must_wait(c); if (!ret) - ret = !bch2_btree_key_cache_must_wait(c); + journal_reclaim_kick(&c->journal); return ret; } static noinline int bch2_trans_commit_error(struct btree_trans *trans, struct btree_insert_entry *i, - int ret) + int ret, unsigned long trace_ip) { struct bch_fs *c = trans->c; - unsigned flags = trans->flags; - - /* - * BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree - * update; if we haven't done anything yet it doesn't apply - */ - flags &= ~BTREE_INSERT_NOUNLOCK; switch (ret) { case BTREE_INSERT_BTREE_NODE_FULL: - ret = bch2_btree_split_leaf(c, i->iter, flags); - - /* - * if the split succeeded without dropping locks the insert will - * still be atomic (what the caller peeked() and is overwriting - * won't have changed) - */ -#if 0 - /* - * XXX: - * split -> btree node merging (of parent node) might still drop - * locks when we're not passing it BTREE_INSERT_NOUNLOCK - * - * we don't want to pass BTREE_INSERT_NOUNLOCK to split as that - * will inhibit merging - but we don't have a reliable way yet - * (do we?) of checking if we dropped locks in this path - */ + ret = bch2_btree_split_leaf(trans, i->iter, trans->flags); if (!ret) - goto retry; -#endif + return 0; - /* - * don't care if we got ENOSPC because we told split it - * couldn't block: - */ - if (!ret || - ret == -EINTR || - (flags & BTREE_INSERT_NOUNLOCK)) { - trace_trans_restart_btree_node_split(trans->ip); - ret = -EINTR; - } - break; - case BTREE_INSERT_ENOSPC: - ret = -ENOSPC; + if (ret == -EINTR) + trace_trans_restart_btree_node_split(trans->ip, trace_ip, + i->iter->btree_id, + &i->iter->real_pos); break; case BTREE_INSERT_NEED_MARK_REPLICAS: bch2_trans_unlock(trans); ret = bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas); if (ret) - return ret; + break; if (bch2_trans_relock(trans)) return 0; - trace_trans_restart_mark_replicas(trans->ip); + trace_trans_restart_mark_replicas(trans->ip, trace_ip); ret = -EINTR; break; case BTREE_INSERT_NEED_JOURNAL_RES: bch2_trans_unlock(trans); if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) && - !(trans->flags & BTREE_INSERT_JOURNAL_RESERVED)) - return -EAGAIN; + !(trans->flags & BTREE_INSERT_JOURNAL_RESERVED)) { + trans->restarted = true; + ret = -EAGAIN; + break; + } ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_CHECK); if (ret) - return ret; + break; if (bch2_trans_relock(trans)) return 0; - trace_trans_restart_journal_res_get(trans->ip); + trace_trans_restart_journal_res_get(trans->ip, trace_ip); ret = -EINTR; break; case BTREE_INSERT_NEED_JOURNAL_RECLAIM: bch2_trans_unlock(trans); - wait_event(c->journal.reclaim_wait, - (ret = journal_reclaim_wait_done(c))); + trace_trans_blocked_journal_reclaim(trans->ip, trace_ip); - if (!ret && bch2_trans_relock(trans)) + wait_event_freezable(c->journal.reclaim_wait, + (ret = journal_reclaim_wait_done(c))); + if (ret < 0) + break; + + if (bch2_trans_relock(trans)) return 0; - trace_trans_restart_journal_reclaim(trans->ip); + trace_trans_restart_journal_reclaim(trans->ip, trace_ip); ret = -EINTR; break; default: @@ -747,6 +713,9 @@ int bch2_trans_commit_error(struct btree_trans *trans, break; } + BUG_ON((ret == EINTR || ret == -EAGAIN) && !trans->restarted); + BUG_ON(ret == -ENOSPC && (trans->flags & BTREE_INSERT_NOFAIL)); + return ret; } @@ -769,132 +738,111 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans) return 0; } -static void __bch2_trans_update2(struct btree_trans *trans, - struct btree_insert_entry n) -{ - struct 
btree_insert_entry *i; - - btree_insert_entry_checks(trans, &n); - - EBUG_ON(trans->nr_updates2 >= BTREE_ITER_MAX); - - n.iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT; - - trans_for_each_update2(trans, i) - if (btree_insert_entry_cmp(&n, i) <= 0) - break; - - if (i < trans->updates2 + trans->nr_updates2 && - !btree_insert_entry_cmp(&n, i)) - *i = n; - else - array_insert_item(trans->updates2, trans->nr_updates2, - i - trans->updates2, n); -} - -static void bch2_trans_update2(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_i *insert) -{ - __bch2_trans_update2(trans, (struct btree_insert_entry) { - .bkey_type = __btree_node_type(iter->level, iter->btree_id), - .btree_id = iter->btree_id, - .level = iter->level, - .iter = iter, - .k = insert, - }); -} - -static int extent_update_to_keys(struct btree_trans *trans, - struct btree_insert_entry n) +static int extent_handle_overwrites(struct btree_trans *trans, + struct btree_insert_entry *i) { - int ret; + struct bch_fs *c = trans->c; + struct btree_iter *iter, *update_iter; + struct bpos start = bkey_start_pos(&i->k->k); + struct bkey_i *update; + struct bkey_s_c k; + int ret = 0, compressed_sectors; + + iter = bch2_trans_get_iter(trans, i->btree_id, start, + BTREE_ITER_INTENT| + BTREE_ITER_WITH_UPDATES| + BTREE_ITER_NOT_EXTENTS); + k = bch2_btree_iter_peek(iter); + if (!k.k || (ret = bkey_err(k))) + goto out; - if (bkey_deleted(&n.k->k)) - return 0; + if (bch2_bkey_maybe_mergable(k.k, &i->k->k)) { + update = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); + if ((ret = PTR_ERR_OR_ZERO(update))) + goto out; - ret = bch2_extent_can_insert(trans, n.iter, n.k); - if (ret) - return ret; + bkey_reassemble(update, k); - n.iter = bch2_trans_get_iter(trans, n.iter->btree_id, n.k->k.p, - BTREE_ITER_INTENT| - BTREE_ITER_NOT_EXTENTS); - n.is_extent = false; + if (bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(i->k))) { + update_iter = bch2_trans_copy_iter(trans, iter); + ret = bch2_btree_delete_at(trans, update_iter, i->flags); + bch2_trans_iter_put(trans, update_iter); - __bch2_trans_update2(trans, n); - bch2_trans_iter_put(trans, n.iter); - return 0; -} + if (ret) + goto out; -static int extent_handle_overwrites(struct btree_trans *trans, - enum btree_id btree_id, - struct bkey_i *insert) -{ - struct btree_iter *iter, *update_iter; - struct bpos start = bkey_start_pos(&insert->k); - struct bkey_i *update; - struct bkey_s_c k; - int ret = 0; + i->k = update; + goto next; + } + } - iter = bch2_trans_get_iter(trans, btree_id, start, - BTREE_ITER_INTENT); - k = bch2_btree_iter_peek_with_updates(iter); + if (!bkey_cmp(k.k->p, bkey_start_pos(&i->k->k))) + goto next; - while (k.k && !(ret = bkey_err(k))) { - if (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0) - break; + while (bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) > 0) { + /* + * If we're going to be splitting a compressed extent, note it + * so that __bch2_trans_commit() can increase our disk + * reservation: + */ + if (bkey_cmp(bkey_start_pos(k.k), start) < 0 && + bkey_cmp(k.k->p, i->k->k.p) > 0 && + (compressed_sectors = bch2_bkey_sectors_compressed(k))) + trans->extra_journal_res += compressed_sectors; if (bkey_cmp(bkey_start_pos(k.k), start) < 0) { update = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); if ((ret = PTR_ERR_OR_ZERO(update))) - break; + goto out; bkey_reassemble(update, k); bch2_cut_back(start, update); - update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p, + update_iter = bch2_trans_get_iter(trans, i->btree_id, update->k.p, BTREE_ITER_NOT_EXTENTS| 
BTREE_ITER_INTENT); - bch2_trans_update2(trans, update_iter, update); + ret = bch2_btree_iter_traverse(update_iter); + if (ret) { + bch2_trans_iter_put(trans, update_iter); + goto out; + } + + bch2_trans_update(trans, update_iter, update, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE| + i->flags); bch2_trans_iter_put(trans, update_iter); } - if (bkey_cmp(k.k->p, insert->k.p) < 0 || - (!bkey_cmp(k.k->p, insert->k.p) && bkey_deleted(&insert->k))) { - update = bch2_trans_kmalloc(trans, sizeof(struct bkey)); - if ((ret = PTR_ERR_OR_ZERO(update))) - break; - - bkey_init(&update->k); - update->k.p = k.k->p; - - update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p, - BTREE_ITER_NOT_EXTENTS| - BTREE_ITER_INTENT); - bch2_trans_update2(trans, update_iter, update); + if (bkey_cmp(k.k->p, i->k->k.p) <= 0) { + update_iter = bch2_trans_copy_iter(trans, iter); + ret = bch2_btree_delete_at(trans, update_iter, + i->flags); bch2_trans_iter_put(trans, update_iter); + + if (ret) + goto out; } - if (bkey_cmp(k.k->p, insert->k.p) > 0) { + if (bkey_cmp(k.k->p, i->k->k.p) > 0) { update = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); if ((ret = PTR_ERR_OR_ZERO(update))) - break; + goto out; bkey_reassemble(update, k); - bch2_cut_front(insert->k.p, update); + bch2_cut_front(i->k->k.p, update); - update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p, - BTREE_ITER_NOT_EXTENTS| - BTREE_ITER_INTENT); - bch2_trans_update2(trans, update_iter, update); - bch2_trans_iter_put(trans, update_iter); - break; + bch2_trans_update(trans, iter, update, i->flags); + goto out; } - - k = bch2_btree_iter_next_with_updates(iter); +next: + k = bch2_btree_iter_next(iter); + if (!k.k || (ret = bkey_err(k))) + goto out; } + + bch2_bkey_merge(c, bkey_i_to_s(i->k), k); +out: bch2_trans_iter_put(trans, iter); return ret; @@ -905,10 +853,11 @@ int __bch2_trans_commit(struct btree_trans *trans) struct btree_insert_entry *i = NULL; struct btree_iter *iter; bool trans_trigger_run; - unsigned u64s, reset_flags = 0; + unsigned u64s; int ret = 0; - if (!trans->nr_updates) + if (!trans->nr_updates && + !trans->extra_journal_entry_u64s) goto out_reset; if (trans->flags & BTREE_INSERT_GC_LOCK_HELD) @@ -929,7 +878,7 @@ int __bch2_trans_commit(struct btree_trans *trans) #ifdef CONFIG_BCACHEFS_DEBUG trans_for_each_update(trans, i) if (btree_iter_type(i->iter) != BTREE_ITER_CACHED && - !(i->trigger_flags & BTREE_TRIGGER_NORUN)) + !(i->flags & BTREE_TRIGGER_NORUN)) bch2_btree_key_cache_verify_clean(trans, i->btree_id, i->k->k.p); #endif @@ -947,42 +896,27 @@ int __bch2_trans_commit(struct btree_trans *trans) i->trans_triggers_run = true; trans_trigger_run = true; - ret = bch2_trans_mark_update(trans, i->iter, i->k, - i->trigger_flags); + ret = bch2_trans_mark_update(trans, i->iter, + i->k, i->flags); if (unlikely(ret)) { if (ret == -EINTR) - trace_trans_restart_mark(trans->ip); + trace_trans_restart_mark(trans->ip, _RET_IP_, + i->iter->btree_id, + &i->iter->pos); goto out; } } } } while (trans_trigger_run); - /* Turn extents updates into keys: */ - trans_for_each_update(trans, i) - if (i->is_extent) { - ret = extent_handle_overwrites(trans, i->btree_id, i->k); - if (unlikely(ret)) - goto out; - } - trans_for_each_update(trans, i) { - ret = i->is_extent - ? 
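extent_handle_overwrites() above trims or drops existing extents that overlap the new key: an extent starting before the new key is cut back to the new key's start, one whose end does not extend past the new key is deleted, and one extending past the new key's end is cut front to the new key's end. A sketch of the same interval arithmetic on plain half-open ranges; struct range, cut_back() and cut_front() are illustrative stand-ins, not the bcachefs bkey helpers:

#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };          /* half-open [start, end) */

/* Keep only the part of *old that lies before offset `upto`. */
static void cut_back(struct range *old, uint64_t upto)
{
        if (old->end > upto)
                old->end = upto;
}

/* Keep only the part of *old that lies at or after offset `from`. */
static void cut_front(struct range *old, uint64_t from)
{
        if (old->start < from)
                old->start = from;
}

int main(void)
{
        struct range ins = { 10, 20 };          /* the new extent being inserted */

        struct range a = { 5, 15 };             /* overlaps the front of ins */
        cut_back(&a, ins.start);
        assert(a.start == 5 && a.end == 10);    /* keep only [5, 10) */

        struct range b = { 12, 18 };            /* entirely covered: the real code deletes it */
        assert(b.start >= ins.start && b.end <= ins.end);

        struct range c = { 15, 30 };            /* extends past the end of ins */
        cut_front(&c, ins.end);
        assert(c.start == 20 && c.end == 30);   /* keep only [20, 30) */

        return 0;
}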
extent_update_to_keys(trans, *i) - : (__bch2_trans_update2(trans, *i), 0); - if (unlikely(ret)) - goto out; - } - - trans_for_each_update2(trans, i) { - ret = bch2_btree_iter_traverse(i->iter); - if (unlikely(ret)) { - trace_trans_restart_traverse(trans->ip); - goto out; - } + BUG_ON(!i->iter->should_be_locked); if (unlikely(!bch2_btree_iter_upgrade(i->iter, i->level + 1))) { - trace_trans_restart_upgrade(trans->ip); + trace_trans_restart_upgrade(trans->ip, _RET_IP_, + i->iter->btree_id, + &i->iter->pos); + trans->restarted = true; ret = -EINTR; goto out; } @@ -995,10 +929,20 @@ int __bch2_trans_commit(struct btree_trans *trans) trans->journal_preres_u64s += u64s; trans->journal_u64s += u64s; } + + if (trans->extra_journal_res) { + ret = bch2_disk_reservation_add(trans->c, trans->disk_res, + trans->extra_journal_res, + (trans->flags & BTREE_INSERT_NOFAIL) + ? BCH_DISK_RESERVATION_NOFAIL : 0); + if (ret) + goto err; + } retry: + BUG_ON(trans->restarted); memset(&trans->journal_res, 0, sizeof(trans->journal_res)); - ret = do_bch2_trans_commit(trans, &i); + ret = do_bch2_trans_commit(trans, &i, _RET_IP_); /* make sure we didn't drop or screw up locks: */ bch2_btree_trans_verify_locks(trans); @@ -1016,15 +960,22 @@ out: if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW))) percpu_ref_put(&trans->c->writes); out_reset: - if (!ret) - reset_flags |= TRANS_RESET_NOTRAVERSE; - if (!ret && (trans->flags & BTREE_INSERT_NOUNLOCK)) - reset_flags |= TRANS_RESET_NOUNLOCK; - bch2_trans_reset(trans, reset_flags); + trans->extra_journal_res = 0; + trans->nr_updates = 0; + trans->hooks = NULL; + trans->extra_journal_entries = NULL; + trans->extra_journal_entry_u64s = 0; + + if (trans->fs_usage_deltas) { + trans->fs_usage_deltas->used = 0; + memset(&trans->fs_usage_deltas->memset_start, 0, + (void *) &trans->fs_usage_deltas->memset_end - + (void *) &trans->fs_usage_deltas->memset_start); + } return ret; err: - ret = bch2_trans_commit_error(trans, i, ret); + ret = bch2_trans_commit_error(trans, i, ret, _RET_IP_); if (ret) goto out; @@ -1032,103 +983,80 @@ err: } int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_i *k, enum btree_trigger_flags flags) + struct bkey_i *k, enum btree_update_flags flags) { struct btree_insert_entry *i, n = (struct btree_insert_entry) { - .trigger_flags = flags, + .flags = flags, .bkey_type = __btree_node_type(iter->level, iter->btree_id), .btree_id = iter->btree_id, .level = iter->level, - .is_extent = (iter->flags & BTREE_ITER_IS_EXTENTS) != 0, .iter = iter, .k = k }; + bool is_extent = (iter->flags & BTREE_ITER_IS_EXTENTS) != 0; + int ret = 0; BUG_ON(trans->nr_updates >= BTREE_ITER_MAX); + BUG_ON(!iter->should_be_locked); #ifdef CONFIG_BCACHEFS_DEBUG - BUG_ON(bkey_cmp(iter->pos, - n.is_extent ? bkey_start_pos(&k->k) : k->k.p)); - - trans_for_each_update(trans, i) { - BUG_ON(bkey_cmp(i->iter->pos, - i->is_extent ? 
bkey_start_pos(&i->k->k) : i->k->k.p)); - + trans_for_each_update(trans, i) BUG_ON(i != trans->updates && btree_insert_entry_cmp(i - 1, i) >= 0); - } #endif - iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT; + if (is_extent) { + ret = extent_handle_overwrites(trans, &n); + if (ret) + return ret; - if (n.is_extent) { iter->pos_after_commit = k->k.p; iter->flags |= BTREE_ITER_SET_POS_AFTER_COMMIT; + + if (bkey_deleted(&n.k->k)) + return 0; + + n.iter = bch2_trans_get_iter(trans, n.btree_id, n.k->k.p, + BTREE_ITER_INTENT| + BTREE_ITER_NOT_EXTENTS); + ret = bch2_btree_iter_traverse(n.iter); + bch2_trans_iter_put(trans, n.iter); + + if (ret) + return ret; } + BUG_ON(n.iter->flags & BTREE_ITER_IS_EXTENTS); + + n.iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT; + /* * Pending updates are kept sorted: first, find position of new update, * then delete/trim any updates the new update overwrites: */ - if (!n.is_extent) { - trans_for_each_update(trans, i) - if (btree_insert_entry_cmp(&n, i) <= 0) - break; - - if (i < trans->updates + trans->nr_updates && - !btree_insert_entry_cmp(&n, i)) - *i = n; - else - array_insert_item(trans->updates, trans->nr_updates, - i - trans->updates, n); - } else { - trans_for_each_update(trans, i) - if (btree_insert_entry_cmp(&n, i) < 0) - break; - - while (i > trans->updates && - i[-1].btree_id == n.btree_id && - bkey_cmp(bkey_start_pos(&n.k->k), - bkey_start_pos(&i[-1].k->k)) <= 0) { - --i; - array_remove_item(trans->updates, trans->nr_updates, - i - trans->updates); - } + trans_for_each_update(trans, i) + if (btree_insert_entry_cmp(&n, i) <= 0) + break; - if (i > trans->updates && - i[-1].btree_id == n.btree_id && - bkey_cmp(bkey_start_pos(&n.k->k), i[-1].k->k.p) < 0) - bch2_cut_back(bkey_start_pos(&n.k->k), i[-1].k); - - if (i < trans->updates + trans->nr_updates && - i->btree_id == n.btree_id && - bkey_cmp(n.k->k.p, bkey_start_pos(&i->k->k)) > 0) { - /* We don't handle splitting extents here: */ - BUG_ON(bkey_cmp(bkey_start_pos(&n.k->k), - bkey_start_pos(&i->k->k)) > 0); - - /* - * When we have an extent that overwrites the start of another - * update, trimming that extent will mean the iterator's - * position has to change since the iterator position has to - * match the extent's start pos - but we don't want to change - * the iterator pos if some other code is using it, so we may - * need to clone it: - */ - if (btree_iter_live(trans, i->iter)) { - i->iter = bch2_trans_copy_iter(trans, i->iter); - - i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT; - bch2_trans_iter_put(trans, i->iter); - } + if (i < trans->updates + trans->nr_updates && + !btree_insert_entry_cmp(&n, i)) { + BUG_ON(i->trans_triggers_run); - bch2_cut_front(n.k->k.p, i->k); - bch2_btree_iter_set_pos(i->iter, n.k->k.p); + /* + * This is a hack to ensure that inode creates update the btree, + * not the key cache, which helps with cache coherency issues in + * other areas: + */ + if (btree_iter_type(n.iter) == BTREE_ITER_CACHED && + btree_iter_type(i->iter) != BTREE_ITER_CACHED) { + i->k = n.k; + i->flags = n.flags; + } else { + *i = n; } - + } else array_insert_item(trans->updates, trans->nr_updates, i - trans->updates, n); - } return 0; } @@ -1172,16 +1100,17 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id, } int bch2_btree_delete_at(struct btree_trans *trans, - struct btree_iter *iter, unsigned flags) + struct btree_iter *iter, unsigned update_flags) { - struct bkey_i k; + struct bkey_i *k; - bkey_init(&k.k); - k.k.p = iter->pos; + k = bch2_trans_kmalloc(trans, sizeof(*k)); + if (IS_ERR(k)) + 
return PTR_ERR(k); - bch2_trans_update(trans, iter, &k, 0); - return bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL|flags); + bkey_init(&k->k); + k->k.p = iter->pos; + return bch2_trans_update(trans, iter, k, update_flags); } int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, @@ -1194,13 +1123,12 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, iter = bch2_trans_get_iter(trans, id, start, BTREE_ITER_INTENT); retry: - while ((k = bch2_btree_iter_peek(iter)).k && + while ((bch2_trans_begin(trans), + (k = bch2_btree_iter_peek(iter)).k) && !(ret = bkey_err(k)) && bkey_cmp(iter->pos, end) < 0) { struct bkey_i delete; - bch2_trans_begin(trans); - bkey_init(&delete.k); /* @@ -1232,8 +1160,8 @@ retry: break; } - bch2_trans_update(trans, iter, &delete, 0); - ret = bch2_trans_commit(trans, NULL, journal_seq, + ret = bch2_trans_update(trans, iter, &delete, 0) ?: + bch2_trans_commit(trans, NULL, journal_seq, BTREE_INSERT_NOFAIL); if (ret) break; diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c index 31f7617..76945e5 100644 --- a/libbcachefs/buckets.c +++ b/libbcachefs/buckets.c @@ -3,64 +3,6 @@ * Code for manipulating bucket marks for garbage collection. * * Copyright 2014 Datera, Inc. - * - * Bucket states: - * - free bucket: mark == 0 - * The bucket contains no data and will not be read - * - * - allocator bucket: owned_by_allocator == 1 - * The bucket is on a free list, or it is an open bucket - * - * - cached bucket: owned_by_allocator == 0 && - * dirty_sectors == 0 && - * cached_sectors > 0 - * The bucket contains data but may be safely discarded as there are - * enough replicas of the data on other cache devices, or it has been - * written back to the backing device - * - * - dirty bucket: owned_by_allocator == 0 && - * dirty_sectors > 0 - * The bucket contains data that we must not discard (either only copy, - * or one of the 'main copies' for data requiring multiple replicas) - * - * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1 - * This is a btree node, journal or gen/prio bucket - * - * Lifecycle: - * - * bucket invalidated => bucket on freelist => open bucket => - * [dirty bucket =>] cached bucket => bucket invalidated => ... - * - * Note that cache promotion can skip the dirty bucket step, as data - * is copied from a deeper tier to a shallower tier, onto a cached - * bucket. - * Note also that a cached bucket can spontaneously become dirty -- - * see below. - * - * Only a traversal of the key space can determine whether a bucket is - * truly dirty or cached. 
- * - * Transitions: - * - * - free => allocator: bucket was invalidated - * - cached => allocator: bucket was invalidated - * - * - allocator => dirty: open bucket was filled up - * - allocator => cached: open bucket was filled up - * - allocator => metadata: metadata was allocated - * - * - dirty => cached: dirty sectors were copied to a deeper tier - * - dirty => free: dirty sectors were overwritten or moved (copy gc) - * - cached => free: cached sectors were overwritten - * - * - metadata => free: metadata was freed - * - * Oddities: - * - cached => dirty: a device was removed so formerly replicated data - * is no longer sufficiently replicated - * - free => cached: cannot happen - * - free => dirty: cannot happen - * - free => metadata: cannot happen */ #include "bcachefs.h" @@ -72,6 +14,7 @@ #include "ec.h" #include "error.h" #include "movinggc.h" +#include "reflink.h" #include "replicas.h" #include @@ -229,7 +172,7 @@ struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c) percpu_down_read(&c->mark_lock); ret = kmalloc(sizeof(struct bch_fs_usage_online) + - sizeof(u64) + c->replicas.nr, GFP_NOFS); + sizeof(u64) * c->replicas.nr, GFP_NOFS); if (unlikely(!ret)) { percpu_up_read(&c->mark_lock); return NULL; @@ -315,18 +258,11 @@ void bch2_fs_usage_to_text(struct printbuf *out, } } -#define RESERVE_FACTOR 6 - static u64 reserve_factor(u64 r) { return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR); } -static u64 avail_factor(u64 r) -{ - return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1); -} - u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage) { return min(fs_usage->u.hidden + @@ -415,17 +351,16 @@ static inline void account_bucket(struct bch_fs_usage *fs_usage, } static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, - struct bch_fs_usage *fs_usage, struct bucket_mark old, struct bucket_mark new, u64 journal_seq, bool gc) { + struct bch_fs_usage *fs_usage; struct bch_dev_usage *u; percpu_rwsem_assert_held(&c->mark_lock); preempt_disable(); - if (!fs_usage) - fs_usage = fs_usage_ptr(c, journal_seq, gc); + fs_usage = fs_usage_ptr(c, journal_seq, gc); u = dev_usage_ptr(ca, journal_seq, gc); if (bucket_type(old)) @@ -454,28 +389,48 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, bch2_wake_allocator(ca); } -static inline void update_replicas(struct bch_fs *c, - struct bch_fs_usage *fs_usage, - struct bch_replicas_entry *r, - s64 sectors) +static inline int __update_replicas(struct bch_fs *c, + struct bch_fs_usage *fs_usage, + struct bch_replicas_entry *r, + s64 sectors) +{ + int idx = bch2_replicas_entry_idx(c, r); + + if (idx < 0) + return -1; + + fs_usage_data_type_to_base(fs_usage, r->data_type, sectors); + fs_usage->replicas[idx] += sectors; + return 0; +} + +static inline int update_replicas(struct bch_fs *c, + struct bch_replicas_entry *r, s64 sectors, + unsigned journal_seq, bool gc) { + struct bch_fs_usage __percpu *fs_usage; int idx = bch2_replicas_entry_idx(c, r); - BUG_ON(idx < 0); + if (idx < 0) + return -1; + preempt_disable(); + fs_usage = fs_usage_ptr(c, journal_seq, gc); fs_usage_data_type_to_base(fs_usage, r->data_type, sectors); fs_usage->replicas[idx] += sectors; + preempt_enable(); + return 0; } -static inline void update_cached_sectors(struct bch_fs *c, - struct bch_fs_usage *fs_usage, - unsigned dev, s64 sectors) +static inline int update_cached_sectors(struct bch_fs *c, + unsigned dev, s64 sectors, + unsigned journal_seq, bool gc) { struct bch_replicas_padded r; 
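reserve_factor() above pads a sector count by roughly 1/2^RESERVE_FACTOR; with the factor of 6 that the removed #define used, that works out to about 1.6%. A standalone sketch of the same arithmetic with worked numbers (ROUND_UP_POW2 is a local stand-in for the kernel's round_up() macro):

#include <assert.h>
#include <stdint.h>

#define RESERVE_FACTOR  6       /* the value the old #define in buckets.c used */

/* Round x up to a multiple of y, where y is a power of two. */
#define ROUND_UP_POW2(x, y)     (((x) + (y) - 1) & ~((uint64_t)(y) - 1))

static uint64_t reserve_factor(uint64_t r)
{
        return r + (ROUND_UP_POW2(r, 1ULL << RESERVE_FACTOR) >> RESERVE_FACTOR);
}

int main(void)
{
        /* 1000 sectors round up to 1024; 1024 >> 6 == 16 sectors of padding: */
        assert(reserve_factor(1000) == 1016);
        /* exact multiples of 64 get exactly r/64 sectors of padding: */
        assert(reserve_factor(6400) == 6500);
        return 0;
}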
bch2_replicas_entry_cached(&r.e, dev); - update_replicas(c, fs_usage, &r.e, sectors); + return update_replicas(c, &r.e, sectors, journal_seq, gc); } static struct replicas_delta_list * @@ -483,10 +438,26 @@ replicas_deltas_realloc(struct btree_trans *trans, unsigned more) { struct replicas_delta_list *d = trans->fs_usage_deltas; unsigned new_size = d ? (d->size + more) * 2 : 128; + unsigned alloc_size = sizeof(*d) + new_size; + + WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX); if (!d || d->used + more > d->size) { - d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO); - BUG_ON(!d); + d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO); + + BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX); + + if (!d) { + d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO); + memset(d, 0, REPLICAS_DELTA_LIST_MAX); + + if (trans->fs_usage_deltas) + memcpy(d, trans->fs_usage_deltas, + trans->fs_usage_deltas->size + sizeof(*d)); + + new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d); + kfree(trans->fs_usage_deltas); + } d->size = new_size; trans->fs_usage_deltas = d; @@ -538,38 +509,21 @@ static inline void update_cached_sectors_list(struct btree_trans *trans, ret; \ }) -static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, - size_t b, bool owned_by_allocator, - bool gc) +void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, + size_t b, bool owned_by_allocator) { - struct bucket *g = __bucket(ca, b, gc); + struct bucket *g = bucket(ca, b); struct bucket_mark old, new; old = bucket_cmpxchg(g, new, ({ new.owned_by_allocator = owned_by_allocator; })); - BUG_ON(!gc && - !owned_by_allocator && !old.owned_by_allocator); - - return 0; -} - -void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, - size_t b, bool owned_by_allocator, - struct gc_pos pos, unsigned flags) -{ - preempt_disable(); - - do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags, - ca, b, owned_by_allocator); - - preempt_enable(); + BUG_ON(owned_by_allocator == old.owned_by_allocator); } static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c old, struct bkey_s_c new, - struct bch_fs_usage *fs_usage, u64 journal_seq, unsigned flags) { bool gc = flags & BTREE_TRIGGER_GC; @@ -611,7 +565,7 @@ static int bch2_mark_alloc(struct bch_fs *c, } })); - bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc); + bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc); g->io_time[READ] = u.read_time; g->io_time[WRITE] = u.write_time; @@ -627,8 +581,12 @@ static int bch2_mark_alloc(struct bch_fs *c, if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) && old_m.cached_sectors) { - update_cached_sectors(c, fs_usage, ca->dev_idx, - -old_m.cached_sectors); + if (update_cached_sectors(c, ca->dev_idx, -old_m.cached_sectors, + journal_seq, gc)) { + bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors"); + return -1; + } + trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset), old_m.cached_sectors); } @@ -675,8 +633,7 @@ static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, old.dirty_sectors, sectors); if (c) - bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc), - old, new, 0, gc); + bch2_dev_usage_update(c, ca, old, new, 0, gc); return 0; } @@ -689,7 +646,11 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, BUG_ON(type != BCH_DATA_sb && type != BCH_DATA_journal); - preempt_disable(); + /* + * Backup superblock might be past the end of our normal usable space: + */ + if (b >= ca->mi.nbuckets) + return; if (likely(c)) { 
do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags, @@ -697,46 +658,14 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, } else { __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0); } - - preempt_enable(); } -static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors) -{ - return DIV_ROUND_UP(sectors * n, d); -} - -static s64 __ptr_disk_sectors_delta(unsigned old_size, - unsigned offset, s64 delta, - unsigned flags, - unsigned n, unsigned d) -{ - BUG_ON(!n || !d); - - if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) { - BUG_ON(offset + -delta > old_size); - - return -disk_sectors_scaled(n, d, old_size) + - disk_sectors_scaled(n, d, offset) + - disk_sectors_scaled(n, d, old_size - offset + delta); - } else if (flags & BTREE_TRIGGER_OVERWRITE) { - BUG_ON(offset + -delta > old_size); - - return -disk_sectors_scaled(n, d, old_size) + - disk_sectors_scaled(n, d, old_size + delta); - } else { - return disk_sectors_scaled(n, d, delta); - } -} - -static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p, - unsigned offset, s64 delta, - unsigned flags) +static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p) { - return __ptr_disk_sectors_delta(p.crc.live_size, - offset, delta, flags, - p.crc.compressed_size, - p.crc.uncompressed_size); + return p.crc.compression_type + ? DIV_ROUND_UP(sectors * p.crc.compressed_size, + p.crc.uncompressed_size) + : sectors; } static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k, @@ -815,7 +744,6 @@ static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k, static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k, unsigned ptr_idx, - struct bch_fs_usage *fs_usage, u64 journal_seq, unsigned flags) { const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; @@ -832,7 +760,7 @@ static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k, if (g->stripe && g->stripe != k.k->p.offset) { bch2_fs_inconsistent(c, "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s", - ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen, + ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen, (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)); return -EINVAL; } @@ -857,7 +785,7 @@ static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k, g->stripe = k.k->p.offset; g->stripe_redundancy = s->nr_redundant; - bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc); + bch2_dev_usage_update(c, ca, old, new, journal_seq, gc); return 0; } @@ -886,7 +814,6 @@ static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k, static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k, struct extent_ptr_decoded p, s64 sectors, enum bch_data_type data_type, - struct bch_fs_usage *fs_usage, u64 journal_seq, unsigned flags) { bool gc = flags & BTREE_TRIGGER_GC; @@ -924,7 +851,7 @@ static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k, old.v.counter, new.v.counter)) != old.v.counter); - bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc); + bch2_dev_usage_update(c, ca, old, new, journal_seq, gc); BUG_ON(!gc && bucket_became_unavailable(old, new)); @@ -934,8 +861,8 @@ static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k, static int bch2_mark_stripe_ptr(struct bch_fs *c, struct bch_extent_stripe_ptr p, enum bch_data_type data_type, - struct bch_fs_usage *fs_usage, - s64 sectors, unsigned flags) + s64 sectors, + unsigned journal_seq, unsigned flags) { bool gc = flags & BTREE_TRIGGER_GC; struct bch_replicas_padded r; @@ -950,6 +877,7 @@ static int bch2_mark_stripe_ptr(struct bch_fs 
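ptr_disk_sectors() above replaces the old delta-based helpers: for a compressed pointer it scales live sectors by compressed_size/uncompressed_size, rounding up. A standalone sketch of that scaling with worked numbers; scaled_disk_sectors() and its plain integer parameters stand in for the real helper and struct extent_ptr_decoded:

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/*
 * Sectors of live (uncompressed) data -> sectors actually occupied on disk,
 * given the extent's compressed and uncompressed sizes in sectors.
 * compression_type == 0 means "not compressed".
 */
static int64_t scaled_disk_sectors(int64_t sectors, unsigned compression_type,
                                   unsigned compressed_size,
                                   unsigned uncompressed_size)
{
        return compression_type
                ? DIV_ROUND_UP(sectors * compressed_size, uncompressed_size)
                : sectors;
}

int main(void)
{
        /* 128 live sectors compressed down to 32 on disk: */
        assert(scaled_disk_sectors(128, 1, 32, 128) == 32);
        /* marking half of it charges a quarter of the live size: */
        assert(scaled_disk_sectors(64, 1, 32, 128) == 16);
        /* uncompressed extents are charged 1:1: */
        assert(scaled_disk_sectors(64, 0, 32, 128) == 64);
        return 0;
}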
*c, spin_unlock(&c->ec_stripes_heap_lock); bch_err_ratelimited(c, "pointer to nonexistent stripe %llu", (u64) p.idx); + bch2_inconsistent_error(c); return -EIO; } @@ -969,40 +897,46 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c, spin_unlock(&c->ec_stripes_heap_lock); r.e.data_type = data_type; - update_replicas(c, fs_usage, &r.e, sectors); + update_replicas(c, &r.e, sectors, journal_seq, gc); return 0; } static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c old, struct bkey_s_c new, - unsigned offset, s64 sectors, - enum bch_data_type data_type, - struct bch_fs_usage *fs_usage, unsigned journal_seq, unsigned flags) { + bool gc = flags & BTREE_TRIGGER_GC; struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; struct bch_replicas_padded r; + enum bch_data_type data_type = bkey_is_btree_ptr(k.k) + ? BCH_DATA_btree + : BCH_DATA_user; + s64 sectors = bkey_is_btree_ptr(k.k) + ? c->opts.btree_node_size + : k.k->size; s64 dirty_sectors = 0; bool stale; int ret; + BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) == + (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)); + + if (flags & BTREE_TRIGGER_OVERWRITE) + sectors = -sectors; + r.e.data_type = data_type; r.e.nr_devs = 0; r.e.nr_required = 1; - BUG_ON(!sectors); - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - s64 disk_sectors = data_type == BCH_DATA_btree - ? sectors - : ptr_disk_sectors_delta(p, offset, sectors, flags); + s64 disk_sectors = ptr_disk_sectors(sectors, p); ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type, - fs_usage, journal_seq, flags); + journal_seq, flags); if (ret < 0) return ret; @@ -1010,14 +944,18 @@ static int bch2_mark_extent(struct bch_fs *c, if (p.ptr.cached) { if (!stale) - update_cached_sectors(c, fs_usage, p.ptr.dev, - disk_sectors); + if (update_cached_sectors(c, p.ptr.dev, disk_sectors, + journal_seq, gc)) { + bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors"); + return -1; + + } } else if (!p.has_ec) { dirty_sectors += disk_sectors; r.e.devs[r.e.nr_devs++] = p.ptr.dev; } else { ret = bch2_mark_stripe_ptr(c, p.ec, data_type, - fs_usage, disk_sectors, flags); + disk_sectors, journal_seq, flags); if (ret) return ret; @@ -1030,16 +968,22 @@ static int bch2_mark_extent(struct bch_fs *c, } } - if (r.e.nr_devs) - update_replicas(c, fs_usage, &r.e, dirty_sectors); + if (r.e.nr_devs) { + if (update_replicas(c, &r.e, dirty_sectors, journal_seq, gc)) { + char buf[200]; + + bch2_bkey_val_to_text(&PBUF(buf), c, k); + bch2_fs_fatal_error(c, "no replicas entry for %s", buf); + return -1; + } + } return 0; } static int bch2_mark_stripe(struct bch_fs *c, - struct bkey_s_c old, struct bkey_s_c new, - struct bch_fs_usage *fs_usage, - u64 journal_seq, unsigned flags) + struct bkey_s_c old, struct bkey_s_c new, + u64 journal_seq, unsigned flags) { bool gc = flags & BTREE_TRIGGER_GC; size_t idx = new.k->p.offset; @@ -1056,6 +1000,7 @@ static int bch2_mark_stripe(struct bch_fs *c, if (!m || (old_s && !m->alive)) { bch_err_ratelimited(c, "error marking nonexistent stripe %zu", idx); + bch2_inconsistent_error(c); return -1; } @@ -1099,14 +1044,128 @@ static int bch2_mark_stripe(struct bch_fs *c, m->blocks_nonempty = 0; for (i = 0; i < new_s->nr_blocks; i++) { - ret = mark_stripe_bucket(c, new, i, fs_usage, - journal_seq, flags); + ret = mark_stripe_bucket(c, new, i, journal_seq, flags); if (ret) return ret; } - update_replicas(c, fs_usage, 
&m->r.e, - ((s64) m->sectors * m->nr_redundant)); + if (update_replicas(c, &m->r.e, + ((s64) m->sectors * m->nr_redundant), + journal_seq, gc)) { + char buf[200]; + + bch2_bkey_val_to_text(&PBUF(buf), c, new); + bch2_fs_fatal_error(c, "no replicas entry for %s", buf); + return -1; + } + } + + return 0; +} + +static int bch2_mark_inode(struct bch_fs *c, + struct bkey_s_c old, struct bkey_s_c new, + u64 journal_seq, unsigned flags) +{ + struct bch_fs_usage __percpu *fs_usage; + + preempt_disable(); + fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC); + fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode; + fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode; + preempt_enable(); + return 0; +} + +static int bch2_mark_reservation(struct bch_fs *c, + struct bkey_s_c old, struct bkey_s_c new, + u64 journal_seq, unsigned flags) +{ + struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old; + struct bch_fs_usage __percpu *fs_usage; + unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; + s64 sectors = (s64) k.k->size; + + if (flags & BTREE_TRIGGER_OVERWRITE) + sectors = -sectors; + sectors *= replicas; + + preempt_disable(); + fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC); + replicas = clamp_t(unsigned, replicas, 1, + ARRAY_SIZE(fs_usage->persistent_reserved)); + + fs_usage->reserved += sectors; + fs_usage->persistent_reserved[replicas - 1] += sectors; + preempt_enable(); + + return 0; +} + +static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p, + u64 idx, unsigned flags, size_t *r_idx) +{ + struct reflink_gc *r; + int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1; + + while (1) { + if (*r_idx >= c->reflink_gc_nr) + goto not_found; + r = genradix_ptr(&c->reflink_gc_table, *r_idx); + BUG_ON(!r); + + if (idx < r->offset) + break; + (*r_idx)++; + } + + BUG_ON((s64) r->refcount + add < 0); + + r->refcount += add; + return r->offset - idx; +not_found: + bch2_fs_inconsistent(c, + "%llu:%llu len %u points to nonexistent indirect extent %llu", + p.k->p.inode, p.k->p.offset, p.k->size, idx); + bch2_inconsistent_error(c); + return -EIO; +} + +static int bch2_mark_reflink_p(struct bch_fs *c, + struct bkey_s_c old, struct bkey_s_c new, + u64 journal_seq, unsigned flags) +{ + struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old; + struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); + struct reflink_gc *ref; + size_t l, r, m; + u64 idx = le64_to_cpu(p.v->idx); + unsigned sectors = p.k->size; + s64 ret = 0; + + BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) == + (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)); + + l = 0; + r = c->reflink_gc_nr; + while (l < r) { + m = l + (r - l) / 2; + + ref = genradix_ptr(&c->reflink_gc_table, m); + if (ref->offset <= idx) + l = m + 1; + else + r = m; + } + + while (sectors) { + ret = __bch2_mark_reflink_p(c, p, idx, flags, &l); + if (ret < 0) + return ret; + + ret = min_t(s64, ret, sectors); + idx += ret; + sectors -= ret; } return 0; @@ -1115,95 +1174,55 @@ static int bch2_mark_stripe(struct bch_fs *c, static int bch2_mark_key_locked(struct bch_fs *c, struct bkey_s_c old, struct bkey_s_c new, - unsigned offset, s64 sectors, - struct bch_fs_usage *fs_usage, u64 journal_seq, unsigned flags) { struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? 
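bch2_mark_reflink_p() above binary-searches the reflink_gc table for the first entry whose end offset is greater than idx, then walks forward as sectors are consumed. A compact sketch of that lower-bound search over a plain sorted array; struct gc_ent and first_covering() are illustrative, not the genradix the patch uses:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct gc_ent { uint64_t offset; };     /* end offset of each indirect extent, sorted */

/* Index of the first entry with offset > idx, i.e. the entry covering idx. */
static size_t first_covering(const struct gc_ent *tbl, size_t nr, uint64_t idx)
{
        size_t l = 0, r = nr;

        while (l < r) {
                size_t m = l + (r - l) / 2;

                if (tbl[m].offset <= idx)
                        l = m + 1;
                else
                        r = m;
        }
        return l;
}

int main(void)
{
        const struct gc_ent tbl[] = { { 8 }, { 24 }, { 40 } };

        assert(first_covering(tbl, 3, 0)  == 0);        /* covered by [0, 8)   */
        assert(first_covering(tbl, 3, 8)  == 1);        /* covered by [8, 24)  */
        assert(first_covering(tbl, 3, 30) == 2);        /* covered by [24, 40) */
        assert(first_covering(tbl, 3, 40) == 3);        /* past the table      */
        return 0;
}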
new : old; - int ret = 0; BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE))); - preempt_disable(); - - if (!fs_usage || (flags & BTREE_TRIGGER_GC)) - fs_usage = fs_usage_ptr(c, journal_seq, - flags & BTREE_TRIGGER_GC); - switch (k.k->type) { case KEY_TYPE_alloc: case KEY_TYPE_alloc_v2: - ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags); - break; + return bch2_mark_alloc(c, old, new, journal_seq, flags); case KEY_TYPE_btree_ptr: case KEY_TYPE_btree_ptr_v2: - sectors = !(flags & BTREE_TRIGGER_OVERWRITE) - ? c->opts.btree_node_size - : -c->opts.btree_node_size; - - ret = bch2_mark_extent(c, old, new, offset, sectors, - BCH_DATA_btree, fs_usage, journal_seq, flags); - break; case KEY_TYPE_extent: case KEY_TYPE_reflink_v: - ret = bch2_mark_extent(c, old, new, offset, sectors, - BCH_DATA_user, fs_usage, journal_seq, flags); - break; + return bch2_mark_extent(c, old, new, journal_seq, flags); case KEY_TYPE_stripe: - ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags); - break; + return bch2_mark_stripe(c, old, new, journal_seq, flags); case KEY_TYPE_inode: - fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode; - fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode; - break; - case KEY_TYPE_reservation: { - unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; - - sectors *= replicas; - replicas = clamp_t(unsigned, replicas, 1, - ARRAY_SIZE(fs_usage->persistent_reserved)); - - fs_usage->reserved += sectors; - fs_usage->persistent_reserved[replicas - 1] += sectors; - break; - } + return bch2_mark_inode(c, old, new, journal_seq, flags); + case KEY_TYPE_reservation: + return bch2_mark_reservation(c, old, new, journal_seq, flags); + case KEY_TYPE_reflink_p: + return bch2_mark_reflink_p(c, old, new, journal_seq, flags); + default: + return 0; } - - preempt_enable(); - - return ret; } -int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new, - unsigned offset, s64 sectors, - struct bch_fs_usage *fs_usage, - u64 journal_seq, unsigned flags) +int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new, unsigned flags) { - struct bkey deleted; + struct bkey deleted = KEY(0, 0, 0); struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL }; int ret; - bkey_init(&deleted); - percpu_down_read(&c->mark_lock); - ret = bch2_mark_key_locked(c, old, new, offset, sectors, - fs_usage, journal_seq, - BTREE_TRIGGER_INSERT|flags); + ret = bch2_mark_key_locked(c, old, new, 0, flags); percpu_up_read(&c->mark_lock); return ret; } -int bch2_mark_update(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_i *new, - struct bch_fs_usage *fs_usage, - unsigned flags) +int bch2_mark_update(struct btree_trans *trans, struct btree_iter *iter, + struct bkey_i *new, unsigned flags) { struct bch_fs *c = trans->c; + struct bkey _deleted = KEY(0, 0, 0); + struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL }; struct bkey_s_c old; - struct bkey unpacked; - int ret = 0; + int iter_flags, ret; if (unlikely(flags & BTREE_TRIGGER_NORUN)) return 0; @@ -1211,87 +1230,36 @@ int bch2_mark_update(struct btree_trans *trans, if (!btree_node_type_needs_gc(iter->btree_id)) return 0; - bkey_init(&unpacked); - old = (struct bkey_s_c) { &unpacked, NULL }; + if (likely(!(iter->flags & BTREE_ITER_CACHED_NOFILL))) { + iter_flags = iter->flags & BTREE_ITER_WITH_UPDATES; + iter->flags &= ~BTREE_ITER_WITH_UPDATES; - if (!btree_node_type_is_extents(iter->btree_id)) { - /* iterators should be uptodate, shouldn't get errors here: */ - if (btree_iter_type(iter) != BTREE_ITER_CACHED) { - 
old = bch2_btree_iter_peek_slot(iter); - BUG_ON(bkey_err(old)); - } else { - struct bkey_cached *ck = (void *) iter->l[0].b; + old = bch2_btree_iter_peek_slot(iter); + iter->flags |= iter_flags; - if (ck->valid) - old = bkey_i_to_s_c(ck->k); - } + ret = bkey_err(old); + if (ret) + return ret; + } else { + /* + * If BTREE_ITER_CACHED_NOFILL was used, we better not be + * running triggers that do anything on removal (alloc btree): + */ + old = deleted; + } - if (old.k->type == new->k.type) { - bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0, - fs_usage, trans->journal_res.seq, + if (old.k->type == new->k.type && + ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) { + ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), + trans->journal_res.seq, BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags); - - } else { - bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0, - fs_usage, trans->journal_res.seq, - BTREE_TRIGGER_INSERT|flags); - bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0, - fs_usage, trans->journal_res.seq, - BTREE_TRIGGER_OVERWRITE|flags); - } } else { - struct btree_iter *copy; - - BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED); - bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), - 0, new->k.size, - fs_usage, trans->journal_res.seq, - BTREE_TRIGGER_INSERT|flags); - - copy = bch2_trans_copy_iter(trans, iter); - - for_each_btree_key_continue(copy, 0, old, ret) { - unsigned offset = 0; - s64 sectors = -((s64) old.k->size); - - flags |= BTREE_TRIGGER_OVERWRITE; - - if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0) - break; - - switch (bch2_extent_overlap(&new->k, old.k)) { - case BCH_EXTENT_OVERLAP_ALL: - offset = 0; - sectors = -((s64) old.k->size); - break; - case BCH_EXTENT_OVERLAP_BACK: - offset = bkey_start_offset(&new->k) - - bkey_start_offset(old.k); - sectors = bkey_start_offset(&new->k) - - old.k->p.offset; - break; - case BCH_EXTENT_OVERLAP_FRONT: - offset = 0; - sectors = bkey_start_offset(old.k) - - new->k.p.offset; - break; - case BCH_EXTENT_OVERLAP_MIDDLE: - offset = bkey_start_offset(&new->k) - - bkey_start_offset(old.k); - sectors = -((s64) new->k.size); - flags |= BTREE_TRIGGER_OVERWRITE_SPLIT; - break; - } - - BUG_ON(sectors >= 0); - - ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), - offset, sectors, fs_usage, - trans->journal_res.seq, flags) ?: 1; - if (ret <= 0) - break; - } - bch2_trans_iter_put(trans, copy); + ret = bch2_mark_key_locked(c, deleted, bkey_i_to_s_c(new), + trans->journal_res.seq, + BTREE_TRIGGER_INSERT|flags) ?: + bch2_mark_key_locked(c, old, deleted, + trans->journal_res.seq, + BTREE_TRIGGER_OVERWRITE|flags); } return ret; @@ -1299,14 +1267,15 @@ int bch2_mark_update(struct btree_trans *trans, static noinline __cold void fs_usage_apply_warn(struct btree_trans *trans, - unsigned disk_res_sectors) + unsigned disk_res_sectors, + s64 should_not_have_added) { struct bch_fs *c = trans->c; struct btree_insert_entry *i; char buf[200]; - bch_err(c, "disk usage increased more than %u sectors reserved", - disk_res_sectors); + bch_err(c, "disk usage increased %lli more than %u sectors reserved", + should_not_have_added, disk_res_sectors); trans_for_each_update(trans, i) { pr_err("while inserting"); @@ -1338,6 +1307,7 @@ void fs_usage_apply_warn(struct btree_trans *trans, } } } + __WARN(); } void bch2_trans_fs_usage_apply(struct btree_trans *trans, @@ -1366,7 +1336,7 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans, added += d->delta; } - update_replicas(c, dst, &d->r, d->delta); + BUG_ON(__update_replicas(c, dst, 
&d->r, d->delta)); } dst->nr_inodes += deltas->nr_inodes; @@ -1383,7 +1353,14 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans, */ should_not_have_added = added - (s64) disk_res_sectors; if (unlikely(should_not_have_added > 0)) { - atomic64_sub(should_not_have_added, &c->sectors_available); + u64 old, new, v = atomic64_read(&c->sectors_available); + + do { + old = v; + new = max_t(s64, 0, old - should_not_have_added); + } while ((v = atomic64_cmpxchg(&c->sectors_available, + old, new)) != old); + added -= should_not_have_added; warn = true; } @@ -1396,7 +1373,7 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans, preempt_enable(); if (unlikely(warn) && !xchg(&warned_disk_usage, 1)) - fs_usage_apply_warn(trans, disk_res_sectors); + fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added); } /* trans_mark: */ @@ -1424,29 +1401,6 @@ static struct btree_iter *trans_get_update(struct btree_trans *trans, return NULL; } -static int trans_get_key(struct btree_trans *trans, - enum btree_id btree_id, struct bpos pos, - struct btree_iter **iter, - struct bkey_s_c *k) -{ - unsigned flags = btree_id != BTREE_ID_alloc - ? BTREE_ITER_SLOTS - : BTREE_ITER_CACHED; - int ret; - - *iter = trans_get_update(trans, btree_id, pos, k); - if (*iter) - return 1; - - *iter = bch2_trans_get_iter(trans, btree_id, pos, - flags|BTREE_ITER_INTENT); - *k = __bch2_btree_iter_peek(*iter, flags); - ret = bkey_err(*k); - if (ret) - bch2_trans_iter_put(trans, *iter); - return ret; -} - static struct bkey_alloc_buf * bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter, const struct bch_extent_ptr *ptr, @@ -1526,16 +1480,21 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans, struct bch_replicas_padded r; int ret = 0; - ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k); - if (ret < 0) - return ret; + iter = bch2_trans_get_iter(trans, BTREE_ID_stripes, POS(0, p.ec.idx), + BTREE_ITER_INTENT| + BTREE_ITER_WITH_UPDATES); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto err; if (k.k->type != KEY_TYPE_stripe) { bch2_fs_inconsistent(c, "pointer to nonexistent stripe %llu", (u64) p.ec.idx); + bch2_inconsistent_error(c); ret = -EIO; - goto out; + goto err; } if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) { @@ -1543,13 +1502,13 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans, "stripe pointer doesn't match stripe %llu", (u64) p.ec.idx); ret = -EIO; - goto out; + goto err; } s = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); ret = PTR_ERR_OR_ZERO(s); if (ret) - goto out; + goto err; bkey_reassemble(&s->k_i, k); stripe_blockcount_set(&s->v, p.ec.block, @@ -1560,37 +1519,44 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans, bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i)); r.e.data_type = data_type; update_replicas_list(trans, &r.e, sectors); -out: +err: bch2_trans_iter_put(trans, iter); return ret; } static int bch2_trans_mark_extent(struct btree_trans *trans, - struct bkey_s_c k, unsigned offset, - s64 sectors, unsigned flags, - enum bch_data_type data_type) + struct bkey_s_c k, unsigned flags) { + struct bch_fs *c = trans->c; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; struct bch_replicas_padded r; + enum bch_data_type data_type = bkey_is_btree_ptr(k.k) + ? BCH_DATA_btree + : BCH_DATA_user; + s64 sectors = bkey_is_btree_ptr(k.k) + ? 
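bch2_trans_fs_usage_apply() above swaps a plain atomic64_sub() for a cmpxchg loop so c->sectors_available can never be driven below zero. A minimal userspace sketch of that clamp-at-zero decrement using C11 atomics (the kernel code uses atomic64_cmpxchg instead):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Subtract delta from *v, but never let it go below zero. */
static void sub_clamp_zero(_Atomic int64_t *v, int64_t delta)
{
        int64_t old = atomic_load(v), new;

        do {
                new = old - delta;
                if (new < 0)
                        new = 0;
                /* on failure, atomic_compare_exchange_weak reloads old */
        } while (!atomic_compare_exchange_weak(v, &old, new));
}

int main(void)
{
        _Atomic int64_t sectors_available = 100;

        sub_clamp_zero(&sectors_available, 30);
        assert(atomic_load(&sectors_available) == 70);

        sub_clamp_zero(&sectors_available, 1000);       /* would underflow: clamps to 0 */
        assert(atomic_load(&sectors_available) == 0);
        return 0;
}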
c->opts.btree_node_size + : k.k->size; s64 dirty_sectors = 0; bool stale; int ret; + BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) == + (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)); + + if (flags & BTREE_TRIGGER_OVERWRITE) + sectors = -sectors; + r.e.data_type = data_type; r.e.nr_devs = 0; r.e.nr_required = 1; - BUG_ON(!sectors); - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - s64 disk_sectors = data_type == BCH_DATA_btree - ? sectors - : ptr_disk_sectors_delta(p, offset, sectors, flags); + s64 disk_sectors = ptr_disk_sectors(sectors, p); - ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors, - data_type); + ret = bch2_trans_mark_pointer(trans, k, p, + disk_sectors, data_type); if (ret < 0) return ret; @@ -1674,8 +1640,8 @@ static int bch2_trans_mark_stripe(struct btree_trans *trans, struct bkey_s_c old, struct bkey_s_c new, unsigned flags) { - struct bkey_s_c_stripe old_s = { NULL }; - struct bkey_s_c_stripe new_s = { NULL }; + struct bkey_s_c_stripe old_s = { .k = NULL }; + struct bkey_s_c_stripe new_s = { .k = NULL }; struct bch_replicas_padded r; unsigned i; int ret = 0; @@ -1726,41 +1692,65 @@ static int bch2_trans_mark_stripe(struct btree_trans *trans, return ret; } -static __le64 *bkey_refcount(struct bkey_i *k) +static int bch2_trans_mark_inode(struct btree_trans *trans, + struct bkey_s_c old, + struct bkey_s_c new, + unsigned flags) { - switch (k->k.type) { - case KEY_TYPE_reflink_v: - return &bkey_i_to_reflink_v(k)->v.refcount; - case KEY_TYPE_indirect_inline_data: - return &bkey_i_to_indirect_inline_data(k)->v.refcount; - default: - return NULL; + int nr = (new.k->type == KEY_TYPE_inode) - + (old.k->type == KEY_TYPE_inode); + + if (nr) { + struct replicas_delta_list *d = + replicas_deltas_realloc(trans, 0); + d->nr_inodes += nr; } + + return 0; +} + +static int bch2_trans_mark_reservation(struct btree_trans *trans, + struct bkey_s_c k, unsigned flags) +{ + unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; + s64 sectors = (s64) k.k->size; + struct replicas_delta_list *d; + + BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) == + (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)); + + if (flags & BTREE_TRIGGER_OVERWRITE) + sectors = -sectors; + sectors *= replicas; + + d = replicas_deltas_realloc(trans, 0); + + replicas = clamp_t(unsigned, replicas, 1, + ARRAY_SIZE(d->persistent_reserved)); + + d->persistent_reserved[replicas - 1] += sectors; + return 0; } static int __bch2_trans_mark_reflink_p(struct btree_trans *trans, struct bkey_s_c_reflink_p p, - u64 idx, unsigned sectors, - unsigned flags) + u64 idx, unsigned flags) { struct bch_fs *c = trans->c; struct btree_iter *iter; struct bkey_s_c k; struct bkey_i *n; __le64 *refcount; + int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 
1 : -1; s64 ret; - ret = trans_get_key(trans, BTREE_ID_reflink, - POS(0, idx), &iter, &k); - if (ret < 0) - return ret; - - if ((flags & BTREE_TRIGGER_OVERWRITE) && - (bkey_start_offset(k.k) < idx || - k.k->p.offset > idx + sectors)) - goto out; - - sectors = k.k->p.offset - idx; + iter = bch2_trans_get_iter(trans, BTREE_ID_reflink, POS(0, idx), + BTREE_ITER_INTENT| + BTREE_ITER_WITH_UPDATES); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto err; n = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); ret = PTR_ERR_OR_ZERO(n); @@ -1774,102 +1764,72 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans, bch2_fs_inconsistent(c, "%llu:%llu len %u points to nonexistent indirect extent %llu", p.k->p.inode, p.k->p.offset, p.k->size, idx); + bch2_inconsistent_error(c); ret = -EIO; goto err; } - le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1); + BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)); + le64_add_cpu(refcount, add); if (!*refcount) { n->k.type = KEY_TYPE_deleted; set_bkey_val_u64s(&n->k, 0); } - bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k)); - bch2_trans_update(trans, iter, n, 0); -out: - ret = sectors; + bch2_btree_iter_set_pos_to_extent_start(iter); + ret = bch2_trans_update(trans, iter, n, 0); + if (ret) + goto err; + + ret = k.k->p.offset - idx; err: bch2_trans_iter_put(trans, iter); return ret; } static int bch2_trans_mark_reflink_p(struct btree_trans *trans, - struct bkey_s_c_reflink_p p, unsigned offset, - s64 sectors, unsigned flags) + struct bkey_s_c k, unsigned flags) { - u64 idx = le64_to_cpu(p.v->idx) + offset; + struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); + u64 idx = le64_to_cpu(p.v->idx); + unsigned sectors = p.k->size; s64 ret = 0; - sectors = abs(sectors); - BUG_ON(offset + sectors > p.k->size); - while (sectors) { - ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags); + ret = __bch2_trans_mark_reflink_p(trans, p, idx, flags); if (ret < 0) - break; + return ret; - idx += ret; - sectors = max_t(s64, 0LL, sectors - ret); - ret = 0; + ret = min_t(s64, ret, sectors); + idx += ret; + sectors -= ret; } - return ret; + return 0; } -int bch2_trans_mark_key(struct btree_trans *trans, - struct bkey_s_c old, - struct bkey_s_c new, - unsigned offset, s64 sectors, unsigned flags) +int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old, + struct bkey_s_c new, unsigned flags) { - struct bch_fs *c = trans->c; struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old; - struct replicas_delta_list *d; BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE))); switch (k.k->type) { case KEY_TYPE_btree_ptr: case KEY_TYPE_btree_ptr_v2: - sectors = !(flags & BTREE_TRIGGER_OVERWRITE) - ? 
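bch2_trans_mark_reflink_p() above walks the referenced range in chunks: each call reports how many sectors of the current indirect extent lie past idx, and the caller advances by min(returned, remaining). A small sketch of that chunked walk; mark_one() is a hypothetical stand-in for __bch2_trans_mark_reflink_p():

#include <assert.h>
#include <stdint.h>

/*
 * Stand-in for the per-extent helper: given a position, return how many
 * sectors remain in the indirect extent covering it. Extents here end at
 * offsets 8, 24 and 40.
 */
static int64_t mark_one(uint64_t idx)
{
        static const uint64_t ends[] = { 8, 24, 40 };

        for (unsigned i = 0; i < 3; i++)
                if (idx < ends[i])
                        return ends[i] - idx;
        return -1;      /* nonexistent indirect extent */
}

int main(void)
{
        /* Account a 30-sector reference starting at offset 4: */
        uint64_t idx = 4;
        int64_t sectors = 30, ret;
        unsigned calls = 0;

        while (sectors) {
                ret = mark_one(idx);
                assert(ret > 0);

                if (ret > sectors)      /* min_t(s64, ret, sectors) */
                        ret = sectors;
                idx += ret;
                sectors -= ret;
                calls++;
        }
        /* Crosses three indirect extents: [4,8), [8,24), [24,34): */
        assert(calls == 3 && idx == 34);
        return 0;
}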
c->opts.btree_node_size - : -c->opts.btree_node_size; - - return bch2_trans_mark_extent(trans, k, offset, sectors, - flags, BCH_DATA_btree); case KEY_TYPE_extent: case KEY_TYPE_reflink_v: - return bch2_trans_mark_extent(trans, k, offset, sectors, - flags, BCH_DATA_user); + return bch2_trans_mark_extent(trans, k, flags); case KEY_TYPE_stripe: return bch2_trans_mark_stripe(trans, old, new, flags); - case KEY_TYPE_inode: { - int nr = (new.k->type == KEY_TYPE_inode) - - (old.k->type == KEY_TYPE_inode); - - if (nr) { - d = replicas_deltas_realloc(trans, 0); - d->nr_inodes += nr; - } - - return 0; - } - case KEY_TYPE_reservation: { - unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; - - d = replicas_deltas_realloc(trans, 0); - - sectors *= replicas; - replicas = clamp_t(unsigned, replicas, 1, - ARRAY_SIZE(d->persistent_reserved)); - - d->persistent_reserved[replicas - 1] += sectors; - return 0; - } + case KEY_TYPE_inode: + return bch2_trans_mark_inode(trans, old, new, flags); + case KEY_TYPE_reservation: + return bch2_trans_mark_reservation(trans, k, flags); case KEY_TYPE_reflink_p: - return bch2_trans_mark_reflink_p(trans, - bkey_s_c_to_reflink_p(k), - offset, sectors, flags); + return bch2_trans_mark_reflink_p(trans, k, flags); default: return 0; } @@ -1880,8 +1840,10 @@ int bch2_trans_mark_update(struct btree_trans *trans, struct bkey_i *new, unsigned flags) { - struct bkey_s_c old; - int ret; + struct bkey _deleted = KEY(0, 0, 0); + struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL }; + struct bkey_s_c old; + int iter_flags, ret; if (unlikely(flags & BTREE_TRIGGER_NORUN)) return 0; @@ -1889,85 +1851,34 @@ int bch2_trans_mark_update(struct btree_trans *trans, if (!btree_node_type_needs_gc(iter->btree_id)) return 0; - if (!btree_node_type_is_extents(iter->btree_id)) { - /* iterators should be uptodate, shouldn't get errors here: */ - if (btree_iter_type(iter) != BTREE_ITER_CACHED) { - old = bch2_btree_iter_peek_slot(iter); - BUG_ON(bkey_err(old)); - } else { - struct bkey_cached *ck = (void *) iter->l[0].b; - - BUG_ON(!ck->valid); - old = bkey_i_to_s_c(ck->k); - } - - if (old.k->type == new->k.type) { - ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0, - BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags); - } else { - ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0, - BTREE_TRIGGER_INSERT|flags) ?: - bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0, - BTREE_TRIGGER_OVERWRITE|flags); - } - } else { - struct btree_iter *copy; - struct bkey _old; - EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED); + if (likely(!(iter->flags & BTREE_ITER_CACHED_NOFILL))) { + iter_flags = iter->flags & BTREE_ITER_WITH_UPDATES; + iter->flags &= ~BTREE_ITER_WITH_UPDATES; - bkey_init(&_old); - old = (struct bkey_s_c) { &_old, NULL }; + old = bch2_btree_iter_peek_slot(iter); + iter->flags |= iter_flags; - ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), - 0, new->k.size, - BTREE_TRIGGER_INSERT); + ret = bkey_err(old); if (ret) return ret; + } else { + /* + * If BTREE_ITER_CACHED_NOFILL was used, we better not be + * running triggers that do anything on removal (alloc btree): + */ + old = deleted; + } - copy = bch2_trans_copy_iter(trans, iter); - - for_each_btree_key_continue(copy, 0, old, ret) { - unsigned offset = 0; - s64 sectors = -((s64) old.k->size); - - flags |= BTREE_TRIGGER_OVERWRITE; - - if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0) - break; - - switch (bch2_extent_overlap(&new->k, old.k)) { - case BCH_EXTENT_OVERLAP_ALL: - offset 
= 0; - sectors = -((s64) old.k->size); - break; - case BCH_EXTENT_OVERLAP_BACK: - offset = bkey_start_offset(&new->k) - - bkey_start_offset(old.k); - sectors = bkey_start_offset(&new->k) - - old.k->p.offset; - break; - case BCH_EXTENT_OVERLAP_FRONT: - offset = 0; - sectors = bkey_start_offset(old.k) - - new->k.p.offset; - break; - case BCH_EXTENT_OVERLAP_MIDDLE: - offset = bkey_start_offset(&new->k) - - bkey_start_offset(old.k); - sectors = -((s64) new->k.size); - flags |= BTREE_TRIGGER_OVERWRITE_SPLIT; - break; - } - - BUG_ON(sectors >= 0); - - ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), - offset, sectors, flags); - if (ret) - break; - } - bch2_trans_iter_put(trans, copy); + if (old.k->type == new->k.type && + ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) { + ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), + BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags); + } else { + ret = bch2_trans_mark_key(trans, deleted, bkey_i_to_s_c(new), + BTREE_TRIGGER_INSERT|flags) ?: + bch2_trans_mark_key(trans, old, deleted, + BTREE_TRIGGER_OVERWRITE|flags); } return ret; @@ -1988,6 +1899,12 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, }; int ret = 0; + /* + * Backup superblock might be past the end of our normal usable space: + */ + if (b >= ca->mi.nbuckets) + return 0; + a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u); if (IS_ERR(a)) return PTR_ERR(a); @@ -2004,22 +1921,6 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, goto out; } - if ((unsigned) (u.dirty_sectors + sectors) > ca->mi.bucket_size) { - bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - "bucket %llu:%llu gen %u data type %s sector count overflow: %u + %u > %u\n" - "while marking %s", - iter->pos.inode, iter->pos.offset, u.gen, - bch2_data_types[u.data_type ?: type], - u.dirty_sectors, sectors, ca->mi.bucket_size, - bch2_data_types[type]); - ret = -EIO; - goto out; - } - - if (u.data_type == type && - u.dirty_sectors == sectors) - goto out; - u.data_type = type; u.dirty_sectors = sectors; @@ -2031,53 +1932,44 @@ out: } int bch2_trans_mark_metadata_bucket(struct btree_trans *trans, - struct disk_reservation *res, struct bch_dev *ca, size_t b, enum bch_data_type type, unsigned sectors) { - return __bch2_trans_do(trans, res, NULL, 0, - __bch2_trans_mark_metadata_bucket(trans, ca, b, BCH_DATA_journal, - ca->mi.bucket_size)); - + return __bch2_trans_do(trans, NULL, NULL, 0, + __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors)); } static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans, - struct disk_reservation *res, struct bch_dev *ca, u64 start, u64 end, enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors) { - int ret; - do { u64 b = sector_to_bucket(ca, start); unsigned sectors = min_t(u64, bucket_to_sector(ca, b + 1), end) - start; - if (b != *bucket) { - if (*bucket_sectors) { - ret = bch2_trans_mark_metadata_bucket(trans, res, ca, - *bucket, type, *bucket_sectors); - if (ret) - return ret; - } + if (b != *bucket && *bucket_sectors) { + int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket, + type, *bucket_sectors); + if (ret) + return ret; - *bucket = b; - *bucket_sectors = 0; + *bucket_sectors = 0; } + *bucket = b; *bucket_sectors += sectors; start += sectors; - } while (!ret && start < end); + } while (start < end); return 0; } static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, - struct disk_reservation *res, - struct bch_dev *ca) + struct bch_dev *ca) { struct bch_sb_layout *layout 
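The rewritten bch2_trans_mark_update() above no longer walks overlapping extents by hand: it peeks the old key (masking BTREE_ITER_WITH_UPDATES while it does so), then either runs a single trigger call that sees both old and new, when both keys share a type that is in BTREE_TRIGGER_WANTS_OLD_AND_NEW, or falls back to two calls, an insert against a deleted placeholder and an overwrite whose new key is the placeholder. A toy, self-contained model of that dispatch follows; the key types, flag values and run_trigger() callback are stand-ins, not the real bcachefs definitions.

    #include <stdio.h>

    enum { T_DELETED, T_EXTENT, T_INODE, T_STRIPE };

    #define TRIGGER_INSERT     (1 << 0)
    #define TRIGGER_OVERWRITE  (1 << 1)

    /* key types whose trigger wants old and new in a single call (stand-in): */
    static const unsigned wants_old_and_new = (1 << T_STRIPE) | (1 << T_INODE);

    static int run_trigger(int old_type, int new_type, unsigned flags)
    {
        printf("trigger: old=%d new=%d flags=%x\n", old_type, new_type, flags);
        return 0;
    }

    static int mark_update(int old_type, int new_type)
    {
        if (old_type == new_type && ((1U << old_type) & wants_old_and_new))
            return run_trigger(old_type, new_type,
                               TRIGGER_INSERT|TRIGGER_OVERWRITE);

        /* otherwise: one insert call with a deleted "old", then one
         * overwrite call with a deleted "new", mirroring the hunk above */
        int ret = run_trigger(T_DELETED, new_type, TRIGGER_INSERT);
        return ret ?: run_trigger(old_type, T_DELETED, TRIGGER_OVERWRITE);
    }

    int main(void)
    {
        mark_update(T_INODE, T_INODE);    /* single combined call */
        mark_update(T_EXTENT, T_INODE);   /* split into insert + overwrite */
        return 0;
    }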
= &ca->disk_sb.sb->layout; u64 bucket = 0; @@ -2088,14 +1980,14 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, u64 offset = le64_to_cpu(layout->sb_offset[i]); if (offset == BCH_SB_SECTOR) { - ret = bch2_trans_mark_metadata_sectors(trans, res, ca, + ret = bch2_trans_mark_metadata_sectors(trans, ca, 0, BCH_SB_SECTOR, BCH_DATA_sb, &bucket, &bucket_sectors); if (ret) return ret; } - ret = bch2_trans_mark_metadata_sectors(trans, res, ca, offset, + ret = bch2_trans_mark_metadata_sectors(trans, ca, offset, offset + (1 << layout->sb_max_size_bits), BCH_DATA_sb, &bucket, &bucket_sectors); if (ret) @@ -2103,14 +1995,14 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, } if (bucket_sectors) { - ret = bch2_trans_mark_metadata_bucket(trans, res, ca, + ret = bch2_trans_mark_metadata_bucket(trans, ca, bucket, BCH_DATA_sb, bucket_sectors); if (ret) return ret; } for (i = 0; i < ca->journal.nr; i++) { - ret = bch2_trans_mark_metadata_bucket(trans, res, ca, + ret = bch2_trans_mark_metadata_bucket(trans, ca, ca->journal.buckets[i], BCH_DATA_journal, ca->mi.bucket_size); if (ret) @@ -2120,12 +2012,10 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, return 0; } -int bch2_trans_mark_dev_sb(struct bch_fs *c, - struct disk_reservation *res, - struct bch_dev *ca) +int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca) { - return bch2_trans_do(c, res, NULL, 0, - __bch2_trans_mark_dev_sb(&trans, res, ca)); + return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW, + __bch2_trans_mark_dev_sb(&trans, ca)); } /* Disk reservations: */ diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h index 54dcc82..0f544b6 100644 --- a/libbcachefs/buckets.h +++ b/libbcachefs/buckets.h @@ -125,20 +125,6 @@ static inline u8 ptr_stale(struct bch_dev *ca, return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen); } -static inline s64 __ptr_disk_sectors(struct extent_ptr_decoded p, - unsigned live_size) -{ - return live_size && p.crc.compression_type - ? 
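bch2_trans_mark_metadata_sectors() above walks a raw sector range bucket by bucket, accumulating sectors for the current bucket and flushing a mark whenever the walk crosses into the next one; __bch2_trans_mark_dev_sb() feeds it the superblock layout ranges and then marks the journal buckets directly. A standalone sketch of the same walk, with an assumed fixed bucket size, a mark_bucket() stand-in, and the final partial bucket flushed locally rather than left to the caller as the real helper does:

    #include <stdint.h>
    #include <stdio.h>

    #define BUCKET_SIZE 128u   /* sectors per bucket, illustrative only */

    static void mark_bucket(uint64_t bucket, unsigned sectors)
    {
        printf("bucket %llu: %u sectors\n",
               (unsigned long long) bucket, sectors);
    }

    static void mark_sectors(uint64_t start, uint64_t end)
    {
        uint64_t bucket = start / BUCKET_SIZE;
        unsigned bucket_sectors = 0;

        while (start < end) {
            uint64_t b = start / BUCKET_SIZE;
            uint64_t bucket_end = (b + 1) * BUCKET_SIZE;
            unsigned sectors =
                (unsigned) ((end < bucket_end ? end : bucket_end) - start);

            /* crossed into a new bucket: flush what we accumulated */
            if (b != bucket && bucket_sectors) {
                mark_bucket(bucket, bucket_sectors);
                bucket_sectors = 0;
            }

            bucket = b;
            bucket_sectors += sectors;
            start += sectors;
        }

        if (bucket_sectors)
            mark_bucket(bucket, bucket_sectors);
    }

    int main(void)
    {
        mark_sectors(8, 300);   /* spans buckets 0, 1 and 2 */
        return 0;
    }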
max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size, - p.crc.uncompressed_size)) - : live_size; -} - -static inline s64 ptr_disk_sectors(struct extent_ptr_decoded p) -{ - return __ptr_disk_sectors(p, p.crc.live_size); -} - /* bucket gc marks */ static inline unsigned bucket_sectors_used(struct bucket_mark mark) @@ -175,25 +161,31 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca, return total - stats.buckets_unavailable; } -/* - * Number of reclaimable buckets - only for use by the allocator thread: - */ static inline u64 dev_buckets_available(struct bch_dev *ca) { return __dev_buckets_available(ca, bch2_dev_usage_read(ca)); } -static inline u64 __dev_buckets_free(struct bch_dev *ca, - struct bch_dev_usage stats) +static inline u64 __dev_buckets_reclaimable(struct bch_dev *ca, + struct bch_dev_usage stats) { - return __dev_buckets_available(ca, stats) + - fifo_used(&ca->free[RESERVE_NONE]) + - fifo_used(&ca->free_inc); + struct bch_fs *c = ca->fs; + s64 available = __dev_buckets_available(ca, stats); + unsigned i; + + spin_lock(&c->freelist_lock); + for (i = 0; i < RESERVE_NR; i++) + available -= fifo_used(&ca->free[i]); + available -= fifo_used(&ca->free_inc); + available -= ca->nr_open_buckets; + spin_unlock(&c->freelist_lock); + + return max(available, 0LL); } -static inline u64 dev_buckets_free(struct bch_dev *ca) +static inline u64 dev_buckets_reclaimable(struct bch_dev *ca) { - return __dev_buckets_free(ca, bch2_dev_usage_read(ca)); + return __dev_buckets_reclaimable(ca, bch2_dev_usage_read(ca)); } /* Filesystem usage: */ @@ -229,29 +221,25 @@ bch2_fs_usage_read_short(struct bch_fs *); void bch2_bucket_seq_cleanup(struct bch_fs *); void bch2_fs_usage_initialize(struct bch_fs *); -void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *, - size_t, bool, struct gc_pos, unsigned); +void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *, size_t, bool); void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *, size_t, enum bch_data_type, unsigned, struct gc_pos, unsigned); -int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned, - s64, struct bch_fs_usage *, u64, unsigned); +int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned); int bch2_mark_update(struct btree_trans *, struct btree_iter *, - struct bkey_i *, struct bch_fs_usage *, unsigned); + struct bkey_i *, unsigned); -int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, - unsigned, s64, unsigned); +int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, + struct bkey_s_c, unsigned); int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter, struct bkey_i *insert, unsigned); void bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *); -int bch2_trans_mark_metadata_bucket(struct btree_trans *, - struct disk_reservation *, struct bch_dev *, - size_t, enum bch_data_type, unsigned); -int bch2_trans_mark_dev_sb(struct bch_fs *, struct disk_reservation *, - struct bch_dev *); +int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, + size_t, enum bch_data_type, unsigned); +int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *); /* disk reservations: */ @@ -291,6 +279,13 @@ static inline int bch2_disk_reservation_get(struct bch_fs *c, return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags); } +#define RESERVE_FACTOR 6 + +static inline u64 avail_factor(u64 r) +{ + return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1); +} + int bch2_dev_buckets_resize(struct bch_fs *, 
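The new __dev_buckets_reclaimable() above takes the available-bucket count and subtracts whatever is already sitting on the freelists, in free_inc, or held as open buckets, clamping at zero. avail_factor() scales a count by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1), i.e. 64/65 with RESERVE_FACTOR = 6, so roughly 1.5% is held back. A quick standalone check of that arithmetic, with plain division standing in for div_u64:

    #include <stdint.h>
    #include <stdio.h>

    #define RESERVE_FACTOR 6

    static uint64_t avail_factor(uint64_t r)
    {
        return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
    }

    int main(void)
    {
        /* 64/65 of the input, so roughly 1.5% is held in reserve: */
        printf("%llu\n", (unsigned long long) avail_factor(65));      /* 64 */
        printf("%llu\n", (unsigned long long) avail_factor(1 << 20)); /* 1032444 */
        return 0;
    }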
struct bch_dev *, u64); void bch2_dev_buckets_free(struct bch_dev *); int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *); diff --git a/libbcachefs/buckets_types.h b/libbcachefs/buckets_types.h index 588b1a7..b2de299 100644 --- a/libbcachefs/buckets_types.h +++ b/libbcachefs/buckets_types.h @@ -59,6 +59,11 @@ struct bch_dev_usage { struct { u64 buckets; u64 sectors; /* _compressed_ sectors: */ + /* + * XXX + * Why do we have this? Isn't it just buckets * bucket_size - + * sectors? + */ u64 fragmented; } d[BCH_DATA_NR]; }; diff --git a/libbcachefs/chardev.c b/libbcachefs/chardev.c index c616014..db68a78 100644 --- a/libbcachefs/chardev.c +++ b/libbcachefs/chardev.c @@ -157,6 +157,9 @@ static long bch2_ioctl_query_uuid(struct bch_fs *c, #if 0 static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (arg.flags || arg.pad) return -EINVAL; @@ -165,6 +168,9 @@ static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg) static long bch2_ioctl_stop(struct bch_fs *c) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + bch2_fs_stop(c); return 0; } @@ -175,6 +181,9 @@ static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg) char *path; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (arg.flags || arg.pad) return -EINVAL; @@ -192,6 +201,9 @@ static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg) { struct bch_dev *ca; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST| BCH_FORCE_IF_METADATA_LOST| BCH_FORCE_IF_DEGRADED| @@ -211,6 +223,9 @@ static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg) char *path; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (arg.flags || arg.pad) return -EINVAL; @@ -228,6 +243,9 @@ static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg) struct bch_dev *ca; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST| BCH_FORCE_IF_METADATA_LOST| BCH_FORCE_IF_DEGRADED| @@ -250,11 +268,15 @@ static long bch2_ioctl_disk_set_state(struct bch_fs *c, struct bch_dev *ca; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST| BCH_FORCE_IF_METADATA_LOST| BCH_FORCE_IF_DEGRADED| BCH_BY_INDEX)) || - arg.pad[0] || arg.pad[1] || arg.pad[2]) + arg.pad[0] || arg.pad[1] || arg.pad[2] || + arg.new_state >= BCH_MEMBER_STATE_NR) return -EINVAL; ca = bch2_device_lookup(c, arg.dev, arg.flags); @@ -331,6 +353,9 @@ static long bch2_ioctl_data(struct bch_fs *c, unsigned flags = O_RDONLY|O_CLOEXEC|O_NONBLOCK; int ret, fd = -1; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (arg.op >= BCH_DATA_OP_NR || arg.flags) return -EINVAL; @@ -414,7 +439,8 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c, struct bch_replicas_entry *src_e = cpu_replicas_entry(&c->replicas, i); - if (replicas_usage_next(dst_e) > dst_end) { + /* check that we have enough space for one replicas entry */ + if (dst_e + 1 > dst_end) { ret = -ERANGE; break; } @@ -496,6 +522,9 @@ static long bch2_ioctl_read_super(struct bch_fs *c, struct bch_sb *sb; int ret = 0; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) || arg.pad) return -EINVAL; @@ -523,7 +552,7 @@ static long bch2_ioctl_read_super(struct bch_fs *c, ret = copy_to_user((void __user *)(unsigned long)arg.sb, sb, vstruct_bytes(sb)); err: - if (ca) + if (!IS_ERR_OR_NULL(ca)) 
percpu_ref_put(&ca->ref); mutex_unlock(&c->sb_lock); return ret; @@ -536,6 +565,9 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c, struct bch_dev *ca; unsigned i; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + for_each_online_member(ca, c, i) if (ca->disk_sb.bdev->bd_dev == dev) { percpu_ref_put(&ca->io_ref); @@ -551,6 +583,9 @@ static long bch2_ioctl_disk_resize(struct bch_fs *c, struct bch_dev *ca; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~BCH_BY_INDEX) || arg.pad) return -EINVAL; @@ -571,6 +606,9 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c, struct bch_dev *ca; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if ((arg.flags & ~BCH_BY_INDEX) || arg.pad) return -EINVAL; @@ -596,7 +634,6 @@ do { \ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) { - /* ioctls that don't require admin cap: */ switch (cmd) { case BCH_IOCTL_QUERY_UUID: return bch2_ioctl_query_uuid(c, arg); @@ -604,12 +641,6 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) return bch2_ioctl_fs_usage(c, arg); case BCH_IOCTL_DEV_USAGE: return bch2_ioctl_dev_usage(c, arg); - } - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - switch (cmd) { #if 0 case BCH_IOCTL_START: BCH_IOCTL(start, struct bch_ioctl_start); @@ -625,7 +656,6 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) if (!test_bit(BCH_FS_STARTED, &c->flags)) return -EINVAL; - /* ioctls that do require admin cap: */ switch (cmd) { case BCH_IOCTL_DISK_ADD: BCH_IOCTL(disk_add, struct bch_ioctl_disk); diff --git a/libbcachefs/checksum.c b/libbcachefs/checksum.c index 3d88719..d20924e 100644 --- a/libbcachefs/checksum.c +++ b/libbcachefs/checksum.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -16,53 +17,77 @@ #include #include -static u64 bch2_checksum_init(unsigned type) +/* + * bch2_checksum state is an abstraction of the checksum state calculated over different pages. + * it features page merging without having the checksum algorithm lose its state. + * for native checksum aglorithms (like crc), a default seed value will do. 
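The chardev.c hunks above drop the single "ioctls that do require admin cap" gate in bch2_fs_ioctl() and instead open every privileged handler with its own capable(CAP_SYS_ADMIN) test, leaving QUERY_UUID, FS_USAGE and DEV_USAGE callable without privileges. A small userspace model of that layout; is_admin(), the command numbers and the handlers are illustrative only:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    enum { IOC_QUERY_UUID, IOC_FS_USAGE, IOC_DISK_ADD, IOC_DISK_REMOVE };

    static bool is_admin(void) { return geteuid() == 0; }

    static int ioctl_query_uuid(void) { return 0; }     /* read-only */
    static int ioctl_fs_usage(void)   { return 0; }     /* read-only */

    static int ioctl_disk_add(void)
    {
        if (!is_admin())            /* each mutating handler checks itself */
            return -EPERM;
        return 0;
    }

    static int ioctl_disk_remove(void)
    {
        if (!is_admin())
            return -EPERM;
        return 0;
    }

    static int fs_ioctl(int cmd)
    {
        switch (cmd) {              /* no global privilege gate here */
        case IOC_QUERY_UUID:  return ioctl_query_uuid();
        case IOC_FS_USAGE:    return ioctl_fs_usage();
        case IOC_DISK_ADD:    return ioctl_disk_add();
        case IOC_DISK_REMOVE: return ioctl_disk_remove();
        default:              return -ENOTTY;
        }
    }

    int main(void)
    {
        printf("query: %d, disk_add: %d\n",
               fs_ioctl(IOC_QUERY_UUID), fs_ioctl(IOC_DISK_ADD));
        return 0;
    }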
+ * for hash-like algorithms, a state needs to be stored + */ + +struct bch2_checksum_state { + union { + u64 seed; + struct xxh64_state h64state; + }; + unsigned int type; +}; + +static void bch2_checksum_init(struct bch2_checksum_state *state) { - switch (type) { + switch (state->type) { case BCH_CSUM_NONE: - return 0; - case BCH_CSUM_CRC32C_NONZERO: - return U32_MAX; - case BCH_CSUM_CRC64_NONZERO: - return U64_MAX; case BCH_CSUM_CRC32C: - return 0; case BCH_CSUM_CRC64: - return 0; + state->seed = 0; + break; + case BCH_CSUM_CRC32C_NONZERO: + state->seed = U32_MAX; + break; + case BCH_CSUM_CRC64_NONZERO: + state->seed = U64_MAX; + break; + case BCH_CSUM_XXHASH: + xxh64_reset(&state->h64state, 0); + break; default: BUG(); } } -static u64 bch2_checksum_final(unsigned type, u64 crc) +static u64 bch2_checksum_final(const struct bch2_checksum_state *state) { - switch (type) { + switch (state->type) { case BCH_CSUM_NONE: - return 0; - case BCH_CSUM_CRC32C_NONZERO: - return crc ^ U32_MAX; - case BCH_CSUM_CRC64_NONZERO: - return crc ^ U64_MAX; case BCH_CSUM_CRC32C: - return crc; case BCH_CSUM_CRC64: - return crc; + return state->seed; + case BCH_CSUM_CRC32C_NONZERO: + return state->seed ^ U32_MAX; + case BCH_CSUM_CRC64_NONZERO: + return state->seed ^ U64_MAX; + case BCH_CSUM_XXHASH: + return xxh64_digest(&state->h64state); default: BUG(); } } -static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t len) +static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len) { - switch (type) { + switch (state->type) { case BCH_CSUM_NONE: - return 0; + return; case BCH_CSUM_CRC32C_NONZERO: case BCH_CSUM_CRC32C: - return crc32c(crc, data, len); + state->seed = crc32c(state->seed, data, len); + break; case BCH_CSUM_CRC64_NONZERO: case BCH_CSUM_CRC64: - return crc64_be(crc, data, len); + state->seed = crc64_be(state->seed, data, len); + break; + case BCH_CSUM_XXHASH: + xxh64_update(&state->h64state, data, len); + break; default: BUG(); } @@ -140,13 +165,16 @@ struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type, case BCH_CSUM_CRC32C_NONZERO: case BCH_CSUM_CRC64_NONZERO: case BCH_CSUM_CRC32C: + case BCH_CSUM_XXHASH: case BCH_CSUM_CRC64: { - u64 crc = bch2_checksum_init(type); + struct bch2_checksum_state state; - crc = bch2_checksum_update(type, crc, data, len); - crc = bch2_checksum_final(type, crc); + state.type = type; - return (struct bch_csum) { .lo = cpu_to_le64(crc) }; + bch2_checksum_init(&state); + bch2_checksum_update(&state, data, len); + + return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) }; } case BCH_CSUM_CHACHA20_POLY1305_80: @@ -189,24 +217,25 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type, case BCH_CSUM_CRC32C_NONZERO: case BCH_CSUM_CRC64_NONZERO: case BCH_CSUM_CRC32C: + case BCH_CSUM_XXHASH: case BCH_CSUM_CRC64: { - u64 crc = bch2_checksum_init(type); + struct bch2_checksum_state state; + + state.type = type; + bch2_checksum_init(&state); #ifdef CONFIG_HIGHMEM __bio_for_each_segment(bv, bio, *iter, *iter) { void *p = kmap_atomic(bv.bv_page) + bv.bv_offset; - crc = bch2_checksum_update(type, - crc, p, bv.bv_len); + bch2_checksum_update(&state, p, bv.bv_len); kunmap_atomic(p); } #else __bio_for_each_bvec(bv, bio, *iter, *iter) - crc = bch2_checksum_update(type, crc, - page_address(bv.bv_page) + bv.bv_offset, + bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset, bv.bv_len); #endif - crc = bch2_checksum_final(type, crc); - return (struct bch_csum) { .lo = 
cpu_to_le64(crc) }; + return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) }; } case BCH_CSUM_CHACHA20_POLY1305_80: @@ -284,16 +313,22 @@ void bch2_encrypt_bio(struct bch_fs *c, unsigned type, struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a, struct bch_csum b, size_t b_len) { + struct bch2_checksum_state state; + + state.type = type; + bch2_checksum_init(&state); + state.seed = a.lo; + BUG_ON(!bch2_checksum_mergeable(type)); while (b_len) { unsigned b = min_t(unsigned, b_len, PAGE_SIZE); - a.lo = bch2_checksum_update(type, a.lo, + bch2_checksum_update(&state, page_address(ZERO_PAGE(0)), b); b_len -= b; } - + a.lo = bch2_checksum_final(&state); a.lo ^= b.lo; a.hi ^= b.hi; return a; diff --git a/libbcachefs/checksum.h b/libbcachefs/checksum.h index 728b7ef..6841fb1 100644 --- a/libbcachefs/checksum.h +++ b/libbcachefs/checksum.h @@ -83,6 +83,8 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type, return data ? BCH_CSUM_CRC32C : BCH_CSUM_CRC32C_NONZERO; case BCH_CSUM_OPT_crc64: return data ? BCH_CSUM_CRC64 : BCH_CSUM_CRC64_NONZERO; + case BCH_CSUM_OPT_xxhash: + return BCH_CSUM_XXHASH; default: BUG(); } diff --git a/libbcachefs/debug.c b/libbcachefs/debug.c index acf6003..b0a8eb5 100644 --- a/libbcachefs/debug.c +++ b/libbcachefs/debug.c @@ -29,40 +29,19 @@ static struct dentry *bch_debug; -#ifdef CONFIG_BCACHEFS_DEBUG - -void __bch2_btree_verify(struct bch_fs *c, struct btree *b) +static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b, + struct extent_ptr_decoded pick) { struct btree *v = c->verify_data; - struct btree_node *n_ondisk, *n_sorted, *n_inmemory; - struct bset *sorted, *inmemory; - struct extent_ptr_decoded pick; - struct bch_dev *ca; + struct btree_node *n_ondisk = c->verify_ondisk; + struct btree_node *n_sorted = c->verify_data->data; + struct bset *sorted, *inmemory = &b->data->keys; + struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev); struct bio *bio; + bool failed = false; - if (c->opts.nochanges) - return; - - btree_node_io_lock(b); - mutex_lock(&c->verify_lock); - - n_ondisk = c->verify_ondisk; - n_sorted = c->verify_data->data; - n_inmemory = b->data; - - bkey_copy(&v->key, &b->key); - v->written = 0; - v->c.level = b->c.level; - v->c.btree_id = b->c.btree_id; - bch2_btree_keys_init(v); - - if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), - NULL, &pick) <= 0) - return; - - ca = bch_dev_bkey_exists(c, pick.ptr.dev); if (!bch2_dev_get_ioref(ca, READ)) - return; + return false; bio = bio_alloc_bioset(GFP_NOIO, buf_pages(n_sorted, btree_bytes(c)), @@ -79,12 +58,12 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) memcpy(n_ondisk, n_sorted, btree_bytes(c)); + v->written = 0; if (bch2_btree_node_read_done(c, ca, v, false)) - goto out; + return false; n_sorted = c->verify_data->data; sorted = &n_sorted->keys; - inmemory = &n_inmemory->keys; if (inmemory->u64s != sorted->u64s || memcmp(inmemory->start, @@ -102,8 +81,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) printk(KERN_ERR "*** read back in:\n"); bch2_dump_bset(c, v, sorted, 0); - while (offset < b->written) { - if (!offset ) { + while (offset < v->written) { + if (!offset) { i = &n_ondisk->keys; sectors = vstruct_blocks(n_ondisk, c->block_bits) << c->block_bits; @@ -122,25 +101,84 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) offset += sectors; } - printk(KERN_ERR "*** block %u/%u not written\n", - offset >> c->block_bits, btree_blocks(c)); - for (j = 0; j < 
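bch2_checksum_merge() above now runs through the same state machinery: it seeds the state with a.lo, feeds it b_len bytes of ZERO_PAGE, finalizes, and xors in b. For the plain CRC variants (zero seed, no final conditioning) this is the textbook combine identity crc(A||B) = crc(A||0^len(B)) xor crc(B). A self-contained demonstration with a bitwise reflected CRC-32; this illustrates the generic identity, not the project's crc32c/crc64 code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* raw reflected CRC-32: zero seed, no final xor (the "mergeable" form) */
    static uint32_t crc32_raw(uint32_t crc, const void *data, size_t len)
    {
        const uint8_t *p = data;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
    }

    /* merge(crc(A), crc(B), len(B)): extend crc(A) over zeroes, xor crc(B) */
    static uint32_t crc32_merge(uint32_t a, uint32_t b, size_t b_len)
    {
        static const uint8_t zeroes[64];

        while (b_len) {
            size_t n = b_len < sizeof(zeroes) ? b_len : sizeof(zeroes);

            a = crc32_raw(a, zeroes, n);
            b_len -= n;
        }
        return a ^ b;
    }

    int main(void)
    {
        const char a[] = "hello, ", b[] = "world";
        uint32_t whole  = crc32_raw(crc32_raw(0, a, strlen(a)), b, strlen(b));
        uint32_t merged = crc32_merge(crc32_raw(0, a, strlen(a)),
                                      crc32_raw(0, b, strlen(b)), strlen(b));

        printf("%08x %08x\n", whole, merged);  /* prints the same value twice */
        return 0;
    }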
le16_to_cpu(inmemory->u64s); j++) if (inmemory->_data[j] != sorted->_data[j]) break; - printk(KERN_ERR "b->written %u\n", b->written); - console_unlock(); - panic("verify failed at %u\n", j); + bch_err(c, "verify failed at key %u", j); + + failed = true; + } + + if (v->written != b->written) { + bch_err(c, "written wrong: expected %u, got %u", + b->written, v->written); + failed = true; + } + + return failed; +} + +void __bch2_btree_verify(struct bch_fs *c, struct btree *b) +{ + struct bkey_ptrs_c ptrs; + struct extent_ptr_decoded p; + const union bch_extent_entry *entry; + struct btree *v; + struct bset *inmemory = &b->data->keys; + struct bkey_packed *k; + bool failed = false; + + if (c->opts.nochanges) + return; + + bch2_btree_node_io_lock(b); + mutex_lock(&c->verify_lock); + + if (!c->verify_ondisk) { + c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL); + if (!c->verify_ondisk) + goto out; + } + + if (!c->verify_data) { + c->verify_data = __bch2_btree_node_mem_alloc(c); + if (!c->verify_data) + goto out; + + list_del_init(&c->verify_data->list); + } + + BUG_ON(b->nsets != 1); + + for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k)) + if (k->type == KEY_TYPE_btree_ptr_v2) { + struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k); + v->mem_ptr = 0; + } + + v = c->verify_data; + bkey_copy(&v->key, &b->key); + v->c.level = b->c.level; + v->c.btree_id = b->c.btree_id; + bch2_btree_keys_init(v); + + ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key)); + bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry) + failed |= bch2_btree_verify_replica(c, b, p); + + if (failed) { + char buf[200]; + + bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)); + bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf); } out: mutex_unlock(&c->verify_lock); - btree_node_io_unlock(b); + bch2_btree_node_io_unlock(b); } -#endif - #ifdef CONFIG_DEBUG_FS /* XXX: bch_fs refcounting */ @@ -150,7 +188,7 @@ struct dump_iter { struct bch_fs *c; enum btree_id id; - char buf[PAGE_SIZE]; + char buf[1 << 12]; size_t bytes; /* what's currently in buf */ char __user *ubuf; /* destination user buffer */ @@ -230,7 +268,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, while (k.k && !(err = bkey_err(k))) { bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k); i->bytes = strlen(i->buf); - BUG_ON(i->bytes >= PAGE_SIZE); + BUG_ON(i->bytes >= sizeof(i->buf)); i->buf[i->bytes] = '\n'; i->bytes++; @@ -275,7 +313,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, if (err) return err; - if (!i->size || !bpos_cmp(POS_MAX, i->from)) + if (!i->size || !bpos_cmp(SPOS_MAX, i->from)) return i->ret; bch2_trans_init(&trans, i->c, 0, 0); @@ -291,7 +329,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, * can't easily correctly restart a btree node traversal across * all nodes, meh */ - i->from = bpos_cmp(POS_MAX, b->key.k.p) + i->from = bpos_cmp(SPOS_MAX, b->key.k.p) ? 
bpos_successor(b->key.k.p) : b->key.k.p; diff --git a/libbcachefs/debug.h b/libbcachefs/debug.h index 7ac1615..0b86736 100644 --- a/libbcachefs/debug.h +++ b/libbcachefs/debug.h @@ -8,11 +8,7 @@ struct bio; struct btree; struct bch_fs; -#ifdef CONFIG_BCACHEFS_DEBUG void __bch2_btree_verify(struct bch_fs *, struct btree *); -#else -static inline void __bch2_btree_verify(struct bch_fs *c, struct btree *b) {} -#endif static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b) { diff --git a/libbcachefs/dirent.c b/libbcachefs/dirent.c index cf4ce2e..02b2968 100644 --- a/libbcachefs/dirent.c +++ b/libbcachefs/dirent.c @@ -84,16 +84,24 @@ const char *bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k) if (!len) return "empty name"; - /* - * older versions of bcachefs were buggy and creating dirent - * keys that were bigger than necessary: - */ - if (bkey_val_u64s(k.k) > dirent_val_u64s(len + 7)) + if (bkey_val_u64s(k.k) > dirent_val_u64s(len)) return "value too big"; if (len > BCH_NAME_MAX) return "dirent name too big"; + if (len == 1 && !memcmp(d.v->d_name, ".", 1)) + return "invalid name"; + + if (len == 2 && !memcmp(d.v->d_name, "..", 2)) + return "invalid name"; + + if (memchr(d.v->d_name, '/', len)) + return "invalid name"; + + if (le64_to_cpu(d.v->d_inum) == d.k->p.inode) + return "dirent points to own directory"; + return NULL; } @@ -104,7 +112,10 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, bch_scnmemcpy(out, d.v->d_name, bch2_dirent_name_bytes(d)); - pr_buf(out, " -> %llu type %u", d.v->d_inum, d.v->d_type); + pr_buf(out, " -> %llu type %s", d.v->d_inum, + d.v->d_type < DT_MAX + ? bch2_d_types[d.v->d_type] + : "(bad d_type)"); } static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans, @@ -199,9 +210,14 @@ int bch2_dirent_rename(struct btree_trans *trans, goto out; old_dst = bch2_btree_iter_peek_slot(dst_iter); + ret = bkey_err(old_dst); + if (ret) + goto out; if (mode != BCH_RENAME) *dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum); + if (mode != BCH_RENAME_EXCHANGE) + *src_offset = dst_iter->pos.offset; /* Lookup src: */ src_iter = bch2_hash_lookup(trans, bch2_dirent_hash_desc, @@ -212,6 +228,10 @@ int bch2_dirent_rename(struct btree_trans *trans, goto out; old_src = bch2_btree_iter_peek_slot(src_iter); + ret = bkey_err(old_src); + if (ret) + goto out; + *src_inum = le64_to_cpu(bkey_s_c_to_dirent(old_src).v->d_inum); /* Create new dst key: */ @@ -282,7 +302,8 @@ int bch2_dirent_rename(struct btree_trans *trans, bch2_trans_update(trans, src_iter, &new_src->k_i, 0); bch2_trans_update(trans, dst_iter, &new_dst->k_i, 0); out_set_offset: - *src_offset = new_src->k.p.offset; + if (mode == BCH_RENAME_EXCHANGE) + *src_offset = new_src->k.p.offset; *dst_offset = new_dst->k.p.offset; out: bch2_trans_iter_put(trans, src_iter); @@ -315,20 +336,25 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum, struct btree_iter *iter; struct bkey_s_c k; u64 inum = 0; + int ret = 0; bch2_trans_init(&trans, c, 0, 0); iter = __bch2_dirent_lookup_trans(&trans, dir_inum, hash_info, name, 0); - if (IS_ERR(iter)) { - BUG_ON(PTR_ERR(iter) == -EINTR); + ret = PTR_ERR_OR_ZERO(iter); + if (ret) goto out; - } k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto out; + inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum); bch2_trans_iter_put(&trans, iter); out: + BUG_ON(ret == -EINTR); bch2_trans_exit(&trans); return inum; } diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c index f712f68..328e042 100644 --- 
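bch2_dirent_invalid() above gets stricter: the slack that tolerated oversized values from older, buggy keys is gone, and names that are ".", "..", contain a "/", or whose target inode equals the directory the dirent lives in are all rejected. A standalone restatement of those name checks; NAME_MAX_LEN is illustrative, standing in for BCH_NAME_MAX:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NAME_MAX_LEN 512

    static const char *dirent_name_invalid(const char *name, size_t len,
                                           uint64_t dir_inum, uint64_t d_inum)
    {
        if (!len)
            return "empty name";
        if (len > NAME_MAX_LEN)
            return "dirent name too big";
        if (len == 1 && !memcmp(name, ".", 1))
            return "invalid name";
        if (len == 2 && !memcmp(name, "..", 2))
            return "invalid name";
        if (memchr(name, '/', len))
            return "invalid name";
        if (d_inum == dir_inum)
            return "dirent points to own directory";
        return NULL;                            /* valid */
    }

    int main(void)
    {
        printf("%s\n", dirent_name_invalid("..",   2, 4096, 4097) ?: "ok");
        printf("%s\n", dirent_name_invalid("file", 4, 4096, 4096) ?: "ok");
        printf("%s\n", dirent_name_invalid("file", 4, 4096, 4097) ?: "ok");
        return 0;
    }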
a/libbcachefs/ec.c +++ b/libbcachefs/ec.c @@ -392,7 +392,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size); while (offset < bytes) { - unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES, + unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS, DIV_ROUND_UP(bytes, PAGE_SIZE)); unsigned b = min_t(size_t, bytes - offset, nr_iovecs << PAGE_SHIFT); @@ -741,9 +741,8 @@ found_slot: stripe->k.p = iter->pos; - bch2_trans_update(&trans, iter, &stripe->k_i, 0); - - ret = bch2_trans_commit(&trans, res, NULL, + ret = bch2_trans_update(&trans, iter, &stripe->k_i, 0) ?: + bch2_trans_commit(&trans, res, NULL, BTREE_INSERT_NOFAIL); err: bch2_trans_iter_put(&trans, iter); @@ -791,7 +790,7 @@ static int ec_stripe_bkey_update(struct btree_trans *trans, stripe_blockcount_set(&new->v, i, stripe_blockcount_get(existing, i)); - bch2_trans_update(trans, iter, &new->k_i, 0); + ret = bch2_trans_update(trans, iter, &new->k_i, 0); err: bch2_trans_iter_put(trans, iter); return ret; @@ -864,9 +863,9 @@ static int ec_stripe_update_ptrs(struct bch_fs *c, extent_stripe_ptr_add(e, s, ec_ptr, block); bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k)); - bch2_trans_update(&trans, iter, sk.k, 0); - - ret = bch2_trans_commit(&trans, NULL, NULL, + ret = bch2_btree_iter_traverse(iter) ?: + bch2_trans_update(&trans, iter, sk.k, 0) ?: + bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL); if (ret == -EINTR) ret = 0; @@ -1588,8 +1587,7 @@ write: stripe_blockcount_set(&new_key->v, i, m->block_sectors[i]); - bch2_trans_update(trans, iter, &new_key->k_i, 0); - return 0; + return bch2_trans_update(trans, iter, &new_key->k_i, 0); } int bch2_stripes_write(struct bch_fs *c, unsigned flags) @@ -1621,6 +1619,7 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags) if (ret) break; } + bch2_trans_iter_put(&trans, iter); bch2_trans_exit(&trans); @@ -1629,26 +1628,23 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags) return ret; } -static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id, - unsigned level, struct bkey_s_c k) +static int bch2_stripes_read_fn(struct bch_fs *c, struct bkey_s_c k) { int ret = 0; - if (k.k->type == KEY_TYPE_stripe) { + if (k.k->type == KEY_TYPE_stripe) ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?: - bch2_mark_key(c, k, 0, 0, NULL, 0, + bch2_mark_key(c, k, + BTREE_TRIGGER_INSERT| BTREE_TRIGGER_NOATOMIC); - if (ret) - return ret; - } return ret; } -int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys) +int bch2_stripes_read(struct bch_fs *c) { - int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_stripes, - NULL, bch2_stripes_read_fn); + int ret = bch2_btree_and_journal_walk(c, BTREE_ID_stripes, + bch2_stripes_read_fn); if (ret) bch_err(c, "error reading stripes: %i", ret); diff --git a/libbcachefs/ec.h b/libbcachefs/ec.h index 744e51e..e79626b 100644 --- a/libbcachefs/ec.h +++ b/libbcachefs/ec.h @@ -215,8 +215,7 @@ void bch2_ec_flush_new_stripes(struct bch_fs *); void bch2_stripes_heap_start(struct bch_fs *); -struct journal_keys; -int bch2_stripes_read(struct bch_fs *, struct journal_keys *); +int bch2_stripes_read(struct bch_fs *); int bch2_stripes_write(struct bch_fs *, unsigned); int bch2_ec_mem_alloc(struct bch_fs *, bool); diff --git a/libbcachefs/error.c b/libbcachefs/error.c index a8ee1db..2cea694 100644 --- a/libbcachefs/error.c +++ b/libbcachefs/error.c @@ -25,6 +25,13 @@ bool bch2_inconsistent_error(struct bch_fs *c) } } +void 
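Several ec.c call sites above change from "update, then commit" to ret = bch2_trans_update(...) ?: bch2_trans_commit(...), because bch2_trans_update() can now fail and its error has to short-circuit the commit. The x ?: y form is the GNU conditional with the middle operand omitted: it evaluates x once and yields it when non-zero, otherwise y. A tiny demonstration with placeholder step functions:

    #include <stdio.h>

    static int step_update(int fail) { puts("update"); return fail ? -5 : 0; }
    static int step_commit(void)     { puts("commit"); return 0; }

    int main(void)
    {
        /* runs step_commit() only when step_update() returned 0 */
        int ret = step_update(0) ?: step_commit();
        printf("ret = %d\n", ret);      /* update, commit, ret = 0 */

        ret = step_update(1) ?: step_commit();
        printf("ret = %d\n", ret);      /* update, ret = -5 */
        return 0;
    }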
bch2_topology_error(struct bch_fs *c) +{ + set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags); + if (test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) + bch2_inconsistent_error(c); +} + void bch2_fatal_error(struct bch_fs *c) { if (bch2_fs_emergency_read_only(c)) @@ -74,9 +81,13 @@ enum fsck_err_ret bch2_fsck_err(struct bch_fs *c, unsigned flags, vprintk(fmt, args); va_end(args); - return bch2_inconsistent_error(c) - ? FSCK_ERR_EXIT - : FSCK_ERR_FIX; + if (c->opts.errors == BCH_ON_ERROR_continue) { + bch_err(c, "fixing"); + return FSCK_ERR_FIX; + } else { + bch2_inconsistent_error(c); + return FSCK_ERR_EXIT; + } } mutex_lock(&c->fsck_error_lock); @@ -100,6 +111,7 @@ found: list_move(&s->list, &c->fsck_errors); s->nr++; if (c->opts.ratelimit_errors && + !(flags & FSCK_NO_RATELIMIT) && s->nr >= FSCK_ERR_RATELIMIT_NR) { if (s->nr == FSCK_ERR_RATELIMIT_NR) suppressing = true; @@ -146,6 +158,7 @@ print: set_bit(BCH_FS_ERRORS_FIXED, &c->flags); return FSCK_ERR_FIX; } else { + set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags); set_bit(BCH_FS_ERROR, &c->flags); return c->opts.fix_errors == FSCK_OPT_EXIT || !(flags & FSCK_CAN_IGNORE) diff --git a/libbcachefs/error.h b/libbcachefs/error.h index 0e49fd7..9869382 100644 --- a/libbcachefs/error.h +++ b/libbcachefs/error.h @@ -29,6 +29,8 @@ struct work_struct; bool bch2_inconsistent_error(struct bch_fs *); +void bch2_topology_error(struct bch_fs *); + #define bch2_fs_inconsistent(c, ...) \ ({ \ bch_err(c, __VA_ARGS__); \ @@ -88,6 +90,7 @@ enum fsck_err_ret { FSCK_ERR_IGNORE = 0, FSCK_ERR_FIX = 1, FSCK_ERR_EXIT = 2, + FSCK_ERR_START_TOPOLOGY_REPAIR = 3, }; struct fsck_err_state { @@ -101,6 +104,7 @@ struct fsck_err_state { #define FSCK_CAN_FIX (1 << 0) #define FSCK_CAN_IGNORE (1 << 1) #define FSCK_NEED_FSCK (1 << 2) +#define FSCK_NO_RATELIMIT (1 << 3) __printf(3, 4) __cold enum fsck_err_ret bch2_fsck_err(struct bch_fs *, diff --git a/libbcachefs/extent_update.c b/libbcachefs/extent_update.c index bb4b2b4..4a8dd08 100644 --- a/libbcachefs/extent_update.c +++ b/libbcachefs/extent_update.c @@ -104,6 +104,10 @@ int bch2_extent_atomic_end(struct btree_iter *iter, unsigned nr_iters = 0; int ret; + ret = bch2_btree_iter_traverse(iter); + if (ret) + return ret; + *end = insert->k.p; /* extent_update_to_keys(): */ @@ -173,38 +177,3 @@ int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter) return !bkey_cmp(end, k->k.p); } - -enum btree_insert_ret -bch2_extent_can_insert(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_i *insert) -{ - struct bkey_s_c k; - int ret, sectors; - - k = bch2_btree_iter_peek_slot(iter); - ret = bkey_err(k); - if (ret) - return ret; - - /* Check if we're splitting a compressed extent: */ - - if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 && - bkey_cmp(insert->k.p, k.k->p) < 0 && - (sectors = bch2_bkey_sectors_compressed(k))) { - int flags = trans->flags & BTREE_INSERT_NOFAIL - ? 
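In the error.c hunks above, fsck errors of one type are still ratelimited after FSCK_ERR_RATELIMIT_NR occurrences, but a caller can now opt out with the new FSCK_NO_RATELIMIT flag, and outside of fsck the decision keys directly off errors=continue instead of always escalating. A toy model of just the ratelimit decision; the counter struct, limit and flag name are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define RATELIMIT_NR   10
    #define NO_RATELIMIT   (1 << 0)

    struct err_state { unsigned nr; };

    static bool should_print(struct err_state *s, unsigned flags,
                             bool ratelimit_opt)
    {
        s->nr++;

        if (ratelimit_opt && !(flags & NO_RATELIMIT) && s->nr >= RATELIMIT_NR) {
            if (s->nr == RATELIMIT_NR) {
                puts("ratelimiting further errors of this type");
                return true;   /* the error that trips the limit still prints */
            }
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct err_state s = { 0 };

        for (int i = 0; i < 15; i++)
            if (should_print(&s, 0, true))
                printf("error %d\n", i);
        return 0;
    }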
BCH_DISK_RESERVATION_NOFAIL : 0; - - switch (bch2_disk_reservation_add(trans->c, trans->disk_res, - sectors, flags)) { - case 0: - break; - case -ENOSPC: - return BTREE_INSERT_ENOSPC; - default: - BUG(); - } - } - - return BTREE_INSERT_OK; -} diff --git a/libbcachefs/extent_update.h b/libbcachefs/extent_update.h index 38dc084..2fa4602 100644 --- a/libbcachefs/extent_update.h +++ b/libbcachefs/extent_update.h @@ -9,8 +9,4 @@ int bch2_extent_atomic_end(struct btree_iter *, struct bkey_i *, int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *); int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *); -enum btree_insert_ret -bch2_extent_can_insert(struct btree_trans *, struct btree_iter *, - struct bkey_i *); - #endif /* _BCACHEFS_EXTENT_UPDATE_H */ diff --git a/libbcachefs/extents.c b/libbcachefs/extents.c index b07d395..563e130 100644 --- a/libbcachefs/extents.c +++ b/libbcachefs/extents.c @@ -192,9 +192,10 @@ void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c, { struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); - pr_buf(out, "seq %llx written %u min_key ", + pr_buf(out, "seq %llx written %u min_key %s", le64_to_cpu(bp.v->seq), - le16_to_cpu(bp.v->sectors_written)); + le16_to_cpu(bp.v->sectors_written), + BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : ""); bch2_bpos_to_text(out, bp.v->min_key); pr_buf(out, " "); @@ -230,112 +231,134 @@ void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c, bch2_bkey_ptrs_to_text(out, c, k); } -enum merge_result bch2_extent_merge(struct bch_fs *c, - struct bkey_s _l, struct bkey_s _r) +bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) { - struct bkey_s_extent l = bkey_s_to_extent(_l); - struct bkey_s_extent r = bkey_s_to_extent(_r); - union bch_extent_entry *en_l = l.v->start; - union bch_extent_entry *en_r = r.v->start; - struct bch_extent_crc_unpacked crc_l, crc_r; - - if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k)) - return BCH_MERGE_NOMERGE; - - crc_l = bch2_extent_crc_unpack(l.k, NULL); - - extent_for_each_entry(l, en_l) { - en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data); + struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l); + struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r); + union bch_extent_entry *en_l; + const union bch_extent_entry *en_r; + struct extent_ptr_decoded lp, rp; + bool use_right_ptr; + struct bch_dev *ca; + en_l = l_ptrs.start; + en_r = r_ptrs.start; + while (en_l < l_ptrs.end && en_r < r_ptrs.end) { if (extent_entry_type(en_l) != extent_entry_type(en_r)) - return BCH_MERGE_NOMERGE; - - switch (extent_entry_type(en_l)) { - case BCH_EXTENT_ENTRY_ptr: { - const struct bch_extent_ptr *lp = &en_l->ptr; - const struct bch_extent_ptr *rp = &en_r->ptr; - struct bch_dev *ca; - - if (lp->offset + crc_l.compressed_size != rp->offset || - lp->dev != rp->dev || - lp->gen != rp->gen) - return BCH_MERGE_NOMERGE; - - /* We don't allow extents to straddle buckets: */ - ca = bch_dev_bkey_exists(c, lp->dev); - - if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp)) - return BCH_MERGE_NOMERGE; - - break; - } - case BCH_EXTENT_ENTRY_stripe_ptr: - if (en_l->stripe_ptr.block != en_r->stripe_ptr.block || - en_l->stripe_ptr.idx != en_r->stripe_ptr.idx) - return BCH_MERGE_NOMERGE; - break; - case BCH_EXTENT_ENTRY_crc32: - case BCH_EXTENT_ENTRY_crc64: - case BCH_EXTENT_ENTRY_crc128: - crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); - crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); - - if (crc_l.csum_type != crc_r.csum_type || - crc_l.compression_type != 
crc_r.compression_type || - crc_l.nonce != crc_r.nonce) - return BCH_MERGE_NOMERGE; - - if (crc_l.offset + crc_l.live_size != crc_l.compressed_size || - crc_r.offset) - return BCH_MERGE_NOMERGE; - - if (!bch2_checksum_mergeable(crc_l.csum_type)) - return BCH_MERGE_NOMERGE; + return false; - if (crc_is_compressed(crc_l)) - return BCH_MERGE_NOMERGE; + en_l = extent_entry_next(en_l); + en_r = extent_entry_next(en_r); + } - if (crc_l.csum_type && - crc_l.uncompressed_size + - crc_r.uncompressed_size > c->sb.encoded_extent_max) - return BCH_MERGE_NOMERGE; + if (en_l < l_ptrs.end || en_r < r_ptrs.end) + return false; - if (crc_l.uncompressed_size + crc_r.uncompressed_size > + en_l = l_ptrs.start; + en_r = r_ptrs.start; + lp.crc = bch2_extent_crc_unpack(l.k, NULL); + rp.crc = bch2_extent_crc_unpack(r.k, NULL); + + while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) && + __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) { + if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size != + rp.ptr.offset + rp.crc.offset || + lp.ptr.dev != rp.ptr.dev || + lp.ptr.gen != rp.ptr.gen || + lp.has_ec != rp.has_ec) + return false; + + /* Extents may not straddle buckets: */ + ca = bch_dev_bkey_exists(c, lp.ptr.dev); + if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr)) + return false; + + if (lp.has_ec != rp.has_ec || + (lp.has_ec && + (lp.ec.block != rp.ec.block || + lp.ec.redundancy != rp.ec.redundancy || + lp.ec.idx != rp.ec.idx))) + return false; + + if (lp.crc.compression_type != rp.crc.compression_type || + lp.crc.nonce != rp.crc.nonce) + return false; + + if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <= + lp.crc.uncompressed_size) { + /* can use left extent's crc entry */ + } else if (lp.crc.live_size <= rp.crc.offset ) { + /* can use right extent's crc entry */ + } else { + /* check if checksums can be merged: */ + if (lp.crc.csum_type != rp.crc.csum_type || + lp.crc.nonce != rp.crc.nonce || + crc_is_compressed(lp.crc) || + !bch2_checksum_mergeable(lp.crc.csum_type)) + return false; + + if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size || + rp.crc.offset) + return false; + + if (lp.crc.csum_type && + lp.crc.uncompressed_size + + rp.crc.uncompressed_size > c->sb.encoded_extent_max) + return false; + + if (lp.crc.uncompressed_size + rp.crc.uncompressed_size > bch2_crc_field_size_max[extent_entry_type(en_l)]) - return BCH_MERGE_NOMERGE; - - break; - default: - return BCH_MERGE_NOMERGE; + return false; } - } - - extent_for_each_entry(l, en_l) { - struct bch_extent_crc_unpacked crc_l, crc_r; - - en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data); - - if (!extent_entry_is_crc(en_l)) - continue; - crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); - crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); - - crc_l.csum = bch2_checksum_merge(crc_l.csum_type, - crc_l.csum, - crc_r.csum, - crc_r.uncompressed_size << 9); + en_l = extent_entry_next(en_l); + en_r = extent_entry_next(en_r); + } - crc_l.uncompressed_size += crc_r.uncompressed_size; - crc_l.compressed_size += crc_r.compressed_size; + use_right_ptr = false; + en_l = l_ptrs.start; + en_r = r_ptrs.start; + while (en_l < l_ptrs.end) { + if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr && + use_right_ptr) + en_l->ptr = en_r->ptr; + + if (extent_entry_is_crc(en_l)) { + struct bch_extent_crc_unpacked crc_l = + bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); + struct bch_extent_crc_unpacked crc_r = + bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); + + use_right_ptr = false; + + if (crc_l.offset + crc_l.live_size + 
crc_r.live_size <= + crc_l.uncompressed_size) { + /* can use left extent's crc entry */ + } else if (crc_l.live_size <= crc_r.offset ) { + /* can use right extent's crc entry */ + crc_r.offset -= crc_l.live_size; + bch2_extent_crc_pack(entry_to_crc(en_l), crc_r, + extent_entry_type(en_l)); + use_right_ptr = true; + } else { + crc_l.csum = bch2_checksum_merge(crc_l.csum_type, + crc_l.csum, + crc_r.csum, + crc_r.uncompressed_size << 9); + + crc_l.uncompressed_size += crc_r.uncompressed_size; + crc_l.compressed_size += crc_r.compressed_size; + bch2_extent_crc_pack(entry_to_crc(en_l), crc_l, + extent_entry_type(en_l)); + } + } - bch2_extent_crc_pack(entry_to_crc(en_l), crc_l, - extent_entry_type(en_l)); + en_l = extent_entry_next(en_l); + en_r = extent_entry_next(en_r); } bch2_key_resize(l.k, l.k->size + r.k->size); - - return BCH_MERGE_MERGE; + return true; } /* KEY_TYPE_reservation: */ @@ -363,25 +386,17 @@ void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c, r.v->nr_replicas); } -enum merge_result bch2_reservation_merge(struct bch_fs *c, - struct bkey_s _l, struct bkey_s _r) +bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) { struct bkey_s_reservation l = bkey_s_to_reservation(_l); - struct bkey_s_reservation r = bkey_s_to_reservation(_r); + struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r); if (l.v->generation != r.v->generation || l.v->nr_replicas != r.v->nr_replicas) - return BCH_MERGE_NOMERGE; - - if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) { - bch2_key_resize(l.k, KEY_SIZE_MAX); - bch2_cut_front_s(l.k->p, r.s); - return BCH_MERGE_PARTIAL; - } + return false; bch2_key_resize(l.k, l.k->size + r.k->size); - - return BCH_MERGE_MERGE; + return true; } /* Extent checksum entries: */ diff --git a/libbcachefs/extents.h b/libbcachefs/extents.h index ccee43a..43cef0a 100644 --- a/libbcachefs/extents.h +++ b/libbcachefs/extents.h @@ -394,8 +394,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned, const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c); void bch2_extent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); -enum merge_result bch2_extent_merge(struct bch_fs *, - struct bkey_s, struct bkey_s); +bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); #define bch2_bkey_ops_extent (struct bkey_ops) { \ .key_invalid = bch2_extent_invalid, \ @@ -409,8 +408,7 @@ enum merge_result bch2_extent_merge(struct bch_fs *, const char *bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c); void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); -enum merge_result bch2_reservation_merge(struct bch_fs *, - struct bkey_s, struct bkey_s); +bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); #define bch2_bkey_ops_reservation (struct bkey_ops) { \ .key_invalid = bch2_reservation_invalid, \ @@ -428,6 +426,17 @@ void bch2_extent_crc_append(struct bkey_i *, /* Generic code for keys with pointers: */ +static inline bool bkey_is_btree_ptr(const struct bkey *k) +{ + switch (k->type) { + case KEY_TYPE_btree_ptr: + case KEY_TYPE_btree_ptr_v2: + return true; + default: + return false; + } +} + static inline bool bkey_extent_is_direct_data(const struct bkey *k) { switch (k->type) { @@ -529,6 +538,30 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k) return ret; } +static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr) +{ + switch (k.k->type) { + case 
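bch2_extent_merge() above is now a bool and handles checksummed extents more carefully: for each crc entry it either keeps the left key's checksum (it already covers the combined live range), switches to the right key's (shifting its offset back by the left's live size and using the right key's pointers from then on), or, failing both, merges the two checksums, which is only allowed for mergeable, uncompressed checksum types. A standalone restatement of that three-way decision; the struct mirrors the offset/live_size/uncompressed_size fields of the unpacked crc:

    #include <stdio.h>

    struct crc_info { unsigned offset, live_size, uncompressed_size; };

    enum merge_case { USE_LEFT, USE_RIGHT, MERGE_CSUMS };

    static enum merge_case classify(struct crc_info l, struct crc_info r)
    {
        if (l.offset + l.live_size + r.live_size <= l.uncompressed_size)
            return USE_LEFT;    /* left checksum already covers both */
        if (l.live_size <= r.offset)
            return USE_RIGHT;   /* right checksum covers both, offset shrinks */
        return MERGE_CSUMS;     /* must combine checksums (uncompressed only) */
    }

    int main(void)
    {
        /* one 128-sector checksummed region: the left extent is live over
         * sectors 0..63, the right extent over 64..127 of the same region */
        struct crc_info l = { .offset = 0,  .live_size = 64, .uncompressed_size = 128 };
        struct crc_info r = { .offset = 64, .live_size = 64, .uncompressed_size = 128 };

        printf("%d\n", classify(l, r));   /* 0: USE_LEFT */
        return 0;
    }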
KEY_TYPE_btree_ptr: + case KEY_TYPE_btree_ptr_v2: + return BCH_DATA_btree; + case KEY_TYPE_extent: + case KEY_TYPE_reflink_v: + return BCH_DATA_user; + case KEY_TYPE_stripe: { + struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k); + + BUG_ON(ptr < s.v->ptrs || + ptr >= s.v->ptrs + s.v->nr_blocks); + + return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant + ? BCH_DATA_parity + : BCH_DATA_user; + } + default: + BUG(); + } +} + unsigned bch2_bkey_nr_ptrs(struct bkey_s_c); unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c); unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c); diff --git a/libbcachefs/fs-common.c b/libbcachefs/fs-common.c index 281a613..2189a11 100644 --- a/libbcachefs/fs-common.c +++ b/libbcachefs/fs-common.c @@ -23,6 +23,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum, struct btree_iter *inode_iter = NULL; struct bch_hash_info hash = bch2_hash_info_init(c, new_inode); u64 now = bch2_current_time(c); + u64 cpu = raw_smp_processor_id(); u64 dir_offset = 0; int ret; @@ -36,7 +37,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum, if (!name) new_inode->bi_flags |= BCH_INODE_UNLINKED; - inode_iter = bch2_inode_create(trans, new_inode, U32_MAX); + inode_iter = bch2_inode_create(trans, new_inode, U32_MAX, cpu); ret = PTR_ERR_OR_ZERO(inode_iter); if (ret) goto err; @@ -84,7 +85,8 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum, inode_iter->snapshot = U32_MAX; bch2_btree_iter_set_pos(inode_iter, SPOS(0, new_inode->bi_inum, U32_MAX)); - ret = bch2_inode_write(trans, inode_iter, new_inode); + ret = bch2_btree_iter_traverse(inode_iter) ?: + bch2_inode_write(trans, inode_iter, new_inode); err: bch2_trans_iter_put(trans, inode_iter); bch2_trans_iter_put(trans, dir_iter); @@ -110,8 +112,6 @@ int bch2_link_trans(struct btree_trans *trans, u64 dir_inum, inode_u->bi_ctime = now; bch2_inode_nlink_inc(inode_u); - inode_u->bi_flags |= BCH_INODE_BACKPTR_UNTRUSTED; - dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, 0); ret = PTR_ERR_OR_ZERO(dir_iter); if (ret) @@ -168,6 +168,10 @@ int bch2_unlink_trans(struct btree_trans *trans, goto err; k = bch2_btree_iter_peek_slot(dirent_iter); + ret = bkey_err(k); + if (ret) + goto err; + inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum); inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT); @@ -175,6 +179,12 @@ int bch2_unlink_trans(struct btree_trans *trans, if (ret) goto err; + if (inode_u->bi_dir == k.k->p.inode && + inode_u->bi_dir_offset == k.k->p.offset) { + inode_u->bi_dir = 0; + inode_u->bi_dir_offset = 0; + } + dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now; dir_u->bi_nlink -= S_ISDIR(inode_u->bi_mode); bch2_inode_nlink_dec(inode_u); @@ -285,6 +295,13 @@ int bch2_rename_trans(struct btree_trans *trans, dst_inode_u->bi_dir = src_dir_u->bi_inum; dst_inode_u->bi_dir_offset = src_offset; } + + if (mode == BCH_RENAME_OVERWRITE && + dst_inode_u->bi_dir == dst_dir_u->bi_inum && + dst_inode_u->bi_dir_offset == src_offset) { + dst_inode_u->bi_dir = 0; + dst_inode_u->bi_dir_offset = 0; + } } if (mode == BCH_RENAME_OVERWRITE) { diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c index 1a94e7f..3333f61 100644 --- a/libbcachefs/fs-io.c +++ b/libbcachefs/fs-io.c @@ -99,8 +99,7 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping, * is continually redirtying a specific page */ do { - if (!mapping->nrpages && - !mapping->nrexceptional) + if (!mapping->nrpages) return 0; ret = filemap_write_and_wait_range(mapping, start, end); @@ -802,11 
+801,22 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter, bch2_bkey_buf_init(&sk); retry: + bch2_trans_begin(trans); + while (1) { struct bkey_s_c k; unsigned bytes, sectors, offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; + /* + * read_extent -> io_time_reset may cause a transaction restart + * without returning an error, we need to check for that here: + */ + if (!bch2_trans_relock(trans)) { + ret = -EINTR; + break; + } + bch2_btree_iter_set_pos(iter, POS(inum, rbio->bio.bi_iter.bi_sector)); @@ -893,7 +903,7 @@ void bch2_readahead(struct readahead_control *ractl) unsigned n = min_t(unsigned, readpages_iter.nr_pages - readpages_iter.idx, - BIO_MAX_PAGES); + BIO_MAX_VECS); struct bch_read_bio *rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read), opts); @@ -1018,6 +1028,8 @@ static void bch2_writepage_io_done(struct closure *cl) struct bio_vec *bvec; unsigned i; + up(&io->op.c->io_in_flight); + if (io->op.error) { set_bit(EI_INODE_ERROR, &io->inode->ei_flags); @@ -1080,6 +1092,8 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w) { struct bch_writepage_io *io = w->io; + down(&io->op.c->io_in_flight); + w->io = NULL; closure_call(&io->op.cl, bch2_write, NULL, &io->cl); continue_at(&io->cl, bch2_writepage_io_done, NULL); @@ -1098,8 +1112,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c, { struct bch_write_op *op; - w->io = container_of(bio_alloc_bioset(GFP_NOFS, - BIO_MAX_PAGES, + w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &c->writepage_bioset), struct bch_writepage_io, op.wbio.bio); @@ -1222,7 +1235,7 @@ do_io: (w->io->op.res.nr_replicas != nr_replicas_this_write || bio_full(&w->io->op.wbio.bio, PAGE_SIZE) || w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >= - (BIO_MAX_PAGES * PAGE_SIZE) || + (BIO_MAX_VECS * PAGE_SIZE) || bio_end_sector(&w->io->op.wbio.bio) != sector)) bch2_writepage_do_io(w); @@ -1686,7 +1699,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) iter->count -= shorten; bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_npages(iter, BIO_MAX_VECS), &c->dio_read_bioset); bio->bi_end_io = bch2_direct_IO_read_endio; @@ -1721,7 +1734,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) goto start; while (iter->count) { bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_npages(iter, BIO_MAX_VECS), &c->bio_read); bio->bi_end_io = bch2_direct_IO_read_split_endio; start: @@ -1819,6 +1832,8 @@ static long bch2_dio_write_loop(struct dio_write *dio) if (dio->loop) goto loop; + down(&c->io_in_flight); + while (1) { iter_count = dio->iter.count; @@ -1866,8 +1881,6 @@ static long bch2_dio_write_loop(struct dio_write *dio) * bio_iov_iter_get_pages was only able to get < * blocksize worth of pages: */ - bio_for_each_segment_all(bv, bio, iter) - put_page(bv->bv_page); ret = -EFAULT; goto err; } @@ -1883,6 +1896,7 @@ static long bch2_dio_write_loop(struct dio_write *dio) if ((req->ki_flags & IOCB_DSYNC) && !c->opts.journal_flush_disabled) dio->op.flags |= BCH_WRITE_FLUSH; + dio->op.flags |= BCH_WRITE_CHECK_ENOSPC; ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio), dio->op.opts.data_replicas, 0); @@ -1931,8 +1945,10 @@ loop: i_size_write(&inode->v, req->ki_pos); spin_unlock(&inode->v.i_lock); - bio_for_each_segment_all(bv, bio, iter) - put_page(bv->bv_page); + if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) + bio_for_each_segment_all(bv, bio, iter) + 
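The fs-io.c hunks above bracket both the buffered writeback path (down() in bch2_writepage_do_io(), up() in bch2_writepage_io_done()) and each O_DIRECT write with the new c->io_in_flight semaphore, presumably to bound how much write IO the filesystem keeps in flight at once. A userspace sketch of the same throttling shape using POSIX semaphores; the limit of 4 and the submit/complete functions are made up for the example:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_IN_FLIGHT 4

    static sem_t io_in_flight;

    static void *complete_io(void *arg)
    {
        usleep(1000);               /* pretend the device took a while */
        printf("completed io %ld\n", (long) arg);
        sem_post(&io_in_flight);    /* like up() in the completion path */
        return NULL;
    }

    static void submit_io(long i)
    {
        pthread_t t;

        sem_wait(&io_in_flight);    /* like down() before issuing the write */
        printf("submitted io %ld\n", i);
        pthread_create(&t, NULL, complete_io, (void *) i);
        pthread_detach(&t);
    }

    int main(void)
    {
        sem_init(&io_in_flight, 0, MAX_IN_FLIGHT);

        for (long i = 0; i < 16; i++)
            submit_io(i);           /* never more than 4 outstanding */

        sleep(1);
        return 0;
    }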
put_page(bv->bv_page); + bio->bi_vcnt = 0; if (dio->op.error) { set_bit(EI_INODE_ERROR, &inode->ei_flags); @@ -1948,12 +1964,16 @@ loop: ret = dio->op.error ?: ((long) dio->written << 9); err: + up(&c->io_in_flight); bch2_pagecache_block_put(&inode->ei_pagecache_lock); bch2_quota_reservation_put(c, inode, &dio->quota_res); if (dio->free_iov) kfree(dio->iter.iov); + if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) + bio_for_each_segment_all(bv, bio, iter) + put_page(bv->bv_page); bio_put(bio); /* inode->i_dio_count is our ref on inode and thus bch_fs */ @@ -2020,7 +2040,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) } bio = bio_alloc_bioset(GFP_KERNEL, - iov_iter_npages(iter, BIO_MAX_PAGES), + iov_iter_is_bvec(iter) + ? 0 + : iov_iter_npages(iter, BIO_MAX_VECS), &c->dio_write_bioset); dio = container_of(bio, struct dio_write, op.wbio.bio); init_completion(&dio->done); @@ -2244,11 +2266,11 @@ static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from) from, round_up(from, PAGE_SIZE)); } -static int bch2_extend(struct bch_inode_info *inode, +static int bch2_extend(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, struct bch_inode_unpacked *inode_u, struct iattr *iattr) { - struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; int ret; @@ -2262,24 +2284,15 @@ static int bch2_extend(struct bch_inode_info *inode, return ret; truncate_setsize(&inode->v, iattr->ia_size); - setattr_copy(&inode->v, iattr); - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode_size(c, inode, inode->v.i_size, - ATTR_MTIME|ATTR_CTIME); - mutex_unlock(&inode->ei_update_lock); - - return ret; + return bch2_setattr_nonsize(mnt_userns, inode, iattr); } static int bch2_truncate_finish_fn(struct bch_inode_info *inode, struct bch_inode_unpacked *bi, void *p) { - struct bch_fs *c = inode->v.i_sb->s_fs_info; - bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; - bi->bi_mtime = bi->bi_ctime = bch2_current_time(c); return 0; } @@ -2293,30 +2306,33 @@ static int bch2_truncate_start_fn(struct bch_inode_info *inode, return 0; } -int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr) +int bch2_truncate(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, struct iattr *iattr) { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct address_space *mapping = inode->v.i_mapping; struct bch_inode_unpacked inode_u; - struct btree_trans trans; - struct btree_iter *iter; u64 new_i_size = iattr->ia_size; s64 i_sectors_delta = 0; int ret = 0; - inode_dio_wait(&inode->v); - bch2_pagecache_block_get(&inode->ei_pagecache_lock); - /* - * fetch current on disk i_size: inode is locked, i_size can only - * increase underneath us: + * If the truncate call with change the size of the file, the + * cmtimes should be updated. If the size will not change, we + * do not need to update the cmtimes. 
*/ - bch2_trans_init(&trans, c, 0, 0); - iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0); - ret = PTR_ERR_OR_ZERO(iter); - bch2_trans_iter_put(&trans, iter); - bch2_trans_exit(&trans); + if (iattr->ia_size != inode->v.i_size) { + if (!(iattr->ia_valid & ATTR_MTIME)) + ktime_get_coarse_real_ts64(&iattr->ia_mtime); + if (!(iattr->ia_valid & ATTR_CTIME)) + ktime_get_coarse_real_ts64(&iattr->ia_ctime); + iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME; + } + + inode_dio_wait(&inode->v); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); + ret = bch2_inode_find_by_inum(c, inode->v.i_ino, &inode_u); if (ret) goto err; @@ -2333,10 +2349,12 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr) inode->v.i_size < inode_u.bi_size); if (iattr->ia_size > inode->v.i_size) { - ret = bch2_extend(inode, &inode_u, iattr); + ret = bch2_extend(mnt_userns, inode, &inode_u, iattr); goto err; } + iattr->ia_valid &= ~ATTR_SIZE; + ret = bch2_truncate_page(inode, iattr->ia_size); if (unlikely(ret)) goto err; @@ -2380,12 +2398,11 @@ int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr) if (unlikely(ret)) goto err; - setattr_copy(&inode->v, iattr); - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, - ATTR_MTIME|ATTR_CTIME); + ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0); mutex_unlock(&inode->ei_update_lock); + + ret = bch2_setattr_nonsize(mnt_userns, inode, iattr); err: bch2_pagecache_block_put(&inode->ei_pagecache_lock); return ret; @@ -2393,6 +2410,15 @@ err: /* fallocate: */ +static int inode_update_times_fn(struct bch_inode_info *inode, + struct bch_inode_unpacked *bi, void *p) +{ + struct bch_fs *c = inode->v.i_sb->s_fs_info; + + bi->bi_mtime = bi->bi_ctime = bch2_current_time(c); + return 0; +} + static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len) { struct bch_fs *c = inode->v.i_sb->s_fs_info; @@ -2430,6 +2456,11 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len &i_sectors_delta); i_sectors_acct(c, inode, NULL, i_sectors_delta); } + + mutex_lock(&inode->ei_update_lock); + ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL, + ATTR_MTIME|ATTR_CTIME) ?: ret; + mutex_unlock(&inode->ei_update_lock); err: bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); @@ -2509,7 +2540,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode, } bch2_bkey_buf_init(©); - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256); + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024); src = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS(inode->v.i_ino, src_start >> 9), BTREE_ITER_INTENT); @@ -2526,6 +2557,8 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode, struct bpos atomic_end; unsigned trigger_flags = 0; + bch2_trans_begin(&trans); + k = insert ? 
bch2_btree_iter_peek_prev(src) : bch2_btree_iter_peek(src); @@ -2587,7 +2620,8 @@ reassemble: BUG_ON(ret); } - ret = bch2_trans_update(&trans, del, &delete, trigger_flags) ?: + ret = bch2_btree_iter_traverse(del) ?: + bch2_trans_update(&trans, del, &delete, trigger_flags) ?: bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?: bch2_trans_commit(&trans, &disk_res, &inode->ei_journal_seq, @@ -2619,54 +2653,21 @@ err: return ret; } -static long bchfs_fallocate(struct bch_inode_info *inode, int mode, - loff_t offset, loff_t len) +static int __bchfs_fallocate(struct bch_inode_info *inode, int mode, + u64 start_sector, u64 end_sector) { - struct address_space *mapping = inode->v.i_mapping; struct bch_fs *c = inode->v.i_sb->s_fs_info; struct btree_trans trans; struct btree_iter *iter; - struct bpos end_pos; - loff_t end = offset + len; - loff_t block_start = round_down(offset, block_bytes(c)); - loff_t block_end = round_up(end, block_bytes(c)); - unsigned sectors; + struct bpos end_pos = POS(inode->v.i_ino, end_sector); unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas; - int ret; - - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - - inode_lock(&inode->v); - inode_dio_wait(&inode->v); - bch2_pagecache_block_get(&inode->ei_pagecache_lock); - - if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) { - ret = inode_newsize_ok(&inode->v, end); - if (ret) - goto err; - } - - if (mode & FALLOC_FL_ZERO_RANGE) { - ret = __bch2_truncate_page(inode, - offset >> PAGE_SHIFT, - offset, end); - - if (!ret && - offset >> PAGE_SHIFT != end >> PAGE_SHIFT) - ret = __bch2_truncate_page(inode, - end >> PAGE_SHIFT, - offset, end); - - if (unlikely(ret)) - goto err; + int ret = 0; - truncate_pagecache_range(&inode->v, offset, end - 1); - } + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512); iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, - POS(inode->v.i_ino, block_start >> 9), + POS(inode->v.i_ino, start_sector), BTREE_ITER_SLOTS|BTREE_ITER_INTENT); - end_pos = POS(inode->v.i_ino, block_end >> 9); while (!ret && bkey_cmp(iter->pos, end_pos) < 0) { s64 i_sectors_delta = 0; @@ -2674,6 +2675,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode, struct quota_res quota_res = { 0 }; struct bkey_i_reservation reservation; struct bkey_s_c k; + unsigned sectors; bch2_trans_begin(&trans); @@ -2684,13 +2686,13 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode, /* already reserved */ if (k.k->type == KEY_TYPE_reservation && bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) { - bch2_btree_iter_next_slot(iter); + bch2_btree_iter_advance(iter); continue; } if (bkey_extent_is_data(k.k) && !(mode & FALLOC_FL_ZERO_RANGE)) { - bch2_btree_iter_next_slot(iter); + bch2_btree_iter_advance(iter); continue; } @@ -2725,7 +2727,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode, ret = bch2_extent_update(&trans, iter, &reservation.k_i, &disk_res, &inode->ei_journal_seq, - 0, &i_sectors_delta); + 0, &i_sectors_delta, true); i_sectors_acct(c, inode, "a_res, i_sectors_delta); bkey_err: bch2_quota_reservation_put(c, inode, "a_res); @@ -2734,7 +2736,48 @@ bkey_err: ret = 0; } bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); + return ret; +} + +static long bchfs_fallocate(struct bch_inode_info *inode, int mode, + loff_t offset, loff_t len) +{ + struct address_space *mapping = inode->v.i_mapping; + struct bch_fs *c = inode->v.i_sb->s_fs_info; + loff_t end = offset + len; + loff_t block_start = round_down(offset, block_bytes(c)); + loff_t block_end 
= round_up(end, block_bytes(c)); + int ret; + inode_lock(&inode->v); + inode_dio_wait(&inode->v); + bch2_pagecache_block_get(&inode->ei_pagecache_lock); + + if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) { + ret = inode_newsize_ok(&inode->v, end); + if (ret) + goto err; + } + + if (mode & FALLOC_FL_ZERO_RANGE) { + ret = __bch2_truncate_page(inode, + offset >> PAGE_SHIFT, + offset, end); + + if (!ret && + offset >> PAGE_SHIFT != end >> PAGE_SHIFT) + ret = __bch2_truncate_page(inode, + end >> PAGE_SHIFT, + offset, end); + + if (unlikely(ret)) + goto err; + + truncate_pagecache_range(&inode->v, offset, end - 1); + } + + ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9); if (ret) goto err; @@ -2748,28 +2791,13 @@ bkey_err: if (end >= inode->v.i_size && (!(mode & FALLOC_FL_KEEP_SIZE) || (mode & FALLOC_FL_ZERO_RANGE))) { - struct btree_iter *inode_iter; - struct bch_inode_unpacked inode_u; - - do { - bch2_trans_begin(&trans); - inode_iter = bch2_inode_peek(&trans, &inode_u, - inode->v.i_ino, 0); - ret = PTR_ERR_OR_ZERO(inode_iter); - } while (ret == -EINTR); - - bch2_trans_iter_put(&trans, inode_iter); - bch2_trans_unlock(&trans); - - if (ret) - goto err; /* * Sync existing appends before extending i_size, * as in bch2_extend(): */ ret = filemap_write_and_wait_range(mapping, - inode_u.bi_size, S64_MAX); + inode->ei_inode.bi_size, S64_MAX); if (ret) goto err; @@ -2783,7 +2811,6 @@ bkey_err: mutex_unlock(&inode->ei_update_lock); } err: - bch2_trans_exit(&trans); bch2_pagecache_block_put(&inode->ei_pagecache_lock); inode_unlock(&inode->v); return ret; @@ -2920,6 +2947,11 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, if (pos_dst + ret > dst->v.i_size) i_size_write(&dst->v, pos_dst + ret); spin_unlock(&dst->v.i_lock); + + if (((file_dst->f_flags & (__O_SYNC | O_DSYNC)) || + IS_SYNC(file_inode(file_dst))) && + !c->opts.journal_flush_disabled) + ret = bch2_journal_flush_seq(&c->journal, dst->ei_journal_seq); err: bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst); diff --git a/libbcachefs/fs-io.h b/libbcachefs/fs-io.h index 2537a3d..b24efea 100644 --- a/libbcachefs/fs-io.h +++ b/libbcachefs/fs-io.h @@ -31,7 +31,8 @@ ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *); int bch2_fsync(struct file *, loff_t, loff_t, int); -int bch2_truncate(struct bch_inode_info *, struct iattr *); +int bch2_truncate(struct user_namespace *, + struct bch_inode_info *, struct iattr *); long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t); loff_t bch2_remap_file_range(struct file *, loff_t, struct file *, diff --git a/libbcachefs/fs-ioctl.c b/libbcachefs/fs-ioctl.c index eb87163..91a0e76 100644 --- a/libbcachefs/fs-ioctl.c +++ b/libbcachefs/fs-ioctl.c @@ -13,6 +13,9 @@ #include #define FS_IOC_GOINGDOWN _IOR('X', 125, __u32) +#define FSOP_GOING_FLAGS_DEFAULT 0x0 /* going down */ +#define FSOP_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */ +#define FSOP_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */ struct flags_set { unsigned mask; @@ -78,7 +81,7 @@ static int bch2_ioc_setflags(struct bch_fs *c, return ret; inode_lock(&inode->v); - if (!inode_owner_or_capable(&inode->v)) { + if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) { ret = -EACCES; goto setflags_out; } @@ -149,7 +152,7 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c, return ret; inode_lock(&inode->v); - if (!inode_owner_or_capable(&inode->v)) { + if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) { ret = -EACCES; goto err; } @@ 
-247,11 +250,54 @@ err1: return ret; } +static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) +{ + u32 flags; + int ret = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(flags, arg)) + return -EFAULT; + + bch_notice(c, "shutdown by ioctl type %u", flags); + + down_write(&c->vfs_sb->s_umount); + + switch (flags) { + case FSOP_GOING_FLAGS_DEFAULT: + ret = freeze_bdev(c->vfs_sb->s_bdev); + if (ret) + goto err; + + bch2_journal_flush(&c->journal); + c->vfs_sb->s_flags |= SB_RDONLY; + bch2_fs_emergency_read_only(c); + thaw_bdev(c->vfs_sb->s_bdev); + break; + + case FSOP_GOING_FLAGS_LOGFLUSH: + bch2_journal_flush(&c->journal); + fallthrough; + + case FSOP_GOING_FLAGS_NOLOGFLUSH: + c->vfs_sb->s_flags |= SB_RDONLY; + bch2_fs_emergency_read_only(c); + break; + default: + ret = -EINVAL; + break; + } +err: + up_write(&c->vfs_sb->s_umount); + return ret; +} + long bch2_fs_file_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct bch_inode_info *inode = file_bch_inode(file); - struct super_block *sb = inode->v.i_sb; - struct bch_fs *c = sb->s_fs_info; + struct bch_fs *c = inode->v.i_sb->s_fs_info; switch (cmd) { case FS_IOC_GETFLAGS: @@ -276,15 +322,7 @@ long bch2_fs_file_ioctl(struct file *file, unsigned cmd, unsigned long arg) return -ENOTTY; case FS_IOC_GOINGDOWN: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - down_write(&sb->s_umount); - sb->s_flags |= SB_RDONLY; - if (bch2_fs_emergency_read_only(c)) - bch_err(c, "emergency read only due to ioctl"); - up_write(&sb->s_umount); - return 0; + return bch2_ioc_goingdown(c, (u32 __user *) arg); default: return bch2_fs_ioctl(c, cmd, (void __user *) arg); diff --git a/libbcachefs/fs.c b/libbcachefs/fs.c index 8034d48..631fb87 100644 --- a/libbcachefs/fs.c +++ b/libbcachefs/fs.c @@ -27,9 +27,11 @@ #include #include #include +#include #include #include #include +#include #include static struct kmem_cache *bch2_inode_cache; @@ -143,7 +145,7 @@ int __must_check bch2_write_inode(struct bch_fs *c, struct bch_inode_unpacked inode_u; int ret; - bch2_trans_init(&trans, c, 0, 0); + bch2_trans_init(&trans, c, 0, 512); retry: bch2_trans_begin(&trans); @@ -154,7 +156,6 @@ retry: bch2_inode_write(&trans, iter, &inode_u) ?: bch2_trans_commit(&trans, NULL, &inode->ei_journal_seq, - BTREE_INSERT_NOUNLOCK| BTREE_INSERT_NOFAIL); /* @@ -243,11 +244,11 @@ static int inum_test(struct inode *inode, void *p) } static struct bch_inode_info * -__bch2_create(struct bch_inode_info *dir, struct dentry *dentry, +__bch2_create(struct user_namespace *mnt_userns, + struct bch_inode_info *dir, struct dentry *dentry, umode_t mode, dev_t rdev, bool tmpfile) { struct bch_fs *c = dir->v.i_sb->s_fs_info; - struct user_namespace *ns = dir->v.i_sb->s_user_ns; struct btree_trans trans; struct bch_inode_unpacked dir_u; struct bch_inode_info *inode, *old; @@ -283,8 +284,8 @@ retry: ret = bch2_create_trans(&trans, dir->v.i_ino, &dir_u, &inode_u, !tmpfile ? 
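The new bch2_ioc_goingdown() follows the XFS_IOC_GOINGDOWN convention: freeze, flush and go read-only by default, flush only the journal with LOGFLUSH, or go read-only immediately with NOLOGFLUSH. A hypothetical userspace invocation, using only the ioctl number and flag values defined in the hunk above (requires CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define FS_IOC_GOINGDOWN		_IOR('X', 125, __u32)
#define FSOP_GOING_FLAGS_LOGFLUSH	0x1	/* flush journal, skip data */

int main(int argc, char **argv)
{
	__u32 flags = FSOP_GOING_FLAGS_LOGFLUSH;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path on a bcachefs mount>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FS_IOC_GOINGDOWN, &flags)) {
		perror("FS_IOC_GOINGDOWN");
		return 1;
	}

	close(fd);
	return 0;
}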
&dentry->d_name : NULL, - from_kuid(ns, current_fsuid()), - from_kgid(ns, current_fsgid()), + from_kuid(mnt_userns, current_fsuid()), + from_kgid(mnt_userns, current_fsgid()), mode, rdev, default_acl, acl) ?: bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1, @@ -292,8 +293,7 @@ retry: if (unlikely(ret)) goto err_before_quota; - ret = bch2_trans_commit(&trans, NULL, &journal_seq, - BTREE_INSERT_NOUNLOCK); + ret = bch2_trans_commit(&trans, NULL, &journal_seq, 0); if (unlikely(ret)) { bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, KEY_TYPE_QUOTA_WARN); @@ -381,11 +381,12 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry, return d_splice_alias(vinode, dentry); } -static int bch2_mknod(struct inode *vdir, struct dentry *dentry, +static int bch2_mknod(struct user_namespace *mnt_userns, + struct inode *vdir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct bch_inode_info *inode = - __bch2_create(to_bch_ei(vdir), dentry, mode, rdev, false); + __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, rdev, false); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -394,10 +395,11 @@ static int bch2_mknod(struct inode *vdir, struct dentry *dentry, return 0; } -static int bch2_create(struct inode *vdir, struct dentry *dentry, +static int bch2_create(struct user_namespace *mnt_userns, + struct inode *vdir, struct dentry *dentry, umode_t mode, bool excl) { - return bch2_mknod(vdir, dentry, mode|S_IFREG, 0); + return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFREG, 0); } static int __bch2_link(struct bch_fs *c, @@ -412,8 +414,7 @@ static int __bch2_link(struct bch_fs *c, mutex_lock(&inode->ei_update_lock); bch2_trans_init(&trans, c, 4, 1024); - ret = __bch2_trans_do(&trans, NULL, &inode->ei_journal_seq, - BTREE_INSERT_NOUNLOCK, + ret = __bch2_trans_do(&trans, NULL, &inode->ei_journal_seq, 0, bch2_link_trans(&trans, dir->v.i_ino, inode->v.i_ino, &dir_u, &inode_u, @@ -465,7 +466,6 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry) bch2_trans_init(&trans, c, 4, 1024); ret = __bch2_trans_do(&trans, NULL, &dir->ei_journal_seq, - BTREE_INSERT_NOUNLOCK| BTREE_INSERT_NOFAIL, bch2_unlink_trans(&trans, dir->v.i_ino, &dir_u, @@ -487,14 +487,15 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry) return ret; } -static int bch2_symlink(struct inode *vdir, struct dentry *dentry, +static int bch2_symlink(struct user_namespace *mnt_userns, + struct inode *vdir, struct dentry *dentry, const char *symname) { struct bch_fs *c = vdir->i_sb->s_fs_info; struct bch_inode_info *dir = to_bch_ei(vdir), *inode; int ret; - inode = __bch2_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0, true); + inode = __bch2_create(mnt_userns, dir, dentry, S_IFLNK|S_IRWXUGO, 0, true); if (unlikely(IS_ERR(inode))) return PTR_ERR(inode); @@ -522,12 +523,14 @@ err: return ret; } -static int bch2_mkdir(struct inode *vdir, struct dentry *dentry, umode_t mode) +static int bch2_mkdir(struct user_namespace *mnt_userns, + struct inode *vdir, struct dentry *dentry, umode_t mode) { - return bch2_mknod(vdir, dentry, mode|S_IFDIR, 0); + return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFDIR, 0); } -static int bch2_rename2(struct inode *src_vdir, struct dentry *src_dentry, +static int bch2_rename2(struct user_namespace *mnt_userns, + struct inode *src_vdir, struct dentry *src_dentry, struct inode *dst_vdir, struct dentry *dst_dentry, unsigned flags) { @@ -583,8 +586,7 @@ static int bch2_rename2(struct inode *src_vdir, struct dentry *src_dentry, goto err; } - ret = __bch2_trans_do(&trans, NULL, 
&journal_seq, - BTREE_INSERT_NOUNLOCK, + ret = __bch2_trans_do(&trans, NULL, &journal_seq, 0, bch2_rename_trans(&trans, src_dir->v.i_ino, &src_dir_u, dst_dir->v.i_ino, &dst_dir_u, @@ -641,17 +643,21 @@ err: return ret; } -void bch2_setattr_copy(struct bch_inode_info *inode, - struct bch_inode_unpacked *bi, - struct iattr *attr) +static void bch2_setattr_copy(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, + struct bch_inode_unpacked *bi, + struct iattr *attr) { struct bch_fs *c = inode->v.i_sb->s_fs_info; unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) - bi->bi_uid = from_kuid(c->vfs_sb->s_user_ns, attr->ia_uid); + bi->bi_uid = from_kuid(mnt_userns, attr->ia_uid); if (ia_valid & ATTR_GID) - bi->bi_gid = from_kgid(c->vfs_sb->s_user_ns, attr->ia_gid); + bi->bi_gid = from_kgid(mnt_userns, attr->ia_gid); + + if (ia_valid & ATTR_SIZE) + bi->bi_size = attr->ia_size; if (ia_valid & ATTR_ATIME) bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime); @@ -667,14 +673,15 @@ void bch2_setattr_copy(struct bch_inode_info *inode, : inode->v.i_gid; if (!in_group_p(gid) && - !capable_wrt_inode_uidgid(&inode->v, CAP_FSETID)) + !capable_wrt_inode_uidgid(mnt_userns, &inode->v, CAP_FSETID)) mode &= ~S_ISGID; bi->bi_mode = mode; } } -static int bch2_setattr_nonsize(struct bch_inode_info *inode, - struct iattr *attr) +int bch2_setattr_nonsize(struct user_namespace *mnt_userns, + struct bch_inode_info *inode, + struct iattr *attr) { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_qid qid; @@ -711,7 +718,7 @@ retry: if (ret) goto btree_err; - bch2_setattr_copy(inode, &inode_u, attr); + bch2_setattr_copy(mnt_userns, inode, &inode_u, attr); if (attr->ia_valid & ATTR_MODE) { ret = bch2_acl_chmod(&trans, &inode_u, inode_u.bi_mode, &acl); @@ -722,7 +729,6 @@ retry: ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?: bch2_trans_commit(&trans, NULL, &inode->ei_journal_seq, - BTREE_INSERT_NOUNLOCK| BTREE_INSERT_NOFAIL); btree_err: bch2_trans_iter_put(&trans, inode_iter); @@ -744,7 +750,8 @@ err: return ret; } -static int bch2_getattr(const struct path *path, struct kstat *stat, +static int bch2_getattr(struct user_namespace *mnt_userns, + const struct path *path, struct kstat *stat, u32 request_mask, unsigned query_flags) { struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry)); @@ -784,26 +791,28 @@ static int bch2_getattr(const struct path *path, struct kstat *stat, return 0; } -static int bch2_setattr(struct dentry *dentry, struct iattr *iattr) +static int bch2_setattr(struct user_namespace *mnt_userns, + struct dentry *dentry, struct iattr *iattr) { struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); int ret; lockdep_assert_held(&inode->v.i_rwsem); - ret = setattr_prepare(dentry, iattr); + ret = setattr_prepare(mnt_userns, dentry, iattr); if (ret) return ret; return iattr->ia_valid & ATTR_SIZE - ? bch2_truncate(inode, iattr) - : bch2_setattr_nonsize(inode, iattr); + ? 
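Most of the VFS entry points in this hunk simply grow a struct user_namespace *mnt_userns argument — the idmapped-mounts API introduced around Linux 5.12 — and feed it to the id-mapping helpers instead of the superblock's s_user_ns. A small sketch of the ownership mapping, assuming normal kernel headers; fill_owner() is an invented name, the real callers are __bch2_create() and bch2_setattr_copy():

/* illustrative only -- maps the caller's creds through the mount's userns */
static void fill_owner(struct user_namespace *mnt_userns,
		       u32 *bi_uid, u32 *bi_gid)
{
	*bi_uid = from_kuid(mnt_userns, current_fsuid());
	*bi_gid = from_kgid(mnt_userns, current_fsgid());
}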
bch2_truncate(mnt_userns, inode, iattr) + : bch2_setattr_nonsize(mnt_userns, inode, iattr); } -static int bch2_tmpfile(struct inode *vdir, struct dentry *dentry, umode_t mode) +static int bch2_tmpfile(struct user_namespace *mnt_userns, + struct inode *vdir, struct dentry *dentry, umode_t mode) { struct bch_inode_info *inode = - __bch2_create(to_bch_ei(vdir), dentry, mode, 0, true); + __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, 0, true); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -894,6 +903,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS(ei->v.i_ino, start >> 9), 0); retry: + bch2_trans_begin(&trans); + while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k)) && bkey_cmp(iter->pos, end) < 0) { @@ -998,10 +1009,7 @@ static const struct file_operations bch_file_operations = { .open = generic_file_open, .fsync = bch2_fsync, .splice_read = generic_file_splice_read, -#if 0 - /* Busted: */ .splice_write = iter_file_splice_write, -#endif .fallocate = bch2_fallocate_dispatch, .unlocked_ioctl = bch2_fs_file_ioctl, #ifdef CONFIG_COMPAT @@ -1263,8 +1271,8 @@ static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_type = BCACHEFS_STATFS_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = usage.capacity >> shift; - buf->f_bfree = (usage.capacity - usage.used) >> shift; - buf->f_bavail = buf->f_bfree; + buf->f_bfree = usage.free >> shift; + buf->f_bavail = avail_factor(usage.free) >> shift; buf->f_files = usage.nr_inodes + avail_inodes; buf->f_ffree = avail_inodes; @@ -1293,16 +1301,17 @@ static int bch2_sync_fs(struct super_block *sb, int wait) return bch2_journal_flush(&c->journal); } -static struct bch_fs *bch2_path_to_fs(const char *dev) +static struct bch_fs *bch2_path_to_fs(const char *path) { struct bch_fs *c; - struct block_device *bdev = lookup_bdev(dev); + dev_t dev; + int ret; - if (IS_ERR(bdev)) - return ERR_CAST(bdev); + ret = lookup_bdev(path, &dev); + if (ret) + return ERR_PTR(ret); - c = bch2_bdev_to_fs(bdev); - bdput(bdev); + c = bch2_dev_to_fs(dev); if (c) closure_put(&c->cl); return c ?: ERR_PTR(-ENOENT); @@ -1488,6 +1497,9 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type, if (ret) return ERR_PTR(ret); + if (!dev_name || strlen(dev_name) == 0) + return ERR_PTR(-EINVAL); + devs = split_devs(dev_name, &nr_devs); if (!devs) return ERR_PTR(-ENOMEM); @@ -1554,7 +1566,9 @@ got_sb: #endif sb->s_xattr = bch2_xattr_handlers; sb->s_magic = BCACHEFS_STATFS_MAGIC; - sb->s_time_gran = c->sb.time_precision; + sb->s_time_gran = c->sb.nsec_per_time_unit; + sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; + sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); c->vfs_sb = sb; strlcpy(sb->s_id, c->name, sizeof(sb->s_id)); @@ -1574,6 +1588,8 @@ got_sb: break; } + c->dev = sb->s_dev; + #ifdef CONFIG_BCACHEFS_POSIX_ACL if (c->opts.acl) sb->s_flags |= SB_POSIXACL; diff --git a/libbcachefs/fs.h b/libbcachefs/fs.h index 2d82ed7..36cc6ba 100644 --- a/libbcachefs/fs.h +++ b/libbcachefs/fs.h @@ -167,6 +167,10 @@ void bch2_inode_update_after_write(struct bch_fs *, int __must_check bch2_write_inode(struct bch_fs *, struct bch_inode_info *, inode_set_fn, void *, unsigned); +int bch2_setattr_nonsize(struct user_namespace *, + struct bch_inode_info *, + struct iattr *); + void bch2_vfs_exit(void); int bch2_vfs_init(void); diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c index acf128f..36eba46 100644 --- a/libbcachefs/fsck.c +++ 
b/libbcachefs/fsck.c @@ -12,8 +12,8 @@ #include "super.h" #include "xattr.h" +#include #include /* struct qstr */ -#include #define QSTR(n) { { { .len = strlen(n) } }, .name = n } @@ -38,72 +38,216 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum) return ret ?: sectors; } -static int __remove_dirent(struct btree_trans *trans, - struct bkey_s_c_dirent dirent) +static int __lookup_inode(struct btree_trans *trans, u64 inode_nr, + struct bch_inode_unpacked *inode, + u32 *snapshot) +{ + struct btree_iter *iter; + struct bkey_s_c k; + int ret; + + iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, + POS(0, inode_nr), 0); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto err; + + if (snapshot) + *snapshot = iter->pos.snapshot; + ret = k.k->type == KEY_TYPE_inode + ? bch2_inode_unpack(bkey_s_c_to_inode(k), inode) + : -ENOENT; +err: + bch2_trans_iter_free(trans, iter); + return ret; +} + +static int lookup_inode(struct btree_trans *trans, u64 inode_nr, + struct bch_inode_unpacked *inode, + u32 *snapshot) +{ + return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot)); +} + +static int __write_inode(struct btree_trans *trans, + struct bch_inode_unpacked *inode, + u32 snapshot) +{ + struct btree_iter *inode_iter = + bch2_trans_get_iter(trans, BTREE_ID_inodes, + SPOS(0, inode->bi_inum, snapshot), + BTREE_ITER_INTENT); + int ret = bch2_btree_iter_traverse(inode_iter) ?: + bch2_inode_write(trans, inode_iter, inode); + bch2_trans_iter_put(trans, inode_iter); + return ret; +} + +static int write_inode(struct btree_trans *trans, + struct bch_inode_unpacked *inode, + u32 snapshot) +{ + int ret = __bch2_trans_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + __write_inode(trans, inode, snapshot)); + if (ret) + bch_err(trans->c, "error in fsck: error %i updating inode", ret); + return ret; +} + +static int __remove_dirent(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct qstr name; + struct btree_iter *iter; struct bch_inode_unpacked dir_inode; struct bch_hash_info dir_hash_info; - u64 dir_inum = dirent.k->p.inode; int ret; - char *buf; - name.len = bch2_dirent_name_bytes(dirent); - buf = bch2_trans_kmalloc(trans, name.len + 1); - if (IS_ERR(buf)) - return PTR_ERR(buf); - - memcpy(buf, dirent.v->d_name, name.len); - buf[name.len] = '\0'; - name.name = buf; - - ret = __bch2_inode_find_by_inum_trans(trans, dir_inum, &dir_inode, 0); - if (ret && ret != -EINTR) - bch_err(c, "remove_dirent: err %i looking up directory inode", ret); + ret = lookup_inode(trans, pos.inode, &dir_inode, NULL); if (ret) return ret; dir_hash_info = bch2_hash_info_init(c, &dir_inode); - ret = bch2_hash_delete(trans, bch2_dirent_hash_desc, - &dir_hash_info, dir_inum, &name); - if (ret && ret != -EINTR) - bch_err(c, "remove_dirent: err %i deleting dirent", ret); - if (ret) - return ret; + iter = bch2_trans_get_iter(trans, BTREE_ID_dirents, pos, BTREE_ITER_INTENT); - return 0; + ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc, + &dir_hash_info, iter); + bch2_trans_iter_put(trans, iter); + return ret; +} + +static int remove_dirent(struct btree_trans *trans, struct bpos pos) +{ + int ret = __bch2_trans_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + __remove_dirent(trans, pos)); + if (ret) + bch_err(trans->c, "remove_dirent: err %i deleting dirent", ret); + return ret; } -static int remove_dirent(struct btree_trans *trans, - struct bkey_s_c_dirent dirent) +/* Get lost+found, create if 
it doesn't exist: */ +static int lookup_lostfound(struct btree_trans *trans, + struct bch_inode_unpacked *lostfound) { - return __bch2_trans_do(trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW, - __remove_dirent(trans, dirent)); + struct bch_fs *c = trans->c; + struct bch_inode_unpacked root; + struct bch_hash_info root_hash_info; + struct qstr lostfound_str = QSTR("lost+found"); + u64 inum; + u32 snapshot; + int ret; + + ret = lookup_inode(trans, BCACHEFS_ROOT_INO, &root, &snapshot); + if (ret && ret != -ENOENT) + return ret; + + root_hash_info = bch2_hash_info_init(c, &root); + inum = bch2_dirent_lookup(c, BCACHEFS_ROOT_INO, &root_hash_info, + &lostfound_str); + if (!inum) { + bch_notice(c, "creating lost+found"); + goto create_lostfound; + } + + ret = lookup_inode(trans, inum, lostfound, &snapshot); + if (ret && ret != -ENOENT) { + /* + * The check_dirents pass has already run, dangling dirents + * shouldn't exist here: + */ + bch_err(c, "error looking up lost+found: %i", ret); + return ret; + } + + if (ret == -ENOENT) { +create_lostfound: + bch2_inode_init_early(c, lostfound); + + ret = __bch2_trans_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + bch2_create_trans(trans, + BCACHEFS_ROOT_INO, &root, + lostfound, + &lostfound_str, + 0, 0, S_IFDIR|0700, 0, NULL, NULL)); + if (ret) + bch_err(c, "error creating lost+found: %i", ret); + } + + return 0; } -static int reattach_inode(struct bch_fs *c, - struct bch_inode_unpacked *lostfound_inode, - u64 inum) +static int reattach_inode(struct btree_trans *trans, + struct bch_inode_unpacked *inode) { - struct bch_inode_unpacked dir_u, inode_u; + struct bch_hash_info dir_hash; + struct bch_inode_unpacked lostfound; char name_buf[20]; struct qstr name; + u64 dir_offset = 0; int ret; - snprintf(name_buf, sizeof(name_buf), "%llu", inum); + ret = lookup_lostfound(trans, &lostfound); + if (ret) + return ret; + + if (S_ISDIR(inode->bi_mode)) { + lostfound.bi_nlink++; + + ret = write_inode(trans, &lostfound, U32_MAX); + if (ret) + return ret; + } + + dir_hash = bch2_hash_info_init(trans->c, &lostfound); + + snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum); name = (struct qstr) QSTR(name_buf); - ret = bch2_trans_do(c, NULL, NULL, - BTREE_INSERT_LAZY_RW, - bch2_link_trans(&trans, lostfound_inode->bi_inum, - inum, &dir_u, &inode_u, &name)); + ret = __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW, + bch2_dirent_create(trans, lostfound.bi_inum, &dir_hash, + mode_to_type(inode->bi_mode), + &name, inode->bi_inum, &dir_offset, + BCH_HASH_SET_MUST_CREATE)); + if (ret) { + bch_err(trans->c, "error %i reattaching inode %llu", + ret, inode->bi_inum); + return ret; + } + + inode->bi_dir = lostfound.bi_inum; + inode->bi_dir_offset = dir_offset; + + return write_inode(trans, inode, U32_MAX); +} + +static int remove_backpointer(struct btree_trans *trans, + struct bch_inode_unpacked *inode) +{ + struct btree_iter *iter; + struct bkey_s_c k; + int ret; + + iter = bch2_trans_get_iter(trans, BTREE_ID_dirents, + POS(inode->bi_dir, inode->bi_dir_offset), 0); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); if (ret) - bch_err(c, "error %i reattaching inode %llu", ret, inum); + goto out; + if (k.k->type != KEY_TYPE_dirent) { + ret = -ENOENT; + goto out; + } + ret = remove_dirent(trans, k.k->p); +out: + bch2_trans_iter_put(trans, iter); return ret; } @@ -111,6 +255,7 @@ struct inode_walker { bool first_this_inode; bool have_inode; u64 cur_inum; + u32 snapshot; struct bch_inode_unpacked inode; }; @@ -122,12 
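The rewritten fsck helpers above all share a two-level shape: a __helper() that does the btree work inside the caller's transaction, and a thin wrapper that runs it through __bch2_trans_do() (or lockrestart_do()) so lock restarts are retried and the filesystem is lazily switched read-write. A bare sketch of that shape, with do_repair()/__do_repair() as stand-in names for __write_inode()/write_inode() and friends:

static int __do_repair(struct btree_trans *trans)
{
	/* iterators, bch2_trans_update() calls, etc. go here */
	return 0;
}

static int do_repair(struct btree_trans *trans)
{
	/* retried on transaction restart, committed with NOFAIL|LAZY_RW */
	return __bch2_trans_do(trans, NULL, NULL,
			       BTREE_INSERT_NOFAIL|
			       BTREE_INSERT_LAZY_RW,
			       __do_repair(trans));
}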
+267,11 @@ static struct inode_walker inode_walker_init(void) }; } -static int walk_inode(struct btree_trans *trans, - struct inode_walker *w, u64 inum) +static int __walk_inode(struct btree_trans *trans, + struct inode_walker *w, u64 inum) { if (inum != w->cur_inum) { - int ret = __bch2_inode_find_by_inum_trans(trans, inum, - &w->inode, 0); + int ret = __lookup_inode(trans, inum, &w->inode, &w->snapshot); if (ret && ret != -ENOENT) return ret; @@ -142,58 +286,35 @@ static int walk_inode(struct btree_trans *trans, return 0; } -struct hash_check { - struct bch_hash_info info; - - /* start of current chain of hash collisions: */ - struct btree_iter *chain; - - /* next offset in current chain of hash collisions: */ - u64 chain_end; -}; - -static void hash_check_init(struct hash_check *h) -{ - h->chain = NULL; - h->chain_end = 0; -} - -static void hash_stop_chain(struct btree_trans *trans, - struct hash_check *h) -{ - if (h->chain) - bch2_trans_iter_free(trans, h->chain); - h->chain = NULL; -} - -static void hash_check_set_inode(struct btree_trans *trans, - struct hash_check *h, - const struct bch_inode_unpacked *bi) +static int walk_inode(struct btree_trans *trans, + struct inode_walker *w, u64 inum) { - h->info = bch2_hash_info_init(trans->c, bi); - hash_stop_chain(trans, h); + return lockrestart_do(trans, __walk_inode(trans, w, inum)); } -static int hash_redo_key(const struct bch_hash_desc desc, - struct btree_trans *trans, struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k, - u64 hashed) +static int hash_redo_key(struct btree_trans *trans, + const struct bch_hash_desc desc, + struct bch_hash_info *hash_info, + struct btree_iter *k_iter, struct bkey_s_c k) { - struct bkey_i delete; + struct bkey_i *delete; struct bkey_i *tmp; + delete = bch2_trans_kmalloc(trans, sizeof(*delete)); + if (IS_ERR(delete)) + return PTR_ERR(delete); + tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); if (IS_ERR(tmp)) return PTR_ERR(tmp); bkey_reassemble(tmp, k); - bkey_init(&delete.k); - delete.k.p = k_iter->pos; - bch2_trans_update(trans, k_iter, &delete, 0); - - return bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode, - tmp, 0); + bkey_init(&delete->k); + delete->k.p = k_iter->pos; + return bch2_btree_iter_traverse(k_iter) ?: + bch2_trans_update(trans, k_iter, delete, 0) ?: + bch2_hash_set(trans, desc, hash_info, k_iter->pos.inode, tmp, 0); } static int fsck_hash_delete_at(struct btree_trans *trans, @@ -216,201 +337,211 @@ retry: return ret; } -static int hash_check_duplicates(struct btree_trans *trans, - const struct bch_hash_desc desc, struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k) +static int hash_check_key(struct btree_trans *trans, + const struct bch_hash_desc desc, + struct bch_hash_info *hash_info, + struct btree_iter *k_iter, struct bkey_s_c hash_k) { struct bch_fs *c = trans->c; - struct btree_iter *iter; - struct bkey_s_c k2; + struct btree_iter *iter = NULL; char buf[200]; + struct bkey_s_c k; + u64 hash; int ret = 0; - if (!bkey_cmp(h->chain->pos, k_iter->pos)) + if (hash_k.k->type != desc.key_type) return 0; - iter = bch2_trans_copy_iter(trans, h->chain); + hash = desc.hash_bkey(hash_info, hash_k); - for_each_btree_key_continue(iter, 0, k2, ret) { - if (bkey_cmp(k2.k->p, k.k->p) >= 0) + if (likely(hash == hash_k.k->p.offset)) + return 0; + + if (hash_k.k->p.offset < hash) + goto bad_hash; + + for_each_btree_key(trans, iter, desc.btree_id, POS(hash_k.k->p.inode, hash), + BTREE_ITER_SLOTS, k, ret) { + if (!bkey_cmp(k.k->p, hash_k.k->p)) break; - if 
(fsck_err_on(k2.k->type == desc.key_type && - !desc.cmp_bkey(k, k2), c, + if (fsck_err_on(k.k->type == desc.key_type && + !desc.cmp_bkey(k, hash_k), c, "duplicate hash table keys:\n%s", (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf))) { - ret = fsck_hash_delete_at(trans, desc, &h->info, k_iter); + hash_k), buf))) { + ret = fsck_hash_delete_at(trans, desc, hash_info, k_iter); if (ret) return ret; ret = 1; break; } + + if (bkey_deleted(k.k)) { + bch2_trans_iter_free(trans, iter); + goto bad_hash; + } + } -fsck_err: bch2_trans_iter_free(trans, iter); return ret; -} - -static void hash_set_chain_start(struct btree_trans *trans, - const struct bch_hash_desc desc, - struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k) -{ - bool hole = (k.k->type != KEY_TYPE_hash_whiteout && - k.k->type != desc.key_type); - - if (hole || k.k->p.offset > h->chain_end + 1) - hash_stop_chain(trans, h); - - if (!hole) { - if (!h->chain) - h->chain = bch2_trans_copy_iter(trans, k_iter); +bad_hash: + if (fsck_err(c, "hash table key at wrong offset: btree %u inode %llu offset %llu, " + "hashed to %llu\n%s", + desc.btree_id, hash_k.k->p.inode, hash_k.k->p.offset, hash, + (bch2_bkey_val_to_text(&PBUF(buf), c, hash_k), buf)) == FSCK_ERR_IGNORE) + return 0; - h->chain_end = k.k->p.offset; + ret = __bch2_trans_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, + hash_redo_key(trans, desc, hash_info, k_iter, hash_k)); + if (ret) { + bch_err(c, "hash_redo_key err %i", ret); + return ret; } + return -EINTR; +fsck_err: + return ret; } -static bool key_has_correct_hash(struct btree_trans *trans, - const struct bch_hash_desc desc, - struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k) +static int check_inode(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c_inode inode) { - u64 hash; - - hash_set_chain_start(trans, desc, h, k_iter, k); + struct bch_fs *c = trans->c; + struct bch_inode_unpacked u; + bool do_update = false; + int ret = 0; - if (k.k->type != desc.key_type) - return true; + ret = bch2_inode_unpack(inode, &u); - hash = desc.hash_bkey(&h->info, k); + if (bch2_fs_inconsistent_on(ret, c, + "error unpacking inode %llu in fsck", + inode.k->p.inode)) + return ret; - return hash >= h->chain->pos.offset && - hash <= k.k->p.offset; -} + if (u.bi_flags & BCH_INODE_UNLINKED && + (!c->sb.clean || + fsck_err(c, "filesystem marked clean, but inode %llu unlinked", + u.bi_inum))) { + bch_verbose(c, "deleting inode %llu", u.bi_inum); -static int hash_check_key(struct btree_trans *trans, - const struct bch_hash_desc desc, struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k) -{ - struct bch_fs *c = trans->c; - char buf[200]; - u64 hashed; - int ret = 0; + bch2_trans_unlock(trans); + bch2_fs_lazy_rw(c); - hash_set_chain_start(trans, desc, h, k_iter, k); + ret = bch2_inode_rm(c, u.bi_inum, false); + if (ret) + bch_err(c, "error in fsck: error %i while deleting inode", ret); + return ret; + } - if (k.k->type != desc.key_type) - return 0; + if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY && + (!c->sb.clean || + fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty", + u.bi_inum))) { + bch_verbose(c, "truncating inode %llu", u.bi_inum); - hashed = desc.hash_bkey(&h->info, k); + bch2_trans_unlock(trans); + bch2_fs_lazy_rw(c); - if (fsck_err_on(hashed < h->chain->pos.offset || - hashed > k.k->p.offset, c, - "hash table key at wrong offset: btree %u, %llu, " - "hashed to %llu chain starts at %llu\n%s", - desc.btree_id, k.k->p.offset, - hashed, 
h->chain->pos.offset, - (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf))) { - ret = __bch2_trans_do(trans, NULL, NULL, - BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, - hash_redo_key(desc, trans, h, k_iter, k, hashed)); + /* + * XXX: need to truncate partial blocks too here - or ideally + * just switch units to bytes and that issue goes away + */ + ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents, + POS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9), + POS(u.bi_inum, U64_MAX), + NULL); if (ret) { - bch_err(c, "hash_redo_key err %i", ret); + bch_err(c, "error in fsck: error %i truncating inode", ret); return ret; } - return -EINTR; - } - - ret = hash_check_duplicates(trans, desc, h, k_iter, k); -fsck_err: - return ret; -} - -static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h, - struct btree_iter *iter, struct bkey_s_c *k) -{ - struct bch_fs *c = trans->c; - struct bkey_i_dirent *d = NULL; - int ret = -EINVAL; - char buf[200]; - unsigned len; - u64 hash; - - if (key_has_correct_hash(trans, bch2_dirent_hash_desc, h, iter, *k)) - return 0; - len = bch2_dirent_name_bytes(bkey_s_c_to_dirent(*k)); - BUG_ON(!len); - - memcpy(buf, bkey_s_c_to_dirent(*k).v->d_name, len); - buf[len] = '\0'; + /* + * We truncated without our normal sector accounting hook, just + * make sure we recalculate it: + */ + u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY; - d = kmalloc(bkey_bytes(k->k), GFP_KERNEL); - if (!d) { - bch_err(c, "memory allocation failure"); - return -ENOMEM; + u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; + do_update = true; } - bkey_reassemble(&d->k_i, *k); - - do { - --len; - if (!len) - goto err_redo; + if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY && + (!c->sb.clean || + fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty", + u.bi_inum))) { + s64 sectors; - d->k.u64s = BKEY_U64s + dirent_val_u64s(len); + bch_verbose(c, "recounting sectors for inode %llu", + u.bi_inum); - BUG_ON(bkey_val_bytes(&d->k) < - offsetof(struct bch_dirent, d_name) + len); + sectors = bch2_count_inode_sectors(trans, u.bi_inum); + if (sectors < 0) { + bch_err(c, "error in fsck: error %i recounting inode sectors", + (int) sectors); + return sectors; + } - memset(d->v.d_name + len, 0, - bkey_val_bytes(&d->k) - - offsetof(struct bch_dirent, d_name) - len); + u.bi_sectors = sectors; + u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY; + do_update = true; + } - hash = bch2_dirent_hash_desc.hash_bkey(&h->info, - bkey_i_to_s_c(&d->k_i)); - } while (hash < h->chain->pos.offset || - hash > k->k->p.offset); + if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) { + u.bi_dir = 0; + u.bi_dir_offset = 0; + u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED; + do_update = true; + } - if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)", - buf, strlen(buf), d->v.d_name, len)) { + if (do_update) { ret = __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL| BTREE_INSERT_LAZY_RW, - (bch2_trans_update(trans, iter, &d->k_i, 0), 0)); + bch2_btree_iter_traverse(iter) ?: + bch2_inode_write(trans, iter, &u)); if (ret) - goto err; - - *k = bch2_btree_iter_peek(iter); - - BUG_ON(k->k->type != KEY_TYPE_dirent); + bch_err(c, "error in fsck: error %i " + "updating inode", ret); } -err: fsck_err: - kfree(d); return ret; -err_redo: - hash = bch2_dirent_hash_desc.hash_bkey(&h->info, *k); - - if (fsck_err(c, "cannot fix dirent by removing trailing garbage %s (%zu)\n" - "hash table key at wrong offset: btree %u, offset %llu, " - "hashed to %llu chain starts at %llu\n%s", - buf, strlen(buf), BTREE_ID_dirents, - 
k->k->p.offset, hash, h->chain->pos.offset, - (bch2_bkey_val_to_text(&PBUF(buf), c, - *k), buf))) { - ret = __bch2_trans_do(trans, NULL, NULL, - BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, - hash_redo_key(bch2_dirent_hash_desc, trans, - h, iter, *k, hash)); - if (ret) - bch_err(c, "hash_redo_key err %i", ret); - else - ret = 1; +} + +noinline_for_stack +static int check_inodes(struct bch_fs *c, bool full) +{ + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + struct bkey_s_c_inode inode; + int ret; + + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); + + for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH, k, ret) { + if (k.k->type != KEY_TYPE_inode) + continue; + + inode = bkey_s_c_to_inode(k); + + if (full || + (inode.v->bi_flags & (BCH_INODE_I_SIZE_DIRTY| + BCH_INODE_I_SECTORS_DIRTY| + BCH_INODE_UNLINKED))) { + ret = check_inode(&trans, iter, inode); + if (ret) + break; + } } + bch2_trans_iter_put(&trans, iter); + + BUG_ON(ret == -EINTR); - goto err; + return bch2_trans_exit(&trans) ?: ret; } static int fix_overlapping_extent(struct btree_trans *trans, @@ -440,12 +571,42 @@ static int fix_overlapping_extent(struct btree_trans *trans, BTREE_ITER_INTENT|BTREE_ITER_NOT_EXTENTS); BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS); - bch2_trans_update(trans, iter, u, BTREE_TRIGGER_NORUN); + ret = bch2_btree_iter_traverse(iter) ?: + bch2_trans_update(trans, iter, u, BTREE_TRIGGER_NORUN) ?: + bch2_trans_commit(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW); bch2_trans_iter_put(trans, iter); + return ret; +} - return bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW); +static int inode_backpointer_exists(struct btree_trans *trans, + struct bch_inode_unpacked *inode) +{ + struct btree_iter *iter; + struct bkey_s_c k; + int ret; + + iter = bch2_trans_get_iter(trans, BTREE_ID_dirents, + POS(inode->bi_dir, inode->bi_dir_offset), 0); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto out; + if (k.k->type != KEY_TYPE_dirent) + goto out; + + ret = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum) == inode->bi_inum; +out: + bch2_trans_iter_free(trans, iter); + return ret; +} + +static bool inode_backpointer_matches(struct bkey_s_c_dirent d, + struct bch_inode_unpacked *inode) +{ + return d.k->p.inode == inode->bi_dir && + d.k->p.offset == inode->bi_dir_offset; } /* @@ -471,7 +632,8 @@ static int check_extents(struct bch_fs *c) iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS(BCACHEFS_ROOT_INO, 0), - BTREE_ITER_INTENT); + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH); retry: while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k))) { @@ -482,18 +644,9 @@ retry: "inode %llu has incorrect i_sectors: got %llu, should be %llu", w.inode.bi_inum, w.inode.bi_sectors, i_sectors)) { - struct btree_iter *inode_iter = - bch2_trans_get_iter(&trans, BTREE_ID_inodes, - POS(0, w.cur_inum), - BTREE_ITER_INTENT); - w.inode.bi_sectors = i_sectors; - ret = __bch2_trans_do(&trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW, - bch2_inode_write(&trans, inode_iter, &w.inode)); - bch2_trans_iter_put(&trans, inode_iter); + ret = write_inode(&trans, &w.inode, w.snapshot); if (ret) break; } @@ -538,7 +691,7 @@ retry: k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size)) { bch2_fs_lazy_rw(c); return bch2_btree_delete_range_trans(&trans, BTREE_ID_extents, - POS(k.k->p.inode, round_up(w.inode.bi_size, block_bytes(c))), + POS(k.k->p.inode, round_up(w.inode.bi_size, 
block_bytes(c)) >> 9), POS(k.k->p.inode, U64_MAX), NULL) ?: -EINTR; } @@ -557,196 +710,207 @@ fsck_err: return bch2_trans_exit(&trans) ?: ret; } -/* - * Walk dirents: verify that they all have a corresponding S_ISDIR inode, - * validate d_type - */ -noinline_for_stack -static int check_dirents(struct bch_fs *c) +static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, + struct bch_hash_info *hash_info, + struct inode_walker *w, unsigned *nr_subdirs) { - struct inode_walker w = inode_walker_init(); - struct hash_check h; - struct btree_trans trans; - struct btree_iter *iter; + struct bch_fs *c = trans->c; struct bkey_s_c k; - unsigned name_len; + struct bkey_s_c_dirent d; + struct bch_inode_unpacked target; + u32 target_snapshot; + bool have_target; + bool backpointer_exists = true; + u64 d_inum; char buf[200]; - int ret = 0; + int ret; - bch_verbose(c, "checking dirents"); + k = bch2_btree_iter_peek(iter); + if (!k.k) + return 0; - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); + ret = bkey_err(k); + if (ret) + return ret; - hash_check_init(&h); + if (w->have_inode && + w->cur_inum != k.k->p.inode && + fsck_err_on(w->inode.bi_nlink != *nr_subdirs, c, + "directory %llu with wrong i_nlink: got %u, should be %u", + w->inode.bi_inum, w->inode.bi_nlink, *nr_subdirs)) { + w->inode.bi_nlink = *nr_subdirs; + ret = write_inode(trans, &w->inode, w->snapshot); + return ret ?: -EINTR; + } - iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents, - POS(BCACHEFS_ROOT_INO, 0), 0); -retry: - while ((k = bch2_btree_iter_peek(iter)).k && - !(ret = bkey_err(k))) { - struct bkey_s_c_dirent d; - struct bch_inode_unpacked target; - bool have_target; - u64 d_inum; + ret = __walk_inode(trans, w, k.k->p.inode); + if (ret) + return ret; - ret = walk_inode(&trans, &w, k.k->p.inode); - if (ret) - break; + if (w->first_this_inode) + *nr_subdirs = 0; - if (fsck_err_on(!w.have_inode, c, - "dirent in nonexisting directory:\n%s", - (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf)) || - fsck_err_on(!S_ISDIR(w.inode.bi_mode), c, - "dirent in non directory inode type %u:\n%s", - mode_to_type(w.inode.bi_mode), - (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf))) { - ret = bch2_btree_delete_at(&trans, iter, 0); - if (ret) - goto err; - continue; - } + if (fsck_err_on(!w->have_inode, c, + "dirent in nonexisting directory:\n%s", + (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)) || + fsck_err_on(!S_ISDIR(w->inode.bi_mode), c, + "dirent in non directory inode type %u:\n%s", + mode_to_type(w->inode.bi_mode), + (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf))) + return __bch2_trans_do(trans, NULL, NULL, 0, + bch2_btree_delete_at(trans, iter, 0)); - if (w.first_this_inode && w.have_inode) - hash_check_set_inode(&trans, &h, &w.inode); + if (!w->have_inode) + return 0; - ret = check_dirent_hash(&trans, &h, iter, &k); - if (ret > 0) { - ret = 0; - continue; - } - if (ret) - goto fsck_err; + if (w->first_this_inode) + *hash_info = bch2_hash_info_init(c, &w->inode); - if (ret) - goto fsck_err; + ret = hash_check_key(trans, bch2_dirent_hash_desc, + hash_info, iter, k); + if (ret < 0) + return ret; + if (ret) /* dirent has been deleted */ + return 0; - if (k.k->type != KEY_TYPE_dirent) - continue; + if (k.k->type != KEY_TYPE_dirent) + return 0; - d = bkey_s_c_to_dirent(k); - d_inum = le64_to_cpu(d.v->d_inum); - - name_len = bch2_dirent_name_bytes(d); - - if (fsck_err_on(!name_len, c, "empty dirent") || - fsck_err_on(name_len == 1 && - !memcmp(d.v->d_name, ".", 1), c, - ". 
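Note the unit fix in the truncation calls above: the extents btree is keyed by 512-byte sector, so the rounded-up i_size must be shifted right by 9 before it can be used as a starting position. With a 4096-byte block size, for example, an i_size of 10000 bytes rounds up to 12288 bytes, i.e. POS(inum, 12288 >> 9) = POS(inum, 24); without the shift the range delete would have started 512 times too deep into the file.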
dirent") || - fsck_err_on(name_len == 2 && - !memcmp(d.v->d_name, "..", 2), c, - ".. dirent") || - fsck_err_on(name_len == 2 && - !memcmp(d.v->d_name, "..", 2), c, - ".. dirent") || - fsck_err_on(memchr(d.v->d_name, '/', name_len), c, - "dirent name has invalid chars")) { - ret = remove_dirent(&trans, d); - if (ret) - goto err; - continue; - } + d = bkey_s_c_to_dirent(k); + d_inum = le64_to_cpu(d.v->d_inum); - if (fsck_err_on(d_inum == d.k->p.inode, c, - "dirent points to own directory:\n%s", - (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf))) { - ret = remove_dirent(&trans, d); - if (ret) - goto err; - continue; - } + ret = __lookup_inode(trans, d_inum, &target, &target_snapshot); + if (ret && ret != -ENOENT) + return ret; - ret = __bch2_inode_find_by_inum_trans(&trans, d_inum, &target, 0); - if (ret && ret != -ENOENT) - break; + have_target = !ret; + ret = 0; - have_target = !ret; - ret = 0; + if (fsck_err_on(!have_target, c, + "dirent points to missing inode:\n%s", + (bch2_bkey_val_to_text(&PBUF(buf), c, + k), buf))) + return remove_dirent(trans, d.k->p); - if (fsck_err_on(!have_target, c, - "dirent points to missing inode:\n%s", - (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf))) { - ret = remove_dirent(&trans, d); - if (ret) - goto err; - continue; - } + if (!have_target) + return 0; - if (!target.bi_nlink && - !(target.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) && - (target.bi_dir != k.k->p.inode || - target.bi_dir_offset != k.k->p.offset) && - (fsck_err_on(c->sb.version >= bcachefs_metadata_version_inode_backpointers, c, - "inode %llu has wrong backpointer:\n" - "got %llu:%llu\n" - "should be %llu:%llu", - d_inum, - target.bi_dir, - target.bi_dir_offset, - k.k->p.inode, - k.k->p.offset) || - c->opts.version_upgrade)) { - struct bkey_inode_buf p; + if (!target.bi_dir && + !target.bi_dir_offset) { + target.bi_dir = k.k->p.inode; + target.bi_dir_offset = k.k->p.offset; - target.bi_dir = k.k->p.inode; - target.bi_dir_offset = k.k->p.offset; - bch2_trans_unlock(&trans); + ret = __write_inode(trans, &target, target_snapshot) ?: + bch2_trans_commit(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW); + if (ret) + return ret; + return -EINTR; + } - bch2_inode_pack(c, &p, &target); + if (!inode_backpointer_matches(d, &target)) { + ret = inode_backpointer_exists(trans, &target); + if (ret < 0) + return ret; - ret = bch2_btree_insert(c, BTREE_ID_inodes, - &p.inode.k_i, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW); - if (ret) { - bch_err(c, "error in fsck: error %i updating inode", ret); - goto err; - } - continue; + backpointer_exists = ret; + ret = 0; + + if (fsck_err_on(S_ISDIR(target.bi_mode) && + backpointer_exists, c, + "directory %llu with multiple links", + target.bi_inum)) + return remove_dirent(trans, d.k->p); + + if (fsck_err_on(backpointer_exists && + !target.bi_nlink, c, + "inode %llu has multiple links but i_nlink 0", + d_inum)) { + target.bi_nlink++; + target.bi_flags &= ~BCH_INODE_UNLINKED; + + ret = write_inode(trans, &target, target_snapshot); + return ret ?: -EINTR; } - if (fsck_err_on(have_target && - d.v->d_type != - mode_to_type(target.bi_mode), c, - "incorrect d_type: should be %u:\n%s", - mode_to_type(target.bi_mode), - (bch2_bkey_val_to_text(&PBUF(buf), c, - k), buf))) { - struct bkey_i_dirent *n; + if (fsck_err_on(!backpointer_exists, c, + "inode %llu has wrong backpointer:\n" + "got %llu:%llu\n" + "should be %llu:%llu", + d_inum, + target.bi_dir, + target.bi_dir_offset, + k.k->p.inode, + k.k->p.offset)) { + target.bi_dir = k.k->p.inode; + 
target.bi_dir_offset = k.k->p.offset; - n = kmalloc(bkey_bytes(d.k), GFP_KERNEL); - if (!n) { - ret = -ENOMEM; - goto err; - } + ret = write_inode(trans, &target, target_snapshot); + return ret ?: -EINTR; + } + } - bkey_reassemble(&n->k_i, d.s_c); - n->v.d_type = mode_to_type(target.bi_mode); + if (fsck_err_on(d.v->d_type != mode_to_type(target.bi_mode), c, + "incorrect d_type: should be %u:\n%s", + mode_to_type(target.bi_mode), + (bch2_bkey_val_to_text(&PBUF(buf), c, + k), buf))) { + struct bkey_i_dirent *n; - ret = __bch2_trans_do(&trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW, - (bch2_trans_update(&trans, iter, &n->k_i, 0), 0)); - kfree(n); - if (ret) - goto err; + n = kmalloc(bkey_bytes(d.k), GFP_KERNEL); + if (!n) + return -ENOMEM; - } + bkey_reassemble(&n->k_i, d.s_c); + n->v.d_type = mode_to_type(target.bi_mode); - bch2_btree_iter_advance(iter); + ret = __bch2_trans_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + bch2_btree_iter_traverse(iter) ?: + bch2_trans_update(trans, iter, &n->k_i, 0)); + kfree(n); + return ret ?: -EINTR; } - hash_stop_chain(&trans, &h); -err: + *nr_subdirs += d.v->d_type == DT_DIR; + return 0; fsck_err: - if (ret == -EINTR) - goto retry; + return ret; +} - bch2_trans_iter_put(&trans, h.chain); +/* + * Walk dirents: verify that they all have a corresponding S_ISDIR inode, + * validate d_type + */ +noinline_for_stack +static int check_dirents(struct bch_fs *c) +{ + struct inode_walker w = inode_walker_init(); + struct bch_hash_info hash_info; + struct btree_trans trans; + struct btree_iter *iter; + unsigned nr_subdirs = 0; + int ret = 0; + + bch_verbose(c, "checking dirents"); + + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); + + iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents, + POS(BCACHEFS_ROOT_INO, 0), + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH); + + do { + ret = lockrestart_do(&trans, + check_dirent(&trans, iter, &hash_info, &w, &nr_subdirs)); + if (ret) + break; + } while (bch2_btree_iter_advance(iter)); bch2_trans_iter_put(&trans, iter); + return bch2_trans_exit(&trans) ?: ret; } @@ -757,7 +921,7 @@ noinline_for_stack static int check_xattrs(struct bch_fs *c) { struct inode_walker w = inode_walker_init(); - struct hash_check h; + struct bch_hash_info hash_info; struct btree_trans trans; struct btree_iter *iter; struct bkey_s_c k; @@ -765,12 +929,12 @@ static int check_xattrs(struct bch_fs *c) bch_verbose(c, "checking xattrs"); - hash_check_init(&h); - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, - POS(BCACHEFS_ROOT_INO, 0), 0); + POS(BCACHEFS_ROOT_INO, 0), + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH); retry: while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k))) { @@ -788,10 +952,10 @@ retry: } if (w.first_this_inode && w.have_inode) - hash_check_set_inode(&trans, &h, &w.inode); + hash_info = bch2_hash_info_init(c, &w.inode); ret = hash_check_key(&trans, bch2_xattr_hash_desc, - &h, iter, k); + &hash_info, iter, k); if (ret) break; @@ -801,7 +965,6 @@ fsck_err: if (ret == -EINTR) goto retry; - bch2_trans_iter_put(&trans, h.chain); bch2_trans_iter_put(&trans, iter); return bch2_trans_exit(&trans) ?: ret; } @@ -810,13 +973,13 @@ fsck_err: static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode) { struct bkey_inode_buf packed; + u32 snapshot; int ret; bch_verbose(c, "checking root directory"); ret = bch2_trans_do(c, NULL, NULL, 0, - __bch2_inode_find_by_inum_trans(&trans, BCACHEFS_ROOT_INO, - root_inode, 0)); + 
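check_dirent() above reconciles both directions of a link: the dirent's key position and the inode's bi_dir/bi_dir_offset backpointer have to agree, and mismatches are repaired from whichever side still looks sane (or the dirent is dropped for directories with multiple links). The invariant itself, as a freestanding sketch with invented types:

#include <stdbool.h>
#include <stdint.h>

struct dirent_pos    { uint64_t dir_inum, offset; };	   /* dirent key position */
struct inode_backptr { uint64_t bi_dir, bi_dir_offset; };  /* stored in the inode */

/* mirrors inode_backpointer_matches() in the hunk above */
static bool backpointer_matches(struct dirent_pos d, struct inode_backptr b)
{
	return d.dir_inum == b.bi_dir && d.offset == b.bi_dir_offset;
}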
lookup_inode(&trans, BCACHEFS_ROOT_INO, root_inode, &snapshot)); if (ret && ret != -ENOENT) return ret; @@ -843,83 +1006,12 @@ create_root: BTREE_INSERT_LAZY_RW); } -/* Get lost+found, create if it doesn't exist: */ -static int check_lostfound(struct bch_fs *c, - struct bch_inode_unpacked *root_inode, - struct bch_inode_unpacked *lostfound_inode) -{ - struct qstr lostfound = QSTR("lost+found"); - struct bch_hash_info root_hash_info = - bch2_hash_info_init(c, root_inode); - u64 inum; - int ret; - - bch_verbose(c, "checking lost+found"); - - inum = bch2_dirent_lookup(c, BCACHEFS_ROOT_INO, &root_hash_info, - &lostfound); - if (!inum) { - bch_notice(c, "creating lost+found"); - goto create_lostfound; - } - - ret = bch2_trans_do(c, NULL, NULL, 0, - __bch2_inode_find_by_inum_trans(&trans, inum, lostfound_inode, 0)); - if (ret && ret != -ENOENT) - return ret; - - if (fsck_err_on(ret, c, "lost+found missing")) - goto create_lostfound; - - if (fsck_err_on(!S_ISDIR(lostfound_inode->bi_mode), c, - "lost+found inode not a directory")) - goto create_lostfound; - - return 0; -fsck_err: - return ret; -create_lostfound: - bch2_inode_init_early(c, lostfound_inode); - - ret = bch2_trans_do(c, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW, - bch2_create_trans(&trans, - BCACHEFS_ROOT_INO, root_inode, - lostfound_inode, &lostfound, - 0, 0, S_IFDIR|0700, 0, NULL, NULL)); - if (ret) - bch_err(c, "error creating lost+found: %i", ret); - - return ret; -} - -typedef GENRADIX(unsigned long) inode_bitmap; - -static inline bool inode_bitmap_test(inode_bitmap *b, size_t nr) -{ - unsigned long *w = genradix_ptr(b, nr / BITS_PER_LONG); - return w ? test_bit(nr & (BITS_PER_LONG - 1), w) : false; -} - -static inline int inode_bitmap_set(inode_bitmap *b, size_t nr) -{ - unsigned long *w = genradix_ptr_alloc(b, nr / BITS_PER_LONG, GFP_KERNEL); - - if (!w) - return -ENOMEM; - - *w |= 1UL << (nr & (BITS_PER_LONG - 1)); - return 0; -} - struct pathbuf { size_t nr; size_t size; struct pathbuf_entry { u64 inum; - u64 offset; } *entries; }; @@ -930,8 +1022,9 @@ static int path_down(struct pathbuf *p, u64 inum) void *n = krealloc(p->entries, new_size * sizeof(p->entries[0]), GFP_KERNEL); - if (!n) + if (!n) { return -ENOMEM; + } p->entries = n; p->size = new_size; @@ -939,545 +1032,369 @@ static int path_down(struct pathbuf *p, u64 inum) p->entries[p->nr++] = (struct pathbuf_entry) { .inum = inum, - .offset = 0, }; return 0; } -noinline_for_stack -static int check_directory_structure(struct bch_fs *c, - struct bch_inode_unpacked *lostfound_inode) +static int check_path(struct btree_trans *trans, + struct pathbuf *p, + struct bch_inode_unpacked *inode) { - inode_bitmap dirs_done; - struct pathbuf path = { 0, 0, NULL }; - struct pathbuf_entry *e; - struct btree_trans trans; - struct btree_iter *iter; - struct bkey_s_c k; - struct bkey_s_c_dirent dirent; - bool had_unreachable; - u64 d_inum; + struct bch_fs *c = trans->c; + u32 snapshot; + size_t i; int ret = 0; - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - - bch_verbose(c, "checking directory structure"); - - /* DFS: */ -restart_dfs: - genradix_init(&dirs_done); - had_unreachable = false; - - ret = inode_bitmap_set(&dirs_done, BCACHEFS_ROOT_INO); - if (ret) { - bch_err(c, "memory allocation failure in inode_bitmap_set()"); - goto err; - } - - ret = path_down(&path, BCACHEFS_ROOT_INO); - if (ret) - goto err; - - while (path.nr) { -next: - e = &path.entries[path.nr - 1]; - - if (e->offset == U64_MAX) - goto up; + p->nr = 0; - for_each_btree_key(&trans, iter, 
BTREE_ID_dirents, - POS(e->inum, e->offset + 1), 0, k, ret) { - if (k.k->p.inode != e->inum) - break; - - e->offset = k.k->p.offset; - - if (k.k->type != KEY_TYPE_dirent) - continue; - - dirent = bkey_s_c_to_dirent(k); - - if (dirent.v->d_type != DT_DIR) - continue; - - d_inum = le64_to_cpu(dirent.v->d_inum); - - if (fsck_err_on(inode_bitmap_test(&dirs_done, d_inum), c, - "directory %llu has multiple hardlinks", - d_inum)) { - ret = remove_dirent(&trans, dirent); - if (ret) - goto err; - continue; - } + while (inode->bi_inum != BCACHEFS_ROOT_INO) { + ret = lockrestart_do(trans, + inode_backpointer_exists(trans, inode)); + if (ret < 0) + break; - ret = inode_bitmap_set(&dirs_done, d_inum); - if (ret) { - bch_err(c, "memory allocation failure in inode_bitmap_set()"); - goto err; - } + if (!ret) { + if (fsck_err(c, "unreachable inode %llu, type %u nlink %u backptr %llu:%llu", + inode->bi_inum, + mode_to_type(inode->bi_mode), + inode->bi_nlink, + inode->bi_dir, + inode->bi_dir_offset)) + ret = reattach_inode(trans, inode); + break; + } + ret = 0; - ret = path_down(&path, d_inum); - if (ret) { - goto err; - } + if (!S_ISDIR(inode->bi_mode)) + break; - ret = bch2_trans_iter_free(&trans, iter); - if (ret) { - bch_err(c, "btree error %i in fsck", ret); - goto err; - } - goto next; - } - ret = bch2_trans_iter_free(&trans, iter) ?: ret; + ret = path_down(p, inode->bi_inum); if (ret) { - bch_err(c, "btree error %i in fsck", ret); - goto err; + bch_err(c, "memory allocation failure"); + return ret; } -up: - path.nr--; - } - - iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS_MIN, 0); -retry: - for_each_btree_key_continue(iter, 0, k, ret) { - if (k.k->type != KEY_TYPE_inode) - continue; - - if (!S_ISDIR(le16_to_cpu(bkey_s_c_to_inode(k).v->bi_mode))) - continue; - ret = bch2_empty_dir_trans(&trans, k.k->p.inode); - if (ret == -EINTR) - goto retry; - if (!ret) - continue; + for (i = 0; i < p->nr; i++) { + if (inode->bi_dir != p->entries[i].inum) + continue; - if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.offset), c, - "unreachable directory found (inum %llu)", - k.k->p.offset)) { - bch2_trans_unlock(&trans); + /* XXX print path */ + if (!fsck_err(c, "directory structure loop")) + return 0; - ret = reattach_inode(c, lostfound_inode, k.k->p.offset); + ret = lockrestart_do(trans, + remove_backpointer(trans, inode)); if (ret) { - goto err; + bch_err(c, "error removing dirent: %i", ret); + break; } - had_unreachable = true; + ret = reattach_inode(trans, inode); + break; } - } - bch2_trans_iter_free(&trans, iter); - if (ret) - goto err; - if (had_unreachable) { - bch_info(c, "reattached unreachable directories, restarting pass to check for loops"); - genradix_free(&dirs_done); - kfree(path.entries); - memset(&dirs_done, 0, sizeof(dirs_done)); - memset(&path, 0, sizeof(path)); - goto restart_dfs; + ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot); + if (ret) { + /* Should have been caught in dirents pass */ + bch_err(c, "error looking up parent directory: %i", ret); + break; + } } -err: fsck_err: - ret = bch2_trans_exit(&trans) ?: ret; - genradix_free(&dirs_done); - kfree(path.entries); + if (ret) + bch_err(c, "%s: err %i", __func__, ret); return ret; } -struct nlink { - u32 count; - u32 dir_count; -}; - -typedef GENRADIX(struct nlink) nlink_table; - -static void inc_link(struct bch_fs *c, nlink_table *links, - u64 range_start, u64 *range_end, - u64 inum, bool dir) -{ - struct nlink *link; - - if (inum < range_start || inum >= *range_end) - return; - - if (inum - range_start >= SIZE_MAX 
/ sizeof(struct nlink)) { - *range_end = inum; - return; - } - - link = genradix_ptr_alloc(links, inum - range_start, GFP_KERNEL); - if (!link) { - bch_verbose(c, "allocation failed during fsck - will need another pass"); - *range_end = inum; - return; - } - - if (dir) - link->dir_count++; - else - link->count++; -} - -noinline_for_stack -static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links, - u64 range_start, u64 *range_end) +/* + * Check for unreachable inodes, as well as loops in the directory structure: + * After check_dirents(), if an inode backpointer doesn't exist that means it's + * unreachable: + */ +static int check_directory_structure(struct bch_fs *c) { struct btree_trans trans; struct btree_iter *iter; struct bkey_s_c k; - struct bkey_s_c_dirent d; - u64 d_inum; + struct bch_inode_unpacked u; + struct pathbuf path = { 0, 0, NULL }; int ret; bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false); - - for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN, 0, k, ret) { - switch (k.k->type) { - case KEY_TYPE_dirent: - d = bkey_s_c_to_dirent(k); - d_inum = le64_to_cpu(d.v->d_inum); - - if (d.v->d_type == DT_DIR) - inc_link(c, links, range_start, range_end, - d.k->p.inode, true); - - inc_link(c, links, range_start, range_end, - d_inum, false); + for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH, k, ret) { + if (k.k->type != KEY_TYPE_inode) + continue; + ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &u); + if (ret) { + /* Should have been caught earlier in fsck: */ + bch_err(c, "error unpacking inode %llu: %i", k.k->p.offset, ret); break; } - bch2_trans_cond_resched(&trans); + ret = check_path(&trans, &path, &u); + if (ret) + break; } bch2_trans_iter_put(&trans, iter); - ret = bch2_trans_exit(&trans) ?: ret; - if (ret) - bch_err(c, "error in fsck: btree error %i while walking dirents", ret); + BUG_ON(ret == -EINTR); - return ret; + kfree(path.entries); + + return bch2_trans_exit(&trans) ?: ret; } -static int check_inode_nlink(struct bch_fs *c, - struct bch_inode_unpacked *lostfound_inode, - struct bch_inode_unpacked *u, - struct nlink *link, - bool *do_update) +struct nlink_table { + size_t nr; + size_t size; + + struct nlink { + u64 inum; + u32 snapshot; + u32 count; + } *d; +}; + +static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot) { - u32 i_nlink = bch2_inode_nlink_get(u); - u32 real_i_nlink = - link->count * nlink_bias(u->bi_mode) + - link->dir_count; - int ret = 0; + if (t->nr == t->size) { + size_t new_size = max_t(size_t, 128UL, t->size * 2); + void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL); + if (!d) { + return -ENOMEM; + } - /* - * These should have been caught/fixed by earlier passes, we don't - * repair them here: - */ - if (S_ISDIR(u->bi_mode) && link->count > 1) { - need_fsck_err(c, "directory %llu with multiple hardlinks: %u", - u->bi_inum, link->count); - return 0; - } + if (t->d) + memcpy(d, t->d, t->size * sizeof(t->d[0])); + kvfree(t->d); - if (S_ISDIR(u->bi_mode) && !link->count) { - need_fsck_err(c, "unreachable directory found (inum %llu)", - u->bi_inum); - return 0; + t->d = d; + t->size = new_size; } - if (!S_ISDIR(u->bi_mode) && link->dir_count) { - need_fsck_err(c, "non directory with subdirectories (inum %llu)", - u->bi_inum); - return 0; - } - if (!link->count && - !(u->bi_flags & BCH_INODE_UNLINKED) && - (c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) { - if (fsck_err(c, "unreachable inode 
%llu not marked as unlinked (type %u)", - u->bi_inum, mode_to_type(u->bi_mode)) == - FSCK_ERR_IGNORE) - return 0; + t->d[t->nr++] = (struct nlink) { + .inum = inum, + .snapshot = snapshot, + }; - ret = reattach_inode(c, lostfound_inode, u->bi_inum); - if (ret) - return ret; + return 0; +} - link->count = 1; - real_i_nlink = nlink_bias(u->bi_mode) + link->dir_count; - goto set_i_nlink; - } +static int nlink_cmp(const void *_l, const void *_r) +{ + const struct nlink *l = _l; + const struct nlink *r = _r; - if (i_nlink < link->count) { - if (fsck_err(c, "inode %llu i_link too small (%u < %u, type %i)", - u->bi_inum, i_nlink, link->count, - mode_to_type(u->bi_mode)) == FSCK_ERR_IGNORE) - return 0; - goto set_i_nlink; - } + return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot); +} - if (i_nlink != real_i_nlink && - c->sb.clean) { - if (fsck_err(c, "filesystem marked clean, " - "but inode %llu has wrong i_nlink " - "(type %u i_nlink %u, should be %u)", - u->bi_inum, mode_to_type(u->bi_mode), - i_nlink, real_i_nlink) == FSCK_ERR_IGNORE) - return 0; - goto set_i_nlink; - } +static void inc_link(struct bch_fs *c, struct nlink_table *links, + u64 range_start, u64 range_end, u64 inum) +{ + struct nlink *link, key = { + .inum = inum, .snapshot = U32_MAX, + }; - if (i_nlink != real_i_nlink && - (c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) { - if (fsck_err(c, "inode %llu has wrong i_nlink " - "(type %u i_nlink %u, should be %u)", - u->bi_inum, mode_to_type(u->bi_mode), - i_nlink, real_i_nlink) == FSCK_ERR_IGNORE) - return 0; - goto set_i_nlink; - } + if (inum < range_start || inum >= range_end) + return; - if (real_i_nlink && i_nlink != real_i_nlink) - bch_verbose(c, "setting inode %llu nlink from %u to %u", - u->bi_inum, i_nlink, real_i_nlink); -set_i_nlink: - if (i_nlink != real_i_nlink) { - bch2_inode_nlink_set(u, real_i_nlink); - *do_update = true; - } -fsck_err: - return ret; + link = __inline_bsearch(&key, links->d, links->nr, + sizeof(links->d[0]), nlink_cmp); + if (link) + link->count++; } -static int check_inode(struct btree_trans *trans, - struct bch_inode_unpacked *lostfound_inode, - struct btree_iter *iter, - struct bkey_s_c_inode inode, - struct nlink *link) +noinline_for_stack +static int check_nlinks_find_hardlinks(struct bch_fs *c, + struct nlink_table *t, + u64 start, u64 *end) { - struct bch_fs *c = trans->c; + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + struct bkey_s_c_inode inode; struct bch_inode_unpacked u; - bool do_update = false; int ret = 0; - ret = bch2_inode_unpack(inode, &u); - - bch2_trans_unlock(trans); - - if (bch2_fs_inconsistent_on(ret, c, - "error unpacking inode %llu in fsck", - inode.k->p.inode)) - return ret; - - if (link) { - ret = check_inode_nlink(c, lostfound_inode, &u, link, - &do_update); - if (ret) - return ret; - } + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - if (u.bi_flags & BCH_INODE_UNLINKED && - (!c->sb.clean || - fsck_err(c, "filesystem marked clean, but inode %llu unlinked", - u.bi_inum))) { - bch_verbose(c, "deleting inode %llu", u.bi_inum); + for_each_btree_key(&trans, iter, BTREE_ID_inodes, + POS(0, start), + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH, k, ret) { + if (k.k->type != KEY_TYPE_inode) + continue; - bch2_fs_lazy_rw(c); + inode = bkey_s_c_to_inode(k); - ret = bch2_inode_rm(c, u.bi_inum, false); - if (ret) - bch_err(c, "error in fsck: error %i while deleting inode", ret); - return ret; - } + /* + * Backpointer and directory structure checks are sufficient for + * directories, since 
they can't have hardlinks: + */ + if (S_ISDIR(le16_to_cpu(inode.v->bi_mode))) + continue; - if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY && - (!c->sb.clean || - fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty", - u.bi_inum))) { - bch_verbose(c, "truncating inode %llu", u.bi_inum); + /* Should never fail, checked by bch2_inode_invalid: */ + BUG_ON(bch2_inode_unpack(inode, &u)); - bch2_fs_lazy_rw(c); + if (!u.bi_nlink) + continue; - /* - * XXX: need to truncate partial blocks too here - or ideally - * just switch units to bytes and that issue goes away - */ - ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents, - POS(u.bi_inum, round_up(u.bi_size, block_bytes(c))), - POS(u.bi_inum, U64_MAX), - NULL); + ret = add_nlink(t, k.k->p.offset, k.k->p.snapshot); if (ret) { - bch_err(c, "error in fsck: error %i truncating inode", ret); - return ret; + *end = k.k->p.offset; + ret = 0; + break; } - /* - * We truncated without our normal sector accounting hook, just - * make sure we recalculate it: - */ - u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY; - - u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY; - do_update = true; } + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); - if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY && - (!c->sb.clean || - fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty", - u.bi_inum))) { - s64 sectors; - - bch_verbose(c, "recounting sectors for inode %llu", - u.bi_inum); + if (ret) + bch_err(c, "error in fsck: btree error %i while walking inodes", ret); - sectors = bch2_count_inode_sectors(trans, u.bi_inum); - if (sectors < 0) { - bch_err(c, "error in fsck: error %i recounting inode sectors", - (int) sectors); - return sectors; - } + return ret; +} - u.bi_sectors = sectors; - u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY; - do_update = true; - } +noinline_for_stack +static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, + u64 range_start, u64 range_end) +{ + struct btree_trans trans; + struct btree_iter *iter; + struct bkey_s_c k; + struct bkey_s_c_dirent d; + int ret; - if (!S_ISDIR(u.bi_mode) && - u.bi_nlink && - !(u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) && - (fsck_err_on(c->sb.version >= bcachefs_metadata_version_inode_backpointers, c, - "inode missing BCH_INODE_BACKPTR_UNTRUSTED flags") || - c->opts.version_upgrade)) { - u.bi_flags |= BCH_INODE_BACKPTR_UNTRUSTED; - do_update = true; - } + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - if (do_update) { - struct bkey_inode_buf p; + for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN, + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH, k, ret) { + switch (k.k->type) { + case KEY_TYPE_dirent: + d = bkey_s_c_to_dirent(k); - bch2_inode_pack(c, &p, &u); - p.inode.k.p = iter->pos; + if (d.v->d_type != DT_DIR) + inc_link(c, links, range_start, range_end, + le64_to_cpu(d.v->d_inum)); + break; + } - ret = __bch2_trans_do(trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_LAZY_RW, - (bch2_trans_update(trans, iter, &p.inode.k_i, 0), 0)); - if (ret) - bch_err(c, "error in fsck: error %i " - "updating inode", ret); + bch2_trans_cond_resched(&trans); } -fsck_err: + bch2_trans_iter_put(&trans, iter); + + ret = bch2_trans_exit(&trans) ?: ret; + if (ret) + bch_err(c, "error in fsck: btree error %i while walking dirents", ret); + return ret; } noinline_for_stack -static int bch2_gc_walk_inodes(struct bch_fs *c, - struct bch_inode_unpacked *lostfound_inode, - nlink_table *links, +static int check_nlinks_update_hardlinks(struct bch_fs *c, + struct nlink_table *links, u64 
range_start, u64 range_end) { struct btree_trans trans; struct btree_iter *iter; struct bkey_s_c k; - struct nlink *link, zero_links = { 0, 0 }; - struct genradix_iter nlinks_iter; - int ret = 0, ret2 = 0; - u64 nlinks_pos; + struct bkey_s_c_inode inode; + struct bch_inode_unpacked u; + struct nlink *link = links->d; + int ret = 0; bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, - POS(0, range_start), 0); - nlinks_iter = genradix_iter_init(links, 0); + for_each_btree_key(&trans, iter, BTREE_ID_inodes, + POS(0, range_start), + BTREE_ITER_INTENT| + BTREE_ITER_PREFETCH, k, ret) { + if (k.k->p.offset >= range_end) + break; - while ((k = bch2_btree_iter_peek(iter)).k && - !(ret2 = bkey_err(k)) && - iter->pos.offset < range_end) { -peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links); + if (k.k->type != KEY_TYPE_inode) + continue; - if (!link && (!k.k || iter->pos.offset >= range_end)) - break; + inode = bkey_s_c_to_inode(k); + if (S_ISDIR(le16_to_cpu(inode.v->bi_mode))) + continue; + + BUG_ON(bch2_inode_unpack(inode, &u)); - nlinks_pos = range_start + nlinks_iter.pos; + if (!u.bi_nlink) + continue; - if (link && nlinks_pos < iter->pos.offset) { - /* Should have been caught by dirents pass: */ - need_fsck_err_on(link->count, c, - "missing inode %llu (nlink %u)", - nlinks_pos, link->count); - genradix_iter_advance(&nlinks_iter, links); - goto peek_nlinks; + while (link->inum < k.k->p.offset) { + link++; + BUG_ON(link >= links->d + links->nr); } - if (!link || nlinks_pos > iter->pos.offset) - link = &zero_links; + if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c, + "inode %llu has wrong i_nlink (type %u i_nlink %u, should be %u)", + u.bi_inum, mode_to_type(u.bi_mode), + bch2_inode_nlink_get(&u), link->count)) { + bch2_inode_nlink_set(&u, link->count); - if (k.k && k.k->type == KEY_TYPE_inode) { - ret = check_inode(&trans, lostfound_inode, iter, - bkey_s_c_to_inode(k), link); - BUG_ON(ret == -EINTR); + ret = __bch2_trans_do(&trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + bch2_btree_iter_traverse(iter) ?: + bch2_inode_write(&trans, iter, &u)); if (ret) - break; - } else { - /* Should have been caught by dirents pass: */ - need_fsck_err_on(link->count, c, - "missing inode %llu (nlink %u)", - nlinks_pos, link->count); + bch_err(c, "error in fsck: error %i updating inode", ret); } - - if (nlinks_pos == iter->pos.offset) - genradix_iter_advance(&nlinks_iter, links); - - bch2_btree_iter_advance(iter); - bch2_trans_cond_resched(&trans); } fsck_err: bch2_trans_iter_put(&trans, iter); bch2_trans_exit(&trans); - if (ret2) - bch_err(c, "error in fsck: btree error %i while walking inodes", ret2); + if (ret) + bch_err(c, "error in fsck: btree error %i while walking inodes", ret); - return ret ?: ret2; + return ret; } noinline_for_stack -static int check_inode_nlinks(struct bch_fs *c, - struct bch_inode_unpacked *lostfound_inode) +static int check_nlinks(struct bch_fs *c) { - nlink_table links; + struct nlink_table links = { 0 }; u64 this_iter_range_start, next_iter_range_start = 0; int ret = 0; bch_verbose(c, "checking inode nlinks"); - genradix_init(&links); - do { this_iter_range_start = next_iter_range_start; next_iter_range_start = U64_MAX; - ret = bch2_gc_walk_dirents(c, &links, + ret = check_nlinks_find_hardlinks(c, &links, + this_iter_range_start, + &next_iter_range_start); + + ret = check_nlinks_walk_dirents(c, &links, this_iter_range_start, - &next_iter_range_start); + next_iter_range_start); if (ret) break; - 
ret = bch2_gc_walk_inodes(c, lostfound_inode, &links, + ret = check_nlinks_update_hardlinks(c, &links, this_iter_range_start, next_iter_range_start); if (ret) break; - genradix_free(&links); + links.nr = 0; } while (next_iter_range_start != U64_MAX); - genradix_free(&links); + kvfree(links.d); return ret; } @@ -1488,54 +1405,18 @@ static int check_inode_nlinks(struct bch_fs *c, */ int bch2_fsck_full(struct bch_fs *c) { - struct bch_inode_unpacked root_inode, lostfound_inode; + struct bch_inode_unpacked root_inode; - return check_extents(c) ?: + return check_inodes(c, true) ?: + check_extents(c) ?: check_dirents(c) ?: check_xattrs(c) ?: check_root(c, &root_inode) ?: - check_lostfound(c, &root_inode, &lostfound_inode) ?: - check_directory_structure(c, &lostfound_inode) ?: - check_inode_nlinks(c, &lostfound_inode); -} - -int bch2_fsck_inode_nlink(struct bch_fs *c) -{ - struct bch_inode_unpacked root_inode, lostfound_inode; - - return check_root(c, &root_inode) ?: - check_lostfound(c, &root_inode, &lostfound_inode) ?: - check_inode_nlinks(c, &lostfound_inode); + check_directory_structure(c) ?: + check_nlinks(c); } int bch2_fsck_walk_inodes_only(struct bch_fs *c) { - struct btree_trans trans; - struct btree_iter *iter; - struct bkey_s_c k; - struct bkey_s_c_inode inode; - int ret; - - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - - for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, 0, k, ret) { - if (k.k->type != KEY_TYPE_inode) - continue; - - inode = bkey_s_c_to_inode(k); - - if (inode.v->bi_flags & - (BCH_INODE_I_SIZE_DIRTY| - BCH_INODE_I_SECTORS_DIRTY| - BCH_INODE_UNLINKED)) { - ret = check_inode(&trans, NULL, iter, inode, NULL); - if (ret) - break; - } - } - bch2_trans_iter_put(&trans, iter); - - BUG_ON(ret == -EINTR); - - return bch2_trans_exit(&trans) ?: ret; + return check_inodes(c, false); } diff --git a/libbcachefs/fsck.h b/libbcachefs/fsck.h index 9e4af02..264f270 100644 --- a/libbcachefs/fsck.h +++ b/libbcachefs/fsck.h @@ -3,7 +3,6 @@ #define _BCACHEFS_FSCK_H int bch2_fsck_full(struct bch_fs *); -int bch2_fsck_inode_nlink(struct bch_fs *); int bch2_fsck_walk_inodes_only(struct bch_fs *); #endif /* _BCACHEFS_FSCK_H */ diff --git a/libbcachefs/inode.c b/libbcachefs/inode.c index d4c3283..3b67108 100644 --- a/libbcachefs/inode.c +++ b/libbcachefs/inode.c @@ -137,7 +137,7 @@ static void bch2_inode_pack_v2(struct bkey_inode_buf *packed, nr_fields++; \ \ if (inode->_name) { \ - ret = bch2_varint_encode(out, inode->_name); \ + ret = bch2_varint_encode_fast(out, inode->_name); \ out += ret; \ \ if (_bits > 64) \ @@ -246,13 +246,13 @@ static int bch2_inode_unpack_v2(struct bkey_s_c_inode inode, #define x(_name, _bits) \ if (fieldnr < INODE_NR_FIELDS(inode.v)) { \ - ret = bch2_varint_decode(in, end, &v[0]); \ + ret = bch2_varint_decode_fast(in, end, &v[0]); \ if (ret < 0) \ return ret; \ in += ret; \ \ if (_bits > 64) { \ - ret = bch2_varint_decode(in, end, &v[1]); \ + ret = bch2_varint_decode_fast(in, end, &v[1]); \ if (ret < 0) \ return ret; \ in += ret; \ @@ -300,14 +300,16 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans, struct bkey_s_c k; int ret; - iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), - BTREE_ITER_CACHED|flags); - k = bch2_btree_iter_peek_cached(iter); + if (trans->c->opts.inodes_use_key_cache) + flags |= BTREE_ITER_CACHED; + + iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), flags); + k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) goto err; - ret = k.k->type == KEY_TYPE_inode ? 
0 : -EIO; + ret = k.k->type == KEY_TYPE_inode ? 0 : -ENOENT; if (ret) goto err; @@ -333,8 +335,7 @@ int bch2_inode_write(struct btree_trans *trans, bch2_inode_pack(trans->c, inode_p, inode); inode_p->inode.k.p.snapshot = iter->snapshot; - bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0); - return 0; + return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0); } const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k) @@ -370,6 +371,22 @@ const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k) return NULL; } +static void __bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode) +{ + pr_buf(out, "mode %o flags %x ", inode->bi_mode, inode->bi_flags); + +#define x(_name, _bits) \ + pr_buf(out, #_name " %llu ", (u64) inode->_name); + BCH_INODE_FIELDS() +#undef x +} + +void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode) +{ + pr_buf(out, "inum: %llu ", inode->bi_inum); + __bch2_inode_unpacked_to_text(out, inode); +} + void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { @@ -381,12 +398,7 @@ void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, return; } - pr_buf(out, "mode: %o ", unpacked.bi_mode); - -#define x(_name, _bits) \ - pr_buf(out, #_name ": %llu ", (u64) unpacked._name); - BCH_INODE_FIELDS() -#undef x + __bch2_inode_unpacked_to_text(out, &unpacked); } const char *bch2_inode_generation_invalid(const struct bch_fs *c, @@ -472,23 +484,28 @@ static inline u32 bkey_generation(struct bkey_s_c k) struct btree_iter *bch2_inode_create(struct btree_trans *trans, struct bch_inode_unpacked *inode_u, - u32 snapshot) + u32 snapshot, u64 cpu) { struct bch_fs *c = trans->c; struct btree_iter *iter = NULL; struct bkey_s_c k; u64 min, max, start, pos, *hint; - int ret; + int ret = 0; + unsigned bits = (c->opts.inodes_32bit ? 31 : 63); - u64 cpu = raw_smp_processor_id(); - unsigned bits = (c->opts.inodes_32bit - ? 
31 : 63) - c->inode_shard_bits; + if (c->opts.shard_inode_numbers) { + bits -= c->inode_shard_bits; - min = (cpu << bits); - max = (cpu << bits) | ~(ULLONG_MAX << bits); + min = (cpu << bits); + max = (cpu << bits) | ~(ULLONG_MAX << bits); - min = max_t(u64, min, BLOCKDEV_INODE_MAX); - hint = c->unused_inode_hints + cpu; + min = max_t(u64, min, BLOCKDEV_INODE_MAX); + hint = c->unused_inode_hints + cpu; + } else { + min = BLOCKDEV_INODE_MAX; + max = ~(ULLONG_MAX << bits); + hint = c->unused_inode_hints; + } start = READ_ONCE(*hint); @@ -513,7 +530,7 @@ again: if (k.k->p.snapshot == snapshot && k.k->type != KEY_TYPE_inode && !bch2_btree_key_cache_find(c, BTREE_ID_inodes, SPOS(0, pos, snapshot))) { - bch2_btree_iter_next(iter); + bch2_btree_iter_advance(iter); continue; } @@ -573,9 +590,13 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached) struct bpos end = POS(inode_nr + 1, 0); struct bch_inode_unpacked inode_u; struct bkey_s_c k; + unsigned iter_flags = BTREE_ITER_INTENT; int ret; - bch2_trans_init(&trans, c, 0, 0); + if (cached && c->opts.inodes_use_key_cache) + iter_flags |= BTREE_ITER_CACHED; + + bch2_trans_init(&trans, c, 0, 1024); /* * If this was a directory, there shouldn't be any real dirents left - @@ -596,15 +617,9 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached) retry: bch2_trans_begin(&trans); - if (cached) { - iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr), - BTREE_ITER_CACHED|BTREE_ITER_INTENT); - k = bch2_btree_iter_peek_cached(iter); - } else { - iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr), - BTREE_ITER_SLOTS|BTREE_ITER_INTENT); - k = bch2_btree_iter_peek_slot(iter); - } + iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, + POS(0, inode_nr), iter_flags); + k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) @@ -624,9 +639,8 @@ retry: delete.k.p = iter->pos; delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); - bch2_trans_update(&trans, iter, &delete.k_i, 0); - - ret = bch2_trans_commit(&trans, NULL, NULL, + ret = bch2_trans_update(&trans, iter, &delete.k_i, 0) ?: + bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL); err: bch2_trans_iter_put(&trans, iter); @@ -637,39 +651,18 @@ err: return ret; } -int __bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr, - struct bch_inode_unpacked *inode, - unsigned flags) +static int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr, + struct bch_inode_unpacked *inode) { struct btree_iter *iter; - struct bkey_s_c k; int ret; - iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, - POS(0, inode_nr), flags); - k = (flags & BTREE_ITER_TYPE) == BTREE_ITER_CACHED - ? bch2_btree_iter_peek_cached(iter) - : bch2_btree_iter_peek_slot(iter); - ret = bkey_err(k); - if (ret) - goto err; - - ret = k.k->type == KEY_TYPE_inode - ? 
bch2_inode_unpack(bkey_s_c_to_inode(k), inode) - : -ENOENT; -err: + iter = bch2_inode_peek(trans, inode, inode_nr, 0); + ret = PTR_ERR_OR_ZERO(iter); bch2_trans_iter_put(trans, iter); return ret; } -int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr, - struct bch_inode_unpacked *inode) -{ - return __bch2_inode_find_by_inum_trans(trans, inode_nr, - inode, BTREE_ITER_CACHED); - -} - int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, struct bch_inode_unpacked *inode) { diff --git a/libbcachefs/inode.h b/libbcachefs/inode.h index 23c322d..d67af4f 100644 --- a/libbcachefs/inode.h +++ b/libbcachefs/inode.h @@ -55,6 +55,8 @@ void bch2_inode_pack(struct bch_fs *, struct bkey_inode_buf *, const struct bch_inode_unpacked *); int bch2_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *); +void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *); + struct btree_iter *bch2_inode_peek(struct btree_trans *, struct bch_inode_unpacked *, u64, unsigned); int bch2_inode_write(struct btree_trans *, struct btree_iter *, @@ -70,14 +72,10 @@ void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *, struct bch_inode_unpacked *); struct btree_iter *bch2_inode_create(struct btree_trans *, - struct bch_inode_unpacked *, u32); + struct bch_inode_unpacked *, u32, u64); int bch2_inode_rm(struct bch_fs *, u64, bool); -int __bch2_inode_find_by_inum_trans(struct btree_trans *, u64, - struct bch_inode_unpacked *, unsigned); -int bch2_inode_find_by_inum_trans(struct btree_trans *, u64, - struct bch_inode_unpacked *); int bch2_inode_find_by_inum(struct bch_fs *, u64, struct bch_inode_unpacked *); static inline struct bch_io_opts bch2_inode_opts_get(struct bch_inode_unpacked *inode) diff --git a/libbcachefs/io.c b/libbcachefs/io.c index 36b10cb..4585a40 100644 --- a/libbcachefs/io.c +++ b/libbcachefs/io.c @@ -120,7 +120,7 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) * the time: */ if (abs((int) (old - io_latency)) < (old >> 1) && - now & ~(~0 << 5)) + now & ~(~0U << 5)) break; new = ewma_add(old, io_latency, 5); @@ -187,7 +187,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, struct btree_iter *extent_iter, struct bkey_i *new, bool *maybe_extending, - bool *should_check_enospc, + bool *usage_increasing, s64 *i_sectors_delta, s64 *disk_sectors_delta) { @@ -199,7 +199,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, int ret = 0; *maybe_extending = true; - *should_check_enospc = false; + *usage_increasing = false; *i_sectors_delta = 0; *disk_sectors_delta = 0; @@ -219,10 +219,10 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, ? 
sectors * bch2_bkey_nr_ptrs_fully_allocated(old) : 0; - if (!*should_check_enospc && + if (!*usage_increasing && (new_replicas > bch2_bkey_replicas(c, old) || (!new_compressed && bch2_bkey_sectors_compressed(old)))) - *should_check_enospc = true; + *usage_increasing = true; if (bkey_cmp(old.k->p, new->k.p) >= 0) { /* @@ -235,8 +235,12 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, * writing to, because i_size could be up to one block * less: */ - if (!bkey_cmp(old.k->p, new->k.p)) + if (!bkey_cmp(old.k->p, new->k.p)) { old = bch2_btree_iter_next(iter); + ret = bkey_err(old); + if (ret) + break; + } if (old.k && !bkey_err(old) && old.k->p.inode == extent_iter->pos.inode && @@ -257,11 +261,12 @@ int bch2_extent_update(struct btree_trans *trans, struct disk_reservation *disk_res, u64 *journal_seq, u64 new_i_size, - s64 *i_sectors_delta_total) + s64 *i_sectors_delta_total, + bool check_enospc) { /* this must live until after bch2_trans_commit(): */ struct bkey_inode_buf inode_p; - bool extending = false, should_check_enospc; + bool extending = false, usage_increasing; s64 i_sectors_delta = 0, disk_sectors_delta = 0; int ret; @@ -271,17 +276,20 @@ int bch2_extent_update(struct btree_trans *trans, ret = bch2_sum_sector_overwrites(trans, iter, k, &extending, - &should_check_enospc, + &usage_increasing, &i_sectors_delta, &disk_sectors_delta); if (ret) return ret; + if (!usage_increasing) + check_enospc = false; + if (disk_res && disk_sectors_delta > (s64) disk_res->sectors) { ret = bch2_disk_reservation_add(trans->c, disk_res, disk_sectors_delta - disk_res->sectors, - !should_check_enospc + !check_enospc ? BCH_DISK_RESERVATION_NOFAIL : 0); if (ret) return ret; @@ -297,8 +305,9 @@ int bch2_extent_update(struct btree_trans *trans, inode_iter = bch2_inode_peek(trans, &inode_u, k->k.p.inode, BTREE_ITER_INTENT); - if (IS_ERR(inode_iter)) - return PTR_ERR(inode_iter); + ret = PTR_ERR_OR_ZERO(inode_iter); + if (ret) + return ret; /* * XXX: @@ -325,18 +334,21 @@ int bch2_extent_update(struct btree_trans *trans, inode_p.inode.k.p.snapshot = iter->snapshot; - bch2_trans_update(trans, inode_iter, + ret = bch2_trans_update(trans, inode_iter, &inode_p.inode.k_i, 0); } bch2_trans_iter_put(trans, inode_iter); - } - bch2_trans_update(trans, iter, k, 0); + if (ret) + return ret; + } - ret = bch2_trans_commit(trans, disk_res, journal_seq, + ret = bch2_trans_update(trans, iter, k, 0) ?: + bch2_trans_commit(trans, disk_res, journal_seq, BTREE_INSERT_NOCHECK_RW| BTREE_INSERT_NOFAIL); + BUG_ON(ret == -ENOSPC); if (ret) return ret; @@ -354,14 +366,13 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k; int ret = 0, ret2 = 0; - while ((k = bch2_btree_iter_peek(iter)).k && + while ((bch2_trans_begin(trans), + (k = bch2_btree_iter_peek(iter)).k) && bkey_cmp(iter->pos, end) < 0) { struct disk_reservation disk_res = bch2_disk_reservation_init(c, 0); struct bkey_i delete; - bch2_trans_begin(trans); - ret = bkey_err(k); if (ret) goto btree_err; @@ -375,7 +386,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, ret = bch2_extent_update(trans, iter, &delete, &disk_res, journal_seq, - 0, i_sectors_delta); + 0, i_sectors_delta, false); bch2_disk_reservation_put(c, &disk_res); btree_err: if (ret == -EINTR) { @@ -448,7 +459,8 @@ int bch2_write_index_default(struct bch_write_op *op) ret = bch2_extent_update(&trans, iter, sk.k, &op->res, op_journal_seq(op), - op->new_i_size, &op->i_sectors_delta); + op->new_i_size, &op->i_sectors_delta, + op->flags & 
BCH_WRITE_CHECK_ENOSPC); if (ret == -EINTR) continue; if (ret) @@ -537,9 +549,6 @@ static void bch2_write_done(struct closure *cl) bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time); - if (!(op->flags & BCH_WRITE_FROM_INTERNAL)) - up(&c->io_in_flight); - if (op->end_io) { EBUG_ON(cl->parent); closure_debug_destroy(cl); @@ -1323,12 +1332,6 @@ void bch2_write(struct closure *cl) goto err; } - /* - * Can't ratelimit copygc - we'd deadlock: - */ - if (!(op->flags & BCH_WRITE_FROM_INTERNAL)) - down(&c->io_in_flight); - bch2_increment_clock(c, bio_sectors(bio), WRITE); data_len = min_t(u64, bio->bi_iter.bi_size, @@ -1443,7 +1446,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio) bch2_migrate_read_done(&op->write, rbio); closure_init(cl, NULL); - closure_call(&op->write.op.cl, bch2_write, c->wq, cl); + closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl); closure_return_with_destructor(cl, promote_done); } @@ -1787,7 +1790,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, if (!bch2_bkey_narrow_crcs(new, new_crc)) goto out; - bch2_trans_update(trans, iter, new, 0); + ret = bch2_trans_update(trans, iter, new, 0); out: bch2_trans_iter_put(trans, iter); return ret; @@ -1811,8 +1814,11 @@ static void __bch2_read_endio(struct work_struct *work) struct bvec_iter dst_iter = rbio->bvec_iter; struct bch_extent_crc_unpacked crc = rbio->pick.crc; struct nonce nonce = extent_nonce(rbio->version, crc); + unsigned nofs_flags; struct bch_csum csum; + nofs_flags = memalloc_nofs_save(); + /* Reset iterator for checksumming and copying bounced data: */ if (rbio->bounce) { src->bi_iter.bi_size = crc.compressed_size << 9; @@ -1826,6 +1832,13 @@ static void __bch2_read_endio(struct work_struct *work) if (bch2_crc_cmp(csum, rbio->pick.crc.csum)) goto csum_err; + /* + * XXX + * We need to rework the narrow_crcs path to deliver the read completion + * first, and then punt to a different workqueue, otherwise we're + * holding up reads while doing btree updates which is bad for memory + * reclaim. 
+ */ if (unlikely(rbio->narrow_crcs)) bch2_rbio_narrow_crcs(rbio); @@ -1870,6 +1883,8 @@ nodecode: rbio = bch2_rbio_free(rbio); bch2_rbio_done(rbio); } +out: + memalloc_nofs_restore(nofs_flags); return; csum_err: /* @@ -1880,7 +1895,7 @@ csum_err: if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) { rbio->flags |= BCH_READ_MUST_BOUNCE; bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR); - return; + goto out; } bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector, @@ -1888,12 +1903,12 @@ csum_err: rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo, csum.hi, csum.lo, crc.csum_type); bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); - return; + goto out; decompression_err: bch_err_inum_ratelimited(c, rbio->read_pos.inode, "decompression error"); bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); - return; + goto out; } static void bch2_read_endio(struct bio *bio) @@ -1967,7 +1982,11 @@ int __bch2_read_indirect_extent(struct btree_trans *trans, if (k.k->type != KEY_TYPE_reflink_v && k.k->type != KEY_TYPE_indirect_inline_data) { bch_err_inum_ratelimited(trans->c, orig_k->k->k.p.inode, - "pointer to nonexistent indirect extent"); + "%llu len %u points to nonexistent indirect extent %llu", + orig_k->k->k.p.offset, + orig_k->k->k.size, + reflink_offset); + bch2_inconsistent_error(trans->c); ret = -EIO; goto err; } @@ -2254,16 +2273,26 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, bch2_bkey_buf_init(&sk); bch2_trans_init(&trans, c, 0, 0); -retry: - bch2_trans_begin(&trans); iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS(inode, bvec_iter.bi_sector), BTREE_ITER_SLOTS); +retry: + bch2_trans_begin(&trans); + while (1) { unsigned bytes, sectors, offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; + /* + * read_extent -> io_time_reset may cause a transaction restart + * without returning an error, we need to check for that here: + */ + if (!bch2_trans_relock(&trans)) { + ret = -EINTR; + break; + } + bch2_btree_iter_set_pos(iter, POS(inode, bvec_iter.bi_sector)); @@ -2315,19 +2344,20 @@ retry: swap(bvec_iter.bi_size, bytes); bio_advance_iter(&rbio->bio, &bvec_iter, bytes); } - bch2_trans_iter_put(&trans, iter); if (ret == -EINTR || ret == READ_RETRY || ret == READ_RETRY_AVOID) goto retry; + bch2_trans_iter_put(&trans, iter); + bch2_trans_exit(&trans); + bch2_bkey_buf_exit(&sk, c); + if (ret) { bch_err_inum_ratelimited(c, inode, "read error %i from btree lookup", ret); rbio->bio.bi_status = BLK_STS_IOERR; bch2_rbio_done(rbio); } - bch2_trans_exit(&trans); - bch2_bkey_buf_exit(&sk, c); } void bch2_fs_io_exit(struct bch_fs *c) diff --git a/libbcachefs/io.h b/libbcachefs/io.h index 2ac03c0..bc0a0bd 100644 --- a/libbcachefs/io.h +++ b/libbcachefs/io.h @@ -34,11 +34,12 @@ enum bch_write_flags { BCH_WRITE_ONLY_SPECIFIED_DEVS = (1 << 6), BCH_WRITE_WROTE_DATA_INLINE = (1 << 7), BCH_WRITE_FROM_INTERNAL = (1 << 8), + BCH_WRITE_CHECK_ENOSPC = (1 << 9), /* Internal: */ - BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 9), - BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 10), - BCH_WRITE_DONE = (1 << 11), + BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 10), + BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 11), + BCH_WRITE_DONE = (1 << 12), }; static inline u64 *op_journal_seq(struct bch_write_op *op) @@ -57,14 +58,14 @@ static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op) { return op->alloc_reserve == RESERVE_MOVINGGC ? 
op->c->copygc_wq - : op->c->wq; + : op->c->btree_update_wq; } int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *, struct bkey_i *, bool *, bool *, s64 *, s64 *); int bch2_extent_update(struct btree_trans *, struct btree_iter *, struct bkey_i *, struct disk_reservation *, - u64 *, u64, s64 *); + u64 *, u64, s64 *, bool); int bch2_fpunch_at(struct btree_trans *, struct btree_iter *, struct bpos, u64 *, s64 *); int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *); diff --git a/libbcachefs/io_types.h b/libbcachefs/io_types.h index e7aca7c..0aab779 100644 --- a/libbcachefs/io_types.h +++ b/libbcachefs/io_types.h @@ -94,7 +94,8 @@ struct bch_write_bio { bounce:1, put_bio:1, have_ioref:1, - used_mempool:1; + used_mempool:1, + first_btree_write:1; struct bio bio; }; diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c index b901be5..ac4071f 100644 --- a/libbcachefs/journal.c +++ b/libbcachefs/journal.c @@ -118,7 +118,9 @@ void bch2_journal_halt(struct journal *j) void __bch2_journal_buf_put(struct journal *j) { - closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL); + struct bch_fs *c = container_of(j, struct bch_fs, journal); + + closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL); } /* @@ -190,7 +192,8 @@ static bool __journal_entry_close(struct journal *j) * Hence, we want update/set last_seq on the current journal entry right * before we open a new one: */ - buf->data->last_seq = cpu_to_le64(journal_last_seq(j)); + buf->last_seq = journal_last_seq(j); + buf->data->last_seq = cpu_to_le64(buf->last_seq); __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq)); @@ -303,7 +306,7 @@ static int journal_entry_open(struct journal *j) j->res_get_blocked_start); j->res_get_blocked_start = 0; - mod_delayed_work(system_freezable_wq, + mod_delayed_work(c->io_complete_wq, &j->write_work, msecs_to_jiffies(j->write_delay_ms)); journal_wake(j); @@ -787,7 +790,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, * We may be called from the device add path, before the new device has * actually been added to the running filesystem: */ - if (c) + if (!new_fs) spin_lock(&c->journal.lock); memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64)); @@ -795,17 +798,20 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, swap(new_buckets, ja->buckets); swap(new_bucket_seq, ja->bucket_seq); - if (c) + if (!new_fs) spin_unlock(&c->journal.lock); while (ja->nr < nr) { struct open_bucket *ob = NULL; unsigned pos; - long bucket; + long b; if (new_fs) { - bucket = bch2_bucket_alloc_new_fs(ca); - if (bucket < 0) { + if (c) + percpu_down_read(&c->mark_lock); + b = bch2_bucket_alloc_new_fs(ca); + if (b < 0) { + percpu_up_read(&c->mark_lock); ret = -ENOSPC; goto err; } @@ -819,13 +825,11 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, goto err; } - bucket = sector_to_bucket(ca, ob->ptr.offset); + b = sector_to_bucket(ca, ob->ptr.offset); } - if (c) { - percpu_down_read(&c->mark_lock); + if (c) spin_lock(&c->journal.lock); - } /* * XXX @@ -839,9 +843,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, __array_insert_item(journal_buckets->buckets, ja->nr, pos); ja->nr++; - ja->buckets[pos] = bucket; + ja->buckets[pos] = b; ja->bucket_seq[pos] = 0; - journal_buckets->buckets[pos] = cpu_to_le64(bucket); + journal_buckets->buckets[pos] = cpu_to_le64(b); if (pos <= ja->discard_idx) ja->discard_idx = (ja->discard_idx + 1) % ja->nr; @@ -852,28 +856,27 @@ static int 
__bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, if (pos <= ja->cur_idx) ja->cur_idx = (ja->cur_idx + 1) % ja->nr; - if (!c || new_fs) - bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal, + if (c) + spin_unlock(&c->journal.lock); + + if (new_fs) { + bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal, ca->mi.bucket_size, gc_phase(GC_PHASE_SB), 0); - - if (c) { - spin_unlock(&c->journal.lock); - percpu_up_read(&c->mark_lock); - } - - if (c && !new_fs) + if (c) + percpu_up_read(&c->mark_lock); + } else { ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL, - bch2_trans_mark_metadata_bucket(&trans, NULL, ca, - bucket, BCH_DATA_journal, + bch2_trans_mark_metadata_bucket(&trans, ca, + b, BCH_DATA_journal, ca->mi.bucket_size)); - if (!new_fs) bch2_open_bucket_put(c, ob); - if (ret) - goto err; + if (ret) + goto err; + } } err: bch2_sb_resize_journal(&ca->disk_sb, @@ -1068,7 +1071,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq, bch2_journal_space_available(j); spin_unlock(&j->lock); - return 0; + return bch2_journal_reclaim_start(j); } /* init/exit: */ @@ -1192,6 +1195,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) "nr noflush writes:\t%llu\n" "nr direct reclaim:\t%llu\n" "nr background reclaim:\t%llu\n" + "reclaim kicked:\t\t%u\n" + "reclaim runs in:\t%u ms\n" "current entry sectors:\t%u\n" "current entry error:\t%u\n" "current entry:\t\t", @@ -1207,6 +1212,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) j->nr_noflush_writes, j->nr_direct_reclaim, j->nr_background_reclaim, + j->reclaim_kicked, + jiffies_to_msecs(j->next_reclaim - jiffies), j->cur_entry_sectors, j->cur_entry_error); diff --git a/libbcachefs/journal.h b/libbcachefs/journal.h index cc49712..1d55679 100644 --- a/libbcachefs/journal.h +++ b/libbcachefs/journal.h @@ -241,10 +241,11 @@ static inline void bch2_journal_add_entry(struct journal *j, struct journal_res } static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res, - enum btree_id id, const struct bkey_i *k) + enum btree_id id, unsigned level, + const struct bkey_i *k) { bch2_journal_add_entry(j, res, BCH_JSET_ENTRY_btree_keys, - id, 0, k, k->k.u64s); + id, level, k, k->k.u64s); } static inline bool journal_entry_empty(struct jset *j) diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c index c7fa03c..66a0e26 100644 --- a/libbcachefs/journal_io.c +++ b/libbcachefs/journal_io.c @@ -450,7 +450,7 @@ static int journal_entry_validate_dev_usage(struct bch_fs *c, struct jset_entry_dev_usage *u = container_of(entry, struct jset_entry_dev_usage, entry); unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); - unsigned expected = sizeof(*u) + sizeof(u->d[0]) * 7; /* Current value of BCH_DATA_NR */ + unsigned expected = sizeof(*u); unsigned dev; int ret = 0; @@ -834,7 +834,7 @@ static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c, unsigned i; for (i = 0; i < j->nr_ptrs; i++) { - struct bch_dev *ca = c->devs[j->ptrs[i].dev]; + struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev); u64 offset; div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset); @@ -1233,20 +1233,19 @@ static void journal_write_done(struct closure *cl) struct journal *j = container_of(cl, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); struct journal_buf *w = journal_last_unwritten_buf(j); - struct bch_devs_list devs = - bch2_bkey_devs(bkey_i_to_s_c(&w->key)); struct bch_replicas_padded replicas; 
union journal_res_state old, new; - u64 v, seq, last_seq; + u64 v, seq; int err = 0; bch2_time_stats_update(j->write_time, j->write_start_time); - if (!devs.nr) { + if (!w->devs_written.nr) { bch_err(c, "unable to write journal to sufficient devices"); err = -EIO; } else { - bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs); + bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, + w->devs_written); if (bch2_mark_replicas(c, &replicas.e)) err = -EIO; } @@ -1256,10 +1255,9 @@ static void journal_write_done(struct closure *cl) spin_lock(&j->lock); seq = le64_to_cpu(w->data->seq); - last_seq = le64_to_cpu(w->data->last_seq); if (seq >= j->pin.front) - journal_seq_pin(j, seq)->devs = devs; + journal_seq_pin(j, seq)->devs = w->devs_written; j->seq_ondisk = seq; if (err && (!j->err_seq || seq < j->err_seq)) @@ -1267,7 +1265,7 @@ static void journal_write_done(struct closure *cl) if (!JSET_NO_FLUSH(w->data)) { j->flushed_seq_ondisk = seq; - j->last_seq_ondisk = last_seq; + j->last_seq_ondisk = w->last_seq; } /* @@ -1297,27 +1295,27 @@ static void journal_write_done(struct closure *cl) journal_wake(j); if (test_bit(JOURNAL_NEED_WRITE, &j->flags)) - mod_delayed_work(system_freezable_wq, &j->write_work, 0); + mod_delayed_work(c->io_complete_wq, &j->write_work, 0); spin_unlock(&j->lock); if (new.unwritten_idx != new.idx && !journal_state_count(new, new.unwritten_idx)) - closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL); + closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL); } static void journal_write_endio(struct bio *bio) { struct bch_dev *ca = bio->bi_private; struct journal *j = &ca->fs->journal; + struct journal_buf *w = journal_last_unwritten_buf(j); + unsigned long flags; - if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write error: %s", + if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s", + le64_to_cpu(w->data->seq), bch2_blk_status_to_str(bio->bi_status)) || bch2_meta_write_fault("journal")) { - struct journal_buf *w = journal_last_unwritten_buf(j); - unsigned long flags; - spin_lock_irqsave(&j->err_lock, flags); - bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx); + bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx); spin_unlock_irqrestore(&j->err_lock, flags); } @@ -1371,7 +1369,7 @@ static void do_journal_write(struct closure *cl) le64_to_cpu(w->data->seq); } - continue_at(cl, journal_write_done, system_highpri_wq); + continue_at(cl, journal_write_done, c->io_complete_wq); return; } @@ -1403,7 +1401,8 @@ void bch2_journal_write(struct closure *cl) test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) { w->noflush = true; SET_JSET_NO_FLUSH(jset, true); - jset->last_seq = 0; + jset->last_seq = 0; + w->last_seq = 0; j->nr_noflush_writes++; } else { @@ -1510,14 +1509,12 @@ retry_alloc: journal_debug_buf); kfree(journal_debug_buf); bch2_fatal_error(c); - continue_at(cl, journal_write_done, system_highpri_wq); + continue_at(cl, journal_write_done, c->io_complete_wq); return; } - /* - * XXX: we really should just disable the entire journal in nochanges - * mode - */ + w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); + if (c->opts.nochanges) goto no_io; @@ -1543,14 +1540,14 @@ retry_alloc: bch2_bucket_seq_cleanup(c); - continue_at(cl, do_journal_write, system_highpri_wq); + continue_at(cl, do_journal_write, c->io_complete_wq); return; no_io: bch2_bucket_seq_cleanup(c); - continue_at(cl, journal_write_done, system_highpri_wq); + continue_at(cl, journal_write_done, c->io_complete_wq); return; err: 
bch2_inconsistent_error(c); - continue_at(cl, journal_write_done, system_highpri_wq); + continue_at(cl, journal_write_done, c->io_complete_wq); } diff --git a/libbcachefs/journal_reclaim.c b/libbcachefs/journal_reclaim.c index 7be6c65..7a0ae5d 100644 --- a/libbcachefs/journal_reclaim.c +++ b/libbcachefs/journal_reclaim.c @@ -93,6 +93,10 @@ journal_dev_space_available(struct journal *j, struct bch_dev *ca, * until we write it out - thus, account for it here: */ while ((unwritten = get_unwritten_sectors(j, &idx))) { + /* entry won't fit on this device, skip: */ + if (unwritten > ca->mi.bucket_size) + continue; + if (unwritten >= sectors) { if (!buckets) { sectors = 0; @@ -599,7 +603,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct) struct bch_fs *c = container_of(j, struct bch_fs, journal); bool kthread = (current->flags & PF_KTHREAD) != 0; u64 seq_to_flush; - size_t min_nr, nr_flushed; + size_t min_nr, min_key_cache, nr_flushed; unsigned flags; int ret = 0; @@ -634,7 +638,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct) msecs_to_jiffies(j->reclaim_delay_ms))) min_nr = 1; - if (j->prereserved.reserved * 2 > j->prereserved.remaining) + if (j->prereserved.reserved * 4 > j->prereserved.remaining) min_nr = 1; if (fifo_free(&j->pin) <= 32) @@ -649,9 +653,10 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct) atomic_long_read(&c->btree_key_cache.nr_dirty), atomic_long_read(&c->btree_key_cache.nr_keys)); + min_key_cache = min(bch2_nr_btree_keys_need_flush(c), 128UL); + nr_flushed = journal_flush_pins(j, seq_to_flush, - min_nr, - min(bch2_nr_btree_keys_need_flush(c), 128UL)); + min_nr, min_key_cache); if (direct) j->nr_direct_reclaim += nr_flushed; @@ -661,7 +666,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct) if (nr_flushed) wake_up(&j->reclaim_wait); - } while (min_nr && nr_flushed && !direct); + } while ((min_nr || min_key_cache) && !direct); memalloc_noreclaim_restore(flags); @@ -676,13 +681,15 @@ int bch2_journal_reclaim(struct journal *j) static int bch2_journal_reclaim_thread(void *arg) { struct journal *j = arg; - unsigned long next; + unsigned long delay, now; int ret = 0; set_freezable(); kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags)); + j->last_flushed = jiffies; + while (!ret && !kthread_should_stop()) { j->reclaim_kicked = false; @@ -690,7 +697,12 @@ static int bch2_journal_reclaim_thread(void *arg) ret = __bch2_journal_reclaim(j, false); mutex_unlock(&j->reclaim_lock); - next = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms); + now = jiffies; + delay = msecs_to_jiffies(j->reclaim_delay_ms); + j->next_reclaim = j->last_flushed + delay; + + if (!time_in_range(j->next_reclaim, now, now + delay)) + j->next_reclaim = now + delay; while (1) { set_current_state(TASK_INTERRUPTIBLE); @@ -698,10 +710,9 @@ static int bch2_journal_reclaim_thread(void *arg) break; if (j->reclaim_kicked) break; - if (time_after_eq(jiffies, next)) + if (time_after_eq(jiffies, j->next_reclaim)) break; - schedule_timeout(next - jiffies); - try_to_freeze(); + freezable_schedule_timeout(j->next_reclaim - jiffies); } __set_current_state(TASK_RUNNING); diff --git a/libbcachefs/journal_reclaim.h b/libbcachefs/journal_reclaim.h index adf1f5c..0fd1af1 100644 --- a/libbcachefs/journal_reclaim.h +++ b/libbcachefs/journal_reclaim.h @@ -8,11 +8,9 @@ static inline void journal_reclaim_kick(struct journal *j) { struct task_struct *p = READ_ONCE(j->reclaim_thread); - if (p && !j->reclaim_kicked) { - j->reclaim_kicked = 
true; - if (p) - wake_up_process(p); - } + j->reclaim_kicked = true; + if (p) + wake_up_process(p); } unsigned bch2_journal_dev_buckets_available(struct journal *, diff --git a/libbcachefs/journal_seq_blacklist.c b/libbcachefs/journal_seq_blacklist.c index e1b63f3..f2060f9 100644 --- a/libbcachefs/journal_seq_blacklist.c +++ b/libbcachefs/journal_seq_blacklist.c @@ -111,8 +111,7 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end) bl->start[nr].start = cpu_to_le64(start); bl->start[nr].end = cpu_to_le64(end); out_write_sb: - c->disk_sb.sb->features[0] |= - 1ULL << BCH_FEATURE_journal_seq_blacklist_v3; + c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3); ret = bch2_write_super(c); out: @@ -298,8 +297,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work) BUG_ON(new_nr && !bl); if (!new_nr) - c->disk_sb.sb->features[0] &= - ~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3); + c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3)); bch2_write_super(c); } diff --git a/libbcachefs/journal_types.h b/libbcachefs/journal_types.h index c24bc4a..61674ae 100644 --- a/libbcachefs/journal_types.h +++ b/libbcachefs/journal_types.h @@ -21,8 +21,10 @@ struct journal_buf { struct jset *data; __BKEY_PADDED(key, BCH_REPLICAS_MAX); + struct bch_devs_list devs_written; struct closure_waitlist wait; + u64 last_seq; /* copy of data->last_seq */ unsigned buf_size; /* size in bytes of @data */ unsigned sectors; /* maximum size for current entry */ @@ -248,6 +250,7 @@ struct journal { wait_queue_head_t reclaim_wait; struct task_struct *reclaim_thread; bool reclaim_kicked; + unsigned long next_reclaim; u64 nr_direct_reclaim; u64 nr_background_reclaim; diff --git a/libbcachefs/keylist.c b/libbcachefs/keylist.c index 864dfaa..cda7783 100644 --- a/libbcachefs/keylist.c +++ b/libbcachefs/keylist.c @@ -62,6 +62,6 @@ void bch2_verify_keylist_sorted(struct keylist *l) for_each_keylist_key(l, k) BUG_ON(bkey_next(k) != l->top && - bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0); + bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0); } #endif diff --git a/libbcachefs/migrate.c b/libbcachefs/migrate.c index ef69a19..1f65eca 100644 --- a/libbcachefs/migrate.c +++ b/libbcachefs/migrate.c @@ -73,9 +73,9 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k)); - bch2_trans_update(&trans, iter, sk.k, 0); - - ret = bch2_trans_commit(&trans, NULL, NULL, + ret = bch2_btree_iter_traverse(iter) ?: + bch2_trans_update(&trans, iter, sk.k, 0) ?: + bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL); /* @@ -139,7 +139,7 @@ retry: break; } - ret = bch2_btree_node_update_key(c, iter, b, k.k); + ret = bch2_btree_node_update_key(&trans, iter, b, k.k, false); if (ret == -EINTR) { b = bch2_btree_iter_peek_node(iter); ret = 0; diff --git a/libbcachefs/move.c b/libbcachefs/move.c index 5b10849..ee0f155 100644 --- a/libbcachefs/move.c +++ b/libbcachefs/move.c @@ -68,7 +68,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op) bch2_bkey_buf_init(&_insert); bch2_bkey_buf_realloc(&_insert, c, U8_MAX); - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); + bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024); iter = bch2_trans_get_iter(&trans, m->btree_id, bkey_start_pos(&bch2_keylist_front(keys)->k), @@ -84,7 +84,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op) bool extending = false, should_check_enospc; s64 i_sectors_delta = 0, disk_sectors_delta = 0; - 
bch2_trans_reset(&trans, 0); + bch2_trans_begin(&trans); k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); @@ -163,9 +163,8 @@ static int bch2_migrate_index_update(struct bch_write_op *op) goto out; } - bch2_trans_update(&trans, iter, insert, 0); - - ret = bch2_trans_commit(&trans, &op->res, + ret = bch2_trans_update(&trans, iter, insert, 0) ?: + bch2_trans_commit(&trans, &op->res, op_journal_seq(op), BTREE_INSERT_NOFAIL| m->data_opts.btree_insert_flags); @@ -192,7 +191,7 @@ nomatch: } atomic_long_inc(&c->extent_migrate_raced); trace_move_race(&new->k); - bch2_btree_iter_next_slot(iter); + bch2_btree_iter_advance(iter); goto next; } out: @@ -523,6 +522,11 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos, if (ret) goto err; + if (!k.k || bkey_cmp(k.k->p, pos)) { + ret = -ENOENT; + goto err; + } + ret = k.k->type == KEY_TYPE_inode ? 0 : -EIO; if (ret) goto err; @@ -593,6 +597,8 @@ static int __bch2_move_data(struct bch_fs *c, } } while (delay); + bch2_trans_begin(&trans); + k = bch2_btree_iter_peek(iter); stats->pos = iter->pos; @@ -648,8 +654,7 @@ static int __bch2_move_data(struct bch_fs *c, data_cmd, data_opts); if (ret2) { if (ret2 == -EINTR) { - bch2_trans_reset(&trans, 0); - bch2_trans_cond_resched(&trans); + bch2_trans_begin(&trans); continue; } @@ -762,10 +767,10 @@ static int bch2_move_btree(struct bch_fs *c, id == start_btree_id ? start_pos : POS_MIN, BTREE_ITER_PREFETCH, b) { if (kthread && kthread_should_stop()) - goto out; + break; if ((cmp_int(id, end_btree_id) ?: - bkey_cmp(b->key.k.p, end_pos)) > 0) + bpos_cmp(b->key.k.p, end_pos)) > 0) break; stats->pos = iter->pos; @@ -782,15 +787,17 @@ static int bch2_move_btree(struct bch_fs *c, BUG(); } - ret = bch2_btree_node_rewrite(c, iter, + ret = bch2_btree_node_rewrite(&trans, iter, b->data->keys.seq, 0) ?: ret; next: bch2_trans_cond_resched(&trans); } ret = bch2_trans_iter_free(&trans, iter) ?: ret; + if (kthread && kthread_should_stop()) + break; } -out: + bch2_trans_exit(&trans); if (ret) @@ -915,12 +922,12 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats) ret = bch2_move_btree(c, 0, POS_MIN, - BTREE_ID_NR, POS_MAX, + BTREE_ID_NR, SPOS_MAX, rewrite_old_nodes_pred, c, stats); if (!ret) { mutex_lock(&c->sb_lock); - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_extents_above_btree_updates_done; - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_bformat_overflow_done; + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); c->disk_sb.sb->version_min = c->disk_sb.sb->version; bch2_write_super(c); mutex_unlock(&c->sb_lock); diff --git a/libbcachefs/movinggc.c b/libbcachefs/movinggc.c index 03668e4..2acca0d 100644 --- a/libbcachefs/movinggc.c +++ b/libbcachefs/movinggc.c @@ -87,9 +87,20 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg, if (i >= 0 && p.ptr.offset < h->data[i].offset + ca->mi.bucket_size && p.ptr.gen == h->data[i].gen) { + /* + * We need to use the journal reserve here, because + * - journal reclaim depends on btree key cache + * flushing to make forward progress, + * - which has to make forward progress when the + * journal is pre-reservation full, + * - and depends on allocation - meaning allocator and + * copygc + */ + data_opts->target = io_opts->background_target; data_opts->nr_replicas = 1; - data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE; + data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE| + 
BTREE_INSERT_JOURNAL_RESERVED; data_opts->rewrite_dev = p.ptr.dev; if (p.has_ec) @@ -108,7 +119,7 @@ static bool have_copygc_reserve(struct bch_dev *ca) spin_lock(&ca->fs->freelist_lock); ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) || - ca->allocator_state != ALLOCATOR_RUNNING; + ca->allocator_state != ALLOCATOR_running; spin_unlock(&ca->fs->freelist_lock); return ret; @@ -222,7 +233,7 @@ static int bch2_copygc(struct bch_fs *c) ret = bch2_move_data(c, 0, POS_MIN, BTREE_ID_NR, POS_MAX, - &c->copygc_pd.rate, + NULL, writepoint_ptr(&c->copygc_write_point), copygc_pred, NULL, &move_stats); @@ -282,18 +293,19 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c) { struct bch_dev *ca; unsigned dev_idx; - u64 fragmented_allowed = c->copygc_threshold; - u64 fragmented = 0; + s64 wait = S64_MAX, fragmented_allowed, fragmented; for_each_rw_member(ca, c, dev_idx) { struct bch_dev_usage usage = bch2_dev_usage_read(ca); - fragmented_allowed += ((__dev_buckets_available(ca, usage) * + fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) * ca->mi.bucket_size) >> 1); - fragmented += usage.d[BCH_DATA_user].fragmented; + fragmented = usage.d[BCH_DATA_user].fragmented; + + wait = min(wait, max(0LL, fragmented_allowed - fragmented)); } - return max_t(s64, 0, fragmented_allowed - fragmented); + return wait; } static int bch2_copygc_thread(void *arg) @@ -305,6 +317,8 @@ static int bch2_copygc_thread(void *arg) set_freezable(); while (!kthread_should_stop()) { + cond_resched(); + if (kthread_wait_freezable(c->copy_gc_enabled)) break; @@ -312,11 +326,15 @@ static int bch2_copygc_thread(void *arg) wait = bch2_copygc_wait_amount(c); if (wait > clock->max_slop) { + trace_copygc_wait(c, wait, last + wait); + c->copygc_wait = last + wait; bch2_kthread_io_clock_wait(clock, last + wait, MAX_SCHEDULE_TIMEOUT); continue; } + c->copygc_wait = 0; + if (bch2_copygc(c)) break; } @@ -326,9 +344,6 @@ static int bch2_copygc_thread(void *arg) void bch2_copygc_stop(struct bch_fs *c) { - c->copygc_pd.rate.rate = UINT_MAX; - bch2_ratelimit_reset(&c->copygc_pd.rate); - if (c->copygc_thread) { kthread_stop(c->copygc_thread); put_task_struct(c->copygc_thread); @@ -365,6 +380,4 @@ int bch2_copygc_start(struct bch_fs *c) void bch2_fs_copygc_init(struct bch_fs *c) { - bch2_pd_controller_init(&c->copygc_pd); - c->copygc_pd.d_term = 0; } diff --git a/libbcachefs/opts.c b/libbcachefs/opts.c index 0cfbb56..5de2960 100644 --- a/libbcachefs/opts.c +++ b/libbcachefs/opts.c @@ -63,6 +63,18 @@ const char * const bch2_member_states[] = { #undef x +const char * const bch2_d_types[DT_MAX] = { + [DT_UNKNOWN] = "unknown", + [DT_FIFO] = "fifo", + [DT_CHR] = "chr", + [DT_DIR] = "dir", + [DT_BLK] = "blk", + [DT_REG] = "reg", + [DT_LNK] = "lnk", + [DT_SOCK] = "sock", + [DT_WHT] = "whiteout", +}; + void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src) { #define x(_name, ...) 
\ @@ -315,11 +327,20 @@ int bch2_opts_check_may_set(struct bch_fs *c) int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts, char *options) { + char *copied_opts, *copied_opts_start; char *opt, *name, *val; int ret, id; u64 v; - while ((opt = strsep(&options, ",")) != NULL) { + if (!options) + return 0; + + copied_opts = kstrdup(options, GFP_KERNEL); + if (!copied_opts) + return -1; + copied_opts_start = copied_opts; + + while ((opt = strsep(&copied_opts, ",")) != NULL) { name = strsep(&opt, "="); val = opt; @@ -363,16 +384,24 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts, bch2_opt_set_by_id(opts, id, v); } - return 0; + ret = 0; + goto out; + bad_opt: pr_err("Bad mount option %s", name); - return -1; + ret = -1; + goto out; bad_val: pr_err("Invalid value %s for mount option %s", val, name); - return -1; + ret = -1; + goto out; no_val: pr_err("Mount option %s requires a value", name); - return -1; + ret = -1; + goto out; +out: + kfree(copied_opts_start); + return ret; } /* io opts: */ diff --git a/libbcachefs/opts.h b/libbcachefs/opts.h index 001e865..003c00f 100644 --- a/libbcachefs/opts.h +++ b/libbcachefs/opts.h @@ -18,6 +18,7 @@ extern const char * const bch2_str_hash_types[]; extern const char * const bch2_data_types[]; extern const char * const bch2_cache_replacement_policies[]; extern const char * const bch2_member_states[]; +extern const char * const bch2_d_types[]; /* * Mount options; we also store defaults in the superblock. @@ -165,8 +166,23 @@ enum opt_type { x(inodes_32bit, u8, \ OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ - BCH_SB_INODE_32BIT, false, \ + BCH_SB_INODE_32BIT, true, \ NULL, "Constrain inode numbers to 32 bits") \ + x(shard_inode_numbers, u8, \ + OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + BCH_SB_SHARD_INUMS, false, \ + NULL, "Shard new inode numbers by CPU id") \ + x(inodes_use_key_cache, u8, \ + OPT_FORMAT|OPT_MOUNT, \ + OPT_BOOL(), \ + BCH_SB_INODES_USE_KEY_CACHE, true, \ + NULL, "Use the btree key cache for the inodes btree") \ + x(btree_node_mem_ptr_optimization, u8, \ + OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + NO_SB_OPT, true, \ + NULL, "Stash pointer to in memory btree node in btree ptr")\ x(gc_reserve_percent, u8, \ OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ OPT_UINT(5, 21), \ diff --git a/libbcachefs/quota.c b/libbcachefs/quota.c index 8e27251..7861781 100644 --- a/libbcachefs/quota.c +++ b/libbcachefs/quota.c @@ -372,6 +372,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type) if (ret) break; } + bch2_trans_iter_put(&trans, iter); return bch2_trans_exit(&trans) ?: ret; } @@ -449,6 +450,8 @@ int bch2_fs_quota_read(struct bch_fs *c) KEY_TYPE_QUOTA_NOCHECK); } } + bch2_trans_iter_put(&trans, iter); + return bch2_trans_exit(&trans) ?: ret; } @@ -739,7 +742,9 @@ static int bch2_set_quota_trans(struct btree_trans *trans, if (qdq->d_fieldmask & QC_INO_HARD) new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit); - return bch2_trans_update(trans, iter, &new_quota->k_i, 0); + ret = bch2_trans_update(trans, iter, &new_quota->k_i, 0); + bch2_trans_iter_put(trans, iter); + return ret; } static int bch2_set_quota(struct super_block *sb, struct kqid qid, @@ -755,7 +760,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid, bkey_quota_init(&new_quota.k_i); new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid)); - ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOUNLOCK, + ret = bch2_trans_do(c, NULL, NULL, 0, bch2_set_quota_trans(&trans, &new_quota, qdq)) ?: 
__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i)); diff --git a/libbcachefs/recovery.c b/libbcachefs/recovery.c index 86593e9..afb7264 100644 --- a/libbcachefs/recovery.c +++ b/libbcachefs/recovery.c @@ -39,6 +39,20 @@ static void drop_alloc_keys(struct journal_keys *keys) keys->nr = dst; } +/* + * Btree node pointers have a field to stack a pointer to the in memory btree + * node; we need to zero out this field when reading in btree nodes, or when + * reading in keys from the journal: + */ +static void zero_out_btree_mem_ptr(struct journal_keys *keys) +{ + struct journal_key *i; + + for (i = keys->d; i < keys->d + keys->nr; i++) + if (i->k->k.type == KEY_TYPE_btree_ptr_v2) + bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0; +} + /* iterate over keys read from the journal: */ static int __journal_key_cmp(enum btree_id l_btree_id, @@ -323,9 +337,7 @@ static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b, } static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b, - struct journal_keys *journal_keys, enum btree_id btree_id, - btree_walk_node_fn node_fn, btree_walk_key_fn key_fn) { struct btree_and_journal_iter iter; @@ -338,15 +350,9 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { - ret = key_fn(c, btree_id, b->c.level, k); - if (ret) - break; - if (b->c.level) { bch2_bkey_buf_reassemble(&tmp, c, k); - bch2_btree_and_journal_iter_advance(&iter); - child = bch2_btree_node_get_noiter(c, tmp.k, b->c.btree_id, b->c.level - 1, false); @@ -357,16 +363,17 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b btree_and_journal_iter_prefetch(c, b, iter); - ret = (node_fn ? node_fn(c, b) : 0) ?: - bch2_btree_and_journal_walk_recurse(c, child, - journal_keys, btree_id, node_fn, key_fn); + ret = bch2_btree_and_journal_walk_recurse(c, child, + btree_id, key_fn); six_unlock_read(&child->c.lock); - - if (ret) - break; } else { - bch2_btree_and_journal_iter_advance(&iter); + ret = key_fn(c, k); } + + if (ret) + break; + + bch2_btree_and_journal_iter_advance(&iter); } bch2_btree_and_journal_iter_exit(&iter); @@ -374,9 +381,7 @@ static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b return ret; } -int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys, - enum btree_id btree_id, - btree_walk_node_fn node_fn, +int bch2_btree_and_journal_walk(struct bch_fs *c, enum btree_id btree_id, btree_walk_key_fn key_fn) { struct btree *b = c->btree_roots[btree_id].b; @@ -386,10 +391,7 @@ int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_k return 0; six_lock_read(&b->c.lock, NULL, NULL); - ret = (node_fn ? 
node_fn(c, b) : 0) ?: - bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id, - node_fn, key_fn) ?: - key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key)); + ret = bch2_btree_and_journal_walk_recurse(c, b, btree_id, key_fn); six_unlock_read(&b->c.lock); return ret; @@ -521,16 +523,8 @@ static int __bch2_journal_replay_key(struct btree_trans *trans, iter = bch2_trans_get_node_iter(trans, id, k->k.p, BTREE_MAX_DEPTH, level, - BTREE_ITER_INTENT); - - /* - * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run - * extent_handle_overwrites() and extent_update_to_keys() - but we don't - * want that here, journal replay is supposed to treat extents like - * regular keys: - */ - BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS); - + BTREE_ITER_INTENT| + BTREE_ITER_NOT_EXTENTS); ret = bch2_btree_iter_traverse(iter) ?: bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN); bch2_trans_iter_put(trans, iter); @@ -558,7 +552,8 @@ static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k) BTREE_ITER_CACHED| BTREE_ITER_CACHED_NOFILL| BTREE_ITER_INTENT); - ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN); + ret = bch2_btree_iter_traverse(iter) ?: + bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN); bch2_trans_iter_put(trans, iter); return ret; } @@ -728,7 +723,7 @@ static int journal_replay_entry_early(struct bch_fs *c, case BCH_JSET_ENTRY_dev_usage: { struct jset_entry_dev_usage *u = container_of(entry, struct jset_entry_dev_usage, entry); - struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev); + struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev)); unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) / sizeof(struct jset_entry_dev_usage_type); @@ -737,7 +732,7 @@ static int journal_replay_entry_early(struct bch_fs *c, ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec); ca->usage_base->buckets_unavailable = le64_to_cpu(u->buckets_unavailable); - for (i = 0; i < nr_types; i++) { + for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) { ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets); ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors); ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented); @@ -767,7 +762,7 @@ static int journal_replay_entry_early(struct bch_fs *c, struct jset_entry_clock *clock = container_of(entry, struct jset_entry_clock, entry); - atomic64_set(&c->io_clock[clock->rw].now, clock->time); + atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time)); } } @@ -973,7 +968,7 @@ int bch2_fs_recovery(struct bch_fs *c) struct jset *last_journal_entry = NULL; u64 blacklist_seq, journal_seq; bool write_sb = false; - int ret; + int ret = 0; if (c->sb.clean) clean = read_superblock_clean(c); @@ -1017,6 +1012,18 @@ int bch2_fs_recovery(struct bch_fs *c) set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags); } + if (c->sb.version < bcachefs_metadata_version_inode_backpointers) { + bch_info(c, "version prior to inode backpointers, upgrade and fsck required"); + c->opts.version_upgrade = true; + c->opts.fsck = true; + c->opts.fix_errors = FSCK_OPT_YES; + } + + if (c->sb.version < bcachefs_metadata_version_btree_ptr_sectors_written) { + bch_info(c, "version prior to btree_ptr_sectors_written, upgrade required"); + c->opts.version_upgrade = true; + } + ret = bch2_blacklist_table_initialize(c); if (ret) { bch_err(c, "error initializing blacklist table"); @@ -1079,6 +1086,8 @@ use_clean: 
drop_alloc_keys(&c->journal_keys); } + zero_out_btree_mem_ptr(&c->journal_keys); + ret = journal_replay_early(c, clean, &c->journal_entries); if (ret) goto err; @@ -1113,14 +1122,14 @@ use_clean: bch_verbose(c, "starting alloc read"); err = "error reading allocation information"; - ret = bch2_alloc_read(c, &c->journal_keys); + ret = bch2_alloc_read(c); if (ret) goto err; bch_verbose(c, "alloc read done"); bch_verbose(c, "starting stripes_read"); err = "error reading stripes"; - ret = bch2_stripes_read(c, &c->journal_keys); + ret = bch2_stripes_read(c); if (ret) goto err; bch_verbose(c, "stripes_read done"); @@ -1131,9 +1140,11 @@ use_clean: !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) || !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) || test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) { + bool metadata_only = c->opts.norecovery; + bch_info(c, "starting mark and sweep"); err = "error in mark and sweep"; - ret = bch2_gc(c, true); + ret = bch2_gc(c, true, metadata_only); if (ret) goto err; bch_verbose(c, "mark and sweep done"); @@ -1179,25 +1190,6 @@ use_clean: bch_verbose(c, "alloc write done"); } - if (!c->sb.clean) { - if (!(c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) { - bch_info(c, "checking inode link counts"); - err = "error in recovery"; - ret = bch2_fsck_inode_nlink(c); - if (ret) - goto err; - bch_verbose(c, "check inodes done"); - - } else { - bch_verbose(c, "checking for deleted inodes"); - err = "error in recovery"; - ret = bch2_fsck_walk_inodes_only(c); - if (ret) - goto err; - bch_verbose(c, "check inodes done"); - } - } - if (c->opts.fsck) { bch_info(c, "starting fsck"); err = "error in fsck"; @@ -1205,6 +1197,13 @@ use_clean: if (ret) goto err; bch_verbose(c, "fsck done"); + } else if (!c->sb.clean) { + bch_verbose(c, "checking for deleted inodes"); + err = "error in recovery"; + ret = bch2_fsck_walk_inodes_only(c); + if (ret) + goto err; + bch_verbose(c, "check inodes done"); } if (enabled_qtypes(c)) { @@ -1232,20 +1231,21 @@ use_clean: mutex_lock(&c->sb_lock); if (c->opts.version_upgrade) { - c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current); - c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL; + c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current); + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); write_sb = true; } if (!test_bit(BCH_FS_ERROR, &c->flags)) { - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_info; + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); write_sb = true; } if (c->opts.fsck && - !test_bit(BCH_FS_ERROR, &c->flags)) { - c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink; + !test_bit(BCH_FS_ERROR, &c->flags) && + !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) { SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0); + SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0); write_sb = true; } @@ -1256,10 +1256,9 @@ use_clean: if (c->journal_seq_blacklist_table && c->journal_seq_blacklist_table->nr > 128) queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work); -out: + ret = 0; -err: -fsck_err: +out: set_bit(BCH_FS_FSCK_DONE, &c->flags); bch2_flush_fsck_errs(c); @@ -1273,6 +1272,10 @@ fsck_err: else bch_verbose(c, "ret %i", ret); return ret; +err: +fsck_err: + bch2_fs_emergency_read_only(c); + goto out; } int bch2_fs_initialize(struct bch_fs *c) @@ -1289,12 +1292,12 @@ int bch2_fs_initialize(struct bch_fs *c) bch_notice(c, "initializing new filesystem"); mutex_lock(&c->sb_lock); - c->disk_sb.sb->compat[0] |= 1ULL << 
BCH_COMPAT_extents_above_btree_updates_done; - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_bformat_overflow_done; + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); if (c->opts.version_upgrade) { - c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current); - c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL; + c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current); + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); bch2_write_super(c); } @@ -1336,10 +1339,14 @@ int bch2_fs_initialize(struct bch_fs *c) * Write out the superblock and journal buckets, now that we can do * btree updates */ - err = "error writing alloc info"; - ret = bch2_alloc_write(c, 0); - if (ret) - goto err; + err = "error marking superblock and journal"; + for_each_member_device(ca, c, i) { + ret = bch2_trans_mark_dev_sb(c, ca); + if (ret) { + percpu_ref_put(&ca->ref); + goto err; + } + } bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL); diff --git a/libbcachefs/recovery.h b/libbcachefs/recovery.h index fa91851..e5565e4 100644 --- a/libbcachefs/recovery.h +++ b/libbcachefs/recovery.h @@ -45,12 +45,9 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *, struct bch_fs *, struct btree *); -typedef int (*btree_walk_node_fn)(struct bch_fs *c, struct btree *b); -typedef int (*btree_walk_key_fn)(struct bch_fs *c, enum btree_id id, - unsigned level, struct bkey_s_c k); +typedef int (*btree_walk_key_fn)(struct bch_fs *c, struct bkey_s_c k); -int bch2_btree_and_journal_walk(struct bch_fs *, struct journal_keys *, enum btree_id, - btree_walk_node_fn, btree_walk_key_fn); +int bch2_btree_and_journal_walk(struct bch_fs *, enum btree_id, btree_walk_key_fn); void bch2_journal_keys_free(struct journal_keys *); void bch2_journal_entries_free(struct list_head *); diff --git a/libbcachefs/reflink.c b/libbcachefs/reflink.c index 0978ad9..3d9c5c5 100644 --- a/libbcachefs/reflink.c +++ b/libbcachefs/reflink.c @@ -2,6 +2,7 @@ #include "bcachefs.h" #include "bkey_buf.h" #include "btree_update.h" +#include "buckets.h" #include "extents.h" #include "inode.h" #include "io.h" @@ -41,24 +42,22 @@ void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c, pr_buf(out, "idx %llu", le64_to_cpu(p.v->idx)); } -enum merge_result bch2_reflink_p_merge(struct bch_fs *c, - struct bkey_s _l, struct bkey_s _r) +bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) { struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l); - struct bkey_s_reflink_p r = bkey_s_to_reflink_p(_r); + struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r); - if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx)) - return BCH_MERGE_NOMERGE; + /* + * Disabled for now, the triggers code needs to be reworked for merging + * of reflink pointers to work: + */ + return false; - if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) { - bch2_key_resize(l.k, KEY_SIZE_MAX); - bch2_cut_front_s(l.k->p, _r); - return BCH_MERGE_PARTIAL; - } + if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx)) + return false; bch2_key_resize(l.k, l.k->size + r.k->size); - - return BCH_MERGE_MERGE; + return true; } /* indirect extents */ @@ -83,6 +82,14 @@ void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, bch2_bkey_ptrs_to_text(out, c, k); } +bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c 
_r) +{ + struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); + struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); + + return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); +} + /* indirect inline data */ const char *bch2_indirect_inline_data_invalid(const struct bch_fs *c, @@ -135,9 +142,9 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, goto err; /* rewind iter to start of hole, if necessary: */ - bch2_btree_iter_set_pos(reflink_iter, bkey_start_pos(k.k)); + bch2_btree_iter_set_pos_to_extent_start(reflink_iter); - r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_val_bytes(&orig->k)); + r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) goto err; @@ -150,24 +157,20 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k)); - refcount = (void *) &r_v->v; + refcount = bkey_refcount(r_v); *refcount = 0; memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k)); - bch2_trans_update(trans, reflink_iter, r_v, 0); - - r_p = bch2_trans_kmalloc(trans, sizeof(*r_p)); - if (IS_ERR(r_p)) { - ret = PTR_ERR(r_p); + ret = bch2_trans_update(trans, reflink_iter, r_v, 0); + if (ret) goto err; - } orig->k.type = KEY_TYPE_reflink_p; r_p = bkey_i_to_reflink_p(orig); set_bkey_val_bytes(&r_p->k, sizeof(r_p->v)); r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k)); - bch2_trans_update(trans, extent_iter, &r_p->k_i, 0); + ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, 0); err: if (!IS_ERR(reflink_iter)) c->reflink_hint = reflink_iter->pos.offset; @@ -178,18 +181,20 @@ err: static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) { - struct bkey_s_c k = bch2_btree_iter_peek(iter); + struct bkey_s_c k; int ret; for_each_btree_key_continue(iter, 0, k, ret) { if (bkey_cmp(iter->pos, end) >= 0) - return bkey_s_c_null; + break; if (bkey_extent_is_data(k.k)) - break; + return k; } - return k; + if (bkey_cmp(iter->pos, end) >= 0) + bch2_btree_iter_set_pos(iter, end); + return ret ? 
bkey_s_c_err(ret) : bkey_s_c_null; } s64 bch2_remap_range(struct bch_fs *c, @@ -202,8 +207,8 @@ s64 bch2_remap_range(struct bch_fs *c, struct bkey_s_c src_k; struct bkey_buf new_dst, new_src; struct bpos dst_end = dst_start, src_end = src_start; - struct bpos dst_want, src_want; - u64 src_done, dst_done; + struct bpos src_want; + u64 dst_done; int ret = 0, ret2 = 0; if (!percpu_ref_tryget(&c->writes)) @@ -223,7 +228,10 @@ s64 bch2_remap_range(struct bch_fs *c, dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, dst_start, BTREE_ITER_INTENT); - while (ret == 0 || ret == -EINTR) { + while ((ret == 0 || ret == -EINTR) && + bkey_cmp(dst_iter->pos, dst_end) < 0) { + struct disk_reservation disk_res = { 0 }; + bch2_trans_begin(&trans); if (fatal_signal_pending(current)) { @@ -231,33 +239,30 @@ s64 bch2_remap_range(struct bch_fs *c, break; } + dst_done = dst_iter->pos.offset - dst_start.offset; + src_want = POS(src_start.inode, src_start.offset + dst_done); + bch2_btree_iter_set_pos(src_iter, src_want); + src_k = get_next_src(src_iter, src_end); ret = bkey_err(src_k); if (ret) continue; - src_done = bpos_min(src_iter->pos, src_end).offset - - src_start.offset; - dst_want = POS(dst_start.inode, dst_start.offset + src_done); - - if (bkey_cmp(dst_iter->pos, dst_want) < 0) { - ret = bch2_fpunch_at(&trans, dst_iter, dst_want, - journal_seq, i_sectors_delta); + if (bkey_cmp(src_want, src_iter->pos) < 0) { + ret = bch2_fpunch_at(&trans, dst_iter, + bpos_min(dst_end, + POS(dst_iter->pos.inode, dst_iter->pos.offset + + src_iter->pos.offset - src_want.offset)), + journal_seq, i_sectors_delta); continue; } - BUG_ON(bkey_cmp(dst_iter->pos, dst_want)); - - if (!bkey_cmp(dst_iter->pos, dst_end)) - break; - if (src_k.k->type != KEY_TYPE_reflink_p) { + bch2_btree_iter_set_pos_to_extent_start(src_iter); + bch2_bkey_buf_reassemble(&new_src, c, src_k); src_k = bkey_i_to_s_c(new_src.k); - bch2_cut_front(src_iter->pos, new_src.k); - bch2_cut_back(src_end, new_src.k); - ret = bch2_make_extent_indirect(&trans, src_iter, new_src.k); if (ret) @@ -273,7 +278,7 @@ s64 bch2_remap_range(struct bch_fs *c, bkey_reflink_p_init(new_dst.k); u64 offset = le64_to_cpu(src_p.v->idx) + - (src_iter->pos.offset - + (src_want.offset - bkey_start_offset(src_k.k)); dst_p->v.idx = cpu_to_le64(offset); @@ -283,18 +288,13 @@ s64 bch2_remap_range(struct bch_fs *c, new_dst.k->k.p = dst_iter->pos; bch2_key_resize(&new_dst.k->k, - min(src_k.k->p.offset - src_iter->pos.offset, + min(src_k.k->p.offset - src_want.offset, dst_end.offset - dst_iter->pos.offset)); - ret = bch2_extent_update(&trans, dst_iter, new_dst.k, - NULL, journal_seq, - new_i_size, i_sectors_delta); - if (ret) - continue; - - dst_done = dst_iter->pos.offset - dst_start.offset; - src_want = POS(src_start.inode, src_start.offset + dst_done); - bch2_btree_iter_set_pos(src_iter, src_want); + &disk_res, journal_seq, + new_i_size, i_sectors_delta, + true); + bch2_disk_reservation_put(c, &disk_res); } bch2_trans_iter_put(&trans, dst_iter); bch2_trans_iter_put(&trans, src_iter); @@ -305,12 +305,12 @@ s64 bch2_remap_range(struct bch_fs *c, dst_done = dst_iter->pos.offset - dst_start.offset; new_i_size = min(dst_iter->pos.offset << 9, new_i_size); - bch2_trans_begin(&trans); - do { struct bch_inode_unpacked inode_u; struct btree_iter *inode_iter; + bch2_trans_begin(&trans); + inode_iter = bch2_inode_peek(&trans, &inode_u, dst_start.inode, BTREE_ITER_INTENT); ret2 = PTR_ERR_OR_ZERO(inode_iter); diff --git a/libbcachefs/reflink.h b/libbcachefs/reflink.h index 9d5e7dc..68c5cb5 100644 --- 
a/libbcachefs/reflink.h +++ b/libbcachefs/reflink.h @@ -5,8 +5,7 @@ const char *bch2_reflink_p_invalid(const struct bch_fs *, struct bkey_s_c); void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); -enum merge_result bch2_reflink_p_merge(struct bch_fs *, - struct bkey_s, struct bkey_s); +bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); #define bch2_bkey_ops_reflink_p (struct bkey_ops) { \ .key_invalid = bch2_reflink_p_invalid, \ @@ -34,6 +33,30 @@ void bch2_indirect_inline_data_to_text(struct printbuf *, .val_to_text = bch2_indirect_inline_data_to_text, \ } +static inline const __le64 *bkey_refcount_c(struct bkey_s_c k) +{ + switch (k.k->type) { + case KEY_TYPE_reflink_v: + return &bkey_s_c_to_reflink_v(k).v->refcount; + case KEY_TYPE_indirect_inline_data: + return &bkey_s_c_to_indirect_inline_data(k).v->refcount; + default: + return NULL; + } +} + +static inline __le64 *bkey_refcount(struct bkey_i *k) +{ + switch (k->k.type) { + case KEY_TYPE_reflink_v: + return &bkey_i_to_reflink_v(k)->v.refcount; + case KEY_TYPE_indirect_inline_data: + return &bkey_i_to_indirect_inline_data(k)->v.refcount; + default: + return NULL; + } +} + s64 bch2_remap_range(struct bch_fs *, struct bpos, struct bpos, u64, u64 *, u64, s64 *); diff --git a/libbcachefs/replicas.c b/libbcachefs/replicas.c index 1e29717..dbbbcc6 100644 --- a/libbcachefs/replicas.c +++ b/libbcachefs/replicas.c @@ -313,8 +313,8 @@ static int replicas_table_update(struct bch_fs *c, out: free_percpu(new_gc); kfree(new_scratch); - free_percpu(new_usage[1]); - free_percpu(new_usage[0]); + for (i = 0; i < ARRAY_SIZE(new_usage); i++) + free_percpu(new_usage[i]); kfree(new_base); return ret; err: @@ -435,6 +435,8 @@ static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k, unsigned i; int ret; + memset(&search, 0, sizeof(search)); + for (i = 0; i < cached.nr; i++) { bch2_replicas_entry_cached(&search.e, cached.devs[i]); @@ -1063,11 +1065,27 @@ unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca) return ret; } +void bch2_fs_replicas_exit(struct bch_fs *c) +{ + unsigned i; + + kfree(c->usage_scratch); + for (i = 0; i < ARRAY_SIZE(c->usage); i++) + free_percpu(c->usage[i]); + kfree(c->usage_base); + kfree(c->replicas.entries); + kfree(c->replicas_gc.entries); + + mempool_exit(&c->replicas_delta_pool); +} + int bch2_fs_replicas_init(struct bch_fs *c) { bch2_journal_entry_res_resize(&c->journal, &c->replicas_journal_res, reserve_journal_replicas(c, &c->replicas)); - return replicas_table_update(c, &c->replicas); + return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1, + REPLICAS_DELTA_LIST_MAX) ?: + replicas_table_update(c, &c->replicas); } diff --git a/libbcachefs/replicas.h b/libbcachefs/replicas.h index c77e873..72ac544 100644 --- a/libbcachefs/replicas.h +++ b/libbcachefs/replicas.h @@ -102,6 +102,7 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *); extern const struct bch_sb_field_ops bch_sb_field_ops_replicas; extern const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0; +void bch2_fs_replicas_exit(struct bch_fs *); int bch2_fs_replicas_init(struct bch_fs *); #endif /* _BCACHEFS_REPLICAS_H */ diff --git a/libbcachefs/str_hash.h b/libbcachefs/str_hash.h index 9f0bd44..2360234 100644 --- a/libbcachefs/str_hash.h +++ b/libbcachefs/str_hash.h @@ -12,7 +12,7 @@ #include #include -#include +#include static inline enum bch_str_hash_type bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt) @@ -33,10 +33,11 @@ 
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt) struct bch_hash_info { u8 type; - union { - __le64 crc_key; - SIPHASH_KEY siphash_key; - }; + /* + * For crc32 or crc64 string hashes the first key value of + * the siphash_key (k0) is used as the key. + */ + SIPHASH_KEY siphash_key; }; static inline struct bch_hash_info @@ -46,7 +47,7 @@ bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi) struct bch_hash_info info = { .type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) & ~(~0U << INODE_STR_HASH_BITS), - .crc_key = bi->bi_hash_seed, + .siphash_key = { .k0 = bi->bi_hash_seed } }; if (unlikely(info.type == BCH_STR_HASH_SIPHASH_OLD)) { @@ -76,10 +77,12 @@ static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx, { switch (info->type) { case BCH_STR_HASH_CRC32C: - ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key)); + ctx->crc32c = crc32c(~0, &info->siphash_key.k0, + sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_CRC64: - ctx->crc64 = crc64_be(~0, &info->crc_key, sizeof(info->crc_key)); + ctx->crc64 = crc64_be(~0, &info->siphash_key.k0, + sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_SIPHASH_OLD: case BCH_STR_HASH_SIPHASH: @@ -206,7 +209,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans, iter = bch2_trans_copy_iter(trans, start); - bch2_btree_iter_next_slot(iter); + bch2_btree_iter_advance(iter); for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) { if (k.k->type != desc.key_type && @@ -278,7 +281,7 @@ not_found: swap(iter, slot); insert->k.p = iter->pos; - bch2_trans_update(trans, iter, insert, 0); + ret = bch2_trans_update(trans, iter, insert, 0); } goto out; @@ -293,20 +296,20 @@ int bch2_hash_delete_at(struct btree_trans *trans, struct bkey_i *delete; int ret; + delete = bch2_trans_kmalloc(trans, sizeof(*delete)); + ret = PTR_ERR_OR_ZERO(delete); + if (ret) + return ret; + ret = bch2_hash_needs_whiteout(trans, desc, info, iter); if (ret < 0) return ret; - delete = bch2_trans_kmalloc(trans, sizeof(*delete)); - if (IS_ERR(delete)) - return PTR_ERR(delete); - bkey_init(&delete->k); delete->k.p = iter->pos; delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted; - bch2_trans_update(trans, iter, delete, 0); - return 0; + return bch2_trans_update(trans, iter, delete, 0); } static __always_inline diff --git a/libbcachefs/super-io.c b/libbcachefs/super-io.c index 1793697..3903b73 100644 --- a/libbcachefs/super-io.c +++ b/libbcachefs/super-io.c @@ -50,8 +50,7 @@ static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb, unsigned old_u64s = f ? 
le32_to_cpu(f->u64s) : 0; unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s; - BUG_ON(get_order(__vstruct_bytes(struct bch_sb, sb_u64s)) > - sb->page_order); + BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size); if (!f && !u64s) { /* nothing to do: */ @@ -101,18 +100,23 @@ void bch2_free_super(struct bch_sb_handle *sb) if (!IS_ERR_OR_NULL(sb->bdev)) blkdev_put(sb->bdev, sb->mode); - free_pages((unsigned long) sb->sb, sb->page_order); + kfree(sb->sb); memset(sb, 0, sizeof(*sb)); } int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s) { size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s); - unsigned order = get_order(new_bytes); + size_t new_buffer_size; struct bch_sb *new_sb; struct bio *bio; - if (sb->sb && sb->page_order >= order) + if (sb->bdev) + new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev)); + + new_buffer_size = roundup_pow_of_two(new_bytes); + + if (sb->sb && sb->buffer_size >= new_buffer_size) return 0; if (sb->have_layout) { @@ -127,14 +131,15 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s) } } - if (sb->page_order >= order && sb->sb) + if (sb->buffer_size >= new_buffer_size && sb->sb) return 0; if (dynamic_fault("bcachefs:add:super_realloc")) return -ENOMEM; if (sb->have_bio) { - bio = bio_kmalloc(GFP_KERNEL, 1 << order); + bio = bio_kmalloc(GFP_KERNEL, + DIV_ROUND_UP(new_buffer_size, PAGE_SIZE)); if (!bio) return -ENOMEM; @@ -143,17 +148,12 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s) sb->bio = bio; } - new_sb = (void *) __get_free_pages(GFP_NOFS|__GFP_ZERO, order); + new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO); if (!new_sb) return -ENOMEM; - if (sb->sb) - memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order); - - free_pages((unsigned long) sb->sb, sb->page_order); sb->sb = new_sb; - - sb->page_order = order; + sb->buffer_size = new_buffer_size; return 0; } @@ -367,9 +367,15 @@ static void bch2_sb_update(struct bch_fs *c) c->sb.clean = BCH_SB_CLEAN(src); c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src); c->sb.encoded_extent_max= 1 << BCH_SB_ENCODED_EXTENT_MAX_BITS(src); - c->sb.time_base_lo = le64_to_cpu(src->time_base_lo); + + c->sb.nsec_per_time_unit = le32_to_cpu(src->time_precision); + c->sb.time_units_per_sec = NSEC_PER_SEC / c->sb.nsec_per_time_unit; + + /* XXX this is wrong, we need a 96 or 128 bit integer type */ + c->sb.time_base_lo = div_u64(le64_to_cpu(src->time_base_lo), + c->sb.nsec_per_time_unit); c->sb.time_base_hi = le32_to_cpu(src->time_base_hi); - c->sb.time_precision = le32_to_cpu(src->time_precision); + c->sb.features = le64_to_cpu(src->features[0]); c->sb.compat = le64_to_cpu(src->compat[0]); @@ -433,6 +439,11 @@ int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src) __copy_super(&c->disk_sb, src); + if (BCH_SB_HAS_ERRORS(c->disk_sb.sb)) + set_bit(BCH_FS_ERROR, &c->flags); + if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) + set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags); + ret = bch2_sb_replicas_to_cpu_replicas(c); if (ret) return ret; @@ -475,7 +486,7 @@ reread: bio_set_dev(sb->bio, sb->bdev); sb->bio->bi_iter.bi_sector = offset; bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META); - bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order); + bch2_bio_map(sb->bio, sb->sb, sb->buffer_size); if (submit_bio_wait(sb->bio)) return "IO error"; @@ -492,7 +503,7 @@ reread: if (bytes > 512 << sb->sb->layout.sb_max_size_bits) return "Bad superblock: too big"; - if (get_order(bytes) > sb->page_order) { + if (bytes > sb->buffer_size) { if 
(bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s))) return "cannot allocate memory"; goto reread; @@ -669,7 +680,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx) sb->offset = sb->layout.sb_offset[idx]; - SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum); + SET_BCH_SB_CSUM_TYPE(sb, bch2_csum_opt_to_type(c->opts.metadata_checksum, false)); sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb), null_nonce(), sb); @@ -698,8 +709,12 @@ int bch2_write_super(struct bch_fs *c) const char *err; struct bch_devs_mask sb_written; bool wrote, can_mount_without_written, can_mount_with_written; + unsigned degraded_flags = BCH_FORCE_IF_DEGRADED; int ret = 0; + if (c->opts.very_degraded) + degraded_flags |= BCH_FORCE_IF_LOST; + lockdep_assert_held(&c->sb_lock); closure_init_stack(cl); @@ -709,6 +724,8 @@ int bch2_write_super(struct bch_fs *c) if (test_bit(BCH_FS_ERROR, &c->flags)) SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1); + if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags)) + SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1); SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN); @@ -770,13 +787,13 @@ int bch2_write_super(struct bch_fs *c) nr_wrote = dev_mask_nr(&sb_written); can_mount_with_written = - bch2_have_enough_devs(c, sb_written, BCH_FORCE_IF_DEGRADED, false); + bch2_have_enough_devs(c, sb_written, degraded_flags, false); for (i = 0; i < ARRAY_SIZE(sb_written.d); i++) sb_written.d[i] = ~sb_written.d[i]; can_mount_without_written = - bch2_have_enough_devs(c, sb_written, BCH_FORCE_IF_DEGRADED, false); + bch2_have_enough_devs(c, sb_written, degraded_flags, false); /* * If we would be able to mount _without_ the devices we successfully @@ -965,7 +982,8 @@ int bch2_fs_mark_dirty(struct bch_fs *c) mutex_lock(&c->sb_lock); SET_BCH_SB_CLEAN(c->disk_sb.sb, false); - c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALWAYS; + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS); + c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1); ret = bch2_write_super(c); mutex_unlock(&c->sb_lock); @@ -982,7 +1000,7 @@ static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size) * The u64s field counts from the start of data, ignoring the shared * fields. 
*/ - entry->u64s = u64s - 1; + entry->u64s = cpu_to_le16(u64s - 1); *end = vstruct_next(*end); return entry; @@ -1075,7 +1093,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c, clock->entry.type = BCH_JSET_ENTRY_clock; clock->rw = i; - clock->time = atomic64_read(&c->io_clock[i].now); + clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now)); } } @@ -1092,10 +1110,10 @@ void bch2_fs_mark_clean(struct bch_fs *c) SET_BCH_SB_CLEAN(c->disk_sb.sb, true); - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_info; - c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_metadata; - c->disk_sb.sb->features[0] &= ~(1ULL << BCH_FEATURE_extents_above_btree_updates); - c->disk_sb.sb->features[0] &= ~(1ULL << BCH_FEATURE_btree_updates_journalled); + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata); + c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates)); + c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled)); u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved; diff --git a/libbcachefs/super.c b/libbcachefs/super.c index 670e9cd..ce8e5d4 100644 --- a/libbcachefs/super.c +++ b/libbcachefs/super.c @@ -99,7 +99,7 @@ static int bch2_dev_alloc(struct bch_fs *, unsigned); static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *); static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *); -struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev) +struct bch_fs *bch2_dev_to_fs(dev_t dev) { struct bch_fs *c; struct bch_dev *ca; @@ -110,7 +110,7 @@ struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev) list_for_each_entry(c, &bch_fs_list, list) for_each_member_device_rcu(ca, c, i, NULL) - if (ca->disk_sb.bdev == bdev) { + if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) { closure_get(&c->cl); goto found; } @@ -269,7 +269,7 @@ static void bch2_writes_disabled(struct percpu_ref *writes) void bch2_fs_read_only(struct bch_fs *c) { if (!test_bit(BCH_FS_RW, &c->flags)) { - BUG_ON(c->journal.reclaim_thread); + bch2_journal_reclaim_stop(&c->journal); return; } @@ -286,7 +286,6 @@ void bch2_fs_read_only(struct bch_fs *c) percpu_ref_kill(&c->writes); cancel_work_sync(&c->ec_stripe_delete_work); - cancel_delayed_work(&c->pd_controllers_update); /* * If we're not doing an emergency shutdown, we want to wait on @@ -371,8 +370,6 @@ static int bch2_fs_read_write_late(struct bch_fs *c) return ret; } - schedule_delayed_work(&c->pd_controllers_update, 5 * HZ); - schedule_work(&c->ec_stripe_delete_work); return 0; @@ -384,6 +381,11 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) unsigned i; int ret; + if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) { + bch_err(c, "cannot go rw, unfixed btree errors"); + return -EROFS; + } + if (test_bit(BCH_FS_RW, &c->flags)) return 0; @@ -429,12 +431,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) for_each_rw_member(ca, c, i) bch2_wake_allocator(ca); - ret = bch2_journal_reclaim_start(&c->journal); - if (ret) { - bch_err(c, "error starting journal reclaim: %i", ret); - return ret; - } - if (!early) { ret = bch2_fs_read_write_late(c); if (ret) @@ -443,6 +439,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) percpu_ref_reinit(&c->writes); set_bit(BCH_FS_RW, &c->flags); + set_bit(BCH_FS_WAS_RW, &c->flags); return 0; err: __bch2_fs_read_only(c); @@ -480,6 +477,7 @@ static void __bch2_fs_free(struct 
bch_fs *c) bch2_fs_btree_iter_exit(c); bch2_fs_btree_key_cache_exit(&c->btree_key_cache); bch2_fs_btree_cache_exit(c); + bch2_fs_replicas_exit(c); bch2_fs_journal_exit(&c->journal); bch2_io_clock_exit(&c->io_clock[WRITE]); bch2_io_clock_exit(&c->io_clock[READ]); @@ -487,10 +485,6 @@ static void __bch2_fs_free(struct bch_fs *c) bch2_journal_keys_free(&c->journal_keys); bch2_journal_entries_free(&c->journal_entries); percpu_free_rwsem(&c->mark_lock); - kfree(c->usage_scratch); - for (i = 0; i < ARRAY_SIZE(c->usage); i++) - free_percpu(c->usage[i]); - kfree(c->usage_base); if (c->btree_iters_bufs) for_each_possible_cpu(cpu) @@ -504,20 +498,21 @@ static void __bch2_fs_free(struct bch_fs *c) bioset_exit(&c->btree_bio); mempool_exit(&c->fill_iter); percpu_ref_exit(&c->writes); - kfree(c->replicas.entries); - kfree(c->replicas_gc.entries); kfree(rcu_dereference_protected(c->disk_groups, 1)); kfree(c->journal_seq_blacklist_table); kfree(c->unused_inode_hints); free_heap(&c->copygc_heap); + if (c->io_complete_wq ) + destroy_workqueue(c->io_complete_wq ); if (c->copygc_wq) destroy_workqueue(c->copygc_wq); - if (c->wq) - destroy_workqueue(c->wq); + if (c->btree_io_complete_wq) + destroy_workqueue(c->btree_io_complete_wq); + if (c->btree_update_wq) + destroy_workqueue(c->btree_update_wq); - free_pages((unsigned long) c->disk_sb.sb, - c->disk_sb.page_order); + bch2_free_super(&c->disk_sb); kvpfree(c, sizeof(*c)); module_put(THIS_MODULE); } @@ -547,8 +542,7 @@ void __bch2_fs_stop(struct bch_fs *c) for_each_member_device(ca, c, i) if (ca->kobj.state_in_sysfs && ca->disk_sb.bdev) - sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj, - "bcachefs"); + sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); if (c->kobj.state_in_sysfs) kobject_del(&c->kobj); @@ -566,8 +560,6 @@ void __bch2_fs_stop(struct bch_fs *c) for_each_member_device(ca, c, i) cancel_work_sync(&ca->io_error_work); - cancel_work_sync(&c->btree_write_error_work); - cancel_delayed_work_sync(&c->pd_controllers_update); cancel_work_sync(&c->read_only_work); for (i = 0; i < c->sb.nr_devices; i++) @@ -632,9 +624,11 @@ static const char *bch2_fs_online(struct bch_fs *c) down_write(&c->state_lock); err = "error creating sysfs objects"; - __for_each_member_device(ca, c, i, NULL) - if (bch2_dev_sysfs_online(c, ca)) + for_each_member_device(ca, c, i) + if (bch2_dev_sysfs_online(c, ca)) { + percpu_ref_put(&ca->ref); goto err; + } list_add(&c->list, &bch_fs_list); err = NULL; @@ -693,9 +687,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) mutex_init(&c->bio_bounce_pages_lock); - bio_list_init(&c->btree_write_error_list); spin_lock_init(&c->btree_write_error_lock); - INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work); INIT_WORK(&c->journal_seq_blacklist_gc_work, bch2_blacklist_entries_gc); @@ -763,10 +755,14 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus())); - if (!(c->wq = alloc_workqueue("bcachefs", + if (!(c->btree_update_wq = alloc_workqueue("bcachefs", + WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || + !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io", WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || !(c->copygc_wq = alloc_workqueue("bcachefs_copygc", WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || + !(c->io_complete_wq = alloc_workqueue("bcachefs_io", + WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) || percpu_ref_init(&c->writes, bch2_writes_disabled, 
PERCPU_REF_INIT_DEAD, GFP_KERNEL) || mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || @@ -909,9 +905,16 @@ int bch2_fs_start(struct bch_fs *c) /* * Allocator threads don't start filling copygc reserve until after we * set BCH_FS_STARTED - wake them now: + * + * XXX ugly hack: + * Need to set ca->allocator_state here instead of relying on the + * allocator threads to do it to avoid racing with the copygc threads + * checking it and thinking they have no alloc reserve: */ - for_each_online_member(ca, c, i) + for_each_online_member(ca, c, i) { + ca->allocator_state = ALLOCATOR_running; bch2_wake_allocator(ca); + } if (c->opts.read_only || c->opts.nochanges) { bch2_fs_read_only(c); @@ -1014,8 +1017,7 @@ static void bch2_dev_free(struct bch_dev *ca) if (ca->kobj.state_in_sysfs && ca->disk_sb.bdev) - sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj, - "bcachefs"); + sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); if (ca->kobj.state_in_sysfs) kobject_del(&ca->kobj); @@ -1051,10 +1053,7 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca) wait_for_completion(&ca->io_ref_completion); if (ca->kobj.state_in_sysfs) { - struct kobject *block = - &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj; - - sysfs_remove_link(block, "bcachefs"); + sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); sysfs_remove_link(&ca->kobj, "block"); } @@ -1091,12 +1090,12 @@ static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca) } if (ca->disk_sb.bdev) { - struct kobject *block = - &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj; + struct kobject *block = bdev_kobj(ca->disk_sb.bdev); ret = sysfs_create_link(block, &ca->kobj, "bcachefs"); if (ret) return ret; + ret = sysfs_create_link(&ca->kobj, block, "block"); if (ret) return ret; @@ -1437,7 +1436,7 @@ int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, /* Device add/removal: */ -int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) +static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) { struct btree_trans trans; size_t i; @@ -1680,7 +1679,7 @@ have_slot: bch2_dev_usage_journal_reserve(c); err = "error marking superblock"; - ret = bch2_trans_mark_dev_sb(c, NULL, ca); + ret = bch2_trans_mark_dev_sb(c, ca); if (ret) goto err_late; @@ -1740,7 +1739,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path) ca = bch_dev_locked(c, dev_idx); - if (bch2_trans_mark_dev_sb(c, NULL, ca)) { + if (bch2_trans_mark_dev_sb(c, ca)) { err = "bch2_trans_mark_dev_sb() error"; goto err; } @@ -1818,6 +1817,11 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) goto err; } + ret = bch2_trans_mark_dev_sb(c, ca); + if (ret) { + goto err; + } + mutex_lock(&c->sb_lock); mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx]; mi->nbuckets = cpu_to_le64(nbuckets); @@ -1834,20 +1838,23 @@ err: /* return with ref on ca->ref: */ struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path) { - struct block_device *bdev = lookup_bdev(path); struct bch_dev *ca; + dev_t dev; unsigned i; + int ret; - if (IS_ERR(bdev)) - return ERR_CAST(bdev); + ret = lookup_bdev(path, &dev); + if (ret) + return ERR_PTR(ret); - for_each_member_device(ca, c, i) - if (ca->disk_sb.bdev == bdev) + rcu_read_lock(); + for_each_member_device_rcu(ca, c, i, NULL) + if (ca->disk_sb.bdev->bd_dev == dev) goto found; - ca = ERR_PTR(-ENOENT); found: - bdput(bdev); + rcu_read_unlock(); + return ca; } diff --git a/libbcachefs/super.h b/libbcachefs/super.h index bef2790..739e8fd 100644 --- 
a/libbcachefs/super.h +++ b/libbcachefs/super.h @@ -107,11 +107,8 @@ static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter, return ca; } -#define __for_each_member_device(ca, c, iter, mask) \ - for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++) - #define for_each_member_device_rcu(ca, c, iter, mask) \ - __for_each_member_device(ca, c, iter, mask) + for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++) static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter) { @@ -197,7 +194,7 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c) return devs; } -struct bch_fs *bch2_bdev_to_fs(struct block_device *); +struct bch_fs *bch2_dev_to_fs(dev_t); struct bch_fs *bch2_uuid_to_fs(uuid_le); bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *, diff --git a/libbcachefs/super_types.h b/libbcachefs/super_types.h index 069973a..96023f3 100644 --- a/libbcachefs/super_types.h +++ b/libbcachefs/super_types.h @@ -6,7 +6,7 @@ struct bch_sb_handle { struct bch_sb *sb; struct block_device *bdev; struct bio *bio; - unsigned page_order; + size_t buffer_size; fmode_t mode; unsigned have_layout:1; unsigned have_bio:1; diff --git a/libbcachefs/sysfs.c b/libbcachefs/sysfs.c index 2d00897..9b1ffbf 100644 --- a/libbcachefs/sysfs.c +++ b/libbcachefs/sysfs.c @@ -132,10 +132,10 @@ do { \ } while (0) write_attribute(trigger_journal_flush); -write_attribute(trigger_btree_coalesce); write_attribute(trigger_gc); write_attribute(prune_cache); rw_attribute(btree_gc_periodic); +rw_attribute(gc_gens_pos); read_attribute(uuid); read_attribute(minor); @@ -171,6 +171,7 @@ read_attribute(btree_cache); read_attribute(btree_key_cache); read_attribute(btree_transactions); read_attribute(stripes_heap); +read_attribute(open_buckets); read_attribute(internal_uuid); @@ -190,7 +191,7 @@ rw_attribute(cache_replacement_policy); rw_attribute(label); rw_attribute(copy_gc_enabled); -sysfs_pd_controller_attribute(copy_gc); +read_attribute(copy_gc_wait); rw_attribute(rebalance_enabled); sysfs_pd_controller_attribute(rebalance); @@ -199,8 +200,6 @@ rw_attribute(promote_whole_extents); read_attribute(new_stripes); -rw_attribute(pd_controllers_update_seconds); - read_attribute(io_timers_read); read_attribute(io_timers_write); @@ -314,6 +313,13 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c return 0; } +static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c) +{ + pr_buf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]); + bch2_bpos_to_text(out, c->gc_gens_pos); + pr_buf(out, "\n"); +} + SHOW(bch2_fs) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); @@ -339,14 +345,18 @@ SHOW(bch2_fs) sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic); - sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); + if (attr == &sysfs_gc_gens_pos) { + bch2_gc_gens_pos_to_text(&out, c); + return out.pos - buf; + } - sysfs_print(pd_controllers_update_seconds, - c->pd_controllers_update_seconds); + sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled); sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */ - sysfs_pd_controller_show(copy_gc, &c->copygc_pd); + sysfs_hprint(copy_gc_wait, + max(0LL, c->copygc_wait - + atomic64_read(&c->io_clock[WRITE].now)) << 9); if (attr == &sysfs_rebalance_work) { bch2_rebalance_work_to_text(&out, c); @@ -400,6 +410,11 @@ SHOW(bch2_fs) return out.pos - buf; } + if (attr == 
&sysfs_open_buckets) { + bch2_open_buckets_to_text(&out, c); + return out.pos - buf; + } + if (attr == &sysfs_compression_stats) { bch2_compression_stats_to_text(&out, c); return out.pos - buf; @@ -454,10 +469,7 @@ STORE(bch2_fs) return ret; } - sysfs_strtoul(pd_controllers_update_seconds, - c->pd_controllers_update_seconds); sysfs_pd_controller_store(rebalance, &c->rebalance.pd); - sysfs_pd_controller_store(copy_gc, &c->copygc_pd); sysfs_strtoul(promote_whole_extents, c->promote_whole_extents); @@ -471,9 +483,6 @@ STORE(bch2_fs) if (attr == &sysfs_trigger_journal_flush) bch2_journal_meta(&c->journal); - if (attr == &sysfs_trigger_btree_coalesce) - bch2_coalesce(c); - if (attr == &sysfs_trigger_gc) { /* * Full gc is currently incompatible with btree key cache: @@ -564,22 +573,23 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_btree_key_cache, &sysfs_btree_transactions, &sysfs_stripes_heap, + &sysfs_open_buckets, &sysfs_read_realloc_races, &sysfs_extent_migrate_done, &sysfs_extent_migrate_raced, &sysfs_trigger_journal_flush, - &sysfs_trigger_btree_coalesce, &sysfs_trigger_gc, + &sysfs_gc_gens_pos, &sysfs_prune_cache, &sysfs_copy_gc_enabled, + &sysfs_copy_gc_wait, &sysfs_rebalance_enabled, &sysfs_rebalance_work, sysfs_pd_controller_files(rebalance), - sysfs_pd_controller_files(copy_gc), &sysfs_new_stripes, @@ -817,23 +827,28 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca) "free[RESERVE_MOVINGGC]\t%zu/%zu\n" "free[RESERVE_NONE]\t%zu/%zu\n" "freelist_wait\t\t%s\n" - "open buckets\t\t%u/%u (reserved %u)\n" + "open buckets allocated\t%u\n" + "open buckets this dev\t%u\n" + "open buckets total\t%u\n" "open_buckets_wait\t%s\n" "open_buckets_btree\t%u\n" "open_buckets_user\t%u\n" - "btree reserve cache\t%u\n", + "btree reserve cache\t%u\n" + "thread state:\t\t%s\n", stats.buckets_ec, __dev_buckets_available(ca, stats), fifo_used(&ca->free_inc), ca->free_inc.size, fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size, fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size, c->freelist_wait.list.first ? "waiting" : "empty", - c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, - BTREE_NODE_OPEN_BUCKET_RESERVE, + OPEN_BUCKETS_COUNT - c->open_buckets_nr_free, + ca->nr_open_buckets, + OPEN_BUCKETS_COUNT, c->open_buckets_wait.list.first ? 
"waiting" : "empty", nr[BCH_DATA_btree], nr[BCH_DATA_user], - c->btree_reserve_cache_nr); + c->btree_reserve_cache_nr, + bch2_allocator_states[ca->allocator_state]); } static const char * const bch2_rw[] = { diff --git a/libbcachefs/tests.c b/libbcachefs/tests.c index 7507b6b..4d8d50f 100644 --- a/libbcachefs/tests.c +++ b/libbcachefs/tests.c @@ -34,19 +34,15 @@ static int test_delete(struct bch_fs *c, u64 nr) int ret; bkey_cookie_init(&k.k_i); + k.k.p.snapshot = U32_MAX; bch2_trans_init(&trans, c, 0, 0); iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_INTENT); - ret = bch2_btree_iter_traverse(iter); - if (ret) { - bch_err(c, "lookup error in test_delete: %i", ret); - goto err; - } - ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: bch2_trans_update(&trans, iter, &k.k_i, 0)); if (ret) { bch_err(c, "update error in test_delete: %i", ret); @@ -54,14 +50,18 @@ static int test_delete(struct bch_fs *c, u64 nr) } pr_info("deleting once"); - ret = bch2_btree_delete_at(&trans, iter, 0); + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: + bch2_btree_delete_at(&trans, iter, 0)); if (ret) { bch_err(c, "delete error (first) in test_delete: %i", ret); goto err; } pr_info("deleting twice"); - ret = bch2_btree_delete_at(&trans, iter, 0); + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: + bch2_btree_delete_at(&trans, iter, 0)); if (ret) { bch_err(c, "delete error (second) in test_delete: %i", ret); goto err; @@ -80,28 +80,27 @@ static int test_delete_written(struct bch_fs *c, u64 nr) int ret; bkey_cookie_init(&k.k_i); + k.k.p.snapshot = U32_MAX; bch2_trans_init(&trans, c, 0, 0); iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_INTENT); - ret = bch2_btree_iter_traverse(iter); - if (ret) { - bch_err(c, "lookup error in test_delete_written: %i", ret); - goto err; - } - ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: bch2_trans_update(&trans, iter, &k.k_i, 0)); if (ret) { bch_err(c, "update error in test_delete_written: %i", ret); goto err; } + bch2_trans_unlock(&trans); bch2_journal_flush_all_pins(&c->journal); - ret = bch2_btree_delete_at(&trans, iter, 0); + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: + bch2_btree_delete_at(&trans, iter, 0)); if (ret) { bch_err(c, "delete error in test_delete_written: %i", ret); goto err; @@ -131,6 +130,7 @@ static int test_iterate(struct bch_fs *c, u64 nr) bkey_cookie_init(&k.k_i); k.k.p.offset = i; + k.k.p.snapshot = U32_MAX; ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i, NULL, NULL, 0); @@ -185,6 +185,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr) bkey_cookie_init(&k.k_i); k.k.p.offset = i + 8; + k.k.p.snapshot = U32_MAX; k.k.size = 8; ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, @@ -240,6 +241,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) bkey_cookie_init(&k.k_i); k.k.p.offset = i * 2; + k.k.p.snapshot = U32_MAX; ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i, NULL, NULL, 0); @@ -303,6 +305,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) bkey_cookie_init(&k.k_i); k.k.p.offset = i + 16; + k.k.p.snapshot = U32_MAX; k.k.size = 8; ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, @@ -410,6 +413,7 @@ static int insert_test_extent(struct bch_fs *c, bkey_cookie_init(&k.k_i); k.k_i.k.p.offset = end; + k.k_i.k.p.snapshot = U32_MAX; k.k_i.k.size = end - start; k.k_i.k.version.lo = test_version++; @@ 
-497,6 +501,42 @@ static int rand_insert(struct bch_fs *c, u64 nr) return ret; } +static int rand_insert_multi(struct bch_fs *c, u64 nr) +{ + struct btree_trans trans; + struct bkey_i_cookie k[8]; + int ret = 0; + unsigned j; + u64 i; + + bch2_trans_init(&trans, c, 0, 0); + + for (i = 0; i < nr; i += ARRAY_SIZE(k)) { + for (j = 0; j < ARRAY_SIZE(k); j++) { + bkey_cookie_init(&k[j].k_i); + k[j].k.p.offset = test_rand(); + k[j].k.p.snapshot = U32_MAX; + } + + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[0].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[1].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[2].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[3].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[4].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?: + __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i)); + if (ret) { + bch_err(c, "error in rand_insert_multi: %i", ret); + break; + } + } + + bch2_trans_exit(&trans); + return ret; +} + static int rand_lookup(struct bch_fs *c, u64 nr) { struct btree_trans trans; @@ -552,6 +592,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr) k.k.p = iter->pos; ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: bch2_trans_update(&trans, iter, &k.k_i, 0)); if (ret) { bch_err(c, "update error in rand_mixed: %i", ret); @@ -585,7 +626,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos) bkey_init(&delete.k); delete.k.p = k.k->p; - bch2_trans_update(trans, iter, &delete, 0); + ret = bch2_trans_update(trans, iter, &delete, 0); err: bch2_trans_iter_put(trans, iter); return ret; @@ -632,6 +673,7 @@ static int seq_insert(struct bch_fs *c, u64 nr) insert.k.p = iter->pos; ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: bch2_trans_update(&trans, iter, &insert.k_i, 0)); if (ret) { bch_err(c, "error in seq_insert: %i", ret); @@ -680,6 +722,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr) bkey_reassemble(&u.k_i, k); ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_btree_iter_traverse(iter) ?: bch2_trans_update(&trans, iter, &u.k_i, 0)); if (ret) { bch_err(c, "error in seq_overwrite: %i", ret); @@ -765,6 +808,7 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, if (!strcmp(testname, #_test)) j.fn = _test perf_test(rand_insert); + perf_test(rand_insert_multi); perf_test(rand_lookup); perf_test(rand_mixed); perf_test(rand_delete); diff --git a/libbcachefs/util.c b/libbcachefs/util.c index 2709163..463260c 100644 --- a/libbcachefs/util.c +++ b/libbcachefs/util.c @@ -154,7 +154,7 @@ void bch2_flags_to_text(struct printbuf *out, u64 bch2_read_flag_list(char *opt, const char * const list[]) { u64 ret = 0; - char *p, *s, *d = kstrndup(opt, PAGE_SIZE - 1, GFP_KERNEL); + char *p, *s, *d = kstrdup(opt, GFP_KERNEL); if (!d) return -ENOMEM; @@ -887,13 +887,9 @@ void eytzinger0_find_test(void) */ u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr) { - u64 *ret; + u64 *ret = this_cpu_ptr(p); int cpu; - preempt_disable(); - ret = this_cpu_ptr(p); - preempt_enable(); - for_each_possible_cpu(cpu) { u64 *i = per_cpu_ptr(p, cpu); diff --git a/libbcachefs/util.h b/libbcachefs/util.h index c69b05d..84ef4d6 100644 --- a/libbcachefs/util.h +++ b/libbcachefs/util.h @@ -712,10 +712,7 @@ static inline void percpu_u64_set(u64 __percpu *dst, u64 src) for_each_possible_cpu(cpu) *per_cpu_ptr(dst, cpu) = 0; - - 
preempt_disable(); - *this_cpu_ptr(dst) = src; - preempt_enable(); + this_cpu_write(*dst, src); } static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr) diff --git a/libbcachefs/varint.c b/libbcachefs/varint.c index a3d252c..e6a0415 100644 --- a/libbcachefs/varint.c +++ b/libbcachefs/varint.c @@ -1,10 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include "varint.h" +/** + * bch2_varint_encode - encode a variable length integer + * @out - destination to encode to + * @v - unsigned integer to encode + * + * Returns the size in bytes of the encoded integer - at most 9 bytes + */ int bch2_varint_encode(u8 *out, u64 v) { unsigned bits = fls64(v|1); @@ -13,16 +21,79 @@ int bch2_varint_encode(u8 *out, u64 v) if (likely(bytes < 9)) { v <<= bytes; v |= ~(~0 << (bytes - 1)); + v = cpu_to_le64(v); + memcpy(out, &v, bytes); } else { *out++ = 255; bytes = 9; + put_unaligned_le64(v, out); } - put_unaligned_le64(v, out); return bytes; } +/** + * bch2_varint_decode - encode a variable length integer + * @in - varint to decode + * @end - end of buffer to decode from + * @out - on success, decoded integer + * + * Returns the size in bytes of the decoded integer - or -1 on failure (would + * have read past the end of the buffer) + */ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out) +{ + unsigned bytes = likely(in < end) + ? ffz(*in & 255) + 1 + : 1; + u64 v; + + if (unlikely(in + bytes > end)) + return -1; + + if (likely(bytes < 9)) { + v = 0; + memcpy(&v, in, bytes); + v = le64_to_cpu(v); + v >>= bytes; + } else { + v = get_unaligned_le64(++in); + } + + *out = v; + return bytes; +} + +/** + * bch2_varint_encode_fast - fast version of bch2_varint_encode + * + * This version assumes it's always safe to write 8 bytes to @out, even if the + * encoded integer would be smaller. + */ +int bch2_varint_encode_fast(u8 *out, u64 v) +{ + unsigned bits = fls64(v|1); + unsigned bytes = DIV_ROUND_UP(bits, 7); + + if (likely(bytes < 9)) { + v <<= bytes; + v |= ~(~0 << (bytes - 1)); + } else { + *out++ = 255; + bytes = 9; + } + + put_unaligned_le64(v, out); + return bytes; +} + +/** + * bch2_varint_decode_fast - fast version of bch2_varint_decode + * + * This version assumes that it is safe to read at most 8 bytes past the end of + * @end (we still return an error if the varint extends past @end). 
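A short usage sketch of the varint helpers documented above, assuming it is built inside this tree next to libbcachefs/varint.h (the function names are the ones added by this patch; the demo value and buffer are arbitrary). bch2_varint_encode() returns the number of bytes written, at most 9, and bch2_varint_decode() returns the same count, or -1 if the value would run past the end of the buffer:

#include <linux/bug.h>
#include <linux/types.h>

#include "varint.h"

static void varint_roundtrip_demo(void)
{
        u8 buf[9];              /* worst case: tag byte + 8 payload bytes */
        u64 in = 123456789, out = 0;
        int enc, dec;

        enc = bch2_varint_encode(buf, in);              /* 4 bytes for this value */
        dec = bch2_varint_decode(buf, buf + enc, &out);

        BUG_ON(enc != dec || in != out);

        /*
         * The _fast variants trade bounds checking for speed:
         * bch2_varint_encode_fast() assumes it may always write 8 bytes at
         * @out, and bch2_varint_decode_fast() may read up to 8 bytes past
         * @end, so both need buffers with that much slack.
         */
}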
+ */ +int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out) { u64 v = get_unaligned_le64(in); unsigned bytes = ffz(v & 255) + 1; diff --git a/libbcachefs/varint.h b/libbcachefs/varint.h index 8daf813..92a182f 100644 --- a/libbcachefs/varint.h +++ b/libbcachefs/varint.h @@ -5,4 +5,7 @@ int bch2_varint_encode(u8 *, u64); int bch2_varint_decode(const u8 *, const u8 *, u64 *); +int bch2_varint_encode_fast(u8 *, u64); +int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *); + #endif /* _BCACHEFS_VARINT_H */ diff --git a/libbcachefs/xattr.c b/libbcachefs/xattr.c index 858aa87..e4d400b 100644 --- a/libbcachefs/xattr.c +++ b/libbcachefs/xattr.c @@ -118,18 +118,16 @@ void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c, le16_to_cpu(xattr.v->x_val_len)); } -int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode, - const char *name, void *buffer, size_t size, int type) +static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode, + const char *name, void *buffer, size_t size, int type) { - struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); - struct btree_trans trans; + struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode); struct btree_iter *iter; struct bkey_s_c_xattr xattr; + struct bkey_s_c k; int ret; - bch2_trans_init(&trans, c, 0, 0); - - iter = bch2_hash_lookup(&trans, bch2_xattr_hash_desc, &hash, + iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc, &hash, inode->v.i_ino, &X_SEARCH(type, name, strlen(name)), 0); @@ -137,7 +135,12 @@ int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode, if (ret) goto err; - xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter)); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) + goto err; + + xattr = bkey_s_c_to_xattr(k); ret = le16_to_cpu(xattr.v->x_val_len); if (buffer) { if (ret > size) @@ -145,14 +148,18 @@ int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode, else memcpy(buffer, xattr_val(xattr.v), ret); } - bch2_trans_iter_put(&trans, iter); + bch2_trans_iter_put(trans, iter); err: - bch2_trans_exit(&trans); - - BUG_ON(ret == -EINTR); return ret == -ENOENT ? 
-ENODATA : ret; } +int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode, + const char *name, void *buffer, size_t size, int type) +{ + return bch2_trans_do(c, NULL, NULL, 0, + bch2_xattr_get_trans(&trans, inode, name, buffer, size, type)); +} + int bch2_xattr_set(struct btree_trans *trans, u64 inum, const struct bch_hash_info *hash_info, const char *name, const void *value, size_t size, @@ -323,6 +330,7 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler, } static int bch2_xattr_set_handler(const struct xattr_handler *handler, + struct user_namespace *mnt_userns, struct dentry *dentry, struct inode *vinode, const char *name, const void *value, size_t size, int flags) @@ -455,6 +463,7 @@ static int inode_opt_set_fn(struct bch_inode_info *inode, } static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, + struct user_namespace *mnt_userns, struct dentry *dentry, struct inode *vinode, const char *name, const void *value, size_t size, int flags) @@ -560,8 +569,10 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = { const struct xattr_handler *bch2_xattr_handlers[] = { &bch_xattr_user_handler, +#ifdef CONFIG_BCACHEFS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, +#endif &bch_xattr_trusted_handler, &bch_xattr_security_handler, #ifndef NO_BCACHEFS_FS diff --git a/linux/blkdev.c b/linux/blkdev.c index 709e770..270d3c8 100644 --- a/linux/blkdev.c +++ b/linux/blkdev.c @@ -215,6 +215,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, strncpy(bdev->name, path, sizeof(bdev->name)); bdev->name[sizeof(bdev->name) - 1] = '\0'; + bdev->bd_dev = xfstat(fd).st_rdev; bdev->bd_fd = fd; bdev->bd_sync_fd = sync_fd; bdev->bd_holder = holder; @@ -230,9 +231,9 @@ void bdput(struct block_device *bdev) BUG(); } -struct block_device *lookup_bdev(const char *path) +int lookup_bdev(const char *path, dev_t *dev) { - return ERR_PTR(-EINVAL); + return -EINVAL; } static int aio_completion_thread(void *arg) diff --git a/linux/closure.c b/linux/closure.c index 26a2935..b38ded0 100644 --- a/linux/closure.c +++ b/linux/closure.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Asynchronous refcounty things * @@ -9,6 +10,7 @@ #include #include #include +#include static inline void closure_put_after_sub(struct closure *cl, int flags) { @@ -44,7 +46,7 @@ void closure_sub(struct closure *cl, int v) } EXPORT_SYMBOL(closure_sub); -/** +/* * closure_put - decrement a closure's refcount */ void closure_put(struct closure *cl) @@ -53,24 +55,22 @@ void closure_put(struct closure *cl) } EXPORT_SYMBOL(closure_put); -/** +/* * closure_wake_up - wake up all closures on a wait list, without memory barrier */ void __closure_wake_up(struct closure_waitlist *wait_list) { - struct llist_node *list, *next; - struct closure *cl; + struct llist_node *list; + struct closure *cl, *t; + struct llist_node *reverse = NULL; + + list = llist_del_all(&wait_list->list); - /* - * Grab entire list, reverse order to preserve FIFO ordering, and wake - * everything up - */ - for (list = llist_reverse_order(llist_del_all(&wait_list->list)); - list; - list = next) { - next = llist_next(list); - cl = container_of(list, struct closure, list); + /* We first reverse the list to preserve FIFO ordering and fairness */ + reverse = llist_reverse_order(list); + /* Then do the wakeups */ + llist_for_each_entry_safe(cl, t, reverse, list) { closure_set_waiting(cl, 0); closure_sub(cl, CLOSURE_WAITING + 1); } @@ -79,9 +79,9 @@ 
EXPORT_SYMBOL(__closure_wake_up); /** * closure_wait - add a closure to a waitlist - * - * @waitlist will own a ref on @cl, which will be released when + * @waitlist: will own a ref on @cl, which will be released when * closure_wake_up() is called on @waitlist. + * @cl: closure pointer. * */ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl) @@ -104,8 +104,14 @@ struct closure_syncer { static void closure_sync_fn(struct closure *cl) { - cl->s->done = 1; - wake_up_process(cl->s->task); + struct closure_syncer *s = cl->s; + struct task_struct *p; + + rcu_read_lock(); + p = READ_ONCE(s->task); + s->done = 1; + wake_up_process(p); + rcu_read_unlock(); } void __sched __closure_sync(struct closure *cl) @@ -113,11 +119,10 @@ void __sched __closure_sync(struct closure *cl) struct closure_syncer s = { .task = current }; cl->s = &s; - continue_at_noreturn(cl, closure_sync_fn, NULL); + continue_at(cl, closure_sync_fn, NULL); while (1) { - __set_current_state(TASK_UNINTERRUPTIBLE); - smp_mb(); + set_current_state(TASK_UNINTERRUPTIBLE); if (s.done) break; schedule(); @@ -158,9 +163,7 @@ void closure_debug_destroy(struct closure *cl) } EXPORT_SYMBOL(closure_debug_destroy); -static struct dentry *debug; - -static int debug_seq_show(struct seq_file *f, void *data) +static int debug_show(struct seq_file *f, void *data) { struct closure *cl; @@ -169,7 +172,7 @@ static int debug_seq_show(struct seq_file *f, void *data) list_for_each_entry(cl, &closure_list, all) { int r = atomic_read(&cl->remaining); - seq_printf(f, "%p: %pF -> %pf p %p r %i ", + seq_printf(f, "%p: %pS -> %pS p %p r %i ", cl, (void *) cl->ip, cl->fn, cl->parent, r & CLOSURE_REMAINING_MASK); @@ -179,7 +182,7 @@ static int debug_seq_show(struct seq_file *f, void *data) r & CLOSURE_RUNNING ? "R" : ""); if (r & CLOSURE_WAITING) - seq_printf(f, " W %pF\n", + seq_printf(f, " W %pS\n", (void *) cl->waiting_on); seq_puts(f, "\n"); @@ -189,21 +192,11 @@ static int debug_seq_show(struct seq_file *f, void *data) return 0; } -static int debug_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, debug_seq_show, NULL); -} - -static const struct file_operations debug_ops = { - .owner = THIS_MODULE, - .open = debug_seq_open, - .read = seq_read, - .release = single_release -}; +DEFINE_SHOW_ATTRIBUTE(debug); static int __init closure_debug_init(void) { - debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops); + debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops); return 0; } late_initcall(closure_debug_init) diff --git a/linux/mempool.c b/linux/mempool.c new file mode 100644 index 0000000..74d4fbb --- /dev/null +++ b/linux/mempool.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/mm/mempool.c + * + * memory buffer pool support. Such pools are mostly used + * for guaranteed, deadlock-free memory allocations during + * extreme VM load. 
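The closure.c change above drops the hand-rolled seq_file open/read boilerplate in favour of DEFINE_SHOW_ATTRIBUTE(). For reference, the macro expects a <name>_show() function and generates the matching <name>_fops; a minimal kernel-style sketch with a made-up name:

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *f, void *data)
{
        seq_puts(f, "hello from debugfs\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);             /* generates foo_open() and foo_fops */

static int __init foo_debug_init(void)
{
        debugfs_create_file("foo", 0400, NULL, NULL, &foo_fops);
        return 0;
}
late_initcall(foo_debug_init);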
+ * + * started by Ingo Molnar, Copyright (C) 2001 + * debugging by David Rientjes, Copyright (C) 2015 + */ + +#include +//#include +//#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON) +static void poison_error(mempool_t *pool, void *element, size_t size, + size_t byte) +{ + const int nr = pool->curr_nr; + const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0); + const int end = min_t(int, byte + (BITS_PER_LONG / 8), size); + int i; + + pr_err("BUG: mempool element poison mismatch\n"); + pr_err("Mempool %p size %zu\n", pool, size); + pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : ""); + for (i = start; i < end; i++) + pr_cont("%x ", *(u8 *)(element + i)); + pr_cont("%s\n", end < size ? "..." : ""); + dump_stack(); +} + +static void __check_element(mempool_t *pool, void *element, size_t size) +{ + u8 *obj = element; + size_t i; + + for (i = 0; i < size; i++) { + u8 exp = (i < size - 1) ? POISON_FREE : POISON_END; + + if (obj[i] != exp) { + poison_error(pool, element, size, i); + return; + } + } + memset(obj, POISON_INUSE, size); +} + +static void check_element(mempool_t *pool, void *element) +{ + /* Mempools backed by slab allocator */ + if (pool->free == mempool_free_slab || pool->free == mempool_kfree) { + __check_element(pool, element, ksize(element)); + } else if (pool->free == mempool_free_pages) { + /* Mempools backed by page allocator */ + int order = (int)(long)pool->pool_data; + void *addr = kmap_atomic((struct page *)element); + + __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); + kunmap_atomic(addr); + } +} + +static void __poison_element(void *element, size_t size) +{ + u8 *obj = element; + + memset(obj, POISON_FREE, size - 1); + obj[size - 1] = POISON_END; +} + +static void poison_element(mempool_t *pool, void *element) +{ + /* Mempools backed by slab allocator */ + if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) { + __poison_element(element, ksize(element)); + } else if (pool->alloc == mempool_alloc_pages) { + /* Mempools backed by page allocator */ + int order = (int)(long)pool->pool_data; + void *addr = kmap_atomic((struct page *)element); + + __poison_element(addr, 1UL << (PAGE_SHIFT + order)); + kunmap_atomic(addr); + } +} +#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ +static inline void check_element(mempool_t *pool, void *element) +{ +} +static inline void poison_element(mempool_t *pool, void *element) +{ +} +#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ + +static __always_inline void kasan_poison_element(mempool_t *pool, void *element) +{ +#if 0 + if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) + kasan_poison_kfree(element, _RET_IP_); + else if (pool->alloc == mempool_alloc_pages) + kasan_free_pages(element, (unsigned long)pool->pool_data); +#endif +} + +static void kasan_unpoison_element(mempool_t *pool, void *element) +{ +#if 0 + if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) + kasan_unpoison_slab(element); + else if (pool->alloc == mempool_alloc_pages) + kasan_alloc_pages(element, (unsigned long)pool->pool_data); +#endif +} + +static __always_inline void add_element(mempool_t *pool, void *element) +{ + BUG_ON(pool->curr_nr >= pool->min_nr); + poison_element(pool, element); + kasan_poison_element(pool, element); + pool->elements[pool->curr_nr++] = element; +} + +static void *remove_element(mempool_t *pool) +{ + void *element = pool->elements[--pool->curr_nr]; + + 
BUG_ON(pool->curr_nr < 0); + kasan_unpoison_element(pool, element); + check_element(pool, element); + return element; +} + +/** + * mempool_exit - exit a mempool initialized with mempool_init() + * @pool: pointer to the memory pool which was initialized with + * mempool_init(). + * + * Free all reserved elements in @pool and @pool itself. This function + * only sleeps if the free_fn() function sleeps. + * + * May be called on a zeroed but uninitialized mempool (i.e. allocated with + * kzalloc()). + */ +void mempool_exit(mempool_t *pool) +{ + while (pool->curr_nr) { + void *element = remove_element(pool); + pool->free(element, pool->pool_data); + } + kfree(pool->elements); + pool->elements = NULL; +} +EXPORT_SYMBOL(mempool_exit); + +/** + * mempool_destroy - deallocate a memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * Free all reserved elements in @pool and @pool itself. This function + * only sleeps if the free_fn() function sleeps. + */ +void mempool_destroy(mempool_t *pool) +{ + if (unlikely(!pool)) + return; + + mempool_exit(pool); + kfree(pool); +} +EXPORT_SYMBOL(mempool_destroy); + +int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) +{ + spin_lock_init(&pool->lock); + pool->min_nr = min_nr; + pool->pool_data = pool_data; + pool->alloc = alloc_fn; + pool->free = free_fn; + init_waitqueue_head(&pool->wait); + + pool->elements = kmalloc_array(min_nr, sizeof(void *), gfp_mask); + if (!pool->elements) + return -ENOMEM; + + /* + * First pre-allocate the guaranteed number of buffers. + */ + while (pool->curr_nr < pool->min_nr) { + void *element; + + element = pool->alloc(gfp_mask, pool->pool_data); + if (unlikely(!element)) { + mempool_exit(pool); + return -ENOMEM; + } + add_element(pool, element); + } + + return 0; +} +EXPORT_SYMBOL(mempool_init_node); + +/** + * mempool_init - initialize a memory pool + * @pool: pointer to the memory pool that should be initialized + * @min_nr: the minimum number of elements guaranteed to be + * allocated for this pool. + * @alloc_fn: user-defined element-allocation function. + * @free_fn: user-defined element-freeing function. + * @pool_data: optional private data available to the user-defined functions. + * + * Like mempool_create(), but initializes the pool in (i.e. embedded in another + * structure). + * + * Return: %0 on success, negative error code otherwise. + */ +int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) +{ + return mempool_init_node(pool, min_nr, alloc_fn, free_fn, + pool_data, GFP_KERNEL, 0); + +} +EXPORT_SYMBOL(mempool_init); + +/** + * mempool_create - create a memory pool + * @min_nr: the minimum number of elements guaranteed to be + * allocated for this pool. + * @alloc_fn: user-defined element-allocation function. + * @free_fn: user-defined element-freeing function. + * @pool_data: optional private data available to the user-defined functions. + * + * this function creates and allocates a guaranteed size, preallocated + * memory pool. The pool can be used from the mempool_alloc() and mempool_free() + * functions. This function might sleep. Both the alloc_fn() and the free_fn() + * functions might sleep - as long as the mempool_alloc() function is not called + * from IRQ contexts. + * + * Return: pointer to the created memory pool object or %NULL on error. 
+ */ +mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) +{ + return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, + GFP_KERNEL, 0); +} +EXPORT_SYMBOL(mempool_create); + +mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) +{ + mempool_t *pool; + + pool = kzalloc(sizeof(*pool), gfp_mask); + if (!pool) + return NULL; + + if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, + gfp_mask, node_id)) { + kfree(pool); + return NULL; + } + + return pool; +} +EXPORT_SYMBOL(mempool_create_node); + +/** + * mempool_resize - resize an existing memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @new_min_nr: the new minimum number of elements guaranteed to be + * allocated for this pool. + * + * This function shrinks/grows the pool. In the case of growing, + * it cannot be guaranteed that the pool will be grown to the new + * size immediately, but new mempool_free() calls will refill it. + * This function may sleep. + * + * Note, the caller must guarantee that no mempool_destroy is called + * while this function is running. mempool_alloc() & mempool_free() + * might be called (eg. from IRQ contexts) while this function executes. + * + * Return: %0 on success, negative error code otherwise. + */ +int mempool_resize(mempool_t *pool, int new_min_nr) +{ + void *element; + void **new_elements; + unsigned long flags; + + BUG_ON(new_min_nr <= 0); + might_sleep(); + + spin_lock_irqsave(&pool->lock, flags); + if (new_min_nr <= pool->min_nr) { + while (new_min_nr < pool->curr_nr) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + pool->free(element, pool->pool_data); + spin_lock_irqsave(&pool->lock, flags); + } + pool->min_nr = new_min_nr; + goto out_unlock; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* Grow the pool */ + new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements), + GFP_KERNEL); + if (!new_elements) + return -ENOMEM; + + spin_lock_irqsave(&pool->lock, flags); + if (unlikely(new_min_nr <= pool->min_nr)) { + /* Raced, other resize will do our work */ + spin_unlock_irqrestore(&pool->lock, flags); + kfree(new_elements); + goto out; + } + memcpy(new_elements, pool->elements, + pool->curr_nr * sizeof(*new_elements)); + kfree(pool->elements); + pool->elements = new_elements; + pool->min_nr = new_min_nr; + + while (pool->curr_nr < pool->min_nr) { + spin_unlock_irqrestore(&pool->lock, flags); + element = pool->alloc(GFP_KERNEL, pool->pool_data); + if (!element) + goto out; + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) { + add_element(pool, element); + } else { + spin_unlock_irqrestore(&pool->lock, flags); + pool->free(element, pool->pool_data); /* Raced */ + goto out; + } + } +out_unlock: + spin_unlock_irqrestore(&pool->lock, flags); +out: + return 0; +} +EXPORT_SYMBOL(mempool_resize); + +/** + * mempool_alloc - allocate an element from a specific memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @gfp_mask: the usual allocation bitmask. + * + * this function only sleeps if the alloc_fn() function sleeps or + * returns NULL. Note that due to preallocation, this function + * *never* fails when called from process contexts. (it might + * fail if called from an IRQ context.) + * Note: using __GFP_ZERO is not supported. 
+ * + * Return: pointer to the allocated element or %NULL on error. + */ +void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) +{ + void *element; + unsigned long flags; + DEFINE_WAIT(wait); + gfp_t gfp_temp; + + WARN_ON_ONCE(gfp_mask & __GFP_ZERO); + + gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ + gfp_mask |= __GFP_NOWARN; /* failures are OK */ + + gfp_temp = gfp_mask & ~(__GFP_IO); + +repeat_alloc: + + element = pool->alloc(gfp_temp, pool->pool_data); + if (likely(element != NULL)) + return element; + + spin_lock_irqsave(&pool->lock, flags); + if (likely(pool->curr_nr)) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + /* paired with rmb in mempool_free(), read comment there */ + smp_wmb(); + return element; + } + + /* + * We use gfp mask w/o direct reclaim or IO for the first round. If + * alloc failed with that and @pool was empty, retry immediately. + */ + if (gfp_temp != gfp_mask) { + spin_unlock_irqrestore(&pool->lock, flags); + gfp_temp = gfp_mask; + goto repeat_alloc; + } + + /* Let's wait for someone else to return an element to @pool */ + prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); + + spin_unlock_irqrestore(&pool->lock, flags); + + /* + * FIXME: this should be io_schedule(). The timeout is there as a + * workaround for some DM problems in 2.6.18. + */ + io_schedule_timeout(5*HZ); + + finish_wait(&pool->wait, &wait); + goto repeat_alloc; +} +EXPORT_SYMBOL(mempool_alloc); + +/** + * mempool_free - return an element to the pool. + * @element: pool element pointer. + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. + */ +void mempool_free(void *element, mempool_t *pool) +{ + unsigned long flags; + + if (unlikely(element == NULL)) + return; + + /* + * Paired with the wmb in mempool_alloc(). The preceding read is + * for @element and the following @pool->curr_nr. This ensures + * that the visible value of @pool->curr_nr is from after the + * allocation of @element. This is necessary for fringe cases + * where @element was passed to this task without going through + * barriers. + * + * For example, assume @p is %NULL at the beginning and one task + * performs "p = mempool_alloc(...);" while another task is doing + * "while (!p) cpu_relax(); mempool_free(p, ...);". This function + * may end up using curr_nr value which is from before allocation + * of @p without the following rmb. + */ + smp_rmb(); + + /* + * For correctness, we need a test which is guaranteed to trigger + * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr + * without locking achieves that and refilling as soon as possible + * is desirable. + * + * Because curr_nr visible here is always a value after the + * allocation of @element, any task which decremented curr_nr below + * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets + * incremented to min_nr afterwards. If curr_nr gets incremented + * to min_nr after the allocation of @element, the elements + * allocated after that are subject to the same guarantee. + * + * Waiters happen iff curr_nr is 0 and the above guarantee also + * ensures that there will be frees which return elements to the + * pool waking up the waiters. 
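Taken together, the interface above is used roughly like this (a sketch with hypothetical names, relying on the kmalloc-backed helpers defined at the bottom of this file and on this tree's shim headers): the pool pre-allocates min_nr elements, so mempool_alloc() with GFP_KERNEL can always fall back on that reserve and does not fail in process context.

#include <errno.h>

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_BUF_SIZE        128

static mempool_t *example_pool;

static int example_pool_init(void)
{
        /* reserve four EXAMPLE_BUF_SIZE-byte buffers up front */
        example_pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
                                      (void *)(unsigned long)EXAMPLE_BUF_SIZE);
        return example_pool ? 0 : -ENOMEM;
}

static void example_pool_use(void)
{
        /* may sleep, but will not return NULL in process context */
        void *buf = mempool_alloc(example_pool, GFP_KERNEL);

        memset(buf, 0, EXAMPLE_BUF_SIZE);
        mempool_free(buf, example_pool);
}

static void example_pool_exit(void)
{
        mempool_destroy(example_pool);
}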
+ */ + if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) { + spin_lock_irqsave(&pool->lock, flags); + if (likely(pool->curr_nr < pool->min_nr)) { + add_element(pool, element); + spin_unlock_irqrestore(&pool->lock, flags); + wake_up(&pool->wait); + return; + } + spin_unlock_irqrestore(&pool->lock, flags); + } + pool->free(element, pool->pool_data); +} +EXPORT_SYMBOL(mempool_free); + +/* + * A commonly used alloc and free fn. + */ +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) +{ + struct kmem_cache *mem = pool_data; + return kmem_cache_alloc(mem, gfp_mask); +} +EXPORT_SYMBOL(mempool_alloc_slab); + +void mempool_free_slab(void *element, void *pool_data) +{ + struct kmem_cache *mem = pool_data; + kmem_cache_free(mem, element); +} +EXPORT_SYMBOL(mempool_free_slab); + +/* + * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory + * specified by pool_data + */ +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) +{ + size_t size = (size_t)pool_data; + return kmalloc(size, gfp_mask); +} +EXPORT_SYMBOL(mempool_kmalloc); + +void mempool_kfree(void *element, void *pool_data) +{ + kfree(element); +} +EXPORT_SYMBOL(mempool_kfree); + +/* + * A simple mempool-backed page allocator that allocates pages + * of the order specified by pool_data. + */ +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) +{ + int order = (int)(long)pool_data; + return alloc_pages(gfp_mask, order); +} +EXPORT_SYMBOL(mempool_alloc_pages); + +void mempool_free_pages(void *element, void *pool_data) +{ + int order = (int)(long)pool_data; + __free_pages(element, order); +} +EXPORT_SYMBOL(mempool_free_pages); diff --git a/linux/xxhash.c b/linux/xxhash.c new file mode 100644 index 0000000..d5bb9ff --- /dev/null +++ b/linux/xxhash.c @@ -0,0 +1,500 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. 
This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). + * + * You can contact the author at: + * - xxHash homepage: https://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +#include +#include +#include +#include +#include +#include +#include + +/*-************************************* + * Macros + **************************************/ +#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r))) +#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r))) + +#ifdef __LITTLE_ENDIAN +# define XXH_CPU_LITTLE_ENDIAN 1 +#else +# define XXH_CPU_LITTLE_ENDIAN 0 +#endif + +/*-************************************* + * Constants + **************************************/ +static const uint32_t PRIME32_1 = 2654435761U; +static const uint32_t PRIME32_2 = 2246822519U; +static const uint32_t PRIME32_3 = 3266489917U; +static const uint32_t PRIME32_4 = 668265263U; +static const uint32_t PRIME32_5 = 374761393U; + +static const uint64_t PRIME64_1 = 11400714785074694791ULL; +static const uint64_t PRIME64_2 = 14029467366897019727ULL; +static const uint64_t PRIME64_3 = 1609587929392839161ULL; +static const uint64_t PRIME64_4 = 9650029242287828579ULL; +static const uint64_t PRIME64_5 = 2870177450012600261ULL; + +/*-************************** + * Utils + ***************************/ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) +{ + memcpy(dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL(xxh32_copy_state); + +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) +{ + memcpy(dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL(xxh64_copy_state); + +/*-*************************** + * Simple Hash Functions + ****************************/ +static uint32_t xxh32_round(uint32_t seed, const uint32_t input) +{ + seed += input * PRIME32_2; + seed = xxh_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *b_end = p + len; + uint32_t h32; + + if (len >= 16) { + const uint8_t *const limit = b_end - 16; + uint32_t v1 = seed + PRIME32_1 + PRIME32_2; + uint32_t v2 = seed + PRIME32_2; + uint32_t v3 = seed + 0; + uint32_t v4 = seed - PRIME32_1; + + do { + v1 = xxh32_round(v1, get_unaligned_le32(p)); + p += 4; + v2 = xxh32_round(v2, get_unaligned_le32(p)); + p += 4; + v3 = xxh32_round(v3, get_unaligned_le32(p)); + p += 4; + v4 = xxh32_round(v4, get_unaligned_le32(p)); + p += 4; + } while (p <= limit); + + h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) + + xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (uint32_t)len; + + while (p + 4 <= b_end) { + h32 += get_unaligned_le32(p) * PRIME32_3; + h32 = xxh_rotl32(h32, 17) * PRIME32_4; + p += 4; + } + + while (p < b_end) { + h32 += (*p) * PRIME32_5; + h32 = xxh_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} +EXPORT_SYMBOL(xxh32); + +static uint64_t xxh64_round(uint64_t acc, const uint64_t input) +{ + acc += input * PRIME64_2; + acc = xxh_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val) +{ + val = xxh64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) +{ 
+ const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + uint64_t h64; + + if (len >= 32) { + const uint8_t *const limit = b_end - 32; + uint64_t v1 = seed + PRIME64_1 + PRIME64_2; + uint64_t v2 = seed + PRIME64_2; + uint64_t v3 = seed + 0; + uint64_t v4 = seed - PRIME64_1; + + do { + v1 = xxh64_round(v1, get_unaligned_le64(p)); + p += 8; + v2 = xxh64_round(v2, get_unaligned_le64(p)); + p += 8; + v3 = xxh64_round(v3, get_unaligned_le64(p)); + p += 8; + v4 = xxh64_round(v4, get_unaligned_le64(p)); + p += 8; + } while (p <= limit); + + h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); + h64 = xxh64_merge_round(h64, v1); + h64 = xxh64_merge_round(h64, v2); + h64 = xxh64_merge_round(h64, v3); + h64 = xxh64_merge_round(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (uint64_t)len; + + while (p + 8 <= b_end) { + const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); + + h64 ^= k1; + h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; + p += 8; + } + + if (p + 4 <= b_end) { + h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; + h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p += 4; + } + + while (p < b_end) { + h64 ^= (*p) * PRIME64_5; + h64 = xxh_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} +EXPORT_SYMBOL(xxh64); + +/*-************************************************** + * Advanced Hash Functions + ***************************************************/ +void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) +{ + /* use a local state for memcpy() to avoid strict-aliasing warnings */ + struct xxh32_state state; + + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); +} +EXPORT_SYMBOL(xxh32_reset); + +void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) +{ + /* use a local state for memcpy() to avoid strict-aliasing warnings */ + struct xxh64_state state; + + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); +} +EXPORT_SYMBOL(xxh64_reset); + +int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + + if (input == NULL) + return -EINVAL; + + state->total_len_32 += (uint32_t)len; + state->large_len |= (len >= 16) | (state->total_len_32 >= 16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + memcpy((uint8_t *)(state->mem32) + state->memsize, input, len); + state->memsize += (uint32_t)len; + return 0; + } + + if (state->memsize) { /* some data left from previous update */ + const uint32_t *p32 = state->mem32; + + memcpy((uint8_t *)(state->mem32) + state->memsize, input, + 16 - state->memsize); + + state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32)); + p32++; + state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32)); + p32++; + state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32)); + p32++; + state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32)); + p32++; + + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= b_end - 16) { + const uint8_t *const limit = b_end - 16; + uint32_t v1 = state->v1; + uint32_t 
v2 = state->v2; + uint32_t v3 = state->v3; + uint32_t v4 = state->v4; + + do { + v1 = xxh32_round(v1, get_unaligned_le32(p)); + p += 4; + v2 = xxh32_round(v2, get_unaligned_le32(p)); + p += 4; + v3 = xxh32_round(v3, get_unaligned_le32(p)); + p += 4; + v4 = xxh32_round(v4, get_unaligned_le32(p)); + p += 4; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < b_end) { + memcpy(state->mem32, p, (size_t)(b_end-p)); + state->memsize = (uint32_t)(b_end-p); + } + + return 0; +} +EXPORT_SYMBOL(xxh32_update); + +uint32_t xxh32_digest(const struct xxh32_state *state) +{ + const uint8_t *p = (const uint8_t *)state->mem32; + const uint8_t *const b_end = (const uint8_t *)(state->mem32) + + state->memsize; + uint32_t h32; + + if (state->large_len) { + h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) + + xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p + 4 <= b_end) { + h32 += get_unaligned_le32(p) * PRIME32_3; + h32 = xxh_rotl32(h32, 17) * PRIME32_4; + p += 4; + } + + while (p < b_end) { + h32 += (*p) * PRIME32_5; + h32 = xxh_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} +EXPORT_SYMBOL(xxh32_digest); + +int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + + if (input == NULL) + return -EINVAL; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); + state->memsize += (uint32_t)len; + return 0; + } + + if (state->memsize) { /* tmp buffer is full */ + uint64_t *p64 = state->mem64; + + memcpy(((uint8_t *)p64) + state->memsize, input, + 32 - state->memsize); + + state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64)); + p64++; + state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64)); + p64++; + state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64)); + p64++; + state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64)); + + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p + 32 <= b_end) { + const uint8_t *const limit = b_end - 32; + uint64_t v1 = state->v1; + uint64_t v2 = state->v2; + uint64_t v3 = state->v3; + uint64_t v4 = state->v4; + + do { + v1 = xxh64_round(v1, get_unaligned_le64(p)); + p += 8; + v2 = xxh64_round(v2, get_unaligned_le64(p)); + p += 8; + v3 = xxh64_round(v3, get_unaligned_le64(p)); + p += 8; + v4 = xxh64_round(v4, get_unaligned_le64(p)); + p += 8; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < b_end) { + memcpy(state->mem64, p, (size_t)(b_end-p)); + state->memsize = (uint32_t)(b_end - p); + } + + return 0; +} +EXPORT_SYMBOL(xxh64_update); + +uint64_t xxh64_digest(const struct xxh64_state *state) +{ + const uint8_t *p = (const uint8_t *)state->mem64; + const uint8_t *const b_end = (const uint8_t *)state->mem64 + + state->memsize; + uint64_t h64; + + if (state->total_len >= 32) { + const uint64_t v1 = state->v1; + const uint64_t v2 = state->v2; + const uint64_t v3 = state->v3; + const uint64_t v4 = state->v4; + + h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); + h64 = xxh64_merge_round(h64, v1); + h64 = xxh64_merge_round(h64, v2); + h64 = 
xxh64_merge_round(h64, v3); + h64 = xxh64_merge_round(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (uint64_t)state->total_len; + + while (p + 8 <= b_end) { + const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); + + h64 ^= k1; + h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; + p += 8; + } + + if (p + 4 <= b_end) { + h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; + h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p += 4; + } + + while (p < b_end) { + h64 ^= (*p) * PRIME64_5; + h64 = xxh_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} +EXPORT_SYMBOL(xxh64_digest); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("xxHash"); diff --git a/smoke_test b/smoke_test index 076806d..15a9fce 100755 --- a/smoke_test +++ b/smoke_test @@ -20,8 +20,9 @@ set -e +PYTEST="${PYTEST:-pytest-3}" spam=$(tempfile) -unset BCACHEFS_FUSE BCACHEFS_TEST_USE_VALGRIND D +unset BCACHEFS_FUSE BCACHEFS_TEST_USE_VALGRIND BCACHEFS_DEBUG trap "set +x; cat ${spam}; rm -f ${spam} ; echo; echo FAILED." EXIT @@ -44,7 +45,7 @@ function test() { echo Running tests. ( cd tests - pytest-3 -n${JOBS} + ${PYTEST} -n${JOBS} ) > ${spam} 2>&1 } @@ -53,17 +54,17 @@ function test_vg() { ( export BCACHEFS_TEST_USE_VALGRIND=yes cd tests - pytest-3 -n${JOBS} + ${PYTEST} -n${JOBS} ) > ${spam} 2>&1 } echo -- Test: default -- build -test +test echo -- Test: debug -- -export D=1 +export BCACHEFS_DEBUG=1 build test diff --git a/tests/test_basic.py b/tests/test_basic.py index 47b621c..6278102 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -37,7 +37,9 @@ def test_list(tmpdir): assert ret.returncode == 0 assert len(ret.stderr) == 0 assert "recovering from clean shutdown" in ret.stdout - assert len(ret.stdout.splitlines()) == 95 + + # Totally arbitrary, feel free to update or remove after inspecting. + assert len(ret.stdout.splitlines()) == 97 def test_list_inodes(tmpdir): dev = util.format_1g(tmpdir) diff --git a/tests/test_fuse.py b/tests/test_fuse.py index 660d92d..69c512e 100644 --- a/tests/test_fuse.py +++ b/tests/test_fuse.py @@ -14,6 +14,7 @@ def test_mount(bfuse): bfuse.unmount() bfuse.verify() +@pytest.mark.skipif(util.ENABLE_VALGRIND, reason="test broken") def test_remount(bfuse): bfuse.mount() bfuse.unmount() diff --git a/tests/util.py b/tests/util.py index c4d7431..b5e02c1 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 +import errno import os import pytest import re @@ -49,6 +50,8 @@ def run(cmd, *args, valgrind=False, check=False): vout = tempfile.NamedTemporaryFile() vcmd = ['valgrind', '--leak-check=full', + '--gen-suppressions=all', + '--suppressions=valgrind-suppressions.txt', '--log-file={}'.format(vout.name)] cmds = vcmd + cmds @@ -161,6 +164,8 @@ class BFuse: vlog = tempfile.NamedTemporaryFile() cmd += [ 'valgrind', '--leak-check=full', + '--gen-suppressions=all', + '--suppressions=valgrind-suppressions.txt', '--log-file={}'.format(vlog.name) ] cmd += [ BCH_PATH, @@ -179,9 +184,17 @@ class BFuse: (out2, _) = self.proc.communicate() print("Process exited.") + self.returncode = self.proc.returncode + if self.returncode == 0: + errors = [ 'btree iterators leaked!', + 'emergency read only!' 
] + for e in errors: + if e in out2: + print('Debug error found in output: "{}"'.format(e)) + self.returncode = errno.ENOMSG + self.stdout = out1 + out2 self.stderr = err.read() - self.returncode = self.proc.returncode self.vout = vlog.read().decode('utf-8') def expect(self, pipe, regex): diff --git a/tests/valgrind-suppressions.txt b/tests/valgrind-suppressions.txt new file mode 100644 index 0000000..4ed4de3 --- /dev/null +++ b/tests/valgrind-suppressions.txt @@ -0,0 +1,8 @@ +{ + + Memcheck:Leak + match-leak-kinds: possible,definite + ... + fun:get_default_call_rcu_data_memb + fun:call_rcu_memb +} diff --git a/tools-util.c b/tools-util.c index 361419a..3cc0de4 100644 --- a/tools-util.c +++ b/tools-util.c @@ -289,6 +289,8 @@ int open_for_format(const char *dev, bool force) fputs("Proceed anyway?", stdout); if (!ask_yn()) exit(EXIT_FAILURE); + while (blkid_do_probe(pr) == 0) + blkid_do_wipe(pr, 0); } blkid_free_probe(pr); diff --git a/tools-util.h b/tools-util.h index 01898e2..568707b 100644 --- a/tools-util.h +++ b/tools-util.h @@ -20,7 +20,9 @@ #include #include "ccan/darray/darray.h" -void die(const char *, ...); +#define noreturn __attribute__((noreturn)) + +void die(const char *, ...) noreturn; char *mprintf(const char *, ...) __attribute__ ((format (printf, 1, 2))); void *xcalloc(size_t, size_t); -- 2.39.2
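As a closing illustration of the xxHash interface added in linux/xxhash.c and include/linux/xxhash.h above (a sketch assuming it is compiled as a small userspace test against this tree's shim headers): the streaming reset/update/digest sequence yields the same digest as the one-shot xxh64() over the same bytes, which is the property the incremental state exists to preserve.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <linux/xxhash.h>

int main(void)
{
        static const char msg[] = "bcachefs-tools: new upstream release";
        const uint64_t seed = 0;
        struct xxh64_state state;
        uint64_t h1, h2;

        /* one-shot hash of the whole buffer */
        h1 = xxh64(msg, strlen(msg), seed);

        /* the same bytes fed incrementally */
        xxh64_reset(&state, seed);
        xxh64_update(&state, msg, 10);
        xxh64_update(&state, msg + 10, strlen(msg) - 10);
        h2 = xxh64_digest(&state);

        assert(h1 == h2);
        return 0;
}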