PKG_CONFIG?=pkg-config
INSTALL=install
-CFLAGS+=-std=gnu89 -O2 -g -MMD -Wall -fPIC \
+ifeq ("$(origin V)", "command line")
+ BUILD_VERBOSE = $(V)
+endif
+ifndef BUILD_VERBOSE
+ BUILD_VERBOSE = 0
+endif
+
+ifeq ($(BUILD_VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
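+
+# Example: "make V=1" echoes the full compiler and linker commands; the default
+# quiet build prints only the short [CC]/[LD]/[VERS] tags from the rules below.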
+
+CFLAGS+=-std=gnu11 -O2 -g -MMD -Wall -fPIC \
-Wno-pointer-sign \
-fno-strict-aliasing \
-fno-delete-null-pointer-checks \
PYTEST_ARGS?=
PYTEST_CMD?=$(shell \
command -v pytest-3 \
- || which pytest-3 \
+ || which pytest-3 2>/dev/null \
)
PYTEST:=$(PYTEST_CMD) $(PYTEST_ARGS)
-include $(DEPS)
OBJS=$(SRCS:.c=.o)
+
+%.o: %.c
+ @echo " [CC] $@"
+ $(Q)$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $<
+
bcachefs: $(filter-out ./tests/%.o, $(OBJS))
+ @echo " [LD] $@"
+ $(Q)$(CC) $(LDFLAGS) $+ $(LOADLIBES) $(LDLIBS) -o $@
RUST_SRCS=$(shell find rust-src/ -type f -iname '*.rs')
MOUNT_SRCS=$(filter %mount, $(RUST_SRCS))
MOUNT_OBJ=$(filter-out ./bcachefs.o ./tests/%.o ./cmd_%.o , $(OBJS))
libbcachefs.so: LDFLAGS+=-shared
libbcachefs.so: $(MOUNT_OBJ)
- $(CC) $(LDFLAGS) $+ -o $@ $(LDLIBS)
+ @echo " [CC] $@"
+ $(Q)$(CC) $(LDFLAGS) $+ -o $@ $(LDLIBS)
MOUNT_TOML=rust-src/mount/Cargo.toml
mount.bcachefs: lib $(MOUNT_SRCS)
tests/test_helper: $(filter ./tests/%.o, $(OBJS))
+ @echo " [LD] $@"
+ $(Q)$(CC) $(LDFLAGS) $+ $(LOADLIBES) $(LDLIBS) -o $@
# If the version string differs from the last build, update the last version
ifneq ($(VERSION),$(shell cat .version 2>/dev/null))
.PHONY: .version
endif
.version:
- echo '$(VERSION)' > $@
+ @echo " [VERS] $@"
+ $(Q)echo '$(VERSION)' > $@
# Rebuild the 'version' command any time the version string changes
cmd_version.o : .version
.PHONY: clean
clean:
- $(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version $(OBJS) $(DEPS) $(DOCGENERATED)
- $(RM) -rf rust-src/*/target
+ @echo "Cleaning all"
+ $(Q)$(RM) bcachefs mount.bcachefs libbcachefs_mount.a tests/test_helper .version *.tar.xz $(OBJS) $(DEPS) $(DOCGENERATED)
+ $(Q)$(RM) -rf rust-src/*/target
.PHONY: deb
deb: all
.PHONY: update-commit-bcachefs-sources
update-commit-bcachefs-sources: update-bcachefs-sources
git commit -m "Update bcachefs sources to $(shell git -C $(LINUX_DIR) show --oneline --no-patch)"
+
+SRCTARXZ = bcachefs-tools-$(VERSION).tar.xz
+SRCDIR=bcachefs-tools-$(VERSION)
+
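+# "make tarball" packs the files tracked by git (recorded in .gitcensus) into
+# $(SRCTARXZ), i.e. bcachefs-tools-$(VERSION).tar.xz.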
+.PHONY: tarball
+tarball: $(SRCTARXZ)
+
+$(SRCTARXZ) : .gitcensus
+ $(Q)tar --transform "s,^,$(SRCDIR)/," -Jcf $(SRCDIR).tar.xz \
+ `cat .gitcensus`
+ @echo Wrote: $@
+
+.PHONY: .gitcensus
+.gitcensus:
+ $(Q)if test -d .git; then \
+ git ls-files > .gitcensus; \
+ fi
--- /dev/null
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "cmds.h"
+#include "libbcachefs.h"
+#include "tools-util.h"
+
+#include "libbcachefs/bcachefs.h"
+#include "libbcachefs/btree_iter.h"
+#include "libbcachefs/error.h"
+#include "libbcachefs/super.h"
+
+static void kill_btree_node_usage(void)
+{
+ puts("bcachefs kill_btree_node - make btree nodes unreadable\n"
+ "Usage: bcachefs kill_btree_node [OPTION]... <devices>\n"
+ "\n"
+ "Options:\n"
+ " -b (extents|inodes|dirents|xattrs) Btree to delete from\n"
+	    " -l level			Level to delete from (0 == leaves)\n"
+ " -i index Index of btree node to kill\n"
+ " -h Display this help and exit\n"
+ "Report bugs to <linux-bcachefs@vger.kernel.org>");
+}
+
+int cmd_kill_btree_node(int argc, char *argv[])
+{
+ struct bch_opts opts = bch2_opts_empty();
+ enum btree_id btree_id = 0;
+ unsigned level = 0;
+ u64 node_index = 0;
+ int opt;
+
+ opt_set(opts, read_only, true);
+
+ while ((opt = getopt(argc, argv, "b:l:i:h")) != -1)
+ switch (opt) {
+ case 'b':
+ btree_id = read_string_list_or_die(optarg,
+ bch2_btree_ids, "btree id");
+ break;
+ case 'l':
+ if (kstrtouint(optarg, 10, &level) || level >= BTREE_MAX_DEPTH)
+ die("invalid level");
+ break;
+ case 'i':
+ if (kstrtoull(optarg, 10, &node_index))
+ die("invalid index %s", optarg);
+ break;
+ case 'h':
+ kill_btree_node_usage();
+ exit(EXIT_SUCCESS);
+ }
+ args_shift(optind);
+
+ if (!argc)
+ die("Please supply device(s)");
+
+ struct bch_fs *c = bch2_fs_open(argv, argc, opts);
+ if (IS_ERR(c))
+ die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c)));
+
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct btree *b;
+ int ret;
+ void *zeroes;
+
+ ret = posix_memalign(&zeroes, c->opts.block_size, c->opts.block_size);
+ if (ret)
+ die("error %s from posix_memalign", strerror(ret));
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ __for_each_btree_node(&trans, iter, btree_id, POS_MIN, 0, level, 0, b, ret) {
+ if (b->c.level != level)
+ continue;
+
+ if (!node_index) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
+ const struct bch_extent_ptr *ptr;
+
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ bch_info(c, "killing btree node %s", buf.buf);
+ printbuf_exit(&buf);
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ ret = pwrite(ca->disk_sb.bdev->bd_fd, zeroes,
+ c->opts.block_size, ptr->offset << 9);
+ if (ret != c->opts.block_size) {
+ bch_err(c, "pwrite error: expected %u got %i %s",
+ c->opts.block_size, ret, strerror(errno));
+ ret = EXIT_FAILURE;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ node_index--;
+ }
+ if (ret)
+ bch_err(c, "error %i walking btree nodes", ret);
+ else
+ bch_err(c, "node at specified index not found");
+ ret = EXIT_FAILURE;
+done:
+ bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_exit(&trans);
+
+ bch2_fs_stop(c);
+ return ret;
+}
#include "cmds.h"
#include "libbcachefs.h"
-#include "qcow2.h"
#include "tools-util.h"
#include "libbcachefs/bcachefs.h"
bch2_fs_stop(c);
return 0;
}
-
-static void kill_btree_node_usage(void)
-{
- puts("bcachefs kill_btree_node - make btree nodes unreadable\n"
- "Usage: bcachefs kill_btree_node [OPTION]... <devices>\n"
- "\n"
- "Options:\n"
- " -b (extents|inodes|dirents|xattrs) Btree to delete from\n"
- " -l level Levle to delete from (0 == leaves)\n"
- " -i index Index of btree node to kill\n"
- " -h Display this help and exit\n"
- "Report bugs to <linux-bcachefs@vger.kernel.org>");
-}
-
-int cmd_kill_btree_node(int argc, char *argv[])
-{
- struct bch_opts opts = bch2_opts_empty();
- enum btree_id btree_id = 0;
- unsigned level = 0;
- u64 node_index = 0;
- int opt;
-
- opt_set(opts, read_only, true);
-
- while ((opt = getopt(argc, argv, "b:l:i:h")) != -1)
- switch (opt) {
- case 'b':
- btree_id = read_string_list_or_die(optarg,
- bch2_btree_ids, "btree id");
- break;
- case 'l':
- if (kstrtouint(optarg, 10, &level) || level >= BTREE_MAX_DEPTH)
- die("invalid level");
- break;
- case 'i':
- if (kstrtoull(optarg, 10, &node_index))
- die("invalid index %s", optarg);
- break;
- case 'h':
- kill_btree_node_usage();
- exit(EXIT_SUCCESS);
- }
- args_shift(optind);
-
- if (!argc)
- die("Please supply device(s)");
-
- struct bch_fs *c = bch2_fs_open(argv, argc, opts);
- if (IS_ERR(c))
- die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c)));
-
- struct btree_trans trans;
- struct btree_iter iter;
- struct btree *b;
- int ret;
- void *zeroes;
-
- ret = posix_memalign(&zeroes, c->opts.block_size, c->opts.block_size);
- if (ret)
- die("error %s from posix_memalign", strerror(ret));
-
- bch2_trans_init(&trans, c, 0, 0);
-
- __for_each_btree_node(&trans, iter, btree_id, POS_MIN, 0, level, 0, b, ret) {
- if (b->c.level != level)
- continue;
-
- if (!node_index) {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
- const struct bch_extent_ptr *ptr;
-
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- bch_info(c, "killing btree node %s", buf.buf);
- printbuf_exit(&buf);
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-
- ret = pwrite(ca->disk_sb.bdev->bd_fd, zeroes,
- c->opts.block_size, ptr->offset << 9);
- if (ret != c->opts.block_size) {
- bch_err(c, "pwrite error: expected %u got %i %s",
- c->opts.block_size, ret, strerror(errno));
- ret = EXIT_FAILURE;
- goto done;
- }
- }
- goto done;
- }
-
- node_index--;
- }
- if (ret)
- bch_err(c, "error %i walking btree nodes", ret);
- else
- bch_err(c, "node at specified index not found");
- ret = EXIT_FAILURE;
-done:
- bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
-
- bch2_fs_stop(c);
- return ret;
-}
op.nr_replicas = 1;
op.subvol = 1;
op.pos = SPOS(dst_inode->bi_inum, dst_offset >> 9, U32_MAX);
+ op.flags |= BCH_WRITE_SYNC;
int ret = bch2_disk_reservation_get(c, &op.res, len >> 9,
c->opts.data_replicas, 0);
die("error reserving space in new filesystem: %s", strerror(-ret));
closure_call(&op.cl, bch2_write, NULL, &cl);
- closure_sync(&cl);
dst_inode->bi_sectors += len >> 9;
}
--- /dev/null
+bcachefs-tools*
+debhelper-build-stamp
+files
+bcachefs-tools (24-1) unstable; urgency=medium
+
+ * New upstream release
+
+ -- Jonathan Carter <jcc@debian.org> Tue, 29 Nov 2022 09:40:27 +0200
+
bcachefs-tools (23-1) unstable; urgency=medium
* New upstream release
-bcachefs-tools_23-1_source.buildinfo utils optional
+bcachefs-tools_24-1_source.buildinfo utils optional
#define __ATOMIC_ADD_RETURN_RELEASE(v, p) \
__atomic_add_fetch(p, v, __ATOMIC_RELEASE)
#define __ATOMIC_SUB_RETURN(v, p) __atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_SUB_RETURN_RELEASE(v, p) \
+ __atomic_sub_fetch(p, v, __ATOMIC_RELEASE)
#define xchg(p, v) __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
#define xchg_acquire(p, v) __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
#endif
+#ifndef __ATOMIC_SUB_RETURN_RELEASE
+#define __ATOMIC_SUB_RETURN_RELEASE(i, v) \
+ ({ smp_mb__before_atomic(); __ATOMIC_SUB_RETURN(i, v); })
+#endif
+
#ifndef __ATOMIC_SUB
#define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
#endif
return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter); \
} \
\
+static inline i_type a_type##_sub_return_release(i_type i, a_type##_t *v)\
+{ \
+ return __ATOMIC_SUB_RETURN_RELEASE(i, &v->counter); \
+} \
+ \
static inline i_type a_type##_sub_return(i_type i, a_type##_t *v) \
{ \
return __ATOMIC_SUB_RETURN(i, &v->counter); \
}
struct printbuf;
+extern void prt_u64(struct printbuf *out, u64 num);
+
extern __printf(2, 0) void prt_vprintf(struct printbuf *out, const char *fmt, va_list args);
extern __printf(2, 3) void prt_printf(struct printbuf *out, const char *fmt, ...);
u64 variance;
};
-inline s64 fast_divpow2(s64 n, u8 d);
+s64 fast_divpow2(s64 n, u8 d);
struct mean_and_variance mean_and_variance_update(struct mean_and_variance s1, s64 v1);
s64 mean_and_variance_get_mean(struct mean_and_variance s);
#include <urcu.h>
#include <linux/compiler.h>
+#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+
#define rcu_dereference_check(p, c) rcu_dereference(p)
#define rcu_dereference_raw(p) rcu_dereference(p)
#define rcu_dereference_protected(p, c) rcu_dereference(p)
} wait_queue_head_t;
void wake_up(wait_queue_head_t *);
+void wake_up_all(wait_queue_head_t *);
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
TRACE_EVENT(btree_reserve_get_fail,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
- size_t required),
- TP_ARGS(trans_fn, caller_ip, required),
+ size_t required,
+ int ret),
+ TP_ARGS(trans_fn, caller_ip, required, ret),
TP_STRUCT__entry(
__array(char, trans_fn, 32 )
__field(unsigned long, caller_ip )
__field(size_t, required )
+ __array(char, ret, 32 )
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->required = required;
+ strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
),
- TP_printk("%s %pS required %zu",
+ TP_printk("%s %pS required %zu ret %s",
__entry->trans_fn,
(void *) __entry->caller_ip,
- __entry->required)
+ __entry->required,
+ __entry->ret)
);
DEFINE_EVENT(btree_node, btree_node_compact,
TP_fast_assign(
struct btree *b = btree_path_node(path, level);
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = path->level;
TP_fast_assign(
struct six_lock_count c;
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = level;
TP_fast_assign(
__entry->dev = ca->dev;
- strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+ strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
__entry->user = user;
__entry->bucket = bucket;
),
u64 avail,
u64 copygc_wait_amount,
s64 copygc_waiting_for,
- u64 seen,
- u64 open,
- u64 need_journal_commit,
- u64 nouse,
+ struct bucket_alloc_state *s,
bool nonblocking,
const char *err),
TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
- seen, open, need_journal_commit, nouse, nonblocking, err),
+ s, nonblocking, err),
TP_STRUCT__entry(
__field(dev_t, dev )
TP_fast_assign(
__entry->dev = ca->dev;
- strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+ strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
__entry->free = free;
__entry->avail = avail;
__entry->copygc_wait_amount = copygc_wait_amount;
__entry->copygc_waiting_for = copygc_waiting_for;
- __entry->seen = seen;
- __entry->open = open;
- __entry->need_journal_commit = need_journal_commit;
- __entry->nouse = nouse;
+ __entry->seen = s->buckets_seen;
+ __entry->open = s->skipped_open;
+ __entry->need_journal_commit = s->skipped_need_journal_commit;
+ __entry->nouse = s->skipped_nouse;
__entry->nonblocking = nonblocking;
- strlcpy(__entry->err, err, sizeof(__entry->err));
+ strscpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->discarded = discarded;
- strlcpy(__entry->err, err, sizeof(__entry->err));
+ strscpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->flags = flags;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
TRACE_BPOS_assign(pos, path->pos)
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->old_locks_want = old_locks_want;
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
),
TP_printk("%s", __entry->trans_fn)
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->bytes = bytes;
),
),
TP_fast_assign(
- strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
return -EINVAL;
}
+ /*
+ * XXX this is wrong, we'll be checking updates that happened from
+ * before BCH_FS_CHECK_BACKPOINTERS_DONE
+ */
+ if (rw == WRITE && test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ unsigned i, bp_len = 0;
+
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
+ bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;
+
+ if (bp_len > a.v->dirty_sectors) {
+ prt_printf(err, "too many backpointers");
+ return -EINVAL;
+ }
+ }
+
if (rw == WRITE) {
if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
prt_printf(err, "invalid data type (got %u should be %u)",
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_alloc (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v1_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v2_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v3_invalid, \
.val_to_text = bch2_alloc_to_text, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
-#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) { \
+#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \
.key_invalid = bch2_alloc_v4_invalid, \
.val_to_text = bch2_alloc_to_text, \
.swab = bch2_alloc_v4_swab, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
-}
+})
static inline bool bkey_is_alloc(const struct bkey *k)
{
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
u64 bucket,
enum alloc_reserve reserve,
struct bch_alloc_v4 *a,
- u64 *skipped_open,
- u64 *skipped_need_journal_commit,
- u64 *skipped_nouse,
+ struct bucket_alloc_state *s,
struct closure *cl)
{
struct open_bucket *ob;
if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
- (*skipped_nouse)++;
+ s->skipped_nouse++;
return NULL;
}
if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
- (*skipped_open)++;
+ s->skipped_open++;
return NULL;
}
if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
- (*skipped_need_journal_commit)++;
+ s->skipped_need_journal_commit++;
+ return NULL;
+ }
+
+ if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+ s->skipped_nocow++;
return NULL;
}
/* Recheck under lock: */
if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
spin_unlock(&c->freelist_lock);
- (*skipped_open)++;
+ s->skipped_open++;
return NULL;
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
enum alloc_reserve reserve, u64 free_entry,
- u64 *skipped_open,
- u64 *skipped_need_journal_commit,
- u64 *skipped_nouse,
+ struct bucket_alloc_state *s,
struct bkey_s_c freespace_k,
struct closure *cl)
{
goto err;
}
- ob = __try_alloc_bucket(c, ca, b, reserve, &a,
- skipped_open,
- skipped_need_journal_commit,
- skipped_nouse,
- cl);
+ if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ struct bch_backpointer bp;
+ u64 bp_offset = 0;
+
+ ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+ &bp_offset, &bp,
+ BTREE_ITER_NOPRESERVE);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ goto err;
+ }
+
+ if (bp_offset != U64_MAX) {
+ /*
+ * Bucket may have data in it - we don't call
+			 * bch2_trans_inconsistent() because fsck hasn't
+ * finished yet
+ */
+ ob = NULL;
+ goto err;
+ }
+ }
+
+ ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
if (!ob)
iter.path->preserve = false;
err:
bch2_bucket_alloc_early(struct btree_trans *trans,
struct bch_dev *ca,
enum alloc_reserve reserve,
- u64 *cur_bucket,
- u64 *buckets_seen,
- u64 *skipped_open,
- u64 *skipped_need_journal_commit,
- u64 *skipped_nouse,
+ struct bucket_alloc_state *s,
struct closure *cl)
{
struct btree_iter iter;
struct open_bucket *ob = NULL;
int ret;
- *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
- *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
+ s->cur_bucket = max_t(u64, s->cur_bucket, ca->mi.first_bucket);
+ s->cur_bucket = max_t(u64, s->cur_bucket, ca->new_fs_bucket_idx);
- for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
BTREE_ITER_SLOTS, k, ret) {
struct bch_alloc_v4 a;
if (a.data_type != BCH_DATA_free)
continue;
- (*buckets_seen)++;
+ s->buckets_seen++;
- ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
- skipped_open,
- skipped_need_journal_commit,
- skipped_nouse,
- cl);
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
if (ob)
break;
}
bch2_trans_iter_exit(trans, &iter);
- *cur_bucket = iter.pos.offset;
+ s->cur_bucket = iter.pos.offset;
return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
struct bch_dev *ca,
enum alloc_reserve reserve,
- u64 *cur_bucket,
- u64 *buckets_seen,
- u64 *skipped_open,
- u64 *skipped_need_journal_commit,
- u64 *skipped_nouse,
+ struct bucket_alloc_state *s,
struct closure *cl)
{
struct btree_iter iter;
* at previously
*/
for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
- POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
+ POS(ca->dev_idx, s->cur_bucket), 0, k, ret) {
if (k.k->p.inode != ca->dev_idx)
break;
- for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
- *cur_bucket < k.k->p.offset;
- (*cur_bucket)++) {
+ for (s->cur_bucket = max(s->cur_bucket, bkey_start_offset(k.k));
+ s->cur_bucket < k.k->p.offset;
+ s->cur_bucket++) {
ret = btree_trans_too_many_iters(trans);
if (ret)
break;
- (*buckets_seen)++;
+ s->buckets_seen++;
ob = try_alloc_bucket(trans, ca, reserve,
- *cur_bucket,
- skipped_open,
- skipped_need_journal_commit,
- skipped_nouse,
- k, cl);
+ s->cur_bucket, s, k, cl);
if (ob)
break;
}
bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
u64 avail;
- u64 cur_bucket = start;
- u64 buckets_seen = 0;
- u64 skipped_open = 0;
- u64 skipped_need_journal_commit = 0;
- u64 skipped_nouse = 0;
+ struct bucket_alloc_state s = { .cur_bucket = start };
bool waiting = false;
again:
bch2_dev_usage_read_fast(ca, usage);
}
ob = likely(ca->mi.freespace_initialized)
- ? bch2_bucket_alloc_freelist(trans, ca, reserve,
- &cur_bucket,
- &buckets_seen,
- &skipped_open,
- &skipped_need_journal_commit,
- &skipped_nouse,
- cl)
- : bch2_bucket_alloc_early(trans, ca, reserve,
- &cur_bucket,
- &buckets_seen,
- &skipped_open,
- &skipped_need_journal_commit,
- &skipped_nouse,
- cl);
-
- if (skipped_need_journal_commit * 2 > avail)
+ ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
+ : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+
+ if (s.skipped_need_journal_commit * 2 > avail)
bch2_journal_flush_async(&c->journal, NULL);
if (!ob && !freespace_initialized && start) {
- start = cur_bucket = 0;
+ start = s.cur_bucket = 0;
goto again;
}
if (!freespace_initialized)
- ca->bucket_alloc_trans_early_cursor = cur_bucket;
+ ca->bucket_alloc_trans_early_cursor = s.cur_bucket;
err:
if (!ob)
ob = ERR_PTR(-BCH_ERR_no_buckets_found);
avail,
bch2_copygc_wait_amount(c),
c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
- buckets_seen,
- skipped_open,
- skipped_need_journal_commit,
- skipped_nouse,
+ &s,
cl == NULL,
bch2_err_str(PTR_ERR(ob)));
/*
* Get us an open_bucket we can allocate from, return with it locked:
*/
-struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *trans,
- unsigned target,
- unsigned erasure_code,
- struct write_point_specifier write_point,
- struct bch_devs_list *devs_have,
- unsigned nr_replicas,
- unsigned nr_replicas_required,
- enum alloc_reserve reserve,
- unsigned flags,
- struct closure *cl)
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+ unsigned target,
+ unsigned erasure_code,
+ struct write_point_specifier write_point,
+ struct bch_devs_list *devs_have,
+ unsigned nr_replicas,
+ unsigned nr_replicas_required,
+ enum alloc_reserve reserve,
+ unsigned flags,
+ struct closure *cl,
+ struct write_point **wp_ret)
{
struct bch_fs *c = trans->c;
struct write_point *wp;
write_points_nr = c->write_points_nr;
have_cache = false;
- wp = writepoint_find(trans, write_point.v);
+ *wp_ret = wp = writepoint_find(trans, write_point.v);
if (wp->data_type == BCH_DATA_user)
ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
- return wp;
+ return 0;
err:
open_bucket_for_each(c, &wp->ptrs, ob, i)
if (ptrs.nr < ARRAY_SIZE(ptrs.v))
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty))
return cl
- ? ERR_PTR(-EAGAIN)
- : ERR_PTR(-BCH_ERR_ENOSPC_bucket_alloc);
+ ? -EAGAIN
+ : -BCH_ERR_ENOSPC_bucket_alloc;
if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
- return ERR_PTR(-EROFS);
-
- return ERR_PTR(ret);
-}
-
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
- unsigned target,
- unsigned erasure_code,
- struct write_point_specifier write_point,
- struct bch_devs_list *devs_have,
- unsigned nr_replicas,
- unsigned nr_replicas_required,
- enum alloc_reserve reserve,
- unsigned flags,
- struct closure *cl)
-{
- struct write_point *wp;
-
- bch2_trans_do(c, NULL, NULL, 0,
- PTR_ERR_OR_ZERO(wp = bch2_alloc_sectors_start_trans(&trans, target,
- erasure_code,
- write_point,
- devs_have,
- nr_replicas,
- nr_replicas_required,
- reserve,
- flags, cl)));
- return wp;
+ return -EROFS;
+ return ret;
}
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
mutex_init(&wp->lock);
wp->data_type = type;
+
+ INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+ INIT_LIST_HEAD(&wp->writes);
+ spin_lock_init(&wp->writes_lock);
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
unsigned, unsigned *, bool *, enum alloc_reserve,
unsigned, struct closure *);
-struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *,
- unsigned, unsigned,
- struct write_point_specifier,
- struct bch_devs_list *,
- unsigned, unsigned,
- enum alloc_reserve,
- unsigned,
- struct closure *);
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *,
- unsigned, unsigned,
- struct write_point_specifier,
- struct bch_devs_list *,
- unsigned, unsigned,
- enum alloc_reserve,
- unsigned,
- struct closure *);
+int bch2_alloc_sectors_start_trans(struct btree_trans *,
+ unsigned, unsigned,
+ struct write_point_specifier,
+ struct bch_devs_list *,
+ unsigned, unsigned,
+ enum alloc_reserve,
+ unsigned,
+ struct closure *,
+ struct write_point **);
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
#include "clock_types.h"
#include "fifo.h"
+struct bucket_alloc_state {
+ u64 cur_bucket;
+ u64 buckets_seen;
+ u64 skipped_open;
+ u64 skipped_need_journal_commit;
+ u64 skipped_nocow;
+ u64 skipped_nouse;
+};
+
struct ec_bucket_buf;
#define BCH_ALLOC_RESERVES() \
struct open_buckets ptrs;
struct dev_stripe_state stripe;
+
+ struct work_struct index_update_work;
+
+ struct list_head writes;
+ spinlock_t writes_lock;
};
struct write_point_specifier {
#include <linux/mm.h>
-#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
-
/*
* Convert from pos in backpointer btree to pos of corresponding bucket in alloc
* btree:
return ret;
}
-void bch2_extent_ptr_to_bp(struct bch_fs *c,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p,
- struct bpos *bucket_pos, struct bch_backpointer *bp)
-{
- enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
- s64 sectors = level ? btree_sectors(c) : k.k->size;
- u32 bucket_offset;
-
- *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
- *bp = (struct bch_backpointer) {
- .btree_id = btree_id,
- .level = level,
- .data_type = data_type,
- .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
- p.crc.offset,
- .bucket_len = ptr_disk_sectors(sectors, p),
- .pos = k.k->p,
- };
-}
-
static bool extent_matches_bp(struct bch_fs *c,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k,
break;
if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
- bch_verbose(c, "check_extents_to_backpointers(): alloc info does not fit in ram,"
- "running in multiple passes with %zu nodes per pass",
- btree_nodes_fit_in_ram(c));
+ bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
struct printbuf buf = PRINTBUF;
if (!bbpos_cmp(start, BBPOS_MIN) &&
bbpos_cmp(end, BBPOS_MAX))
- bch_verbose(c, "check_backpointers_to_extents(): extents do not fit in ram,"
- "running in multiple passes with %zu nodes per pass",
- btree_nodes_fit_in_ram(c));
+ bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
if (bbpos_cmp(start, BBPOS_MIN) ||
bbpos_cmp(end, BBPOS_MAX)) {
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
+#include "buckets.h"
#include "super.h"
int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k,
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);
-#define bch2_bkey_ops_backpointer (struct bkey_ops) { \
+#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
.key_invalid = bch2_backpointer_invalid, \
.val_to_text = bch2_backpointer_k_to_text, \
.swab = bch2_backpointer_swab, \
-}
+})
+
+#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
-void bch2_extent_ptr_to_bp(struct bch_fs *, enum btree_id, unsigned,
- struct bkey_s_c, struct extent_ptr_decoded,
- struct bpos *, struct bch_backpointer *);
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
+ struct bpos *bucket_pos, struct bch_backpointer *bp)
+{
+ enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
+ s64 sectors = level ? btree_sectors(c) : k.k->size;
+ u32 bucket_offset;
+
+ *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
+ *bp = (struct bch_backpointer) {
+ .btree_id = btree_id,
+ .level = level,
+ .data_type = data_type,
+ .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
+ p.crc.offset,
+ .bucket_len = ptr_disk_sectors(sectors, p),
+ .pos = k.k->p,
+ };
+}
int bch2_bucket_backpointer_del(struct btree_trans *, struct bkey_i_alloc_v4 *,
struct bch_backpointer, struct bkey_s_c);
#include "bcachefs_format.h"
#include "errcode.h"
#include "fifo.h"
+#include "nocow_locking.h"
#include "opts.h"
#include "util.h"
dynamic_fault("bcachefs:meta:write:" name)
#ifdef __KERNEL__
-#define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
-#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
-#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
+#define BCACHEFS_LOG_PREFIX
+#endif
+
+#ifdef BCACHEFS_LOG_PREFIX
+
+#define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
+#define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
+ "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
+
#else
-#define bch2_log_msg(_c, fmt) fmt
-#define bch2_fmt(_c, fmt) fmt "\n"
-#define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
+
+#define bch2_log_msg(_c, fmt) fmt
+#define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
+ "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
+
#endif
+#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
+
#define bch_info(c, fmt, ...) \
printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn_ratelimited(c, fmt, ...) \
printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
+
#define bch_err(c, fmt, ...) \
printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev(ca, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset(ca, _offset, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
+#define bch_err_inum(c, _inum, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
#define bch_err_ratelimited(c, fmt, ...) \
printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev_ratelimited(ca, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
#define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
printk_ratelimited(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
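+
+/*
+ * For example, with BCACHEFS_LOG_PREFIX defined,
+ * bch_err_inum_offset(c, 42, 4096, "checksum error") prints
+ * "bcachefs (<fs name> inum 42 offset 4096): checksum error".
+ */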
#define bch_verbose(c, fmt, ...) \
do { \
"When reading btree nodes, read all replicas and " \
"compare them")
-/* Parameters that should only be compiled in in debug mode: */
+/* Parameters that should only be compiled in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG() \
BCH_DEBUG_PARAM(expensive_debug_checks, \
"Enables various runtime debugging checks that " \
x(journal_flush_seq) \
x(blocked_journal) \
x(blocked_allocate) \
- x(blocked_allocate_open_bucket)
+ x(blocked_allocate_open_bucket) \
+ x(nocow_lock_contended)
enum bch_time_stats {
#define x(name) BCH_TIME_##name,
struct bch_sb *sb_read_scratch;
int sb_write_error;
dev_t dev;
+ atomic_t flush_seq;
struct bch_devs_mask self;
struct workqueue_struct *btree_interior_update_worker;
struct work_struct btree_interior_update_work;
+ /* btree_io.c: */
+ spinlock_t btree_write_error_lock;
+ struct btree_write_stats {
+ atomic64_t nr;
+ atomic64_t bytes;
+ } btree_write_stats[BTREE_WRITE_TYPE_NR];
+
/* btree_iter.c: */
struct mutex btree_trans_lock;
struct list_head btree_trans_list;
struct bio_set bio_write;
struct mutex bio_bounce_pages_lock;
mempool_t bio_bounce_pages;
+ struct bucket_nocow_lock_table
+ nocow_locks;
struct rhashtable promote_table;
mempool_t compression_bounce[2];
struct bio_set writepage_bioset;
struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
-
-
- atomic64_t btree_writes_nr;
- atomic64_t btree_writes_sectors;
- spinlock_t btree_write_error_lock;
+ struct bio_set nocow_flush_bioset;
/* ERRORS */
struct list_head fsck_errors;
#else
#error edit for your odd byteorder.
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
__u32 hi;
__u64 lo;
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
struct bkey {
/* Size of combined key and value, in u64s */
__u8 pad[1];
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bkey_packed {
__u64 _data[0];
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX U8_MAX
struct bch_csum {
__le64 lo;
__le64 hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_EXTENT_ENTRY_TYPES() \
x(ptr, 0) \
_compressed_size:7,
type:2;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC32_SIZE_MAX (1U << 7)
#define CRC32_NONCE_MAX 0
type:3;
#endif
__u64 csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC64_SIZE_MAX (1U << 9)
#define CRC64_NONCE_MAX ((1U << 10) - 1)
type:4;
#endif
struct bch_csum csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC128_SIZE_MAX (1U << 13)
#define CRC128_NONCE_MAX ((1U << 13) - 1)
__u64 type:1,
cached:1,
unused:1,
- reservation:1,
+ unwritten:1,
offset:44, /* 8 petabytes */
dev:8,
gen:8;
__u64 gen:8,
dev:8,
offset:44,
- reservation:1,
+ unwritten:1,
unused:1,
cached:1,
type:1;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 _data[0];
struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_btree_ptr_v2 {
struct bch_val v;
struct bpos min_key;
__u64 _data[0];
struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
__u64 _data[0];
union bch_extent_entry start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_reservation {
struct bch_val v;
__le32 generation;
__u8 nr_replicas;
__u8 pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
__le32 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_inode_v2 {
struct bch_val v;
__le64 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_inode_v3 {
struct bch_val v;
__le64 bi_size;
__le64 bi_version;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define INODEv3_FIELDS_START_INITIAL 6
#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(u64))
__le32 bi_generation;
__le32 pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* bi_subvol and bi_parent_subvol are only set for subvolume roots:
x(bi_dir, 64) \
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
- x(bi_parent_subvol, 32)
+ x(bi_parent_subvol, 32) \
+ x(bi_nocow, 8)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
x(promote_target, 16) \
x(foreground_target, 16) \
x(background_target, 16) \
- x(erasure_code, 16)
+ x(erasure_code, 16) \
+ x(nocow, 8)
enum inode_opt_id {
#define x(name, ...) \
__u8 d_type;
__u8 d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define DT_SUBVOL 16
#define BCH_DT_MAX 17
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Bucket/allocation information: */
__u8 fields;
__u8 gen;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V1() \
x(read_time, 16) \
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V2() \
x(read_time, 64) \
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
__u64 io_time[2];
__u32 stripe;
__u32 nr_external_backpointers;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_V4_U64s_V0 6
#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(u64))
__u64 bucket_offset:40;
__u32 bucket_len;
struct bpos pos;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Quotas: */
struct bch_quota {
struct bch_val v;
struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Erasure coding */
__u8 pad;
struct bch_extent_ptr ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Reflink: */
*/
__le32 front_pad;
__le32 back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_indirect_inline_data {
struct bch_val v;
struct bch_lru {
struct bch_val v;
__le64 idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define LRU_ID_STRIPES (1U << 16)
__u8 data_type;
__u8 nr_devs;
__u8 devs[];
-} __attribute__((packed));
+} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
__u8 devs[];
-} __attribute__((packed));
+} __packed;
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
struct bch_sb_field_replicas {
struct bch_sb_field field;
struct bch_replicas_entry entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
struct bch_sb_field_quota {
struct bch_sb_field field;
struct bch_sb_quota_type q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_disk_groups: */
struct bch_disk_group {
__u8 label[BCH_SB_LABEL_SIZE];
__le64 flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
struct bch_disk_group entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_counters */
x(alloc_v4, 20) \
x(new_data_types, 21) \
x(backpointers, 22) \
- x(inode_v3, 23)
+ x(inode_v3, 23) \
+ x(unwritten_extents, 24)
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_SB_LAYOUT_SECTOR 7
struct bch_sb_field start[0];
__le64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* Flags:
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
/* Obsolete, always enabled: */
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
/*
* Features:
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
+
memcpy(&ret, &sb->uuid, sizeof(ret));
return ret;
}
struct jset_entry_usage {
struct jset_entry entry;
__le64 v;
-} __attribute__((packed));
+} __packed;
struct jset_entry_data_usage {
struct jset_entry entry;
__le64 v;
struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
struct jset_entry_clock {
struct jset_entry entry;
__u8 rw;
__u8 pad[7];
__le64 time;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage_type {
__le64 buckets;
__le64 sectors;
__le64 fragmented;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage {
struct jset_entry entry;
__le64 _buckets_unavailable; /* No longer used */
struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
struct jset_entry_log {
struct jset_entry entry;
u8 d[];
-} __attribute__((packed));
+} __packed;
/*
* On disk format for a journal entry:
struct jset_entry start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
struct bkey_packed start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#endif /* _BCACHEFS_FORMAT_H */
__u64 pad[8];
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
enum bch_data_event {
BCH_DATA_EVENT_PROGRESS = 0,
__u64 sectors_done;
__u64 sectors_total;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_ioctl_data_event {
__u8 type;
struct bch_ioctl_data_progress p;
__u64 pad2[15];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_replicas_usage {
__u64 sectors;
struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
static inline struct bch_replicas_usage *
replicas_usage_next(struct bch_replicas_usage *u)
const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
-struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
- const struct bkey_packed *);
-
void bch2_bkey_packed_to_binary_text(struct printbuf *out,
const struct bkey_format *f,
const struct bkey_packed *k)
return 0;
}
-#define bch2_bkey_ops_deleted (struct bkey_ops) { \
+#define bch2_bkey_ops_deleted ((struct bkey_ops) { \
.key_invalid = deleted_key_invalid, \
-}
+})
-#define bch2_bkey_ops_whiteout (struct bkey_ops) { \
+#define bch2_bkey_ops_whiteout ((struct bkey_ops) { \
.key_invalid = deleted_key_invalid, \
-}
+})
static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return 0;
}
-#define bch2_bkey_ops_error (struct bkey_ops) { \
+#define bch2_bkey_ops_error ((struct bkey_ops) { \
.key_invalid = empty_val_key_invalid, \
-}
+})
static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return 0;
}
-#define bch2_bkey_ops_cookie (struct bkey_ops) { \
+#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
.key_invalid = key_type_cookie_invalid, \
-}
+})
-#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) { \
+#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
.key_invalid = empty_val_key_invalid, \
-}
+})
static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
datalen, min(datalen, 32U), d.v->data);
}
-#define bch2_bkey_ops_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
.key_invalid = key_type_inline_data_invalid, \
.val_to_text = key_type_inline_data_to_text, \
-}
+})
static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err)
return true;
}
-#define bch2_bkey_ops_set (struct bkey_ops) { \
+#define bch2_bkey_ops_set ((struct bkey_ops) { \
.key_invalid = key_type_set_invalid, \
.key_merge = key_type_set_merge, \
-}
+})
const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
btree_id == BTREE_ID_inodes) {
if (!bkey_packed(k)) {
struct bkey_i *u = packed_to_bkey(k);
+
swap(u->k.p.inode, u->k.p.offset);
} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
f->bits_per_field[BKEY_FIELD_OFFSET]) {
*
* When invalid, error string is returned via @err. @rw indicates whether key is
* being read or written; more aggressive checks can be enabled when rw == WRITE.
-*/
+ */
struct bkey_ops {
int (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
int rw, struct printbuf *err);
else
bch2_bkey_unpack(src, (void *) out, in);
+ out->needs_whiteout = false;
+
btree_keys_account_key_add(&nr, 0, out);
out = bkey_next(out);
}
continue;
while ((next = sort_iter_peek(iter)) &&
- !bch2_bkey_cmp_packed(iter->b, in, next)) {
+ !bch2_bkey_cmp_packed_inlined(iter->b, in, next)) {
BUG_ON(in->needs_whiteout &&
next->needs_whiteout);
needs_whiteout |= in->needs_whiteout;
struct btree *,
struct bkey *);
+#define for_each_btree_node_key(b, k, iter) \
+ for (bch2_btree_node_iter_init_from_start((iter), (b)); \
+ (k = bch2_btree_node_iter_peek((iter), (b))); \
+ bch2_btree_node_iter_advance(iter, b))
+
#define for_each_btree_node_key_unpack(b, k, iter, unpacked) \
for (bch2_btree_node_iter_init_from_start((iter), (b)); \
(k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
- struct btree *b = kzalloc(sizeof(struct btree), gfp);
+ struct btree *b;
+
+ b = kzalloc(sizeof(struct btree), gfp);
if (!b)
return NULL;
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL);
+ struct btree *b;
+
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
return NULL;
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
BUG_ON(ret);
/* Cause future lookups for this node to fail: */
* the post write cleanup:
*/
if (bch2_verify_btree_ondisk)
- bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
+ bch2_btree_node_write(c, b, SIX_LOCK_intent,
+ BTREE_WRITE_cache_reclaim);
else
- __bch2_btree_node_write(c, b, 0);
+ __bch2_btree_node_write(c, b,
+ BTREE_WRITE_cache_reclaim);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
six_trylock_read(&b->c.lock)) {
list_move(&bc->live, &b->list);
mutex_unlock(&bc->lock);
- __bch2_btree_node_write(c, b, 0);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
six_unlock_read(&b->c.lock);
if (touched >= nr)
goto out_nounlock;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
if (btree_node_dirty(b)) {
- __bch2_btree_node_write(c, b, 0);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
goto wait_on_io;
struct bkey_i_btree_ptr_v2 *new;
int ret;
- new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (ret)
return ret;
- new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -ENOMEM;
t == bset_tree_last(b));
}
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+ unsigned mid_u64s_bits =
+ (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+ return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
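+
+/*
+ * Illustrative arithmetic (example values, not taken from this patch): if
+ * btree_max_u64s(c) were 2^15 and BTREE_WRITE_SET_U64s_BITS were 9, then
+ * mid_u64s_bits = (15 + 9) / 2 = 12, so a middle bset larger than 2^12 u64s
+ * (the geometric mean of 2^15 and 2^9) triggers a full sort-down.
+ */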
+
/*
* @bch_btree_init_next - initialize a new (unwritten) bset that can then be
* inserted into
EBUG_ON(!(b->c.lock.state.seq & 1));
BUG_ON(bset_written(b, bset(b, &b->set[1])));
+ BUG_ON(btree_node_just_written(b));
if (b->nsets == MAX_BSETS &&
- !btree_node_write_in_flight(b)) {
- unsigned log_u64s[] = {
- ilog2(bset_u64s(&b->set[0])),
- ilog2(bset_u64s(&b->set[1])),
- ilog2(bset_u64s(&b->set[2])),
- };
-
- if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
- bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
- reinit_iter = true;
- }
+ !btree_node_write_in_flight(b) &&
+ should_compact_all(c, b)) {
+ bch2_btree_node_write(c, b, SIX_LOCK_write,
+ BTREE_WRITE_init_next_bset);
+ reinit_iter = true;
}
if (b->nsets == MAX_BSETS &&
if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
bch2_latency_acct(ca, rb->start_time, READ);
}
if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
bch2_latency_acct(ca, rb->start_time, READ);
}
{
struct btree_write *w = btree_prev_write(b);
unsigned long old, new, v;
+ unsigned type = 0;
bch2_btree_complete_write(c, b, w);
new |= (1U << BTREE_NODE_write_in_flight_inner);
new |= (1U << BTREE_NODE_just_written);
new ^= (1U << BTREE_NODE_write_idx);
+
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
} else {
new &= ~(1U << BTREE_NODE_write_in_flight);
new &= ~(1U << BTREE_NODE_write_in_flight_inner);
} while ((v = cmpxchg(&b->flags, old, new)) != old);
if (new & (1U << BTREE_NODE_write_in_flight))
- __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
else
wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
ptr->offset += wbio->sector_offset;
- bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
+ bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+ &tmp.k, false);
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
bool used_mempool;
unsigned long old, new;
bool validate_before_checksum = false;
+ enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
void *data;
int ret;
if (old & (1 << BTREE_NODE_write_in_flight))
return;
+ if (flags & BTREE_WRITE_ONLY_IF_NEED)
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
+
new &= ~(1 << BTREE_NODE_dirty);
new &= ~(1 << BTREE_NODE_need_write);
new |= (1 << BTREE_NODE_write_in_flight);
if (new & (1U << BTREE_NODE_need_write))
return;
do_write:
+ BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
atomic_dec(&c->btree_cache.dirty);
BUG_ON(btree_node_fake(b));
bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
cpu_to_le16(b->written);
- atomic64_inc(&c->btree_writes_nr);
- atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+ atomic64_inc(&c->btree_write_stats[type].nr);
+ atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
INIT_WORK(&wbio->work, btree_write_submit);
queue_work(c->io_complete_wq, &wbio->work);
{
return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
+
+const char * const bch2_btree_write_types[] = {
+#define x(t, n) [n] = #t,
+ BCH_BTREE_WRITE_TYPES()
+ NULL
+};
+
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ printbuf_tabstop_push(out, 20);
+ printbuf_tabstop_push(out, 10);
+
+ prt_tab(out);
+ prt_str(out, "nr");
+ prt_tab(out);
+ prt_str(out, "size");
+ prt_newline(out);
+
+ for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+ u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
+ u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
+
+ prt_printf(out, "%s:", bch2_btree_write_types[i]);
+ prt_tab(out);
+ prt_u64(out, nr);
+ prt_tab(out);
+ prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+ prt_newline(out);
+ }
+}
bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
-#define BTREE_WRITE_ONLY_IF_NEED (1U << 0)
-#define BTREE_WRITE_ALREADY_STARTED (1U << 1)
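+/* The write type occupies the low BTREE_WRITE_TYPE_BITS bits; flag bits start above it: */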
+enum btree_write_flags {
+ __BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
+ __BTREE_WRITE_ALREADY_STARTED,
+};
+#define BTREE_WRITE_ONLY_IF_NEED (1U << __BTREE_WRITE_ONLY_IF_NEED )
+#define BTREE_WRITE_ALREADY_STARTED (1U << __BTREE_WRITE_ALREADY_STARTED)
void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
void bch2_btree_node_write(struct bch_fs *, struct btree *,
bn->min_key = bpos_nosnap_successor(bn->min_key);
}
+void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
+
#endif /* _BCACHEFS_BTREE_IO_H */
if (p) {
struct bkey uk = bkey_unpack_key(l->b, p);
+
bch2_bkey_to_text(&buf2, &uk);
} else {
prt_printf(&buf2, "(none)");
if (k) {
struct bkey uk = bkey_unpack_key(l->b, k);
+
bch2_bkey_to_text(&buf3, &uk);
} else {
prt_printf(&buf3, "(none)");
bch2_btree_node_iter_peek(&l->iter, l->b);
}
-inline void bch2_btree_path_level_init(struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
+void bch2_btree_path_level_init(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
{
BUG_ON(path->cached);
unsigned long trace_ip)
{
unsigned depth_want = path->level;
- int ret = trans->restarted;
+ int ret = -((int) trans->restarted);
if (unlikely(ret))
goto out;
btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
-static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
+static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
struct btree_path *src)
{
unsigned i, offset = offsetof(struct btree_path, pos);
- int cmp = btree_path_cmp(dst, src);
memcpy((void *) dst + offset,
(void *) src + offset,
if (t != BTREE_NODE_UNLOCKED)
six_lock_increment(&dst->l[i].b->c.lock, t);
}
-
- if (cmp)
- bch2_btree_path_check_sort_fast(trans, dst, cmp);
}
static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
return new;
}
+__flatten
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
struct btree_path *path, bool intent,
unsigned long ip)
{
- if (path->ref > 1 || path->preserve) {
- __btree_path_put(path, intent);
- path = btree_path_clone(trans, path, intent);
- path->preserve = false;
+ __btree_path_put(path, intent);
+ path = btree_path_clone(trans, path, intent);
+ path->preserve = false;
#ifdef CONFIG_BCACHEFS_DEBUG
- path->ip_allocated = ip;
+ path->ip_allocated = ip;
#endif
- btree_trans_verify_sorted(trans);
- }
-
- path->should_be_locked = false;
+ btree_trans_verify_sorted(trans);
return path;
}
struct btree_path * __must_check
-bch2_btree_path_set_pos(struct btree_trans *trans,
+__bch2_btree_path_set_pos(struct btree_trans *trans,
struct btree_path *path, struct bpos new_pos,
- bool intent, unsigned long ip)
+ bool intent, unsigned long ip, int cmp)
{
- int cmp = bpos_cmp(new_pos, path->pos);
unsigned l = path->level;
EBUG_ON(trans->restarted);
EBUG_ON(!path->ref);
- if (!cmp)
- return path;
-
path = bch2_btree_path_make_mut(trans, path, intent, ip);
path->pos = new_pos;
return path;
}
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
struct btree_path_level *l = path_l(path);
if (bpos_cmp(start_pos, iter->journal_pos) < 0)
iter->journal_idx = 0;
- k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
+ k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
+ iter->path->level,
start_pos, end_pos,
&iter->journal_idx);
{
struct bkey_i *next_journal =
bch2_btree_journal_peek(trans, iter, iter->path->pos,
- k.k ? k.k->p : iter->path->l[0].b->key.k.p);
+ k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
if (next_journal) {
iter->k = next_journal->k;
btree_path_verify_sorted_ref(trans, r);
}
+static inline struct btree_path *sib_btree_path(struct btree_trans *trans,
+ struct btree_path *path, int sib)
+{
+ unsigned idx = (unsigned) path->sorted_idx + sib;
+
+ EBUG_ON(sib != -1 && sib != 1);
+
+ return idx < trans->nr_sorted
+ ? trans->paths + trans->sorted[idx]
+ : NULL;
+}
+
static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *trans,
struct btree_path *path,
int cmp)
EBUG_ON(!cmp);
- while ((n = cmp < 0
- ? prev_btree_path(trans, path)
- : next_btree_path(trans, path)) &&
+ while ((n = sib_btree_path(trans, path, cmp)) &&
(cmp2 = btree_path_cmp(n, path)) &&
cmp2 != cmp)
btree_path_swap(trans, n, path);
bch2_trans_alloc_paths(trans, c);
s = btree_trans_stats(trans);
- if (s) {
+ if (s && s->max_mem) {
unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
} else {
trans->mem_bytes = expected_mem_bytes;
}
-
- trans->nr_max_paths = s->nr_max_paths;
}
+ if (s)
+ trans->nr_max_paths = s->nr_max_paths;
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
}
struct btree_path * __must_check
-bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
- struct bpos, bool, unsigned long);
+__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
+ struct bpos, bool, unsigned long, int);
+
+static inline struct btree_path * __must_check
+bch2_btree_path_set_pos(struct btree_trans *trans,
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip)
+{
+ int cmp = bpos_cmp(new_pos, path->pos);
+
+ return cmp
+ ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
+ : path;
+}
+
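bch2_btree_path_set_pos() becomes a header inline that only does the position compare; the out-of-line __bch2_btree_path_set_pos() runs just when the position actually changes, so the no-op case costs a single bpos_cmp(). A standalone sketch of that fast-path/slow-path split, with illustrative names rather than bcachefs APIs:

struct path { long pos; };

/* Slow path: only reached when the position really changes. */
static long __path_set_pos(struct path *p, long new_pos, int cmp)
{
	/* the real code also re-sorts and invalidates the traversal here */
	p->pos = new_pos;
	return cmp;
}

/* Fast path: a single compare, no call when nothing changes. */
static inline long path_set_pos(struct path *p, long new_pos)
{
	int cmp = (new_pos > p->pos) - (new_pos < p->pos);

	return cmp ? __path_set_pos(p, new_pos, cmp) : 0;
}
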
int __must_check bch2_btree_path_traverse(struct btree_trans *,
struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
unsigned, unsigned, unsigned, unsigned long);
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
struct btree_iter *, struct bpos);
-inline void bch2_btree_path_level_init(struct btree_trans *,
- struct btree_path *, struct btree *);
+void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
return b;
}
+/*
+ * XXX
+ * this does not handle transaction restarts from bch2_btree_iter_next_node()
+ * correctly
+ */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
_locks_want, _depth, _flags, _b, _ret) \
for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
\
while (1) { \
u32 _restart_count = bch2_trans_begin(_trans); \
+ \
+ _ret = 0; \
(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags)); \
- if (!(_k).k) { \
- _ret = 0; \
+ if (!(_k).k) \
break; \
- } \
\
_ret = bkey_err(_k) ?: (_do); \
if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
six_unlock_intent(&ck->c.lock);
}
+#ifdef __KERNEL__
static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
struct bkey_cached *ck)
{
list_move(&ck->list, &bc->freed_nonpcpu);
}
+#endif
static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
struct bkey_cached *ck)
{
- struct btree_key_cache_freelist *f;
- bool freed = false;
-
BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
if (!ck->c.lock.readers) {
#ifdef __KERNEL__
+ struct btree_key_cache_freelist *f;
+ bool freed = false;
+
preempt_disable();
f = this_cpu_ptr(bc->pcpu_freed);
}
static struct bkey_cached *
-bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
+bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
+ bool *was_new)
{
struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache;
struct bkey_cached *ck = NULL;
- struct btree_key_cache_freelist *f;
bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
if (!pcpu_readers) {
#ifdef __KERNEL__
+ struct btree_key_cache_freelist *f;
+
preempt_disable();
f = this_cpu_ptr(bc->pcpu_freed);
if (f->nr)
ck->c.cached = true;
BUG_ON(!six_trylock_intent(&ck->c.lock));
BUG_ON(!six_trylock_write(&ck->c.lock));
+ *was_new = true;
return ck;
}
struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache;
struct bkey_cached *ck;
- bool was_new = true;
+ bool was_new = false;
- ck = bkey_cached_alloc(trans, path);
+ ck = bkey_cached_alloc(trans, path, &was_new);
if (IS_ERR(ck))
return ck;
}
mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
- was_new = false;
} else {
if (path->btree_id == BTREE_ID_subvolumes)
six_lock_pcpu_alloc(&ck->c.lock);
if (likely(was_new)) {
six_unlock_write(&ck->c.lock);
six_unlock_intent(&ck->c.lock);
- mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
kfree(ck);
} else {
bkey_cached_free_fast(bc, ck);
}
+ mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
return NULL;
}
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
#define _BCACHEFS_BTREE_KEY_CACHE_H
}
if (unlikely(!best)) {
- struct bch_fs *c = g->g->trans->c;
struct printbuf buf = PRINTBUF;
- bch_err(c, "cycle of nofail locks");
+ prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
for (i = g->g; i < g->g + g->nr; i++) {
struct btree_trans *trans = i->trans;
struct btree_bkey_cached_common *b)
{
int ret = __btree_node_lock_write(trans, path, b, true);
+
BUG_ON(ret);
}
struct bkey_cached_key {
u32 btree_id;
struct bpos pos;
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
#define BKEY_CACHED_ACCESSED 0
#define BKEY_CACHED_DIRTY 1
struct replicas_delta_list *fs_usage_deltas;
};
+#define BCH_BTREE_WRITE_TYPES() \
+ x(initial, 0) \
+ x(init_next_bset, 1) \
+ x(cache_reclaim, 2) \
+ x(journal_reclaim, 3) \
+ x(interior, 4)
+
+enum btree_write_type {
+#define x(t, n) BTREE_WRITE_##t,
+ BCH_BTREE_WRITE_TYPES()
+#undef x
+ BTREE_WRITE_TYPE_NR,
+};
+
+#define BTREE_WRITE_TYPE_MASK (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
+#define BTREE_WRITE_TYPE_BITS ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
+
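BTREE_WRITE_TYPE_MASK/BITS reserve the low bits of b->flags for the write type, with the ordinary node flags starting above them; callers elsewhere in this series replace the type field and set need_write in one compare-exchange loop. A userspace sketch of that read-modify-write using C11 atomics in place of the kernel's cmpxchg(), illustrative only:

#include <stdatomic.h>

#define TYPE_BITS	3
#define TYPE_MASK	((1UL << TYPE_BITS) - 1)
#define NEED_WRITE	(1UL << TYPE_BITS)	/* first flag above the type field */

static void set_write_type(_Atomic unsigned long *flags, unsigned long type)
{
	unsigned long old = atomic_load(flags), new;

	do {
		new = (old & ~TYPE_MASK) | type | NEED_WRITE;
	} while (!atomic_compare_exchange_weak(flags, &old, new));
}
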
#define BTREE_FLAGS() \
x(read_in_flight) \
x(read_error) \
x(never_write)
enum btree_flags {
+ /* First bits for btree node write type */
+ BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag) BTREE_NODE_##flag,
BTREE_FLAGS()
#undef x
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve;
enum alloc_reserve alloc_reserve;
+ int ret;
if (flags & BTREE_INSERT_USE_RESERVE) {
nr_reserve = 0;
mutex_unlock(&c->btree_reserve_cache_lock);
retry:
- wp = bch2_alloc_sectors_start_trans(trans,
+ ret = bch2_alloc_sectors_start_trans(trans,
c->opts.metadata_target ?:
c->opts.foreground_target,
0,
&devs_have,
res->nr_replicas,
c->opts.metadata_replicas_required,
- alloc_reserve, 0, cl);
- if (IS_ERR(wp))
- return ERR_CAST(wp);
+ alloc_reserve, 0, cl, &wp);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
if (wp->sectors_free < btree_sectors(c)) {
struct open_bucket *ob;
b->data->max_key = pos;
}
-struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b,
- struct bkey_format format)
+static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b)
{
- struct btree *n;
+ struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
+ struct bkey_format format = bch2_btree_calc_format(b);
- n = bch2_btree_node_alloc(as, trans, b->c.level);
+ /*
+ * The keys might expand with the new format - if they wouldn't fit in
+ * the btree node anymore, use the old format for now:
+ */
+ if (!bch2_btree_node_format_fits(as->c, b, &format))
+ format = b->format;
SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
bch2_btree_sort_into(as->c, n, b);
btree_node_reset_sib_u64s(n);
-
- n->key.k.p = b->key.k.p;
return n;
}
-static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b)
-{
- struct bkey_format new_f = bch2_btree_calc_format(b);
-
- /*
- * The keys might expand with the new format - if they wouldn't fit in
- * the btree node anymore, use the old format for now:
- */
- if (!bch2_btree_node_format_fits(as->c, b, &new_f))
- new_f = b->format;
-
- return __bch2_btree_node_alloc_replacement(as, trans, b, new_f);
-}
-
static struct btree *__btree_root_alloc(struct btree_update *as,
struct btree_trans *trans, unsigned level)
{
bch2_trans_unlock(&trans);
bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
- "error %i in btree_update_nodes_written()", ret);
+ "%s(): error %s", __func__, bch2_err_str(ret));
err:
if (as->b) {
struct btree_path *path;
}
if (ret) {
- trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
+ trace_and_count(c, btree_reserve_get_fail, trans->fn,
+ _RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
goto err;
}
struct bch_fs *c = as->c;
struct bkey_packed *k;
struct printbuf buf = PRINTBUF;
+ unsigned long old, new, v;
BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
!btree_ptr_sectors_written(insert));
bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
set_btree_node_dirty_acct(c, b);
- set_btree_node_need_write(b);
+
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ new |= BTREE_WRITE_interior;
+ new |= 1 << BTREE_NODE_need_write;
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
printbuf_exit(&buf);
}
;
while (!bch2_keylist_empty(keys)) {
- bch2_insert_fixup_btree_ptr(as, trans, path, b,
- &node_iter, bch2_keylist_front(keys));
+ struct bkey_i *k = bch2_keylist_front(keys);
+
+ if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+ break;
+
+ bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
bch2_keylist_pop_front(keys);
}
}
* Move keys from n1 (original replacement node, now lower node) to n2 (higher
* node)
*/
-static struct btree *__btree_split_node(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *n1)
+static void __btree_split_node(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b,
+ struct btree *n[2])
{
- struct bkey_format_state s;
- size_t nr_packed = 0, nr_unpacked = 0;
- struct btree *n2;
- struct bset *set1, *set2;
- struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL;
+ struct bkey_packed *k;
struct bpos n1_pos;
+ struct btree_node_iter iter;
+ struct bset *bsets[2];
+ struct bkey_format_state format[2];
+ struct bkey_packed *out[2];
+ struct bkey uk;
+ unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+ int i;
- n2 = bch2_btree_node_alloc(as, trans, n1->c.level);
+ for (i = 0; i < 2; i++) {
+ BUG_ON(n[i]->nsets != 1);
- n2->data->max_key = n1->data->max_key;
- n2->data->format = n1->format;
- SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
- n2->key.k.p = n1->key.k.p;
+ bsets[i] = btree_bset_first(n[i]);
+ out[i] = bsets[i]->start;
- set1 = btree_bset_first(n1);
- set2 = btree_bset_first(n2);
+ SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
+ bch2_bkey_format_init(&format[i]);
+ }
- /*
- * Has to be a linear search because we don't have an auxiliary
- * search tree yet
- */
- k = set1->start;
- while (1) {
- struct bkey_packed *n = bkey_next(k);
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
+
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
+ uk = bkey_unpack_key(b, k);
+ if (!i)
+ n1_pos = uk.p;
+ bch2_bkey_format_add_key(&format[i], &uk);
+ }
- if (n == vstruct_last(set1))
- break;
- if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
- break;
+ btree_set_min(n[0], b->data->min_key);
+ btree_set_max(n[0], n1_pos);
+ btree_set_min(n[1], bpos_successor(n1_pos));
+ btree_set_max(n[1], b->data->max_key);
- if (bkey_packed(k))
- nr_packed++;
- else
- nr_unpacked++;
+ for (i = 0; i < 2; i++) {
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
- prev = k;
- k = n;
+ n[i]->data->format = bch2_bkey_format_done(&format[i]);
+ btree_node_set_format(n[i], n[i]->data->format);
}
- BUG_ON(!prev);
- set2_start = k;
- set2_end = vstruct_last(set1);
-
- set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data);
- set_btree_bset_end(n1, n1->set);
-
- n1->nr.live_u64s = le16_to_cpu(set1->u64s);
- n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s);
- n1->nr.packed_keys = nr_packed;
- n1->nr.unpacked_keys = nr_unpacked;
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
- n1_pos = bkey_unpack_pos(n1, prev);
- if (as->c->sb.version < bcachefs_metadata_version_snapshot)
- n1_pos.snapshot = U32_MAX;
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
- btree_set_max(n1, n1_pos);
- btree_set_min(n2, bpos_successor(n1->key.k.p));
+ if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
+					? &b->format : &bch2_bkey_format_current, k))
+ out[i]->format = KEY_FORMAT_LOCAL_BTREE;
+ else
+ bch2_bkey_unpack(b, (void *) out[i], k);
- bch2_bkey_format_init(&s);
- bch2_bkey_format_add_pos(&s, n2->data->min_key);
- bch2_bkey_format_add_pos(&s, n2->data->max_key);
+ out[i]->needs_whiteout = false;
- for (k = set2_start; k != set2_end; k = bkey_next(k)) {
- struct bkey uk = bkey_unpack_key(n1, k);
- bch2_bkey_format_add_key(&s, &uk);
+ btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
+ out[i] = bkey_next(out[i]);
}
- n2->data->format = bch2_bkey_format_done(&s);
- btree_node_set_format(n2, n2->data->format);
-
- out = set2->start;
- memset(&n2->nr, 0, sizeof(n2->nr));
-
- for (k = set2_start; k != set2_end; k = bkey_next(k)) {
- BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k)
- ? &n1->format : &bch2_bkey_format_current, k));
- out->format = KEY_FORMAT_LOCAL_BTREE;
- btree_keys_account_key_add(&n2->nr, 0, out);
- out = bkey_next(out);
- }
+ for (i = 0; i < 2; i++) {
+ bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);
- set2->u64s = cpu_to_le16((u64 *) out - set2->_data);
- set_btree_bset_end(n2, n2->set);
+ BUG_ON(!bsets[i]->u64s);
- BUG_ON(!set1->u64s);
- BUG_ON(!set2->u64s);
+ set_btree_bset_end(n[i], n[i]->set);
- btree_node_reset_sib_u64s(n1);
- btree_node_reset_sib_u64s(n2);
+ btree_node_reset_sib_u64s(n[i]);
- bch2_verify_btree_nr_keys(n1);
- bch2_verify_btree_nr_keys(n2);
+ bch2_verify_btree_nr_keys(n[i]);
- if (n1->c.level) {
- btree_node_interior_verify(as->c, n1);
- btree_node_interior_verify(as->c, n2);
+ if (b->c.level)
+ btree_node_interior_verify(as->c, n[i]);
}
-
- return n2;
}
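
The rewritten __btree_split_node() makes two passes over the live keys of b: the first picks the pivot at roughly 3/5 of the live u64s and builds a packed-key format for each half, the second repacks every key into whichever of n[0]/n[1] it falls in. The pivot choice, sketched standalone on a plain weighted array (illustrative, not the btree code):

#include <stddef.h>

/* Return how many items go to the first node: cut at ~3/5 of total weight. */
static size_t split_at_three_fifths(const unsigned *weight, size_t nr)
{
	size_t total = 0, acc = 0, i;

	for (i = 0; i < nr; i++)
		total += weight[i];

	for (i = 0; i < nr; i++) {
		if (acc >= total * 3 / 5)
			break;
		acc += weight[i];
	}

	return i;	/* items [0, i) -> first node, [i, nr) -> second */
}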
/*
struct btree *b,
struct keylist *keys)
{
- struct btree_node_iter node_iter;
- struct bkey_i *k = bch2_keylist_front(keys);
- struct bkey_packed *src, *dst, *n;
- struct bset *i;
+ if (!bch2_keylist_empty(keys) &&
+ bpos_cmp(bch2_keylist_front(keys)->k.p,
+ b->data->max_key) <= 0) {
+ struct btree_node_iter node_iter;
- bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
+ bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
- __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
- /*
- * We can't tolerate whiteouts here - with whiteouts there can be
- * duplicate keys, and it would be rather bad if we picked a duplicate
- * for the pivot:
- */
- i = btree_bset_first(b);
- src = dst = i->start;
- while (src != vstruct_last(i)) {
- n = bkey_next(src);
- if (!bkey_deleted(src)) {
- memmove_u64s_down(dst, src, src->u64s);
- dst = bkey_next(dst);
- }
- src = n;
+ btree_node_interior_verify(as->c, b);
}
-
- /* Also clear out the unwritten whiteouts area: */
- b->whiteout_u64s = 0;
-
- i->u64s = cpu_to_le16((u64 *) dst - i->_data);
- set_btree_bset_end(b, b->set);
-
- BUG_ON(b->nsets != 1 ||
- b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
-
- btree_node_interior_verify(as->c, b);
}
static int btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_interior_update_will_free_node(as, b);
- n1 = bch2_btree_node_alloc_replacement(as, trans, b);
-
- if (keys)
- btree_split_insert_keys(as, trans, path, n1, keys);
+ if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
+ struct btree *n[2];
- if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
trace_and_count(c, btree_node_split, c, b);
- n2 = __btree_split_node(as, trans, n1);
+ n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
+ n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
+
+ __btree_split_node(as, trans, b, n);
+
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ btree_split_insert_keys(as, trans, path, n2, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
} else {
trace_and_count(c, btree_node_compact, c, b);
+ n1 = bch2_btree_node_alloc_replacement(as, trans, b);
+
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
+
bch2_btree_build_aux_trees(n1);
bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);
bch2_bpos_to_text(&buf1, prev->data->max_key);
bch2_bpos_to_text(&buf2, next->data->min_key);
bch_err(c,
- "btree topology error in btree merge:\n"
+ "%s(): btree topology error:\n"
" prev ends at %s\n"
" next starts at %s",
- buf1.buf, buf2.buf);
+ __func__, buf1.buf, buf2.buf);
printbuf_exit(&buf1);
printbuf_exit(&buf2);
bch2_topology_error(c);
struct bkey_packed k;
BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+ EBUG_ON(btree_node_just_written(b));
if (!bkey_pack_pos(&k, pos, b)) {
struct bkey *u = (void *) &k;
w->journal.seq != seq)
break;
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ new |= BTREE_WRITE_journal_reclaim;
new |= 1 << BTREE_NODE_need_write;
} while ((v = cmpxchg(&b->flags, old, new)) != old);
return 0;
}
-static inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
return ret;
}
+static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
+{
+ while (--i >= trans->updates) {
+ if (same_leaf_as_prev(trans, i))
+ continue;
+
+ bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
+ }
+
+ trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
+}
+
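trans_lock_write() keeps the common case straight-line by pushing the failure handling into the noinline trans_lock_write_fail(), which walks back over the updates already locked and drops their write locks before restarting the transaction. The take-in-order, release-in-reverse-on-failure shape, sketched standalone with trivial locks (illustrative only):

#include <stdbool.h>
#include <stddef.h>

struct lk { bool held; };

static bool try_lock(struct lk *l) { if (l->held) return false; l->held = true; return true; }
static void unlock(struct lk *l)   { l->held = false; }

/* Returns true if every lock was taken; on failure nothing is left held. */
static bool lock_all(struct lk *locks, size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++)
		if (!try_lock(&locks[i])) {
			while (i--)
				unlock(&locks[i]);
			return false;
		}
	return true;
}
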
static inline int trans_lock_write(struct btree_trans *trans)
{
struct btree_insert_entry *i;
- int ret;
trans_for_each_update(trans, i) {
if (same_leaf_as_prev(trans, i))
continue;
- ret = bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c);
- if (ret)
- goto fail;
+ if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
+ return trans_lock_write_fail(trans, i);
bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
}
return 0;
-fail:
- while (--i >= trans->updates) {
- if (same_leaf_as_prev(trans, i))
- continue;
-
- bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
- }
-
- trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}
static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
}
+static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
+ struct btree_insert_entry *i,
+ struct printbuf *err)
+{
+ struct bch_fs *c = trans->c;
+ int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
+
+ printbuf_reset(err);
+ prt_printf(err, "invalid bkey on insert from %s -> %ps",
+ trans->fn, (void *) i->ip_allocated);
+ prt_newline(err);
+ printbuf_indent_add(err, 2);
+
+ bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
+ prt_newline(err);
+
+ bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, rw, err);
+ bch2_print_string_as_lines(KERN_ERR, err->buf);
+
+ bch2_inconsistent_error(c);
+ bch2_dump_trans_updates(trans);
+ printbuf_exit(err);
+
+ return -EINVAL;
+}
+
/*
* Get journal reservation, take write locks, and attempt to do btree update(s):
*/
int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
trans_for_each_update(trans, i) {
- if (bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
- i->bkey_type, rw, &buf)) {
- printbuf_reset(&buf);
- prt_printf(&buf, "invalid bkey on insert from %s -> %ps",
- trans->fn, (void *) i->ip_allocated);
- prt_newline(&buf);
- printbuf_indent_add(&buf, 2);
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
- prt_newline(&buf);
-
- bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
- i->bkey_type, rw, &buf);
-
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- printbuf_exit(&buf);
- return -EINVAL;
- }
+ if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, rw, &buf)))
+ return bch2_trans_commit_bkey_invalid(trans, i, &buf);
btree_insert_entry_checks(trans, i);
}
-((s64) old_a.cached_sectors),
journal_seq, gc);
if (ret) {
- bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
+ bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+ __func__);
return ret;
}
}
ret = update_cached_sectors(c, k, p.ptr.dev,
disk_sectors, journal_seq, true);
if (ret) {
- bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
+ bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+ __func__);
return ret;
}
}
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
- bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+ bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
printbuf_exit(&buf);
return ret;
}
struct btree_insert_entry *i;
struct printbuf buf = PRINTBUF;
- bch_err(c, "disk usage increased %lli more than %u sectors reserved",
- should_not_have_added, disk_res_sectors);
+ prt_printf(&buf,
+		   bch2_fmt(c, "disk usage increased %lli more than %u sectors reserved"),
+ should_not_have_added, disk_res_sectors);
trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };
- pr_err("while inserting");
- printbuf_reset(&buf);
+ prt_str(&buf, "new ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
- pr_err(" %s", buf.buf);
- pr_err("overlapping with");
- printbuf_reset(&buf);
+ prt_newline(&buf);
+
+ prt_str(&buf, "old ");
bch2_bkey_val_to_text(&buf, c, old);
- pr_err(" %s", buf.buf);
+ prt_newline(&buf);
}
__WARN();
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
#define SECTORS_CACHE 1024
-int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
u64 sectors, int flags)
{
struct bch_fs_pcpu *pcpu;
static inline void bch2_disk_reservation_put(struct bch_fs *c,
struct disk_reservation *res)
{
- this_cpu_sub(*c->online_reserved, res->sectors);
- res->sectors = 0;
+ if (res->sectors) {
+ this_cpu_sub(*c->online_reserved, res->sectors);
+ res->sectors = 0;
+ }
}
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
-int bch2_disk_reservation_add(struct bch_fs *,
- struct disk_reservation *,
- u64, int);
+int __bch2_disk_reservation_add(struct bch_fs *,
+ struct disk_reservation *,
+ u64, int);
+
+static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ u64 sectors, int flags)
+{
+#ifdef __KERNEL__
+ u64 old, new;
+
+ do {
+ old = this_cpu_read(c->pcpu->sectors_available);
+ if (sectors > old)
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+
+ new = old - sectors;
+ } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+
+ this_cpu_add(*c->online_reserved, sectors);
+ res->sectors += sectors;
+ return 0;
+#else
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
+}
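The inline bch2_disk_reservation_add() now takes the reservation out of a per-cpu pool with a this_cpu_cmpxchg() loop and only falls back to the out-of-line __bch2_disk_reservation_add() when the pool runs dry. The same fast path sketched in userspace, with one C11 atomic counter standing in for the per-cpu variable (illustrative only):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t sectors_available;

static int reservation_add_slowpath(uint64_t sectors)
{
	/* the real code refills the pool and retries under a lock */
	(void) sectors;
	return -1;
}

static int reservation_add(uint64_t sectors)
{
	uint64_t old = atomic_load(&sectors_available), new;

	do {
		if (sectors > old)
			return reservation_add_slowpath(sectors);
		new = old - sectors;
	} while (!atomic_compare_exchange_weak(&sectors_available, &old, new));

	return 0;
}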
static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
#include "bcachefs.h"
#include "buckets_waiting_for_journal.h"
#include <linux/random.h>
+#include <linux/siphash.h>
static inline struct bucket_hashed *
bucket_hash(struct buckets_waiting_for_journal_table *t,
{
unsigned h = siphash_1u64(dev_bucket, &t->hash_seeds[hash_seed_idx]);
- BUG_ON(!is_power_of_2(t->size));
+ EBUG_ON(!is_power_of_2(t->size));
return t->d + (h & (t->size - 1));
}
return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}
-int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
struct nonce nonce, struct bio *bio)
{
struct bio_vec bv;
struct bch_extent_crc_unpacked *,
unsigned, unsigned, unsigned);
-int bch2_encrypt_bio(struct bch_fs *, unsigned,
- struct nonce, struct bio *);
+int __bch2_encrypt_bio(struct bch_fs *, unsigned,
+ struct nonce, struct bio *);
+
+static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
+{
+ return bch2_csum_type_is_encryption(type)
+ ? __bch2_encrypt_bio(c, type, nonce, bio)
+ : 0;
+}
int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
struct bch_key *);
{
switch (type) {
case BCH_CSUM_OPT_none:
- return BCH_CSUM_none;
+ return BCH_CSUM_none;
case BCH_CSUM_OPT_crc32c:
- return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
+ return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
case BCH_CSUM_OPT_crc64:
- return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
+ return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
case BCH_CSUM_OPT_xxhash:
- return BCH_CSUM_xxhash;
+ return BCH_CSUM_xxhash;
default:
- BUG();
+ BUG();
}
}
static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
- unsigned opt)
+ struct bch_io_opts opts)
{
+ if (opts.nocow)
+ return 0;
+
if (c->sb.encryption_type)
return c->opts.wide_macs
? BCH_CSUM_chacha20_poly1305_128
: BCH_CSUM_chacha20_poly1305_80;
- return bch2_csum_opt_to_type(opt, true);
+ return bch2_csum_opt_to_type(opts.data_checksum, true);
}
static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
ptr->cached = true;
}
-static int bch2_data_update_index_update(struct bch_write_op *op)
+static int __bch2_data_update_index_update(struct btree_trans *trans,
+ struct bch_write_op *op)
{
struct bch_fs *c = op->c;
- struct btree_trans trans;
struct btree_iter iter;
struct data_update *m =
container_of(op, struct data_update, op);
bch2_bkey_buf_init(&_insert);
bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
- bch2_trans_iter_init(&trans, &iter, m->btree_id,
+ bch2_trans_iter_init(trans, &iter, m->btree_id,
bkey_start_pos(&bch2_keylist_front(keys)->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
unsigned i;
- bch2_trans_begin(&trans);
+ bch2_trans_begin(trans);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
bch2_extent_normalize(c, bkey_i_to_s(insert));
- ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
+ ret = bch2_sum_sector_overwrites(trans, &iter, insert,
&should_check_enospc,
&i_sectors_delta,
&disk_sectors_delta);
next_pos = insert->k.p;
- ret = insert_snapshot_whiteouts(&trans, m->btree_id,
+ ret = insert_snapshot_whiteouts(trans, m->btree_id,
k.k->p, insert->k.p) ?:
- bch2_trans_update(&trans, &iter, insert,
+ bch2_trans_update(trans, &iter, insert,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
- bch2_trans_commit(&trans, &op->res,
- op_journal_seq(op),
+ bch2_trans_commit(trans, &op->res,
+ NULL,
BTREE_INSERT_NOFAIL|
m->data_opts.btree_insert_flags);
if (!ret) {
goto next;
}
out:
- bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
+ bch2_trans_iter_exit(trans, &iter);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
}
+int bch2_data_update_index_update(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_trans trans;
+ int ret;
+
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+ ret = __bch2_data_update_index_update(&trans, op);
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
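bch2_data_update_index_update() is now a thin wrapper that owns the btree_trans, with the body moved into __bch2_data_update_index_update() taking the transaction as a parameter so bch2_update_unwritten_extent() below can reuse it inside an existing transaction. The shape of the refactor, sketched with illustrative names:

struct trans { int dummy; };

static void trans_init(struct trans *t) { t->dummy = 0; }
static void trans_exit(struct trans *t) { (void) t; }

/* Core logic: callable from any context that already holds a transaction. */
static int do_update(struct trans *t) { (void) t; return 0; }

/* Convenience wrapper for callers without a transaction of their own. */
static int update(void)
{
	struct trans t;
	int ret;

	trans_init(&t);
	ret = do_update(&t);
	trans_exit(&t);

	return ret;
}
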
void bch2_data_update_read_done(struct data_update *m,
- struct bch_extent_crc_unpacked crc,
- struct closure *cl)
+ struct bch_extent_crc_unpacked crc)
{
/* write bio must own pages: */
BUG_ON(!m->op.wbio.bio.bi_vcnt);
m->op.crc = crc;
m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
- closure_call(&m->op.cl, bch2_write, NULL, cl);
+ closure_call(&m->op.cl, bch2_write, NULL, NULL);
}
void bch2_data_update_exit(struct data_update *update)
{
struct bch_fs *c = update->op.c;
+ struct bkey_ptrs_c ptrs =
+ bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr), 0);
bch2_bkey_buf_exit(&update->k, c);
bch2_disk_reservation_put(c, &update->op.res);
bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
+void bch2_update_unwritten_extent(struct btree_trans *trans,
+ struct data_update *update)
+{
+ struct bch_fs *c = update->op.c;
+ struct bio *bio = &update->op.wbio.bio;
+ struct bkey_i_extent *e;
+ struct write_point *wp;
+ struct bch_extent_ptr *ptr;
+ struct closure cl;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ closure_init_stack(&cl);
+ bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
+
+ while (bio_sectors(bio)) {
+ unsigned sectors = bio_sectors(bio);
+
+ bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
+ BTREE_ITER_SLOTS);
+ ret = lockrestart_do(trans, ({
+ k = bch2_btree_iter_peek_slot(&iter);
+ bkey_err(k);
+ }));
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
+ break;
+
+ e = bkey_extent_init(update->op.insert_keys.top);
+ e->k.p = update->op.pos;
+
+ ret = bch2_alloc_sectors_start_trans(trans,
+ update->op.target,
+ false,
+ update->op.write_point,
+ &update->op.devs_have,
+ update->op.nr_replicas,
+ update->op.nr_replicas,
+ update->op.alloc_reserve,
+ 0, &cl, &wp);
+ if (ret == -EAGAIN) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ continue;
+ }
+
+ if (ret)
+ return;
+
+ sectors = min(sectors, wp->sectors_free);
+
+ bch2_key_resize(&e->k, sectors);
+
+ bch2_open_bucket_get(c, wp, &update->op.open_buckets);
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+ bch2_alloc_sectors_done(c, wp);
+
+ bio_advance(bio, sectors << 9);
+ update->op.pos.offset += sectors;
+
+ extent_for_each_ptr(extent_i_to_s(e), ptr)
+ ptr->unwritten = true;
+ bch2_keylist_push(&update->op.insert_keys);
+
+ ret = __bch2_data_update_index_update(trans, &update->op);
+
+ bch2_open_buckets_put(c, &update->op.open_buckets);
+
+ if (ret)
+ break;
+ }
+
+ if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ }
+}
+
int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
struct write_point_specifier wp,
struct bch_io_opts io_opts,
m->op.flags |= BCH_WRITE_PAGES_STABLE|
BCH_WRITE_PAGES_OWNED|
BCH_WRITE_DATA_ENCODED|
- BCH_WRITE_FROM_INTERNAL|
+ BCH_WRITE_MOVE|
m->data_opts.write_flags;
m->op.compression_type =
bch2_compression_opt_to_type[io_opts.background_compression ?:
io_opts.compression];
if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
m->op.alloc_reserve = RESERVE_movinggc;
- m->op.index_update_fn = bch2_data_update_index_update;
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
m->op.incompressible = true;
i++;
+
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
}
if (reserve_sectors) {
hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;
BUG_ON(!m->op.nr_replicas);
+
+ /* Special handling required: */
+ if (bkey_extent_is_unwritten(k))
+ return -BCH_ERR_unwritten_extent_update;
return 0;
}
struct bch_write_op op;
};
+int bch2_data_update_index_update(struct bch_write_op *);
+
void bch2_data_update_read_done(struct data_update *,
- struct bch_extent_crc_unpacked,
- struct closure *);
+ struct bch_extent_crc_unpacked);
void bch2_data_update_exit(struct data_update *);
+void bch2_update_unwritten_extent(struct btree_trans *, struct data_update *);
int bch2_data_update_init(struct bch_fs *, struct data_update *,
struct write_point_specifier,
struct bch_io_opts, struct data_update_opts,
int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_dirent (struct bkey_ops) { \
+#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
.key_invalid = bch2_dirent_invalid, \
.val_to_text = bch2_dirent_to_text, \
-}
+})
struct qstr;
struct file;
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-#define bch2_bkey_ops_stripe (struct bkey_ops) { \
+#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
.key_invalid = bch2_stripe_invalid, \
.val_to_text = bch2_stripe_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_stripe, \
.atomic_trigger = bch2_mark_stripe, \
-}
+})
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
const char *bch2_err_str(int err)
{
const char *errstr;
+
err = abs(err);
BUG_ON(err >= BCH_ERR_MAX);
x(BCH_ERR_fsck, fsck_repair_unimplemented) \
x(BCH_ERR_fsck, fsck_repair_impossible) \
x(0, need_snapshot_cleanup) \
- x(0, need_topology_repair)
+ x(0, need_topology_repair) \
+ x(0, unwritten_extent_update)
enum bch_errcode {
BCH_ERR_START = 2048,
s->nr++;
}
+#ifdef BCACHEFS_LOG_PREFIX
if (!strncmp(fmt, "bcachefs:", 9))
prt_printf(out, bch2_log_msg(c, ""));
+#endif
va_start(args, fmt);
prt_vprintf(out, fmt, args);
/* Does the error handling without logging a message */
void bch2_io_error(struct bch_dev *);
-/* Logs message and handles the error: */
-#define bch2_dev_io_error(ca, fmt, ...) \
-do { \
- printk_ratelimited(KERN_ERR "bcachefs (%s): " fmt, \
- (ca)->name, ##__VA_ARGS__); \
- bch2_io_error(ca); \
-} while (0)
-
-#define bch2_dev_inum_io_error(ca, _inum, _offset, fmt, ...) \
-do { \
- printk_ratelimited(KERN_ERR "bcachefs (%s inum %llu offset %llu): " fmt,\
- (ca)->name, (_inum), (_offset), ##__VA_ARGS__); \
- bch2_io_error(ca); \
-} while (0)
-
#define bch2_dev_io_err_on(cond, ca, ...) \
({ \
bool _ret = (cond); \
\
- if (_ret) \
- bch2_dev_io_error(ca, __VA_ARGS__); \
+ if (_ret) { \
+ bch_err_dev_ratelimited(ca, __VA_ARGS__); \
+ bch2_io_error(ca); \
+ } \
_ret; \
})
-#define bch2_dev_inum_io_err_on(cond, ca, _inum, _offset, ...) \
+#define bch2_dev_inum_io_err_on(cond, ca, ...) \
({ \
bool _ret = (cond); \
\
- if (_ret) \
- bch2_dev_inum_io_error(ca, _inum, _offset, __VA_ARGS__);\
+ if (_ret) { \
+ bch_err_inum_offset_ratelimited(ca, __VA_ARGS__); \
+ bch2_io_error(ca); \
+ } \
_ret; \
})
return -EIO;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ /*
+ * Unwritten extent: no need to actually read, treat it as a
+ * hole and return 0s:
+ */
+ if (p.ptr.unwritten)
+ return 0;
+
ca = bch_dev_bkey_exists(c, p.ptr.dev);
/*
rp.ptr.offset + rp.crc.offset ||
lp.ptr.dev != rp.ptr.dev ||
lp.ptr.gen != rp.ptr.gen ||
+ lp.ptr.unwritten != rp.ptr.unwritten ||
lp.has_ec != rp.has_ec)
return false;
return replicas;
}
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
- struct extent_ptr_decoded p)
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
unsigned durability = 0;
struct bch_dev *ca;
- if (p.ptr.cached)
+ if (p->ptr.cached)
return 0;
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ ca = bch_dev_bkey_exists(c, p->ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_failed)
durability = max_t(unsigned, durability, ca->mi.durability);
- if (p.has_ec)
- durability += p.ec.redundancy;
+ if (p->has_ec)
+ durability += p->ec.redundancy;
return durability;
}
unsigned durability = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, p);
+		durability += bch2_extent_ptr_durability(c, &p);
return durability;
}
*/
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
- struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
- struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
- const union bch_extent_entry *entry1, *entry2;
- struct extent_ptr_decoded p1, p2;
+ if (k1.k->type != k2.k->type)
+ return false;
+
+ if (bkey_extent_is_direct_data(k1.k)) {
+ struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
+ struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
+ const union bch_extent_entry *entry1, *entry2;
+ struct extent_ptr_decoded p1, p2;
- bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
- bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
+ if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
+ return false;
+
+ bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
+ bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
if (p1.ptr.dev == p2.ptr.dev &&
p1.ptr.gen == p2.ptr.gen &&
(s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
(s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
return true;
- return false;
+ return false;
+ } else {
+ /* KEY_TYPE_deleted, etc. */
+ return true;
+ }
}
bool bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1,
u32 offset;
u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
- prt_printf(out, "ptr: %u:%llu:%u gen %u%s", ptr->dev,
- b, offset, ptr->gen,
- ptr->cached ? " cached" : "");
-
+ prt_printf(out, "ptr: %u:%llu:%u gen %u",
+ ptr->dev, b, offset, ptr->gen);
+ if (ptr->cached)
+ prt_str(out, " cached");
+ if (ptr->unwritten)
+ prt_str(out, " unwritten");
if (ca && ptr_stale(ca, ptr))
prt_printf(out, " stale");
}
unsigned size_ondisk = k.k->size;
unsigned nonce = UINT_MAX;
unsigned nr_ptrs = 0;
+ bool unwritten = false;
int ret;
if (bkey_is_btree_ptr(k.k))
false, err);
if (ret)
return ret;
+
+ if (nr_ptrs && unwritten != entry->ptr.unwritten) {
+ prt_printf(err, "extent with unwritten and written ptrs");
+ return -EINVAL;
+ }
+
+ if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
+ prt_printf(err, "has unwritten ptrs");
+ return -EINVAL;
+ }
+
+ unwritten = entry->ptr.unwritten;
nr_ptrs++;
break;
case BCH_EXTENT_ENTRY_crc32:
switch (k.k->type) {
case KEY_TYPE_btree_ptr: {
struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
}
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+
return (struct bkey_ptrs_c) {
e.v->start,
extent_entry_last(e)
}
case KEY_TYPE_stripe: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
return (struct bkey_ptrs_c) {
to_entry(&s.v->ptrs[0]),
to_entry(&s.v->ptrs[s.v->nr_blocks]),
}
case KEY_TYPE_btree_ptr_v2: {
struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
+
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
#define extent_for_each_entry_from(_e, _entry, _start) \
__bkey_extent_entry_for_each_from(_start, \
- extent_entry_last(_e),_entry)
+ extent_entry_last(_e), _entry)
#define extent_for_each_entry(_e, _entry) \
extent_for_each_entry_from(_e, _entry, (_e).v->start)
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
int, struct bkey_s);
-#define bch2_bkey_ops_btree_ptr (struct bkey_ops) { \
+#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_invalid, \
.val_to_text = bch2_btree_ptr_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
-#define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_v2_invalid, \
.val_to_text = bch2_btree_ptr_v2_to_text, \
.swab = bch2_ptr_swab, \
.compat = bch2_btree_ptr_v2_compat, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
/* KEY_TYPE_extent: */
bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_extent (struct bkey_ops) { \
+#define bch2_bkey_ops_extent ((struct bkey_ops) { \
.key_invalid = bch2_bkey_ptrs_invalid, \
.val_to_text = bch2_bkey_ptrs_to_text, \
.swab = bch2_ptr_swab, \
.key_merge = bch2_extent_merge, \
.trans_trigger = bch2_trans_mark_extent, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
/* KEY_TYPE_reservation: */
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_reservation (struct bkey_ops) { \
+#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
.key_invalid = bch2_reservation_invalid, \
.val_to_text = bch2_reservation_to_text, \
.key_merge = bch2_reservation_merge, \
.trans_trigger = bch2_trans_mark_reservation, \
.atomic_trigger = bch2_mark_reservation, \
-}
+})
/* Extent checksum entries: */
}
}
+static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->unwritten)
+ return true;
+ return false;
+}
+
+static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
+{
+ return k.k->type == KEY_TYPE_reservation ||
+ bkey_extent_is_unwritten(k);
+}
+
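bkey_extent_is_unwritten() reports an extent as unwritten if any of its pointers has the unwritten bit set, and bkey_extent_is_reservation() then treats both explicit reservations and unwritten extents as space that is allocated but holds no data yet, which the page-state code later in this series relies on. The any-pointer predicate, sketched with illustrative structs rather than bcachefs types:

#include <stdbool.h>
#include <stddef.h>

struct ptr { bool unwritten; };

struct extent {
	size_t		nr_ptrs;
	struct ptr	ptrs[4];
};

static bool extent_is_unwritten(const struct extent *e)
{
	size_t i;

	for (i = 0; i < e->nr_ptrs; i++)
		if (e->ptrs[i].unwritten)
			return true;
	return false;
}
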
static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
(((p) - (fifo)->data)))
#define fifo_entry_idx(fifo, p) (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
-#define fifo_idx_entry(fifo, i) (fifo)->data[((fifo)->front + (i)) & (fifo)->mask]
+#define fifo_idx_entry(fifo, i) ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
#define fifo_push_back_ref(f) \
(fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
+
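The nocow-flush helpers keep a per-inode bitmap of devices that received nocow writes; a flush snapshots and clears that bitmap, issues one REQ_OP_FLUSH bio per still-referenced device under a closure, and the synchronous caller simply waits on the closure. The snapshot-then-act-per-set-bit shape, sketched standalone without any block-layer calls (illustrative only):

#include <stdint.h>

#define MAX_DEVS 64

/* Stand-in for issuing a flush to one device. */
static void flush_dev(unsigned dev) { (void) dev; }

static void flush_dirty_devs(uint64_t *devs_need_flush)
{
	uint64_t devs = *devs_need_flush;	/* snapshot the pending set... */
	unsigned dev;

	*devs_need_flush = 0;			/* ...and clear it before acting */

	for (dev = 0; dev < MAX_DEVS; dev++)
		if (devs & (1ULL << dev))
			flush_dev(dev);
}
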
static inline bool bio_full(struct bio *bio, unsigned len)
{
if (bio->bi_vcnt >= bio->bi_max_vecs)
};
struct bch_writepage_io {
- struct closure cl;
struct bch_inode_info *inode;
/* must be last: */
};
struct dio_write {
- struct completion done;
struct kiocb *req;
+ struct address_space *mapping;
+ struct bch_inode_info *inode;
struct mm_struct *mm;
unsigned loop:1,
+ extending:1,
sync:1,
+ flush:1,
free_iov:1;
struct quota_res quota_res;
u64 written;
};
/* pagecache_block must be held */
-static int write_invalidate_inode_pages_range(struct address_space *mapping,
+static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
loff_t start, loff_t end)
{
int ret;
#ifdef CONFIG_BCACHEFS_QUOTA
-static void bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res)
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
{
- if (!res->sectors)
- return;
-
- mutex_lock(&inode->ei_quota_lock);
BUG_ON(res->sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC,
-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
inode->ei_quota_reserved -= res->sectors;
- mutex_unlock(&inode->ei_quota_lock);
-
res->sectors = 0;
}
+static void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ if (res->sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_quota_reservation_put(c, inode, res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+}
+
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
#else
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
static void bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
- struct quota_res *res)
-{
-}
+ struct quota_res *res) {}
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
-static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
struct quota_res *quota_res, s64 sectors)
{
- if (!sectors)
- return;
-
- mutex_lock(&inode->ei_quota_lock);
bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
"inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
}
#endif
- mutex_unlock(&inode->ei_quota_lock);
+}
+
+static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ if (sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, quota_res, sectors);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
}
/* page state: */
return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
-static unsigned bkey_to_sector_state(const struct bkey *k)
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
{
- if (k->type == KEY_TYPE_reservation)
+ if (bkey_extent_is_reservation(k))
return SECTOR_RESERVED;
- if (bkey_extent_is_allocation(k))
+ if (bkey_extent_is_allocation(k.k))
return SECTOR_ALLOCATED;
return SECTOR_UNALLOCATED;
}
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k.k);
+ unsigned state = bkey_to_sector_state(k);
while (pg_idx < nr_pages) {
struct page *page = pages[pg_idx];
struct bio_vec bv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k.k);
+ unsigned state = bkey_to_sector_state(k);
bio_for_each_segment(bv, bio, iter)
__bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
if (fdm > mapping) {
struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
- if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
+ if (bch2_pagecache_add_tryget(inode))
goto got_lock;
- bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
+ bch2_pagecache_block_put(fdm_host);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
+ bch2_pagecache_add_put(inode);
- bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
+ bch2_pagecache_block_get(fdm_host);
/* Signal that lock has been dropped: */
set_fdm_dropped_locks();
return VM_FAULT_SIGBUS;
}
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
got_lock:
ret = filemap_fault(vmf);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return ret;
}
* a write_invalidate_inode_pages_range() that works without dropping
* page lock before invalidating page
*/
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
lock_page(page);
isize = i_size_read(&inode->v);
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
out:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
sb_end_pagefault(inode->v.i_sb);
return ret;
goto retry;
if (ret) {
- bch_err_inum_ratelimited(c, inum.inum,
+ bch_err_inum_offset_ratelimited(c,
+ iter.pos.inode,
+ iter.pos.offset << 9,
"read error %i from btree lookup", ret);
rbio->bio.bi_status = BLK_STS_IOERR;
bio_endio(&rbio->bio);
bch2_trans_init(&trans, c, 0, 0);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
while ((page = readpage_iter_next(&readpages_iter))) {
pgoff_t index = readpages_iter.offset + readpages_iter.idx;
&readpages_iter);
}
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
bch2_trans_exit(&trans);
kfree(readpages_iter.pages);
};
}
-static void bch2_writepage_io_free(struct closure *cl)
-{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
-
- bio_put(&io->op.wbio.bio);
-}
-
-static void bch2_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
struct bvec_iter_all iter;
end_page_writeback(bvec->bv_page);
}
- closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
struct bch_writepage_io *io = w->io;
w->io = NULL;
- closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
- continue_at(&io->cl, bch2_writepage_io_done, NULL);
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
- closure_init(&w->io->cl, NULL);
w->io->inode = inode;
-
op = &w->io->op;
bch2_write_op_init(op, c, w->opts);
op->target = w->opts.foreground_target;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
op->subvol = inode->ei_subvol;
op->pos = POS(inode->v.i_ino, sector);
+ op->end_io = bch2_writepage_io_done;
+ op->devs_need_flush = &inode->ei_devs_need_flush;
op->wbio.bio.bi_iter.bi_sector = sector;
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
sectors << 9, offset << 9));
/* Check for writing past i_size: */
- WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
- round_up(i_size, block_bytes(c)));
+ WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+ round_up(i_size, block_bytes(c)) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ "writing past i_size: %llu > %llu (unrounded %llu)\n",
+ bio_end_sector(&w->io->op.wbio.bio) << 9,
+ round_up(i_size, block_bytes(c)),
+ i_size);
w->io->op.res.sectors += reserved_sectors;
w->io->op.i_sectors_delta -= dirty_sectors;
bch2_page_reservation_init(c, inode, res);
*fsdata = res;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
put_page(page);
*pagep = NULL;
err_unlock:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
kfree(res);
*fsdata = NULL;
return bch2_err_class(ret);
unlock_page(page);
put_page(page);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
bch2_page_reservation_put(c, inode, res);
kfree(res);
goto out;
}
+ /*
+ * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+ * supposed to write as much as we have disk space for.
+ *
+ * On failure here we should still write out a partial page if
+ * we aren't completely out of disk space - we don't do that
+ * yet:
+ */
ret = bch2_page_reservation_get(c, inode, page, &res,
pg_offset, pg_len);
- if (ret)
- goto out;
+ if (unlikely(ret)) {
+ if (!reserved)
+ goto out;
+ break;
+ }
reserved += pg_len;
}
for (i = 0; i < nr_pages; i++)
flush_dcache_page(pages[i]);
- while (copied < len) {
+ while (copied < reserved) {
struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, len - copied,
+ unsigned pg_len = min_t(unsigned, reserved - copied,
PAGE_SIZE - pg_offset);
unsigned pg_copied = copy_page_from_iter_atomic(page,
pg_offset, pg_len, iter);
ssize_t written = 0;
int ret = 0;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
do {
unsigned offset = pos & (PAGE_SIZE - 1);
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(iter));
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return written ? written : ret;
}
if (iocb->ki_flags & IOCB_DIRECT) {
struct blk_plug plug;
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret < 0)
- goto out;
+ if (unlikely(mapping->nrpages)) {
+ ret = filemap_write_and_wait_range(mapping,
+ iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret < 0)
+ goto out;
+ }
file_accessed(file);
if (ret >= 0)
iocb->ki_pos += ret;
} else {
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
ret = generic_file_read_iter(iocb, iter);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
}
out:
return bch2_err_class(ret);
return err ? false : ret;
}
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ return bch2_check_range_allocated(c, inode_inum(inode),
+ dio->op.pos.offset, bio_sectors(bio),
+ dio->op.opts.data_replicas,
+ dio->op.opts.compression != 0);
+}
+
static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
-static long bch2_dio_write_loop(struct dio_write *dio)
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+ struct iovec *iov = dio->inline_vecs;
+
+ if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+ iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
+
+ dio->free_iov = true;
+ }
+
+ memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.iov = iov;
+ return 0;
+}
+
+static void bch2_dio_write_flush_done(struct closure *cl)
+{
+ struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ struct bch_fs *c = dio->op.c;
+
+ closure_debug_destroy(cl);
+
+ dio->op.error = bch2_journal_error(&c->journal);
+
+ bch2_dio_write_done(dio);
+}
+
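+/*
+ * For O_DSYNC writes: after the data write completes, flush the journal up to
+ * the inode's last journal sequence number and flush any devices that took
+ * nocow writes - synchronously, or from the closure completion if the write
+ * was async:
+ */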
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ dio->flush = 0;
+
+ closure_init(&dio->op.cl, NULL);
+
+ if (!dio->op.error) {
+ ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+ if (ret) {
+ dio->op.error = ret;
+ } else {
+ bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
+ }
+
+ if (dio->sync) {
+ closure_sync(&dio->op.cl);
+ closure_debug_destroy(&dio->op.cl);
+ } else {
+ continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
+ }
+}
+
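+/*
+ * Final DIO write completion: issue the journal/device flush if one is
+ * pending, drop the pagecache block lock and our inode_dio reference, free
+ * any copied iovec, and either return the byte count (sync) or complete the
+ * kiocb (async):
+ */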
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
- bool kthread = (current->flags & PF_KTHREAD) != 0;
struct kiocb *req = dio->req;
- struct address_space *mapping = req->ki_filp->f_mapping;
- struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_inode_info *inode = dio->inode;
+ bool sync = dio->sync;
+ long ret;
+
+ if (unlikely(dio->flush)) {
+ bch2_dio_write_flush(dio);
+ if (!sync)
+ return -EIOCBQUEUED;
+ }
+
+ bch2_pagecache_block_put(inode);
+
+ if (dio->free_iov)
+ kfree(dio->iter.iov);
+
+ ret = dio->op.error ?: ((long) dio->written << 9);
+ bio_put(&dio->op.wbio.bio);
+
+ /* inode->i_dio_count is our ref on inode and thus bch_fs */
+ inode_dio_end(&inode->v);
+
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+
+ if (!sync) {
+ req->ki_complete(req, ret);
+ ret = -EIOCBQUEUED;
+ }
+ return ret;
+}
+
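+/*
+ * Per-iteration DIO write completion: advance ki_pos, account written sectors
+ * against quota, extend i_size if this was an extending write, and release
+ * the pages pinned for this bio:
+ */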
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
struct bio *bio = &dio->op.wbio.bio;
struct bvec_iter_all iter;
struct bio_vec *bv;
+
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
+
+ if (dio->extending) {
+ spin_lock(&inode->v.i_lock);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+ __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
+
+ if (unlikely(dio->op.error))
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
+}
+
+static long bch2_dio_write_loop(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct address_space *mapping = dio->mapping;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
unsigned unaligned, iter_count;
bool sync = dio->sync, dropped_locks;
long ret;
- if (dio->loop)
- goto loop;
-
while (1) {
iter_count = dio->iter.count;
- if (kthread && dio->mm)
- kthread_use_mm(dio->mm);
- BUG_ON(current->faults_disabled_mapping);
+ EBUG_ON(current->faults_disabled_mapping);
current->faults_disabled_mapping = mapping;
ret = bio_iov_iter_get_pages(bio, &dio->iter);
dropped_locks = fdm_dropped_locks();
current->faults_disabled_mapping = NULL;
- if (kthread && dio->mm)
- kthread_unuse_mm(dio->mm);
/*
* If the fault handler returned an error but also signalled
}
bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
- dio->op.end_io = bch2_dio_write_loop_async;
+ dio->op.end_io = sync
+ ? NULL
+ : bch2_dio_write_loop_async;
dio->op.target = dio->op.opts.foreground_target;
dio->op.write_point = writepoint_hashed((unsigned long) current);
dio->op.nr_replicas = dio->op.opts.data_replicas;
dio->op.subvol = inode->ei_subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+ dio->op.devs_need_flush = &inode->ei_devs_need_flush;
- if ((req->ki_flags & IOCB_DSYNC) &&
- !c->opts.journal_flush_disabled)
- dio->op.flags |= BCH_WRITE_FLUSH;
+ if (sync)
+ dio->op.flags |= BCH_WRITE_SYNC;
dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+ ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+ bio_sectors(bio), true);
+ if (unlikely(ret))
+ goto err;
+
ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
dio->op.opts.data_replicas, 0);
if (unlikely(ret) &&
- !bch2_check_range_allocated(c, inode_inum(inode),
- dio->op.pos.offset, bio_sectors(bio),
- dio->op.opts.data_replicas,
- dio->op.opts.compression != 0))
+ !bch2_dio_write_check_allocated(dio))
goto err;
task_io_account_write(bio->bi_iter.bi_size);
- if (!dio->sync && !dio->loop && dio->iter.count) {
- struct iovec *iov = dio->inline_vecs;
+ if (unlikely(dio->iter.count) &&
+ !dio->sync &&
+ !dio->loop &&
+ bch2_dio_write_copy_iov(dio))
+ dio->sync = sync = true;
- if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
- GFP_KERNEL);
- if (unlikely(!iov)) {
- dio->sync = sync = true;
- goto do_io;
- }
-
- dio->free_iov = true;
- }
-
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
- }
-do_io:
dio->loop = true;
closure_call(&dio->op.cl, bch2_write, NULL, NULL);
- if (sync)
- wait_for_completion(&dio->done);
- else
+ if (!sync)
return -EIOCBQUEUED;
-loop:
- i_sectors_acct(c, inode, &dio->quota_res,
- dio->op.i_sectors_delta);
- req->ki_pos += (u64) dio->op.written << 9;
- dio->written += dio->op.written;
-
- spin_lock(&inode->v.i_lock);
- if (req->ki_pos > inode->v.i_size)
- i_size_write(&inode->v, req->ki_pos);
- spin_unlock(&inode->v.i_lock);
-
- if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
- bio->bi_vcnt = 0;
- if (dio->op.error) {
- set_bit(EI_INODE_ERROR, &inode->ei_flags);
- break;
- }
+ bch2_dio_write_end(dio);
- if (!dio->iter.count)
+ if (likely(!dio->iter.count) || dio->op.error)
break;
bio_reset(bio, NULL, REQ_OP_WRITE);
- reinit_completion(&dio->done);
}
-
- ret = dio->op.error ?: ((long) dio->written << 9);
+out:
+ return bch2_dio_write_done(dio);
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ dio->op.error = ret;
- if (dio->free_iov)
- kfree(dio->iter.iov);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct bvec_iter_all iter;
+ struct bio_vec *bv;
- if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
bio_for_each_segment_all(bv, bio, iter)
put_page(bv->bv_page);
- bio_put(bio);
-
- /* inode->i_dio_count is our ref on inode and thus bch_fs */
- inode_dio_end(&inode->v);
-
- if (ret < 0)
- ret = bch2_err_class(ret);
-
- if (!sync) {
- req->ki_complete(req, ret);
- ret = -EIOCBQUEUED;
}
- return ret;
+
+ bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ goto out;
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
struct dio_write *dio = container_of(op, struct dio_write, op);
+ struct mm_struct *mm = dio->mm;
- if (dio->sync)
- complete(&dio->done);
- else
- bch2_dio_write_loop(dio);
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error) {
+ bch2_dio_write_done(dio);
+ return;
+ }
+
+ bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
+
+ if (mm)
+ kthread_use_mm(mm);
+ bch2_dio_write_loop(dio);
+ if (mm)
+ kthread_unuse_mm(mm);
}
static noinline
goto err;
inode_dio_begin(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
extending = req->ki_pos + iter->count > inode->v.i_size;
if (!extending) {
GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
- init_completion(&dio->done);
dio->req = req;
+ dio->mapping = mapping;
+ dio->inode = inode;
dio->mm = current->mm;
dio->loop = false;
+ dio->extending = extending;
dio->sync = is_sync_kiocb(req) || extending;
+ dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
dio->free_iov = false;
dio->quota_res.sectors = 0;
dio->written = 0;
dio->iter = *iter;
+ dio->op.c = c;
- ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
- iter->count >> 9, true);
- if (unlikely(ret))
- goto err_put_bio;
-
- ret = write_invalidate_inode_pages_range(mapping,
- req->ki_pos,
- req->ki_pos + iter->count - 1);
- if (unlikely(ret))
- goto err_put_bio;
+ if (unlikely(mapping->nrpages)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter->count - 1);
+ if (unlikely(ret))
+ goto err_put_bio;
+ }
ret = bch2_dio_write_loop(dio);
err:
inode_unlock(&inode->v);
return ret;
err_put_bio:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
goto err;
* inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
* insert trigger: look up the btree inode instead
*/
-static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
- struct bch_inode_unpacked inode;
+ struct bch_inode_unpacked u;
int ret;
if (c->opts.journal_flush_disabled)
return 0;
- ret = bch2_inode_find_by_inum(c, inum, &inode);
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
if (ret)
return ret;
- return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = file_write_and_wait_range(file, start, end);
ret2 = sync_inode_metadata(&inode->v, 1);
- ret3 = bch2_flush_inode(c, inode_inum(inode));
+ ret3 = bch2_flush_inode(c, inode);
return bch2_err_class(ret ?: ret2 ?: ret3);
}
}
inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
if (ret)
if (ret)
goto err;
- WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
- inode->v.i_size < inode_u.bi_size);
+ WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+ inode->v.i_size < inode_u.bi_size,
+ "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+ (u64) inode->v.i_size, inode_u.bi_size);
if (iattr->ia_size > inode->v.i_size) {
ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_put(inode);
return bch2_err_class(ret);
}
struct btree_trans trans;
struct btree_iter iter;
struct bpos end_pos = POS(inode->v.i_ino, end_sector);
- unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
+ struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
s64 i_sectors_delta = 0;
- struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
- struct bkey_i_reservation reservation;
struct bkey_s_c k;
unsigned sectors;
u32 snapshot;
goto bkey_err;
/* already reserved */
- if (k.k->type == KEY_TYPE_reservation &&
- bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
+ if (bkey_extent_is_reservation(k) &&
+ bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
bch2_btree_iter_advance(&iter);
continue;
}
continue;
}
- bkey_reservation_init(&reservation.k_i);
- reservation.k.type = KEY_TYPE_reservation;
- reservation.k.p = k.k->p;
- reservation.k.size = k.k->size;
-
- bch2_cut_front(iter.pos, &reservation.k_i);
- bch2_cut_back(end_pos, &reservation.k_i);
+ /*
+ * XXX: for nocow mode, we should promote shared extents to
+ * unshared here
+ */
- sectors = reservation.k.size;
- reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
+ sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
if (!bkey_extent_is_allocation(k.k)) {
ret = bch2_quota_reservation_add(c, inode,
goto bkey_err;
}
- if (reservation.v.nr_replicas < replicas ||
- bch2_bkey_sectors_compressed(k)) {
- ret = bch2_disk_reservation_get(c, &disk_res, sectors,
- replicas, 0);
- if (unlikely(ret))
- goto bkey_err;
-
- reservation.v.nr_replicas = disk_res.nr_replicas;
- }
-
- ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
- &reservation.k_i,
- &disk_res, NULL,
- 0, &i_sectors_delta, true);
+ ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
+ sectors, opts, &i_sectors_delta,
+ writepoint_hashed((unsigned long) current));
if (ret)
goto bkey_err;
+
i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res);
- bch2_disk_reservation_put(c, &disk_res);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
}
inode_lock(&inode->v);
inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
ret = file_modified(file);
if (ret)
else
ret = -EOPNOTSUPP;
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_put(inode);
inode_unlock(&inode->v);
percpu_ref_put(&c->writes);
if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
IS_SYNC(file_inode(file_dst)))
- ret = bch2_flush_inode(c, inode_inum(dst));
+ ret = bch2_flush_inode(c, dst);
err:
bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
void bch2_fs_fsio_exit(struct bch_fs *c)
{
+ bioset_exit(&c->nocow_flush_bioset);
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
bioset_exit(&c->writepage_bioset);
BIOSET_NEED_BVECS) ||
bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
- BIOSET_NEED_BVECS))
+ BIOSET_NEED_BVECS) ||
+ bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
ret = -ENOMEM;
pr_verbose_init(c->opts, "ret %i", ret);
struct bch_inode_unpacked *,
struct bch_subvolume *);
-static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
-{
- BUG_ON(atomic_long_read(&lock->v) == 0);
-
- if (atomic_long_sub_return_release(i, &lock->v) == 0)
- wake_up_all(&lock->wait);
-}
-
-static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
-{
- long v = atomic_long_read(&lock->v), old;
-
- do {
- old = v;
-
- if (i > 0 ? v < 0 : v > 0)
- return false;
- } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
- old, old + i)) != old);
- return true;
-}
-
-static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
-{
- wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
-}
-
-void bch2_pagecache_add_put(struct pagecache_lock *lock)
-{
- __pagecache_lock_put(lock, 1);
-}
-
-bool bch2_pagecache_add_tryget(struct pagecache_lock *lock)
-{
- return __pagecache_lock_tryget(lock, 1);
-}
-
-void bch2_pagecache_add_get(struct pagecache_lock *lock)
-{
- __pagecache_lock_get(lock, 1);
-}
-
-void bch2_pagecache_block_put(struct pagecache_lock *lock)
-{
- __pagecache_lock_put(lock, -1);
-}
-
-void bch2_pagecache_block_get(struct pagecache_lock *lock)
-{
- __pagecache_lock_get(lock, -1);
-}
-
void bch2_inode_update_after_write(struct btree_trans *trans,
struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
int flags2 = 0;
u64 offset = p.ptr.offset;
+ if (p.ptr.unwritten)
+ flags2 |= FIEMAP_EXTENT_UNWRITTEN;
+
if (p.crc.compression_type)
flags2 |= FIEMAP_EXTENT_ENCODED;
else
inode_init_once(&inode->v);
mutex_init(&inode->ei_update_lock);
- pagecache_lock_init(&inode->ei_pagecache_lock);
+ two_state_lock_init(&inode->ei_pagecache_lock);
mutex_init(&inode->ei_quota_lock);
return &inode->v;
#include "opts.h"
#include "str_hash.h"
#include "quota_types.h"
+#include "two_state_shared_lock.h"
#include <linux/seqlock.h>
#include <linux/stat.h>
-/*
- * Two-state lock - can be taken for add or block - both states are shared,
- * like read side of rwsem, but conflict with other state:
- */
-struct pagecache_lock {
- atomic_long_t v;
- wait_queue_head_t wait;
-};
-
-static inline void pagecache_lock_init(struct pagecache_lock *lock)
-{
- atomic_long_set(&lock->v, 0);
- init_waitqueue_head(&lock->wait);
-}
-
-void bch2_pagecache_add_put(struct pagecache_lock *);
-bool bch2_pagecache_add_tryget(struct pagecache_lock *);
-void bch2_pagecache_add_get(struct pagecache_lock *);
-void bch2_pagecache_block_put(struct pagecache_lock *);
-void bch2_pagecache_block_get(struct pagecache_lock *);
-
struct bch_inode_info {
struct inode v;
unsigned long ei_flags;
u64 ei_quota_reserved;
unsigned long ei_last_dirtied;
- struct pagecache_lock ei_pagecache_lock;
+ two_state_lock_t ei_pagecache_lock;
struct mutex ei_quota_lock;
struct bch_qid ei_qid;
u32 ei_subvol;
+ /*
+ * When we've been doing nocow writes we'll need to issue flushes to the
+ * underlying block devices
+ *
+ * XXX: a device may have had a flush issued by some other codepath. It
+ * would be better to keep for each device a sequence number that's
+ * incremented when we issue a cache flush, and track here the sequence
+ * number that needs flushing.
+ */
+ struct bch_devs_mask ei_devs_need_flush;
+
/* copy of inode in btree: */
struct bch_inode_unpacked ei_inode;
};
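+/*
+ * Pagecache lock helpers, now implemented with the generic two state shared
+ * lock: the "add" and "block" states are each shared (like the read side of
+ * an rwsem) but conflict with the other state.
+ */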
+#define bch2_pagecache_add_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_tryget(i) bch2_two_state_trylock(&i->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 0)
+
+#define bch2_pagecache_block_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 1)
+#define bch2_pagecache_block_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 1)
+
static inline subvol_inum inode_inum(struct bch_inode_info *inode)
{
return (subvol_inum) {
if ((_locks) & INODE_LOCK) \
down_write_nested(&a[i]->v.i_rwsem, i); \
if ((_locks) & INODE_PAGECACHE_BLOCK) \
- bch2_pagecache_block_get(&a[i]->ei_pagecache_lock);\
+ bch2_pagecache_block_get(a[i]);\
if ((_locks) & INODE_UPDATE_LOCK) \
mutex_lock_nested(&a[i]->ei_update_lock, i);\
} \
if ((_locks) & INODE_LOCK) \
up_write(&a[i]->v.i_rwsem); \
if ((_locks) & INODE_PAGECACHE_BLOCK) \
- bch2_pagecache_block_put(&a[i]->ei_pagecache_lock);\
+ bch2_pagecache_block_put(a[i]);\
if ((_locks) & INODE_UPDATE_LOCK) \
mutex_unlock(&a[i]->ei_update_lock); \
} \
bch2_trans_iter_exit(trans, &iter);
err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "error from __remove_dirent(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
break;
if (i->equiv == n.equiv) {
- bch_err(c, "adding duplicate snapshot in snapshots_seen_add()");
+ bch_err(c, "%s(): adding duplicate snapshot", __func__);
return -EINVAL;
}
}
printbuf_exit(&buf);
return ret;
bad_hash:
- if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, "
- "hashed to %llu\n%s",
+ if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
err:
fsck_err:
if (ret)
- bch_err(c, "error from check_inode(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "error from check_inodes(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
}
fsck_err:
if (ret)
- bch_err(c, "error from check_i_sectors(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
continue;
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- k.k->type != KEY_TYPE_reservation &&
- k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9, c,
+ k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
+ !bkey_extent_is_reservation(k), c,
"extent type past end of inode %llu:%u, i_size %llu\n %s",
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "error from check_extent(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "error from check_extents(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
}
fsck_err:
if (ret)
- bch_err(c, "error from check_subdir_count(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
if (!ret && trans_was_restarted(trans, restart_count))
ret = -BCH_ERR_transaction_restart_nested;
return ret;
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "error from check_target(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
printbuf_exit(&buf);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "error from check_dirent(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
inode_walker_exit(&target);
if (ret)
- bch_err(c, "error from check_dirents(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(c, "error from check_xattr(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error from check_xattrs(): %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
* iterator:
*/
bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
- BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_INTENT);
while (1) {
bkey_init(&delete.k);
delete.k.p = iter.pos;
+ if (iter.flags & BTREE_ITER_IS_EXTENTS) {
+ bch2_key_resize(&delete.k, k.k->p.offset - iter.pos.offset);
+
+ ret = bch2_extent_trim_atomic(trans, &iter, &delete);
+ if (ret)
+ goto err;
+ }
+
ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOFAIL);
int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_inode (struct bkey_ops) { \
+#define bch2_bkey_ops_inode ((struct bkey_ops) { \
.key_invalid = bch2_inode_invalid, \
.val_to_text = bch2_inode_to_text, \
.trans_trigger = bch2_trans_mark_inode, \
.atomic_trigger = bch2_mark_inode, \
-}
+})
-#define bch2_bkey_ops_inode_v2 (struct bkey_ops) { \
+#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) { \
.key_invalid = bch2_inode_v2_invalid, \
.val_to_text = bch2_inode_to_text, \
.trans_trigger = bch2_trans_mark_inode, \
.atomic_trigger = bch2_mark_inode, \
-}
+})
-#define bch2_bkey_ops_inode_v3 (struct bkey_ops) { \
+#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) { \
.key_invalid = bch2_inode_v3_invalid, \
.val_to_text = bch2_inode_to_text, \
.trans_trigger = bch2_trans_mark_inode, \
.atomic_trigger = bch2_mark_inode, \
-}
+})
static inline bool bkey_is_inode(const struct bkey *k)
{
int, struct printbuf *);
void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_inode_generation (struct bkey_ops) { \
+#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
.key_invalid = bch2_inode_generation_invalid, \
.val_to_text = bch2_inode_generation_to_text, \
-}
+})
#if 0
typedef struct {
#define x(_name, _bits) + 8 + _bits / 8
u8 _pad[0 + BCH_INODE_FIELDS_v3()];
#undef x
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
+ if (opts.nocow)
+ opts.compression = opts.background_compression = opts.data_checksum = opts.erasure_code = 0;
return opts;
}
#include "checksum.h"
#include "compress.h"
#include "clock.h"
+#include "data_update.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
return ret;
}
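+/*
+ * Update the inode in the same transaction as an extent update: bump i_size
+ * if the write extends it (unless I_SIZE_DIRTY is set) and apply the
+ * i_sectors delta, converting the inode to the v3 format if needed:
+ */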
+static int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
+ struct btree_iter *extent_iter,
+ u64 new_i_size,
+ s64 i_sectors_delta)
+{
+ struct btree_iter iter;
+ struct bkey_s_c inode_k;
+ struct bkey_s_c_inode_v3 inode;
+ struct bkey_i_inode_v3 *new_inode;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
+ SPOS(0,
+ extent_iter->pos.inode,
+ extent_iter->snapshot),
+ BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+ inode_k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(inode_k);
+ if (unlikely(ret))
+ goto err;
+
+ ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT;
+ if (unlikely(ret))
+ goto err;
+
+ if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) {
+ inode_k = bch2_inode_to_v3(trans, inode_k);
+ ret = bkey_err(inode_k);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ inode = bkey_s_c_to_inode_v3(inode_k);
+
+ new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k));
+ ret = PTR_ERR_OR_ZERO(new_inode);
+ if (unlikely(ret))
+ goto err;
+
+ bkey_reassemble(&new_inode->k_i, inode.s_c);
+
+ if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > le64_to_cpu(inode.v->bi_size))
+ new_inode->v.bi_size = cpu_to_le64(new_i_size);
+
+ le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
+
+ new_inode->k.p.snapshot = iter.snapshot;
+
+ ret = bch2_trans_update(trans, &iter, &new_inode->k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
int bch2_extent_update(struct btree_trans *trans,
subvol_inum inum,
struct btree_iter *iter,
struct bkey_i *k,
struct disk_reservation *disk_res,
- u64 *journal_seq,
u64 new_i_size,
s64 *i_sectors_delta_total,
bool check_enospc)
{
- struct btree_iter inode_iter = { NULL };
struct bpos next_pos;
bool usage_increasing;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
if (ret)
return ret;
- new_i_size = min(k->k.p.offset << 9, new_i_size);
next_pos = k->k.p;
ret = bch2_sum_sector_overwrites(trans, iter, k,
return ret;
}
- if (new_i_size || i_sectors_delta) {
- struct bkey_s_c k;
- struct bkey_s_c_inode_v3 inode;
- struct bkey_i_inode_v3 *new_inode;
- bool i_size_update;
+ /*
+ * Note:
+ * We always have to do an inode update - even when i_size/i_sectors
+ * aren't changing - for fsync to work properly; fsync relies on
+ * inode->bi_journal_seq which is updated by the trigger code:
+ */
+ ret = bch2_extent_update_i_size_sectors(trans, iter,
+ min(k->k.p.offset << 9, new_i_size),
+ i_sectors_delta) ?:
+ bch2_trans_update(trans, iter, k, 0) ?:
+ bch2_trans_commit(trans, disk_res, NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL);
+ if (unlikely(ret))
+ return ret;
- bch2_trans_iter_init(trans, &inode_iter, BTREE_ID_inodes,
- SPOS(0, inum.inum, iter->snapshot),
- BTREE_ITER_INTENT|BTREE_ITER_CACHED);
- k = bch2_btree_iter_peek_slot(&inode_iter);
- ret = bkey_err(k);
- if (unlikely(ret))
- goto err;
+ if (i_sectors_delta_total)
+ *i_sectors_delta_total += i_sectors_delta;
+ bch2_btree_iter_set_pos(iter, next_pos);
+ return 0;
+}
+
+/* Overwrites whatever was present with zeroes: */
+int bch2_extent_fallocate(struct btree_trans *trans,
+ subvol_inum inum,
+ struct btree_iter *iter,
+ unsigned sectors,
+ struct bch_io_opts opts,
+ s64 *i_sectors_delta,
+ struct write_point_specifier write_point)
+{
+ struct bch_fs *c = trans->c;
+ struct disk_reservation disk_res = { 0 };
+ struct closure cl;
+ struct open_buckets open_buckets;
+ struct bkey_s_c k;
+ struct bkey_buf old, new;
+ bool have_reservation = false;
+ bool unwritten = opts.nocow &&
+ c->sb.version >= bcachefs_metadata_version_unwritten_extents;
+ int ret;
+
+ bch2_bkey_buf_init(&old);
+ bch2_bkey_buf_init(&new);
+ closure_init_stack(&cl);
+ open_buckets.nr = 0;
+retry:
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
- ret = bkey_is_inode(k.k) ? 0 : -ENOENT;
+ sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
+
+ if (!have_reservation) {
+ unsigned new_replicas =
+ max(0, (int) opts.data_replicas -
+ (int) bch2_bkey_nr_ptrs_fully_allocated(k));
+ /*
+ * Get a disk reservation before (in the nocow case) calling
+ * into the allocator:
+ */
+ ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
if (unlikely(ret))
- goto err;
+ goto out;
- if (unlikely(k.k->type != KEY_TYPE_inode_v3)) {
- k = bch2_inode_to_v3(trans, k);
- ret = bkey_err(k);
- if (unlikely(ret))
- goto err;
+ bch2_bkey_buf_reassemble(&old, c, k);
+ }
+
+ if (have_reservation) {
+ if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
+ goto out;
+
+ bch2_key_resize(&new.k->k, sectors);
+ } else if (!unwritten) {
+ struct bkey_i_reservation *reservation;
+
+ bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
+ reservation = bkey_reservation_init(new.k);
+ reservation->k.p = iter->pos;
+ bch2_key_resize(&reservation->k, sectors);
+ reservation->v.nr_replicas = opts.data_replicas;
+ } else {
+ struct bkey_i_extent *e;
+ struct bch_devs_list devs_have;
+ struct write_point *wp;
+ struct bch_extent_ptr *ptr;
+
+ devs_have.nr = 0;
+
+ bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
+
+ e = bkey_extent_init(new.k);
+ e->k.p = iter->pos;
+
+ ret = bch2_alloc_sectors_start_trans(trans,
+ opts.foreground_target,
+ false,
+ write_point,
+ &devs_have,
+ opts.data_replicas,
+ opts.data_replicas,
+ RESERVE_none, 0, &cl, &wp);
+ if (ret == -EAGAIN) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ goto retry;
}
+ if (ret)
+ return ret;
- inode = bkey_s_c_to_inode_v3(k);
- i_size_update = !(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
- new_i_size > le64_to_cpu(inode.v->bi_size);
+ sectors = min(sectors, wp->sectors_free);
- if (!i_sectors_delta && !i_size_update)
- goto no_inode_update;
+ bch2_key_resize(&e->k, sectors);
- new_inode = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(new_inode);
- if (unlikely(ret))
- goto err;
+ bch2_open_bucket_get(c, wp, &open_buckets);
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+ bch2_alloc_sectors_done(c, wp);
- bkey_reassemble(&new_inode->k_i, k);
+ extent_for_each_ptr(extent_i_to_s(e), ptr)
+ ptr->unwritten = true;
+ }
- if (i_size_update)
- new_inode->v.bi_size = cpu_to_le64(new_i_size);
+ have_reservation = true;
- le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
- ret = bch2_trans_update(trans, &inode_iter, &new_inode->k_i, 0);
- if (unlikely(ret))
- goto err;
+ ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
+ 0, i_sectors_delta, true);
+out:
+ if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
}
-no_inode_update:
- ret = bch2_trans_update(trans, iter, k, 0) ?:
- bch2_trans_commit(trans, disk_res, journal_seq,
- BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_NOFAIL);
- if (unlikely(ret))
- goto err;
- if (i_sectors_delta_total)
- *i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(iter, next_pos);
-err:
- bch2_trans_iter_exit(trans, &inode_iter);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ bch2_trans_begin(trans);
+ goto retry;
+ }
+
+ bch2_open_buckets_put(c, &open_buckets);
+ bch2_disk_reservation_put(c, &disk_res);
+ bch2_bkey_buf_exit(&new, c);
+ bch2_bkey_buf_exit(&old, c);
+
return ret;
}
bch2_cut_back(end_pos, &delete);
ret = bch2_extent_update(trans, inum, iter, &delete,
- &disk_res, NULL,
- 0, i_sectors_delta, false);
+ &disk_res, 0, i_sectors_delta, false);
bch2_disk_reservation_put(c, &disk_res);
}
return ret;
}
-int bch2_write_index_default(struct bch_write_op *op)
+static int bch2_write_index_default(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct bkey_buf sk;
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
ret = bch2_extent_update(&trans, inum, &iter, sk.k,
- &op->res, op_journal_seq(op),
+ &op->res,
op->new_i_size, &op->i_sectors_delta,
op->flags & BCH_WRITE_CHECK_ENOSPC);
bch2_trans_iter_exit(&trans, &iter);
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
enum bch_data_type type,
- const struct bkey_i *k)
+ const struct bkey_i *k,
+ bool nocow)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
const struct bch_extent_ptr *ptr;
n->c = c;
n->dev = ptr->dev;
- n->have_ioref = bch2_dev_get_ioref(ca,
+ n->have_ioref = nocow || bch2_dev_get_ioref(ca,
type == BCH_DATA_btree ? READ : WRITE);
+ n->nocow = nocow;
n->submit_time = local_clock();
+ n->inode_offset = bkey_start_offset(&k->k);
n->bio.bi_iter.bi_sector = ptr->offset;
if (likely(n->have_ioref)) {
}
}
-static void __bch2_write(struct closure *);
+static void __bch2_write(struct bch_write_op *);
static void bch2_write_done(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bch_fs *c = op->c;
- if (!op->error && (op->flags & BCH_WRITE_FLUSH))
- op->error = bch2_journal_error(&c->journal);
-
bch2_disk_reservation_put(c, &op->res);
percpu_ref_put(&c->writes);
bch2_keylist_free(&op->insert_keys, op->inline_keys);
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- if (op->end_io) {
- EBUG_ON(cl->parent);
- closure_debug_destroy(cl);
+ closure_debug_destroy(cl);
+ if (op->end_io)
op->end_io(op);
- } else {
- closure_return(cl);
- }
}
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
struct keylist *keys = &op->insert_keys;
struct bkey_i *k;
unsigned dev;
- int ret;
+ int ret = 0;
if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
ret = bch2_write_drop_io_error_ptrs(op);
if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
- int ret = op->index_update_fn(op);
+
+ ret = !(op->flags & BCH_WRITE_MOVE)
+ ? bch2_write_index_default(op)
+ : bch2_data_update_index_update(op);
BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
BUG_ON(keylist_sectors(keys) && !ret);
op->written += sectors_start - keylist_sectors(keys);
if (ret) {
- bch_err_inum_ratelimited(c, op->pos.inode,
- "write error while doing btree update: %s", bch2_err_str(ret));
- op->error = ret;
+ struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
+
+ bch_err_inum_offset_ratelimited(c,
+ k->k.p.inode, k->k.p.offset << 9,
+ "write error while doing btree update: %s",
+ bch2_err_str(ret));
+ goto err;
}
}
out:
err:
keys->top = keys->keys;
op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
goto out;
}
static void bch2_write_index(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
- struct bch_fs *c = op->c;
+ struct write_point *wp = op->wp;
+ struct workqueue_struct *wq = index_update_wq(op);
- __bch2_write_index(op);
+ barrier();
+ op->btree_update_ready = true;
+ queue_work(wq, &wp->index_update_work);
+}
- if (!(op->flags & BCH_WRITE_DONE)) {
- continue_at(cl, __bch2_write, index_update_wq(op));
- } else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
- bch2_journal_flush_seq_async(&c->journal,
- *op_journal_seq(op),
- cl);
- continue_at(cl, bch2_write_done, index_update_wq(op));
- } else {
- continue_at_nobarrier(cl, bch2_write_done, NULL);
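+/*
+ * Btree updates for completed writes are now run in submission order from the
+ * write point's work item: pop ready ops off wp->writes, do their index
+ * updates, then either resubmit the op (more IO remaining) or finish it:
+ */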
+void bch2_write_point_do_index_updates(struct work_struct *work)
+{
+ struct write_point *wp =
+ container_of(work, struct write_point, index_update_work);
+ struct bch_write_op *op;
+
+ while (1) {
+ spin_lock(&wp->writes_lock);
+ op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
+ if (op && !op->btree_update_ready)
+ op = NULL;
+ if (op)
+ list_del(&op->wp_list);
+ spin_unlock(&wp->writes_lock);
+
+ if (!op)
+ break;
+
+ __bch2_write_index(op);
+
+ if (!(op->flags & BCH_WRITE_DONE))
+ __bch2_write(op);
+ else
+ bch2_write_done(&op->cl);
}
}
if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
op->pos.inode,
- op->pos.offset - bio_sectors(bio), /* XXX definitely wrong */
+ wbio->inode_offset << 9,
"data write error: %s",
bch2_blk_status_to_str(bio->bi_status))) {
set_bit(wbio->dev, op->failed.d);
op->flags |= BCH_WRITE_IO_ERROR;
}
+ if (wbio->nocow)
+ set_bit(wbio->dev, op->devs_need_flush->d);
+
if (wbio->have_ioref) {
bch2_latency_acct(ca, wbio->submit_time, WRITE);
percpu_ref_put(&ca->io_ref);
if (wbio->put_bio)
bio_put(bio);
- if (parent)
+ if (parent) {
bio_endio(&parent->bio);
- else if (!(op->flags & BCH_WRITE_SKIP_CLOSURE_PUT))
- closure_put(cl);
- else
- continue_at_nobarrier(cl, bch2_write_index, index_update_wq(op));
+ return;
+ }
+
+ closure_put(cl);
}
static void init_append_extent(struct bch_write_op *op,
return ret;
}
-static void __bch2_write(struct closure *cl)
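+/*
+ * A nocow write may only overwrite an extent in place if it carries no
+ * checksum, isn't compressed or erasure coded, and already has enough
+ * replicas for the requested durability:
+ */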
+static bool bch2_extent_is_writeable(struct bch_write_op *op,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = op->c;
+ struct bkey_s_c_extent e;
+ struct extent_ptr_decoded p;
+ const union bch_extent_entry *entry;
+ unsigned replicas = 0;
+
+ if (k.k->type != KEY_TYPE_extent)
+ return false;
+
+ e = bkey_s_c_to_extent(k);
+ extent_for_each_ptr_decode(e, p, entry) {
+ if (p.crc.csum_type ||
+ crc_is_compressed(p.crc) ||
+ p.has_ec)
+ return false;
+
+ replicas += bch2_extent_ptr_durability(c, &p);
+ }
+
+ return replicas >= op->opts.data_replicas;
+}
+
+static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ const struct bch_extent_ptr *ptr;
+ struct bkey_i *k;
+
+ for_each_keylist_key(&op->insert_keys, k) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
+
+ bkey_for_each_ptr(ptrs, ptr)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ }
+}
+
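+/*
+ * After a nocow write to an unwritten (fallocated) extent completes, clear
+ * the unwritten flag on just the range that was written and update i_size:
+ */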
+static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *orig,
+ struct bkey_s_c k,
+ u64 new_i_size)
+{
+ struct bkey_i *new;
+ struct bkey_ptrs ptrs;
+ struct bch_extent_ptr *ptr;
+ int ret;
+
+ if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
+ /* trace this */
+ return 0;
+ }
+
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
+
+ bkey_reassemble(new, k);
+
+ bch2_cut_front(bkey_start_pos(&orig->k), new);
+ bch2_cut_back(orig->k.p, new);
+
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_for_each_ptr(ptrs, ptr)
+ ptr->unwritten = 0;
+
+ /*
+ * Note that we're not calling bch2_subvol_get_snapshot() in this path -
+ * that was done when we kicked off the write, and here it's important
+ * that we update the extent that we wrote to - even if a snapshot has
+ * since been created. The write is still outstanding, so we're ok
+ * w.r.t. snapshot atomicity:
+ */
+ return bch2_extent_update_i_size_sectors(trans, iter,
+ min(new->k.p.offset << 9, new_i_size), 0) ?:
+ bch2_trans_update(trans, iter, new,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+}
+
+static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_i *orig;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_keylist_key(&op->insert_keys, orig) {
+ ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
+ bkey_start_pos(&orig->k),
+ BTREE_ITER_INTENT, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL, ({
+ if (bkey_cmp(bkey_start_pos(k.k), orig->k.p) >= 0)
+ break;
+
+ bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
+ }));
+
+ if (ret) {
+ struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
+
+ bch_err_inum_offset_ratelimited(c,
+ k->k.p.inode, k->k.p.offset << 9,
+ "write error while doing btree update: %s",
+ bch2_err_str(ret));
+ op->error = ret;
+ break;
+ }
+ }
+
+ bch2_trans_exit(&trans);
+}
+
+static void __bch2_nocow_write_done(struct bch_write_op *op)
+{
+ bch2_nocow_write_unlock(op);
+
+ if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
+ op->error = -EIO;
+ } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
+ bch2_nocow_write_convert_unwritten(op);
+}
+
+static void bch2_nocow_write_done(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+
+ __bch2_nocow_write_done(op);
+ bch2_write_done(cl);
+}
+
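+/*
+ * Nocow write path: walk the existing extents covering the write, take device
+ * iorefs and per-bucket nocow locks, then overwrite the data in place. Extents
+ * that aren't suitable (or buckets that went stale) cause us to fall back to
+ * the normal COW write path for the remainder:
+ */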
+static void bch2_nocow_write(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_ptrs_c ptrs;
+ const struct bch_extent_ptr *ptr, *ptr2;
+ u32 snapshot;
+ int ret;
+
+ if (op->flags & BCH_WRITE_MOVE)
+ return;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot);
+ if (unlikely(ret))
+ goto err;
+
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(op->pos.inode, op->pos.offset, snapshot),
+ BTREE_ITER_SLOTS);
+ while (1) {
+ struct bio *bio = &op->wbio.bio;
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ /* fall back to normal cow write path? */
+ if (unlikely(k.k->p.snapshot != snapshot ||
+ !bch2_extent_is_writeable(op, k)))
+ break;
+
+ if (bch2_keylist_realloc(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ k.k->u64s))
+ break;
+
+ /* Get iorefs before dropping btree locks: */
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr(ptrs, ptr)
+ if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
+ goto err_get_ioref;
+
+ /* Unlock before taking nocow locks, doing IO: */
+ bkey_reassemble(op->insert_keys.top, k);
+ bch2_trans_unlock(&trans);
+
+ bch2_cut_front(op->pos, op->insert_keys.top);
+ bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
+
+ ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(op->insert_keys.top));
+ bkey_for_each_ptr(ptrs, ptr) {
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ if (unlikely(ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ goto err_bucket_stale;
+
+ if (ptr->unwritten)
+ op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
+ }
+
+ bio = &op->wbio.bio;
+ if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
+ bio = bio_split(bio, k.k->p.offset - op->pos.offset,
+ GFP_KERNEL, &c->bio_write);
+ wbio_init(bio)->put_bio = true;
+ bio->bi_opf = op->wbio.bio.bi_opf;
+ } else {
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ op->pos.offset += bio_sectors(bio);
+ op->written += bio_sectors(bio);
+
+ bio->bi_end_io = bch2_write_endio;
+ bio->bi_private = &op->cl;
+ bio->bi_opf |= REQ_OP_WRITE;
+ closure_get(&op->cl);
+ bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
+ op->insert_keys.top, true);
+
+ bch2_keylist_push(&op->insert_keys);
+ if (op->flags & BCH_WRITE_DONE)
+ break;
+ bch2_btree_iter_advance(&iter);
+ }
+out:
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (ret) {
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ op->pos.offset << 9,
+ "%s: btree lookup error %s",
+ __func__, bch2_err_str(ret));
+ op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ bch2_trans_exit(&trans);
+
+ /* fall back to cow write path? */
+ if (!(op->flags & BCH_WRITE_DONE)) {
+ closure_sync(&op->cl);
+ __bch2_nocow_write_done(op);
+ op->insert_keys.top = op->insert_keys.keys;
+ } else if (op->flags & BCH_WRITE_SYNC) {
+ closure_sync(&op->cl);
+ bch2_nocow_write_done(&op->cl);
+ } else {
+ /*
+ * XXX
+ * needs to run out of process context because ei_quota_lock is
+ * a mutex
+ */
+ continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
+ }
+ return;
+err_get_ioref:
+ bkey_for_each_ptr(ptrs, ptr2) {
+ if (ptr2 == ptr)
+ break;
+
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+ }
+
+ /* Fall back to COW path: */
+ goto out;
+err_bucket_stale:
+ bkey_for_each_ptr(ptrs, ptr2) {
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr2),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ if (ptr2 == ptr)
+ break;
+ }
+
+ bkey_for_each_ptr(ptrs, ptr2)
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+
+ /* We can retry this: */
+ ret = BCH_ERR_transaction_restart;
+ goto out;
+}
+
+static void __bch2_write(struct bch_write_op *op)
+{
struct bch_fs *c = op->c;
- struct write_point *wp;
+ struct write_point *wp = NULL;
struct bio *bio = NULL;
- bool skip_put = true;
unsigned nofs_flags;
int ret;
nofs_flags = memalloc_nofs_save();
+
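+ /*
+ * Try the nocow path first when the inode has nocow set; anything it
+ * couldn't write in place falls through to the normal COW allocation
+ * loop below:
+ */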
+ if (unlikely(op->opts.nocow)) {
+ bch2_nocow_write(op);
+ if (op->flags & BCH_WRITE_DONE)
+ goto out_nofs_restore;
+ }
again:
memset(&op->failed, 0, sizeof(op->failed));
+ op->btree_update_ready = false;
do {
struct bkey_i *key_to_write;
/* +1 for possible cache device: */
if (op->open_buckets.nr + op->nr_replicas + 1 >
ARRAY_SIZE(op->open_buckets.v))
- goto flush_io;
+ break;
if (bch2_keylist_realloc(&op->insert_keys,
op->inline_keys,
ARRAY_SIZE(op->inline_keys),
BKEY_EXTENT_U64s_MAX))
- goto flush_io;
+ break;
/*
* The copygc thread is now global, which means it's no longer
* freeing up space on specific disks, which means that
* allocations for specific disks may hang arbitrarily long:
*/
- wp = bch2_alloc_sectors_start(c,
- op->target,
- op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
- op->write_point,
- &op->devs_have,
- op->nr_replicas,
- op->nr_replicas_required,
- op->alloc_reserve,
- op->flags,
- (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
- BCH_WRITE_ONLY_SPECIFIED_DEVS)) ? NULL : cl);
- EBUG_ON(!wp);
-
- if (IS_ERR(wp)) {
- if (unlikely(wp != ERR_PTR(-EAGAIN))) {
- ret = PTR_ERR(wp);
- goto err;
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_alloc_sectors_start_trans(&trans,
+ op->target,
+ op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
+ op->write_point,
+ &op->devs_have,
+ op->nr_replicas,
+ op->nr_replicas_required,
+ op->alloc_reserve,
+ op->flags,
+ (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
+ BCH_WRITE_ONLY_SPECIFIED_DEVS))
+ ? NULL : &op->cl, &wp));
+ if (unlikely(ret)) {
+ if (unlikely(ret != -EAGAIN)) {
+ op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
}
- goto flush_io;
+ break;
}
- /*
- * It's possible for the allocator to fail, put us on the
- * freelist waitlist, and then succeed in one of various retry
- * paths: if that happens, we need to disable the skip_put
- * optimization because otherwise there won't necessarily be a
- * barrier before we free the bch_write_op:
- */
- if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
- skip_put = false;
-
bch2_open_bucket_get(c, wp, &op->open_buckets);
ret = bch2_write_extent(op, wp, &bio);
- bch2_alloc_sectors_done(c, wp);
- if (ret < 0)
- goto err;
+ bch2_alloc_sectors_done(c, wp);
- if (ret) {
- skip_put = false;
- } else {
- /*
- * for the skip_put optimization this has to be set
- * before we submit the bio:
- */
+ if (ret < 0) {
+ op->error = ret;
op->flags |= BCH_WRITE_DONE;
+ break;
}
+ if (!ret)
+ op->flags |= BCH_WRITE_DONE;
+
bio->bi_end_io = bch2_write_endio;
bio->bi_private = &op->cl;
bio->bi_opf |= REQ_OP_WRITE;
- if (!skip_put)
- closure_get(bio->bi_private);
- else
- op->flags |= BCH_WRITE_SKIP_CLOSURE_PUT;
+ closure_get(bio->bi_private);
key_to_write = (void *) (op->insert_keys.keys_p +
key_to_write_offset);
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
- key_to_write);
+ key_to_write, false);
} while (ret);
- if (!skip_put)
- continue_at(cl, bch2_write_index, index_update_wq(op));
-out:
- memalloc_nofs_restore(nofs_flags);
- return;
-err:
- op->error = ret;
- op->flags |= BCH_WRITE_DONE;
-
- continue_at(cl, bch2_write_index, index_update_wq(op));
- goto out;
-flush_io:
/*
- * If the write can't all be submitted at once, we generally want to
- * block synchronously as that signals backpressure to the caller.
+ * Sync or no?
*
- * However, if we're running out of a workqueue, we can't block here
- * because we'll be blocking other work items from completing:
+ * If we're running asynchronously, we may still want to block
+ * synchronously here if we weren't able to submit all of the IO at
+ * once, as that signals backpressure to the caller.
*/
- if (current->flags & PF_WQ_WORKER) {
- continue_at(cl, bch2_write_index, index_update_wq(op));
- goto out;
- }
-
- closure_sync(cl);
-
- if (!bch2_keylist_empty(&op->insert_keys)) {
+ if ((op->flags & BCH_WRITE_SYNC) || !(op->flags & BCH_WRITE_DONE)) {
+ closure_sync(&op->cl);
__bch2_write_index(op);
- if (op->error) {
- op->flags |= BCH_WRITE_DONE;
- continue_at_nobarrier(cl, bch2_write_done, NULL);
- goto out;
- }
- }
+ if (!(op->flags & BCH_WRITE_DONE))
+ goto again;
+ bch2_write_done(&op->cl);
+ } else {
+ spin_lock(&wp->writes_lock);
+ op->wp = wp;
+ list_add_tail(&op->wp_list, &wp->writes);
+ spin_unlock(&wp->writes_lock);
- goto again;
+ continue_at(&op->cl, bch2_write_index, NULL);
+ }
+out_nofs_restore:
+ memalloc_nofs_restore(nofs_flags);
}
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
- struct closure *cl = &op->cl;
struct bio *bio = &op->wbio.bio;
struct bvec_iter iter;
struct bkey_i_inline_data *id;
op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
op->flags |= BCH_WRITE_DONE;
- continue_at_nobarrier(cl, bch2_write_index, NULL);
- return;
+ __bch2_write_index(op);
err:
bch2_write_done(&op->cl);
}
struct bch_fs *c = op->c;
unsigned data_len;
+ EBUG_ON(op->cl.parent);
BUG_ON(!op->nr_replicas);
BUG_ON(!op->write_point.v);
BUG_ON(!bkey_cmp(op->pos, POS_MAX));
wbio_init(bio)->put_bio = false;
if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
- bch_err_inum_ratelimited(c, op->pos.inode,
- "misaligned write");
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ op->pos.offset << 9,
+ "misaligned write");
op->error = -EIO;
goto err;
}
return;
}
- continue_at_nobarrier(cl, __bch2_write, NULL);
+ __bch2_write(op);
return;
err:
bch2_disk_reservation_put(c, &op->res);
- if (op->end_io) {
- EBUG_ON(cl->parent);
- closure_debug_destroy(cl);
+ closure_debug_destroy(&op->cl);
+ if (op->end_io)
op->end_io(op);
- } else {
- closure_return(cl);
- }
}
/* Cache promotion on read */
struct promote_op {
- struct closure cl;
struct rcu_head rcu;
u64 start_time;
if (bch2_bkey_has_target(c, k, opts.promote_target))
return false;
+ if (bkey_extent_is_unwritten(k))
+ return false;
+
if (bch2_target_congested(c, opts.promote_target)) {
/* XXX trace this */
return false;
kfree_rcu(op, rcu);
}
-static void promote_done(struct closure *cl)
+static void promote_done(struct bch_write_op *wop)
{
struct promote_op *op =
- container_of(cl, struct promote_op, cl);
+ container_of(wop, struct promote_op, write.op);
struct bch_fs *c = op->write.op.c;
bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
- struct closure *cl = &op->cl;
struct bio *bio = &op->write.op.wbio.bio;
trace_and_count(op->write.op.c, read_promote, &rbio->bio);
sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
- closure_init(cl, NULL);
- bch2_data_update_read_done(&op->write, rbio->pick.crc, cl);
- closure_return_with_destructor(cl, promote_done);
+ bch2_data_update_read_done(&op->write, rbio->pick.crc);
}
static struct promote_op *__promote_alloc(struct bch_fs *c,
},
btree_id, k);
BUG_ON(ret);
+ op->write.op.end_io = promote_done;
return op;
err:
goto out;
}
- bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector,
+ bch_err_inum_offset_ratelimited(ca,
+ rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
"data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
+ bch2_io_error(ca);
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
decompression_err:
- bch_err_inum_ratelimited(c, rbio->read_pos.inode,
- "decompression error");
+ bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "decompression error");
bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
goto out;
decrypt_err:
- bch_err_inum_ratelimited(c, rbio->read_pos.inode,
- "decrypt error");
+ bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "decrypt error");
bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
goto out;
}
if (k.k->type != KEY_TYPE_reflink_v &&
k.k->type != KEY_TYPE_indirect_inline_data) {
- bch_err_inum_ratelimited(trans->c, orig_k->k->k.p.inode,
+ bch_err_inum_offset_ratelimited(trans->c,
+ orig_k->k->k.p.inode,
+ orig_k->k->k.p.offset << 9,
"%llu len %u points to nonexistent indirect extent %llu",
orig_k->k->k.p.offset,
orig_k->k->k.size,
goto hole;
if (pick_ret < 0) {
- bch_err_inum_ratelimited(c, k.k->p.inode,
- "no device to read from");
+ bch_err_inum_offset_ratelimited(c,
+ read_pos.inode, read_pos.offset << 9,
+ "no device to read from");
goto err;
}
if (!rbio->pick.idx) {
if (!rbio->have_ioref) {
- bch_err_inum_ratelimited(c, k.k->p.inode,
- "no device to read from");
+ bch_err_inum_offset_ratelimited(c,
+ read_pos.inode,
+ read_pos.offset << 9,
+ "no device to read from");
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
}
bch2_bkey_buf_exit(&sk, c);
if (ret) {
- bch_err_inum_ratelimited(c, inum.inum,
- "read error %i from btree lookup", ret);
+ bch_err_inum_offset_ratelimited(c, inum.inum,
+ bvec_iter.bi_sector << 9,
+ "read error %i from btree lookup", ret);
rbio->bio.bi_status = BLK_STS_IOERR;
bch2_rbio_done(rbio);
}
int bch2_fs_io_init(struct bch_fs *c)
{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
+ two_state_lock_init(&c->nocow_locks.l[i]);
+
if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
BIOSET_NEED_BVECS) ||
bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
void bch2_latency_acct(struct bch_dev *, u64, int);
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
- enum bch_data_type, const struct bkey_i *);
+ enum bch_data_type, const struct bkey_i *, bool);
#define BLK_STS_REMOVED ((__force blk_status_t)128)
enum bch_write_flags {
BCH_WRITE_ALLOC_NOWAIT = (1 << 0),
BCH_WRITE_CACHED = (1 << 1),
- BCH_WRITE_FLUSH = (1 << 2),
- BCH_WRITE_DATA_ENCODED = (1 << 3),
- BCH_WRITE_PAGES_STABLE = (1 << 4),
- BCH_WRITE_PAGES_OWNED = (1 << 5),
- BCH_WRITE_ONLY_SPECIFIED_DEVS = (1 << 6),
- BCH_WRITE_WROTE_DATA_INLINE = (1 << 7),
- BCH_WRITE_FROM_INTERNAL = (1 << 8),
- BCH_WRITE_CHECK_ENOSPC = (1 << 9),
+ BCH_WRITE_DATA_ENCODED = (1 << 2),
+ BCH_WRITE_PAGES_STABLE = (1 << 3),
+ BCH_WRITE_PAGES_OWNED = (1 << 4),
+ BCH_WRITE_ONLY_SPECIFIED_DEVS = (1 << 5),
+ BCH_WRITE_WROTE_DATA_INLINE = (1 << 6),
+ BCH_WRITE_CHECK_ENOSPC = (1 << 7),
+ BCH_WRITE_SYNC = (1 << 8),
+ BCH_WRITE_MOVE = (1 << 9),
/* Internal: */
- BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 10),
- BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 11),
- BCH_WRITE_DONE = (1 << 12),
- BCH_WRITE_IO_ERROR = (1 << 13),
+ BCH_WRITE_DONE = (1 << 10),
+ BCH_WRITE_IO_ERROR = (1 << 11),
+ BCH_WRITE_CONVERT_UNWRITTEN = (1 << 12),
};
-static inline u64 *op_journal_seq(struct bch_write_op *op)
-{
- return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
- ? op->journal_seq_p : &op->journal_seq;
-}
-
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
return op->alloc_reserve == RESERVE_movinggc
struct bkey_i *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, subvol_inum,
struct btree_iter *, struct bkey_i *,
- struct disk_reservation *, u64 *, u64, s64 *, bool);
+ struct disk_reservation *, u64, s64 *, bool);
+int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
+ unsigned, struct bch_io_opts, s64 *,
+ struct write_point_specifier);
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
subvol_inum, u64, s64 *);
int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
-int bch2_write_index_default(struct bch_write_op *);
-
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
struct bch_io_opts opts)
{
op->flags = 0;
op->written = 0;
op->error = 0;
- op->csum_type = bch2_data_checksum_type(c, opts.data_checksum);
+ op->csum_type = bch2_data_checksum_type(c, opts);
op->compression_type = bch2_compression_opt_to_type[opts.compression];
op->nr_replicas = 0;
op->nr_replicas_required = c->opts.data_replicas_required;
op->version = ZERO_VERSION;
op->write_point = (struct write_point_specifier) { 0 };
op->res = (struct disk_reservation) { 0 };
- op->journal_seq = 0;
op->new_i_size = U64_MAX;
op->i_sectors_delta = 0;
- op->index_update_fn = bch2_write_index_default;
+ op->devs_need_flush = NULL;
}
void bch2_write(struct closure *);
+void bch2_write_point_do_index_updates(struct work_struct *);
+
static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent;
u64 submit_time;
+ u64 inode_offset;
struct bch_devs_list failed;
u8 dev;
bounce:1,
put_bio:1,
have_ioref:1,
+ nocow:1,
used_mempool:1,
first_btree_write:1;
unsigned nr_replicas_required:4;
unsigned alloc_reserve:3;
unsigned incompressible:1;
+ unsigned btree_update_ready:1;
struct bch_devs_list devs_have;
u16 target;
struct write_point_specifier write_point;
+ struct write_point *wp;
+ struct list_head wp_list;
+
struct disk_reservation res;
struct open_buckets open_buckets;
- /*
- * If caller wants to flush but hasn't passed us a journal_seq ptr, we
- * still need to stash the journal_seq somewhere:
- */
- union {
- u64 *journal_seq_p;
- u64 journal_seq;
- };
u64 new_i_size;
s64 i_sectors_delta;
- int (*index_update_fn)(struct bch_write_op *);
-
struct bch_devs_mask failed;
struct keylist insert_keys;
u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
+ /*
+ * Bitmask of devices that have had nocow writes issued to them since
+ * last flush:
+ */
+ struct bch_devs_mask *devs_need_flush;
+
/* Must be last: */
struct bch_write_bio wbio;
};
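
The devs_need_flush comment above describes a mark-then-flush pattern: each nocow write records which device it went to, and a later flush only needs to hit the devices actually written since the previous flush. A minimal self-contained sketch of that pattern, using a plain bitmap and a hypothetical issue_flush() callback rather than the bcachefs types:

#include <limits.h>
#include <string.h>

#define MAX_DEVS	64

struct flush_mask {
	unsigned long	bits[MAX_DEVS / (sizeof(unsigned long) * CHAR_BIT)];
};

/* Hypothetical nocow write path hook: remember that @dev will need a cache flush */
static void mark_dev_needs_flush(struct flush_mask *m, unsigned dev)
{
	m->bits[dev / (sizeof(unsigned long) * CHAR_BIT)] |=
		1UL << (dev % (sizeof(unsigned long) * CHAR_BIT));
}

/* At flush time: issue a cache flush only to marked devices, then clear the mask */
static void flush_marked_devs(struct flush_mask *m, void (*issue_flush)(unsigned))
{
	unsigned dev;

	for (dev = 0; dev < MAX_DEVS; dev++)
		if (m->bits[dev / (sizeof(unsigned long) * CHAR_BIT)] &
		    (1UL << (dev % (sizeof(unsigned long) * CHAR_BIT))))
			issue_flush(dev);

	memset(m->bits, 0, sizeof(m->bits));
}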
*
* Synchronous updates are specified by passing a closure (@flush_cl) to
* bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
- * down to the journalling code. That closure will will wait on the journal
- * write to complete (via closure_wait()).
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
*
* If the index update wasn't synchronous, the journal entry will be
* written out after 10 ms have elapsed, by default (the delay_ms field
struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
if (!i->ptrs[ptr].csum_good)
- printk(KERN_ERR "bcachefs (%s) sector %llu: invalid journal checksum, seq %llu%s\n",
- ca->name, i->ptrs[ptr].sector,
- le64_to_cpu(i->j.seq),
- i->csum_good ? " (had good copy on another device)" : "");
+ bch_err_dev_offset(ca, i->ptrs[ptr].sector,
+ "invalid journal checksum, seq %llu%s",
+ le64_to_cpu(i->j.seq),
+ i->csum_good ? " (had good copy on another device)" : "");
}
ret = jset_validate(c,
{
if (l->keys_p != inline_keys)
kfree(l->keys_p);
- bch2_keylist_init(l, inline_keys);
}
static inline void bch2_keylist_push(struct keylist *l)
int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_lru (struct bkey_ops) { \
+#define bch2_bkey_ops_lru ((struct bkey_ops) { \
.key_invalid = bch2_lru_invalid, \
.val_to_text = bch2_lru_to_text, \
-}
+})
int bch2_lru_delete(struct btree_trans *, u64, u64, u64, struct bkey_s_c);
int bch2_lru_set(struct btree_trans *, u64, u64, u64 *);
struct bio_vec bi_inline_vecs[0];
};
-static void move_free(struct closure *cl)
+static void move_free(struct moving_io *io)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->write.ctxt;
struct bch_fs *c = ctxt->c;
kfree(io);
}
-static void move_write_done(struct closure *cl)
+static void move_write_done(struct bch_write_op *op)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct moving_io *io = container_of(op, struct moving_io, write.op);
struct moving_context *ctxt = io->write.ctxt;
if (io->write.op.error)
ctxt->write_error = true;
atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
- closure_return_with_destructor(cl, move_free);
+ move_free(io);
+ closure_put(&ctxt->cl);
}
-static void move_write(struct closure *cl)
+static void move_write(struct moving_io *io)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
-
if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
- closure_return_with_destructor(cl, move_free);
+ move_free(io);
return;
}
+ closure_get(&io->write.ctxt->cl);
atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
- bch2_data_update_read_done(&io->write, io->rbio.pick.crc, cl);
- continue_at(cl, move_write_done, NULL);
+ bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
}
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
- closure_call(&io->cl, move_write, NULL, &ctxt->cl);
+ move_write(io);
}
}
}
}
-void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
{
memset(stats, 0, sizeof(*stats));
scnprintf(stats->name, sizeof(stats->name), "%s", name);
if (!percpu_ref_tryget_live(&c->writes))
return -EROFS;
+ /*
+ * Before memory allocations & taking nocow locks in
+ * bch2_data_update_init():
+ */
+ bch2_trans_unlock(trans);
+
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
data_opts, btree_id, k);
- if (ret)
+ if (ret && ret != -BCH_ERR_unwritten_extent_update)
goto err_free_pages;
io->write.ctxt = ctxt;
+ io->write.op.end_io = move_write_done;
atomic64_inc(&ctxt->stats->keys_moved);
atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+
+ if (ret == -BCH_ERR_unwritten_extent_update) {
+ bch2_update_unwritten_extent(trans, &io->write);
+ move_free(io);
+ return 0;
+ }
+
+ BUG_ON(ret);
+
this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
trace_move_extent_read(k.k);
*/
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
+ bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
btree_id, k, data_opts);
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ struct bch_backpointer bp;
+ u64 bp_offset = 0;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
if (a.v->gen == gen &&
a.v->dirty_sectors) {
- struct printbuf buf = PRINTBUF;
-
if (a.v->data_type == BCH_DATA_btree) {
bch2_trans_unlock(trans);
if (bch2_btree_interior_updates_flush(c))
goto again;
+ goto failed_to_evacuate;
}
-
- prt_str(&buf, "failed to evacuate bucket ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- bch_err(c, "%s", buf.buf);
- printbuf_exit(&buf);
}
}
bch2_trans_iter_exit(trans, &iter);
return ret;
+failed_to_evacuate:
+ bch2_trans_iter_exit(trans, &iter);
+
+ prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
+ bch2_bkey_val_to_text(&buf, c, k);
+
+ while (1) {
+ bch2_trans_begin(trans);
+
+ ret = bch2_get_next_backpointer(trans, bucket, gen,
+ &bp_offset, &bp,
+ BTREE_ITER_CACHED);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ if (bp_offset == U64_MAX)
+ break;
+
+ k = bch2_backpointer_get_key(trans, &iter,
+ bucket, bp_offset, bp);
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ if (!k.k)
+ continue;
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+ return 0;
}
int __bch2_evacuate_bucket(struct moving_context *ctxt,
switch (op.op) {
case BCH_DATA_OP_REREPLICATE:
- bch_move_stats_init(stats, "rereplicate");
+ bch2_move_stats_init(stats, "rereplicate");
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, -1);
if (op.migrate.dev >= c->sb.nr_devices)
return -EINVAL;
- bch_move_stats_init(stats, "migrate");
+ bch2_move_stats_init(stats, "migrate");
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
ret = bch2_replicas_gc2(c) ?: ret;
break;
case BCH_DATA_OP_REWRITE_OLD_NODES:
- bch_move_stats_init(stats, "rewrite_old_nodes");
+ bch2_move_stats_init(stats, "rewrite_old_nodes");
ret = bch2_scan_old_btree_nodes(c, stats);
break;
default:
struct bch_move_stats *,
struct bch_ioctl_data);
-inline void bch_move_stats_init(struct bch_move_stats *stats,
- char *name);
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
#endif /* _BCACHEFS_MOVE_H */
};
int ret = 0;
- bch_move_stats_init(&move_stats, "copygc");
+ bch2_move_stats_init(&move_stats, "copygc");
for_each_rw_member(ca, c, dev_idx)
heap_size += ca->mi.nbuckets >> 7;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "nocow_locking.h"
+#include "util.h"
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+ u64 start_time = local_clock();
+
+ bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+ bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_NOCOW_LOCKING_H
+#define _BCACHEFS_NOCOW_LOCKING_H
+
+#include "bcachefs_format.h"
+#include "two_state_shared_lock.h"
+
+#include <linux/siphash.h>
+
+#define BUCKET_NOCOW_LOCKS (1U << 10)
+
+struct bucket_nocow_lock_table {
+ siphash_key_t key;
+ two_state_lock_t l[BUCKET_NOCOW_LOCKS];
+};
+
+#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0)
+
+static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket)
+{
+ u64 dev_bucket = bucket.inode << 56 | bucket.offset;
+ unsigned h = siphash_1u64(dev_bucket, &t->key);
+
+ return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
+}
+
+static inline bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t,
+ struct bpos bucket)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ return atomic_long_read(&l->v) != 0;
+}
+
+static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ bch2_two_state_unlock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+}
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, struct bpos, int);
+
+static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE))
+ __bch2_bucket_nocow_lock(t, bucket, flags);
+}
+
+#endif /* _BCACHEFS_NOCOW_LOCKING_H */
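
For orientation, a usage sketch of the new nocow bucket locks: a writer takes the per-bucket lock in the UPDATE state (the trylock fast path first, falling back to the blocking slow path in __bch2_bucket_nocow_lock() on contention), does its in-place write, then drops the lock. The bpos encodes (device, bucket) as in the hash above; do_in_place_write() is a hypothetical stand-in for the real write path:

static void nocow_write_one_bucket(struct bch_fs *c, struct bpos bucket)
{
	/* Trylock fast path; waits in the slow path if the other state is held: */
	bch2_bucket_nocow_lock(&c->nocow_locks, bucket, BUCKET_NOCOW_LOCK_UPDATE);

	do_in_place_write(c, bucket);	/* hypothetical: issue the overwrite */

	bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, BUCKET_NOCOW_LOCK_UPDATE);
}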
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, NULL) \
+ x(nocow, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
+ OPT_BOOL(), \
+ BCH_SB_NOCOW, false, \
+ NULL, "Nocow mode: Writes will be done in place when possible.\n"\
+ "Snapshots and reflink will still cause writes to be COW\n"\
+ "Implicitly disables data checksumming, compression and encryption")\
x(fs_size, u64, \
OPT_DEVICE, \
OPT_UINT(0, S64_MAX), \
memset(&msgs, 0, sizeof(msgs));
+ for_each_set_qtype(c, i, q, qtypes) {
+ mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
+ if (!mq[i])
+ return -ENOMEM;
+ }
+
for_each_set_qtype(c, i, q, qtypes)
mutex_lock_nested(&q->lock, i);
for_each_set_qtype(c, i, q, qtypes) {
- mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);
- if (!mq[i]) {
- ret = -ENOMEM;
- goto err;
- }
-
ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
if (ret)
goto err;
memset(&msgs, 0, sizeof(msgs));
+ for_each_set_qtype(c, i, q, qtypes) {
+ src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
+ dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
+ if (!src_q[i] || !dst_q[i])
+ return -ENOMEM;
+ }
+
for_each_set_qtype(c, i, q, qtypes)
mutex_lock_nested(&q->lock, i);
for_each_set_qtype(c, i, q, qtypes) {
- src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
- dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);
-
- if (!src_q[i] || !dst_q[i]) {
- ret = -ENOMEM;
- goto err;
- }
-
ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
dst_q[i]->c[Q_SPC].v + space,
mode);
int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_quota (struct bkey_ops) { \
+#define bch2_bkey_ops_quota ((struct bkey_ops) { \
.key_invalid = bch2_quota_invalid, \
.val_to_text = bch2_quota_to_text, \
-}
+})
static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
{
prev_start = jiffies;
prev_cputime = curr_cputime();
- bch_move_stats_init(&move_stats, "rebalance");
+ bch2_move_stats_init(&move_stats, "rebalance");
while (!kthread_wait_freezable(r->enabled)) {
cond_resched();
goto err;
bch_verbose(c, "done checking need_discard and freespace btrees");
+ if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+ err = "error creating root snapshot node";
+ ret = bch2_fs_initialize_subvolumes(c);
+ if (ret)
+ goto err;
+ }
+
+ bch_verbose(c, "reading snapshots table");
+ err = "error reading snapshots table";
+ ret = bch2_fs_snapshots_start(c);
+ if (ret)
+ goto err;
+ bch_verbose(c, "reading snapshots done");
+
set_bit(BCH_FS_MAY_GO_RW, &c->flags);
bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
bch_verbose(c, "done checking alloc to lru refs");
set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
} else {
- set_bit(BCH_FS_MAY_GO_RW, &c->flags);
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
if (c->opts.norecovery)
goto out;
+ if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
+ err = "error creating root snapshot node";
+ ret = bch2_fs_initialize_subvolumes(c);
+ if (ret)
+ goto err;
+ }
+
+ bch_verbose(c, "reading snapshots table");
+ err = "error reading snapshots table";
+ ret = bch2_fs_snapshots_start(c);
+ if (ret)
+ goto err;
+ bch_verbose(c, "reading snapshots done");
+
+ set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+
bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
err = "journal replay failed";
ret = bch2_journal_replay(c);
if (ret)
goto err;
- if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
- bch2_fs_lazy_rw(c);
-
- err = "error creating root snapshot node";
- ret = bch2_fs_initialize_subvolumes(c);
- if (ret)
- goto err;
- }
-
- bch_verbose(c, "reading snapshots table");
- err = "error reading snapshots table";
- ret = bch2_fs_snapshots_start(c);
- if (ret)
- goto err;
- bch_verbose(c, "reading snapshots done");
-
if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
/* set bi_subvol on root inode */
err = "error upgrade root inode for subvolumes";
le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
struct bch_move_stats stats;
- bch_move_stats_init(&stats, "recovery");
+ bch2_move_stats_init(&stats, "recovery");
bch_info(c, "scanning for old btree nodes");
ret = bch2_fs_read_write(c);
set_bit(BCH_FS_FSCK_DONE, &c->flags);
bch2_flush_fsck_errs(c);
- if (!c->opts.keep_journal) {
+ if (!c->opts.keep_journal &&
+ test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
bch2_journal_keys_free(&c->journal_keys);
bch2_journal_entries_free(c);
}
mutex_unlock(&c->sb_lock);
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
+ set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
+ set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
+ set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
set_bit(BCH_FS_MAY_GO_RW, &c->flags);
set_bit(BCH_FS_FSCK_DONE, &c->flags);
goto err;
bch_verbose(c, "reading snapshots done");
- bch2_inode_init(c, &root_inode, 0, 0,
- S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
+ bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
root_inode.bi_inum = BCACHEFS_ROOT_INO;
root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
bch2_inode_pack(&packed_inode, &root_inode);
if (bkey_cmp(iter->pos, end) >= 0)
break;
+ if (bkey_extent_is_unwritten(k))
+ continue;
+
if (bkey_extent_is_data(k.k))
return k;
}
dst_end.offset - dst_iter.pos.offset));
ret = bch2_extent_update(&trans, dst_inum, &dst_iter,
- new_dst.k, &disk_res, NULL,
+ new_dst.k, &disk_res,
new_i_size, i_sectors_delta,
true);
bch2_disk_reservation_put(c, &disk_res);
struct bkey_s_c);
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-#define bch2_bkey_ops_reflink_p (struct bkey_ops) { \
+#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
.key_invalid = bch2_reflink_p_invalid, \
.val_to_text = bch2_reflink_p_to_text, \
.key_merge = bch2_reflink_p_merge, \
.trans_trigger = bch2_trans_mark_reflink_p, \
.atomic_trigger = bch2_mark_reflink_p, \
-}
+})
int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
int, struct printbuf *);
int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_i *, unsigned);
-#define bch2_bkey_ops_reflink_v (struct bkey_ops) { \
+#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_reflink_v, \
.atomic_trigger = bch2_mark_extent, \
-}
+})
int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
int, struct printbuf *);
struct bkey_s_c, struct bkey_i *,
unsigned);
-#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
+#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
.key_invalid = bch2_indirect_inline_data_invalid, \
.val_to_text = bch2_indirect_inline_data_to_text, \
.trans_trigger = bch2_trans_mark_indirect_inline_data, \
-}
+})
static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
{
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_REPLICAS_TYPES_H
#define _BCACHEFS_REPLICAS_TYPES_H
for (i = 0; i < 2; i++) {
int ret = snapshot_live(trans, child[i]);
+
if (ret < 0)
return ret;
int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
int rw, struct printbuf *);
-#define bch2_bkey_ops_snapshot (struct bkey_ops) { \
+#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
.key_invalid = bch2_snapshot_invalid, \
.val_to_text = bch2_snapshot_to_text, \
-}
+})
int bch2_mark_snapshot(struct btree_trans *, struct bkey_s_c,
struct bkey_s_c, unsigned);
int rw, struct printbuf *);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_subvolume (struct bkey_ops) { \
+#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
.key_invalid = bch2_subvolume_invalid, \
.val_to_text = bch2_subvolume_to_text, \
-}
+})
int bch2_subvolume_get(struct btree_trans *, unsigned,
bool, int, struct bch_subvolume *);
static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
unsigned dev)
{
- BUG_ON(bch2_dev_list_has_dev(*devs, dev));
- BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
- devs->devs[devs->nr++] = dev;
+ if (!bch2_dev_list_has_dev(*devs, dev)) {
+ BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
+ devs->devs[devs->nr++] = dev;
+ }
}
static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
static struct attribute sysfs_##_name = \
{ .name = #_name, .mode = _mode }
-#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
-#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
-#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
+#define write_attribute(n) __sysfs_attribute(n, 0200)
+#define read_attribute(n) __sysfs_attribute(n, 0444)
+#define rw_attribute(n) __sysfs_attribute(n, 0644)
#define sysfs_printf(file, fmt, ...) \
do { \
read_attribute(io_latency_stats_write);
read_attribute(congested);
-read_attribute(btree_avg_write_size);
+read_attribute(btree_write_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
- { .name = #_name, .mode = S_IRUGO };
+ { .name = #_name, .mode = 0444 };
BCH_TIME_STATS()
#undef x
static struct attribute sysfs_state_rw = {
.name = "state",
- .mode = S_IRUGO
+ .mode = 0444,
};
static size_t bch2_btree_cache_size(struct bch_fs *c)
return ret;
}
-static size_t bch2_btree_avg_write_size(struct bch_fs *c)
-{
- u64 nr = atomic64_read(&c->btree_writes_nr);
- u64 sectors = atomic64_read(&c->btree_writes_sectors);
-
- return nr ? div64_u64(sectors, nr) : 0;
-}
-
static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
long ret = 0;
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
- sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
+
+ if (attr == &sysfs_btree_write_stats)
+ bch2_btree_write_stats_to_text(out, c);
sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
struct attribute *bch2_fs_files[] = {
&sysfs_minor,
&sysfs_btree_cache_size,
- &sysfs_btree_avg_write_size,
+ &sysfs_btree_write_stats,
&sysfs_promote_whole_extents,
SHOW(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
return bch2_fs_to_text(out, &c->kobj, attr);
}
STORE(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
- bch_err(c, "update error in test_delete: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "delete error (first) in test_delete: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "delete error (second) in test_delete: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
goto err;
}
err:
bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(&trans, &iter, &k.k_i, 0));
if (ret) {
- bch_err(c, "update error in test_delete_written: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
goto err;
}
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(&trans, &iter, 0));
if (ret) {
- bch_err(c, "delete error in test_delete_written: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
goto err;
}
err:
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "insert error in test_iterate: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
goto err;
}
}
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "insert error in test_iterate_extents: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
goto err;
}
}
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "insert error in test_iterate_slots: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
goto err;
}
}
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
- bch_err(c, "insert error in test_iterate_slots_extents: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
goto err;
}
}
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret)
- bch_err(c, "insert error in insert_test_extent: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
return ret;
}
ret = test_snapshot_filter(c, snapids[0], snapids[1]);
if (ret) {
- bch_err(c, "err from test_snapshot_filter: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
return ret;
}
static u64 test_rand(void)
{
u64 v;
-#if 0
- v = prandom_u32();
-#else
+
prandom_bytes(&v, sizeof(v));
-#endif
return v;
}
ret = commit_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
if (ret) {
- bch_err(c, "error in rand_insert: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
break;
}
}
__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i) ?:
__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i));
if (ret) {
- bch_err(c, "error in rand_insert_multi: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
break;
}
}
lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
ret = bkey_err(k);
if (ret) {
- bch_err(c, "error in rand_lookup: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
break;
}
}
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
- bch_err(trans->c, "lookup error in rand_mixed: %s", bch2_err_str(ret));
+ bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
if (ret)
return ret;
ret = commit_do(&trans, NULL, NULL, 0,
rand_mixed_trans(&trans, &iter, &cookie, i, rand));
if (ret) {
- bch_err(c, "update error in rand_mixed: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
break;
}
}
ret = commit_do(&trans, NULL, NULL, 0,
__do_delete(&trans, pos));
if (ret) {
- bch_err(c, "error in rand_delete: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
break;
}
}
bch2_trans_update(&trans, &iter, &insert.k_i, 0);
}));
if (ret)
- bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch2_trans_exit(&trans);
return ret;
SPOS(0, 0, U32_MAX), 0, k,
0);
if (ret)
- bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch2_trans_exit(&trans);
return ret;
bch2_trans_update(&trans, &iter, &u.k_i, 0);
}));
if (ret)
- bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
bch2_trans_exit(&trans);
return ret;
SPOS(0, 0, U32_MAX), SPOS_MAX,
0, NULL);
if (ret)
- bch_err(c, "error in seq_delete: %s", bch2_err_str(ret));
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include "two_state_shared_lock.h"
+
+void bch2_two_state_unlock(two_state_lock_t *lock, int s)
+{
+ long i = s ? 1 : -1;
+
+ BUG_ON(atomic_long_read(&lock->v) == 0);
+
+ if (atomic_long_sub_return_release(i, &lock->v) == 0)
+ wake_up_all(&lock->wait);
+}
+
+bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
+{
+ long i = s ? 1 : -1;
+ long v = atomic_long_read(&lock->v), old;
+
+ do {
+ old = v;
+
+ if (i > 0 ? v < 0 : v > 0)
+ return false;
+ } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
+ old, old + i)) != old);
+ return true;
+}
+
+void bch2_two_state_lock(two_state_lock_t *lock, int s)
+{
+ wait_event(lock->wait, bch2_two_state_trylock(lock, s));
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_TWO_STATE_LOCK_H
+#define _BCACHEFS_TWO_STATE_LOCK_H
+
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+/*
+ * Two-state lock - can be taken for add or block - both states are shared,
+ * like the read side of a rwsem, but each state conflicts with the other:
+ */
+typedef struct {
+ atomic_long_t v;
+ wait_queue_head_t wait;
+} two_state_lock_t;
+
+static inline void two_state_lock_init(two_state_lock_t *lock)
+{
+ atomic_long_set(&lock->v, 0);
+ init_waitqueue_head(&lock->wait);
+}
+
+void bch2_two_state_unlock(two_state_lock_t *, int);
+bool bch2_two_state_trylock(two_state_lock_t *, int);
+void bch2_two_state_lock(two_state_lock_t *, int);
+
+#endif /* _BCACHEFS_TWO_STATE_LOCK_H */
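
To make the two-state semantics concrete: any number of holders of the same state run concurrently (each holder just moves the counter further from zero in that state's direction), while the opposite state must wait for the counter to return to zero. A small sketch with assumed state names (the constants below are illustrative, not from the source):

#define STATE_A	0	/* holders in state A drive the counter negative */
#define STATE_B	1	/* holders in state B drive the counter positive */

static void do_work_in_state_a(two_state_lock_t *l)
{
	bch2_two_state_lock(l, STATE_A);	/* shared with other STATE_A holders */
	/* ... work that must exclude STATE_B ... */
	bch2_two_state_unlock(l, STATE_A);
}

static void do_work_in_state_b(two_state_lock_t *l)
{
	bch2_two_state_lock(l, STATE_B);	/* waits while any STATE_A holder remains */
	/* ... work that must exclude STATE_A ... */
	bch2_two_state_unlock(l, STATE_B);
}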
struct closure;
#ifdef CONFIG_BCACHEFS_DEBUG
-
#define EBUG_ON(cond) BUG_ON(cond)
-#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
-#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
-#define atomic_sub_bug(i, v) BUG_ON(atomic_sub_return(i, v) < 0)
-#define atomic_add_bug(i, v) BUG_ON(atomic_add_return(i, v) < 0)
-#define atomic_long_dec_bug(v) BUG_ON(atomic_long_dec_return(v) < 0)
-#define atomic_long_sub_bug(i, v) BUG_ON(atomic_long_sub_return(i, v) < 0)
-#define atomic64_dec_bug(v) BUG_ON(atomic64_dec_return(v) < 0)
-#define atomic64_inc_bug(v, i) BUG_ON(atomic64_inc_return(v) <= i)
-#define atomic64_sub_bug(i, v) BUG_ON(atomic64_sub_return(i, v) < 0)
-#define atomic64_add_bug(i, v) BUG_ON(atomic64_add_return(i, v) < 0)
-
-#else /* DEBUG */
-
+#else
#define EBUG_ON(cond)
-#define atomic_dec_bug(v) atomic_dec(v)
-#define atomic_inc_bug(v, i) atomic_inc(v)
-#define atomic_sub_bug(i, v) atomic_sub(i, v)
-#define atomic_add_bug(i, v) atomic_add(i, v)
-#define atomic_long_dec_bug(v) atomic_long_dec(v)
-#define atomic_long_sub_bug(i, v) atomic_long_sub(i, v)
-#define atomic64_dec_bug(v) atomic64_dec(v)
-#define atomic64_inc_bug(v, i) atomic64_inc(v)
-#define atomic64_sub_bug(i, v) atomic64_sub(i, v)
-#define atomic64_add_bug(i, v) atomic64_add(i, v)
-
#endif
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-#define bch2_bkey_ops_xattr (struct bkey_ops) { \
+#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
.key_invalid = bch2_xattr_invalid, \
.val_to_text = bch2_xattr_to_text, \
-}
+})
static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
{
*
* note: this rounds towards 0.
*/
-inline s64 fast_divpow2(s64 n, u8 d)
+s64 fast_divpow2(s64 n, u8 d)
{
return (n + ((n < 0) ? ((1 << d) - 1) : 0)) >> d;
}
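
As a quick sanity check of the rounding behaviour: the adjustment adds (1 << d) - 1 before shifting, but only for negative n, turning the shift's round-toward-negative-infinity into round-toward-zero. The checks below assume the kernel s64/u8 types from the surrounding code:

#include <assert.h>

s64 fast_divpow2(s64 n, u8 d);	/* as defined above */

static void check_fast_divpow2(void)
{
	assert(fast_divpow2(-7, 2) == -1);	/* (-7 + 3) >> 2, matches C's truncating -7 / 4 */
	assert(fast_divpow2( 7, 2) ==  1);	/* positive n is a plain shift */
	assert((-7 >> 2) == -2);		/* a bare shift alone would round toward -inf */
}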
prt_vprintf(out, fmt, args);
va_end(args);
}
+
+void prt_u64(struct printbuf *out, u64 v)
+{
+ prt_printf(out, "%llu", v);
+}
return true;
}
-#ifdef CONFIG_LOCK_SPIN_ON_OWNER
+/*
+ * We don't see stable performance with SIX_LOCK_SPIN_ON_OWNER enabled, so it's
+ * off for now:
+ */
+#ifdef SIX_LOCK_SPIN_ON_OWNER
static inline bool six_optimistic_spin(struct six_lock *lock,
struct six_lock_waiter *wait)
__wake_up(q, TASK_NORMAL, 1, NULL);
}
+void wake_up_all(wait_queue_head_t *q)
+{
+ __wake_up(q, TASK_NORMAL, 0, NULL);
+}
+
static void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
__wake_up_common(q, mode, nr, 0, NULL);