X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_io.c;h=fa1229eb15ec757be3df85ebc7828aec6eccbfe9;hb=21ae8a4b715acd326e6404ce6409ae329566eb64;hp=12894f8959bfccbe4f7291e290ab703c8f304f02;hpb=700d013b5280b72a1fb3830d8f70ecce5decb0ab;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index 12894f8..fa1229e 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -18,21 +18,22 @@
 #include "journal_reclaim.h"
 #include "journal_seq_blacklist.h"
 #include "super-io.h"
+#include "trace.h"
 
 #include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
 
 void bch2_btree_node_io_unlock(struct btree *b)
 {
 	EBUG_ON(!btree_node_write_in_flight(b));
 
+	clear_btree_node_write_in_flight_inner(b);
 	clear_btree_node_write_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
 }
 
 void bch2_btree_node_io_lock(struct btree *b)
 {
-	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+	bch2_assert_btree_nodes_not_locked();
 
 	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
 			    TASK_UNINTERRUPTIBLE);
@@ -52,7 +53,7 @@ void __bch2_btree_node_wait_on_write(struct btree *b)
 
 void bch2_btree_node_wait_on_read(struct btree *b)
 {
-	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+	bch2_assert_btree_nodes_not_locked();
 
 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
 		       TASK_UNINTERRUPTIBLE);
@@ -60,7 +61,7 @@ void bch2_btree_node_wait_on_read(struct btree *b)
 
 void bch2_btree_node_wait_on_write(struct btree *b)
 {
-	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+	bch2_assert_btree_nodes_not_locked();
 
 	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
 		       TASK_UNINTERRUPTIBLE);
@@ -76,13 +77,13 @@ static void verify_no_dups(struct btree *b,
 	if (start == end)
 		return;
 
-	for (p = start, k = bkey_next(start);
+	for (p = start, k = bkey_p_next(start);
 	     k != end;
-	     p = k, k = bkey_next(k)) {
+	     p = k, k = bkey_p_next(k)) {
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
 
-		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
+		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
 	}
 #endif
 }
@@ -91,7 +92,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
 	struct bkey_packed *k;
 
-	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 		k->needs_whiteout = v;
 }
 
@@ -104,8 +105,8 @@ static void btree_bounce_free(struct bch_fs *c, size_t size,
 		vpfree(p, size);
 }
 
-static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
-				bool *used_mempool)
+static void *btree_bounce_alloc_noprof(struct bch_fs *c, size_t size,
+				       bool *used_mempool)
 {
 	unsigned flags = memalloc_nofs_save();
 	void *p;
@@ -113,14 +114,16 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	BUG_ON(size > btree_bytes(c));
 
 	*used_mempool = false;
-	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	p = vpmalloc_noprof(size, __GFP_NOWARN|GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
-		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
 	}
 	memalloc_nofs_restore(flags);
 	return p;
 }
+#define btree_bounce_alloc(_c, _size, _used_mempool)		\
+	alloc_hooks(btree_bounce_alloc_noprof(_c, _size, _used_mempool))
 
 static void sort_bkey_ptrs(const struct btree *bt,
 			   struct bkey_packed **ptrs, unsigned nr)
@@ -174,7 +177,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	for (k = unwritten_whiteouts_start(c, b);
 	     k != unwritten_whiteouts_end(c, b);
-	     k = bkey_next(k))
+	     k = bkey_p_next(k))
 		*--ptrs = k;
 
 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
@@ -183,7 +186,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	while (ptrs != ptrs_end) {
 		bkey_copy(k, *ptrs);
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		ptrs++;
 	}
 
@@ -255,11 +258,11 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
 	out = i->start;
 
 	for (k = start; k != end; k = n) {
-		n = bkey_next(k);
+		n = bkey_p_next(k);
 
 		if (!bkey_deleted(k)) {
 			bkey_copy(out, k);
-			out = bkey_next(out);
+			out = bkey_p_next(out);
 		} else {
 			BUG_ON(k->needs_whiteout);
 		}
@@ -390,16 +393,10 @@ void bch2_btree_sort_into(struct bch_fs *c,
 
 	bch2_btree_node_iter_init_from_start(&src_iter, src);
 
-	if (btree_node_is_extents(src))
-		nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
-				src, &src_iter,
-				&dst->format,
-				true);
-	else
-		nr = bch2_sort_repack(btree_bset_first(dst),
-				src, &src_iter,
-				&dst->format,
-				true);
+	nr = bch2_sort_repack(btree_bset_first(dst),
+			src, &src_iter,
+			&dst->format,
+			true);
 
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
 			       start_time);
@@ -455,6 +452,24 @@ void bch2_btree_build_aux_trees(struct btree *b)
 			t == bset_tree_last(b));
 }
 
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+	unsigned mid_u64s_bits =
+		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
+
 /*
  * @bch_btree_init_next - initialize a new (unwritten) bset that can then be
  * inserted into
@@ -464,30 +479,22 @@ void bch2_btree_build_aux_trees(struct btree *b)
  *
  * Returns true if we sorted (i.e. invalidated iterators
  */
-void bch2_btree_init_next(struct btree_trans *trans,
-			  struct btree_iter *iter,
-			  struct btree *b)
+void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_node_entry *bne;
 	bool reinit_iter = false;
 
-	EBUG_ON(!(b->c.lock.state.seq & 1));
-	EBUG_ON(iter && iter->l[b->c.level].b != b);
+	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
+	BUG_ON(btree_node_just_written(b));
 
 	if (b->nsets == MAX_BSETS &&
-	    !btree_node_write_in_flight(b)) {
-		unsigned log_u64s[] = {
-			ilog2(bset_u64s(&b->set[0])),
-			ilog2(bset_u64s(&b->set[1])),
-			ilog2(bset_u64s(&b->set[2])),
-		};
-
-		if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
-			bch2_btree_node_write(c, b, SIX_LOCK_write);
-			reinit_iter = true;
-		}
+	    !btree_node_write_in_flight(b) &&
+	    should_compact_all(c, b)) {
+		bch2_btree_node_write(c, b, SIX_LOCK_write,
+				      BTREE_WRITE_init_next_bset);
+		reinit_iter = true;
 	}
 
 	if (b->nsets == MAX_BSETS &&
@@ -502,17 +509,17 @@ void bch2_btree_init_next(struct btree_trans *trans,
 
 	bch2_btree_build_aux_trees(b);
 
-	if (iter && reinit_iter)
-		bch2_btree_iter_reinit_node(iter, b);
+	if (reinit_iter)
+		bch2_trans_node_reinit_iter(trans, b);
 }
 
 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
 			  struct btree *b)
 {
-	pr_buf(out, "%s level %u/%u\n  ",
+	prt_printf(out, "%s level %u/%u\n  ",
 	       bch2_btree_ids[b->c.btree_id],
 	       b->c.level,
-	       c->btree_roots[b->c.btree_id].level);
+	       bch2_btree_id_root(c, b->c.btree_id)->level);
 	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
 }
 
@@ -521,86 +528,112 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 			  struct btree *b, struct bset *i,
 			  unsigned offset, int write)
 {
-	pr_buf(out, "error validating btree node ");
-	if (write)
-		pr_buf(out, "before write ");
+	prt_printf(out, bch2_log_msg(c, "%s"),
+		   write == READ
+		   ? "error validating btree node "
+		   : "corrupt btree node before write ");
 	if (ca)
-		pr_buf(out, "on %s ", ca->name);
-	pr_buf(out, "at btree ");
+		prt_printf(out, "on %s ", ca->name);
+	prt_printf(out, "at btree ");
 	btree_pos_to_text(out, c, b);
 
-	pr_buf(out, "\n  node offset %u", b->written);
+	prt_printf(out, "\n  node offset %u", b->written);
 	if (i)
-		pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+	prt_str(out, ": ");
 }
 
 enum btree_err_type {
+	/*
+	 * We can repair this locally, and we're after the checksum check so
+	 * there's no need to try another replica:
+	 */
 	BTREE_ERR_FIXABLE,
+	/*
+	 * We can repair this if we have to, but we should try reading another
+	 * replica if we can:
+	 */
 	BTREE_ERR_WANT_RETRY,
+	/*
+	 * Read another replica if we have one, otherwise consider the whole
+	 * node bad:
+	 */
 	BTREE_ERR_MUST_RETRY,
-	BTREE_ERR_FATAL,
+	BTREE_ERR_BAD_NODE,
+	BTREE_ERR_INCOMPATIBLE,
 };
 
 enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
 };
 
+static int __btree_err(enum btree_err_type type,
+		       struct bch_fs *c,
+		       struct bch_dev *ca,
+		       struct btree *b,
+		       struct bset *i,
+		       int write,
+		       bool have_retry,
+		       const char *fmt, ...)
+{
+	struct printbuf out = PRINTBUF;
+	va_list args;
+	int ret = -BCH_ERR_fsck_fix;
+
+	btree_err_msg(&out, c, ca, b, i, b->written, write);
+
+	va_start(args, fmt);
+	prt_vprintf(&out, fmt, args);
+	va_end(args);
+
+	if (write == WRITE) {
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		ret = c->opts.errors == BCH_ON_ERROR_continue
+			? 0
+			: -BCH_ERR_fsck_errors_not_fixed;
+		goto out;
+	}
+
+	if (!have_retry && type == BTREE_ERR_WANT_RETRY)
+		type = BTREE_ERR_FIXABLE;
+	if (!have_retry && type == BTREE_ERR_MUST_RETRY)
+		type = BTREE_ERR_BAD_NODE;
+
+	switch (type) {
+	case BTREE_ERR_FIXABLE:
+		mustfix_fsck_err(c, "%s", out.buf);
+		ret = -BCH_ERR_fsck_fix;
+		break;
+	case BTREE_ERR_WANT_RETRY:
+	case BTREE_ERR_MUST_RETRY:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		ret = BTREE_RETRY_READ;
+		break;
+	case BTREE_ERR_BAD_NODE:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		bch2_topology_error(c);
+		ret = -BCH_ERR_need_topology_repair;
+		break;
+	case BTREE_ERR_INCOMPATIBLE:
+		bch2_print_string_as_lines(KERN_ERR, out.buf);
+		ret = -BCH_ERR_fsck_errors_not_fixed;
+		break;
+	default:
+		BUG();
+	}
+out:
+fsck_err:
+	printbuf_exit(&out);
+	return ret;
+}
+
 #define btree_err(type, c, ca, b, i, msg, ...)				\
 ({									\
-	__label__ out;							\
-	char _buf[300];							\
-	char *_buf2 = _buf;						\
-	struct printbuf out = PBUF(_buf);				\
-									\
-	_buf2 = kmalloc(4096, GFP_ATOMIC);				\
-	if (_buf2)							\
-		out = _PBUF(_buf2, 4986);				\
-									\
-	btree_err_msg(&out, c, ca, b, i, b->written, write);		\
-	pr_buf(&out, ": " msg, ##__VA_ARGS__);				\
-									\
-	if (type == BTREE_ERR_FIXABLE &&				\
-	    write == READ &&						\
-	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {		\
-		mustfix_fsck_err(c, "%s", _buf2);			\
-		goto out;						\
-	}								\
-									\
-	switch (write) {						\
-	case READ:							\
-		bch_err(c, "%s", _buf2);				\
-									\
-		switch (type) {						\
-		case BTREE_ERR_FIXABLE:					\
-			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
-			goto fsck_err;					\
-		case BTREE_ERR_WANT_RETRY:				\
-			if (have_retry) {				\
-				ret = BTREE_RETRY_READ;			\
-				goto fsck_err;				\
-			}						\
-			break;						\
-		case BTREE_ERR_MUST_RETRY:				\
-			ret = BTREE_RETRY_READ;				\
-			goto fsck_err;					\
-		case BTREE_ERR_FATAL:					\
-			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
-			goto fsck_err;					\
-		}							\
-		break;							\
-	case WRITE:							\
-		bch_err(c, "corrupt metadata before write: %s", _buf2);	\
+	int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
 									\
-		if (bch2_fs_inconsistent(c)) {				\
-			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
-			goto fsck_err;					\
-		}							\
-		break;							\
-	}								\
-out:									\
-	if (_buf2 != _buf)						\
-		kfree(_buf2);						\
-	true;								\
+	if (_ret != -BCH_ERR_fsck_fix)					\
+		goto fsck_err;						\
+	*saw_error = true;						\
 })
 
 #define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
 
@@ -609,6 +642,7 @@ out:									\
  * When btree topology repair changes the start or end of a node, that might
  * mean we have to drop keys that are no longer inside the node:
  */
+__cold
 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 {
 	struct bset_tree *t;
@@ -620,7 +654,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 		struct bset *i = bset(b, t);
 		struct bkey_packed *k;
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
 				break;
 
@@ -631,43 +665,47 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 			       (u64 *) vstruct_end(i) - (u64 *) k);
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
 			set_btree_bset_end(b, t);
-			bch2_bset_set_no_aux_tree(b, t);
 		}
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
 				break;
 
 		if (k != vstruct_last(i)) {
 			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
 			set_btree_bset_end(b, t);
-			bch2_bset_set_no_aux_tree(b, t);
 		}
 	}
 
+	/*
+	 * Always rebuild search trees: eytzinger search tree nodes directly
+	 * depend on the values of min/max key:
+	 */
+	bch2_bset_set_no_aux_tree(b, b->set);
 	bch2_btree_build_aux_trees(b);
 
 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
-		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
-		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
 	}
 }
 
 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 struct btree *b, struct bset *i,
-			 unsigned sectors, int write, bool have_retry)
+			 unsigned offset, unsigned sectors,
+			 int write, bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
 	const char *err;
-	char buf1[100];
-	char buf2[100];
+	struct printbuf buf1 = PRINTBUF;
+	struct printbuf buf2 = PRINTBUF;
 	int ret = 0;
 
-	btree_err_on((version != BCH_BSET_VERSION_OLD &&
-		      version < bcachefs_metadata_version_min) ||
-		     version >= bcachefs_metadata_version_max,
-		     BTREE_ERR_FATAL, c, ca, b, i,
-		     "unsupported bset version");
+	btree_err_on(!bch2_version_compatible(version),
+		     BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
+		     "unsupported bset version %u.%u",
+		     BCH_VERSION_MAJOR(version),
+		     BCH_VERSION_MINOR(version));
 
 	if (btree_err_on(version < c->sb.version_min,
 			 BTREE_ERR_FIXABLE, c, NULL, b, i,
@@ -679,7 +717,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 		mutex_unlock(&c->sb_lock);
 	}
 
-	if (btree_err_on(version > c->sb.version,
+	if (btree_err_on(BCH_VERSION_MAJOR(version) >
+			 BCH_VERSION_MAJOR(c->sb.version),
 			 BTREE_ERR_FIXABLE, c, NULL, b, i,
 			 "bset version %u newer than superblock version %u",
 			 version, c->sb.version)) {
@@ -690,21 +729,27 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 	}
 
 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
-		     BTREE_ERR_FATAL, c, ca, b, i,
+		     BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
 
-	if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
+	if (btree_err_on(offset + sectors > btree_sectors(c),
 			 BTREE_ERR_FIXABLE, c, ca, b, i,
 			 "bset past end of btree node")) {
 		i->u64s = 0;
-		return 0;
+		ret = 0;
+		goto out;
 	}
 
-	btree_err_on(b->written && !i->u64s,
+	btree_err_on(offset && !i->u64s,
 		     BTREE_ERR_FIXABLE, c, ca, b, i,
 		     "empty bset");
 
-	if (!b->written) {
+	btree_err_on(BSET_OFFSET(i) &&
+		     BSET_OFFSET(i) != offset,
+		     BTREE_ERR_WANT_RETRY, c, ca, b, i,
+		     "bset at wrong sector offset");
+
+	if (!offset) {
 		struct btree_node *bn =
 			container_of(i, struct btree_node, keys);
 		/* These indicate that we read the wrong btree node: */
 
@@ -740,17 +785,20 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			b->data->max_key = b->key.k.p;
 		}
 
-		btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
+		btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
 			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
 			     "incorrect min_key: got %s should be %s",
-			     (bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
-			     (bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
+			     (printbuf_reset(&buf1),
+			      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
+			     (printbuf_reset(&buf2),
+			      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
 	}
 
-	btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
+	btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
 		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
 		     "incorrect max key %s",
-		     (bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));
+		     (printbuf_reset(&buf1),
+		      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
 
 	if (write)
 		compat_btree_node(b->c.level, b->c.btree_id, version,
@@ -758,23 +806,37 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
 		err = bch2_bkey_format_validate(&bn->format);
 		btree_err_on(err,
-			     BTREE_ERR_FATAL, c, ca, b, i,
+			     BTREE_ERR_BAD_NODE, c, ca, b, i,
 			     "invalid bkey format: %s", err);
 
 		compat_bformat(b->c.level, b->c.btree_id, version,
 			       BSET_BIG_ENDIAN(i), write,
 			       &bn->format);
 	}
+out:
 fsck_err:
+	printbuf_exit(&buf2);
+	printbuf_exit(&buf1);
 	return ret;
 }
 
+static int bset_key_invalid(struct bch_fs *c, struct btree *b,
+			    struct bkey_s_c k,
+			    bool updated_range, int rw,
+			    struct printbuf *err)
+{
+	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
+		(!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
+}
+
 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
-			 struct bset *i, unsigned *whiteout_u64s,
-			 int write, bool have_retry)
+			 struct bset *i, int write,
+			 bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
 	struct bkey_packed *k, *prev = NULL;
+	struct printbuf buf = PRINTBUF;
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	int ret = 0;
@@ -783,9 +845,8 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 	     k != vstruct_last(i);) {
 		struct bkey_s u;
 		struct bkey tmp;
-		const char *invalid;
 
-		if (btree_err_on(bkey_next(k) > vstruct_last(i),
+		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "key extends past end of bset")) {
 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -796,7 +857,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "invalid bkey format %u", k->format)) {
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -809,18 +870,18 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
 		u = __bkey_disassemble(b, k, &tmp);
 
-		invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
-			(!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?:
-			(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
-		if (invalid) {
-			char buf[160];
+		printbuf_reset(&buf);
+		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
+			printbuf_reset(&buf);
+			prt_printf(&buf, "invalid bkey: ");
+			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
+			prt_printf(&buf, "\n  ");
+			bch2_bkey_val_to_text(&buf, c, u.s_c);
 
-			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
-			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
-				  "invalid bkey: %s\n%s", invalid, buf);
+			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -831,34 +892,34 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			&b->format, k);
 
 		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
-			char buf1[80];
-			char buf2[80];
 			struct bkey up = bkey_unpack_key(b, prev);
 
-			bch2_bkey_to_text(&PBUF(buf1), &up);
-			bch2_bkey_to_text(&PBUF(buf2), u.k);
+			printbuf_reset(&buf);
+			prt_printf(&buf, "keys out of order: ");
+			bch2_bkey_to_text(&buf, &up);
+			prt_printf(&buf, " > ");
+			bch2_bkey_to_text(&buf, u.k);
 
 			bch2_dump_bset(c, b, i, 0);
 
-			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
-				      "keys out of order: %s > %s",
-				      buf1, buf2)) {
+			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
 				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-				memmove_u64s_down(k, bkey_next(k),
+				memmove_u64s_down(k, bkey_p_next(k),
 						  (u64 *) vstruct_end(i) - (u64 *) k);
 				continue;
 			}
 		}
 
 		prev = k;
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 fsck_err:
+	printbuf_exit(&buf);
 	return ret;
 }
 
 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
-			      struct btree *b, bool have_retry)
+			      struct btree *b, bool have_retry, bool *saw_error)
 {
 	struct btree_node_entry *bne;
 	struct sort_iter *iter;
@@ -870,12 +931,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned nonblacklisted_written = 0;
-	int ret, retry_read = 0, write = READ;
+	unsigned blacklisted_written, nonblacklisted_written = 0;
+	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
+	struct printbuf buf = PRINTBUF;
+	int ret = 0, retry_read = 0, write = READ;
 
 	b->version_ondisk = U16_MAX;
+	/* We might get called multiple times on read retry: */
+	b->written = 0;
 
-	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
+	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
 	sort_iter_init(iter, b);
 	iter->size = (btree_blocks(c) + 1) * 2;
 
@@ -885,11 +950,12 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
 		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
-		     "bad magic");
+		     "bad magic: want %llx, got %llx",
+		     bset_magic(c), le64_to_cpu(b->data->magic));
 
 	btree_err_on(!b->data->keys.seq,
 		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
-		     "bad btree header");
+		     "bad btree header: seq 0");
 
 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
 		struct bch_btree_ptr_v2 *bp =
@@ -901,8 +967,8 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			     b->data->keys.seq, bp->seq);
 	}
 
-	while (b->written < c->opts.btree_node_size) {
-		unsigned sectors, whiteout_u64s = 0;
+	while (b->written < (ptr_written ?: btree_sectors(c))) {
+		unsigned sectors;
 		struct nonce nonce;
 		struct bch_csum csum;
 		bool first = !b->written;
@@ -922,11 +988,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			     BTREE_ERR_WANT_RETRY, c, ca, b, i,
 			     "invalid checksum");
 
-		bset_encrypt(c, i, b->written << 9);
+		ret = bset_encrypt(c, i, b->written << 9);
+		if (bch2_fs_fatal_err_on(ret, c,
+				"error decrypting btree node: %i", ret))
+			goto fsck_err;
 
-		btree_err_on(btree_node_is_extents(b) &&
+		btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
 			     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
-			     BTREE_ERR_FATAL, c, NULL, b, NULL,
+			     BTREE_ERR_INCOMPATIBLE, c, NULL, b, NULL,
 			     "btree node does not have NEW_EXTENT_OVERWRITE set");
 
 		sectors = vstruct_sectors(b->data, c->block_bits);
@@ -949,7 +1018,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			     BTREE_ERR_WANT_RETRY, c, ca, b, i,
 			     "invalid checksum");
 
-		bset_encrypt(c, i, b->written << 9);
+		ret = bset_encrypt(c, i, b->written << 9);
+		if (bch2_fs_fatal_err_on(ret, c,
+				"error decrypting btree node: %i\n", ret))
+			goto fsck_err;
 
 		sectors = vstruct_sectors(bne, c->block_bits);
 	}
@@ -957,63 +1029,75 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 		b->version_ondisk = min(b->version_ondisk,
 					le16_to_cpu(i->version));
 
-		ret = validate_bset(c, ca, b, i, sectors,
-				    READ, have_retry);
+		ret = validate_bset(c, ca, b, i, b->written, sectors,
+				    READ, have_retry, saw_error);
 		if (ret)
 			goto fsck_err;
 
 		if (!b->written)
 			btree_node_set_format(b, b->data->format);
 
-		ret = validate_bset_keys(c, b, i, &whiteout_u64s,
-					 READ, have_retry);
+		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
 		if (ret)
 			goto fsck_err;
 
 		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
 
-		b->written += sectors;
-
 		blacklisted = bch2_journal_seq_is_blacklisted(c,
 					le64_to_cpu(i->journal_seq),
 					true);
 
 		btree_err_on(blacklisted && first,
 			     BTREE_ERR_FIXABLE, c, ca, b, i,
-			     "first btree node bset has blacklisted journal seq");
+			     "first btree node bset has blacklisted journal seq (%llu)",
+			     le64_to_cpu(i->journal_seq));
+
+		btree_err_on(blacklisted && ptr_written,
+			     BTREE_ERR_FIXABLE, c, ca, b, i,
+			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
+			     le64_to_cpu(i->journal_seq),
+			     b->written, b->written + sectors, ptr_written);
+
+		b->written += sectors;
+
 		if (blacklisted && !first)
 			continue;
 
-		sort_iter_add(iter, i->start,
-			      vstruct_idx(i, whiteout_u64s));
-
 		sort_iter_add(iter,
-			      vstruct_idx(i, whiteout_u64s),
+			      vstruct_idx(i, 0),
 			      vstruct_last(i));
 
 		nonblacklisted_written = b->written;
 	}
 
-	for (bne = write_block(b);
-	     bset_byte_offset(b, bne) < btree_bytes(c);
-	     bne = (void *) bne + block_bytes(c))
-		btree_err_on(bne->keys.seq == b->data->keys.seq &&
-			     !bch2_journal_seq_is_blacklisted(c,
-							      le64_to_cpu(bne->keys.journal_seq),
-							      true),
+	if (ptr_written) {
+		btree_err_on(b->written < ptr_written,
 			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
-			     "found bset signature after last bset");
+			     "btree node data missing: expected %u sectors, found %u",
+			     ptr_written, b->written);
+	} else {
+		for (bne = write_block(b);
+		     bset_byte_offset(b, bne) < btree_bytes(c);
+		     bne = (void *) bne + block_bytes(c))
+			btree_err_on(bne->keys.seq == b->data->keys.seq &&
+				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
+				     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+				     "found bset signature after last bset");
 
-	/*
-	 * Blacklisted bsets are those that were written after the most recent
-	 * (flush) journal write. Since there wasn't a flush, they may not have
-	 * made it to all devices - which means we shouldn't write new bsets
-	 * after them, as that could leave a gap and then reads from that device
-	 * wouldn't find all the bsets in that btree node - which means it's
-	 * important that we start writing new bsets after the most recent _non_
-	 * blacklisted bset:
-	 */
-	b->written = nonblacklisted_written;
+		/*
+		 * Blacklisted bsets are those that were written after the most recent
+		 * (flush) journal write. Since there wasn't a flush, they may not have
+		 * made it to all devices - which means we shouldn't write new bsets
+		 * after them, as that could leave a gap and then reads from that device
+		 * wouldn't find all the bsets in that btree node - which means it's
+		 * important that we start writing new bsets after the most recent _non_
+		 * blacklisted bset:
+		 */
+		blacklisted_written = b->written;
+		b->written = nonblacklisted_written;
+	}
 
 	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
 	sorted->keys.u64s = 0;
 
@@ -1040,21 +1124,25 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 	for (k = i->start; k != vstruct_last(i);) {
 		struct bkey tmp;
 		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
-		const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
 
-		if (invalid ||
+		printbuf_reset(&buf);
+
+		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
 		    (bch2_inject_invalid_keys &&
 		     !bversion_cmp(u.k->version, MAX_VERSION))) {
-			char buf[160];
+			printbuf_reset(&buf);
 
-			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
-			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
-				  "invalid bkey %s: %s", buf, invalid);
+			prt_printf(&buf, "invalid bkey: ");
+			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
+			prt_printf(&buf, "\n  ");
+			bch2_bkey_val_to_text(&buf, c, u.s_c);
+
+			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
 			btree_keys_account_key_drop(&b->nr, 0, k);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			set_btree_bset_end(b, b->set);
 			continue;
@@ -1066,7 +1154,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			bp.v->mem_ptr = 0;
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 
 	bch2_bset_build_aux_tree(b, b->set, false);
@@ -1081,16 +1169,18 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		if (ca->mi.state != BCH_MEMBER_STATE_rw)
 			set_btree_node_need_rewrite(b);
 	}
+
+	if (!ptr_written)
+		set_btree_node_need_rewrite(b);
 out:
 	mempool_free(iter, &c->fill_iter);
+	printbuf_exit(&buf);
 	return retry_read;
 fsck_err:
-	if (ret == BTREE_RETRY_READ) {
+	if (ret == BTREE_RETRY_READ)
 		retry_read = 1;
-	} else {
-		bch2_inconsistent_error(c);
+	else
 		set_btree_node_read_error(b);
-	}
 	goto out;
 }
 
@@ -1103,18 +1193,18 @@ static void btree_node_read_work(struct work_struct *work)
 	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
 	struct bio *bio		= &rb->bio;
 	struct bch_io_failures failed = { .nr = 0 };
-	char buf[200];
-	struct printbuf out;
+	struct printbuf buf = PRINTBUF;
 	bool saw_error = false;
+	bool retry = false;
 	bool can_retry;
 
 	goto start;
 	while (1) {
+		retry = true;
 		bch_info(c, "retrying read");
 		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
-		bio_reset(bio);
-		bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
+		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
 		bio->bi_iter.bi_size	= btree_bytes(c);
 
@@ -1125,10 +1215,10 @@ static void btree_node_read_work(struct work_struct *work)
 			bio->bi_status = BLK_STS_REMOVED;
 		}
 start:
-		out = PBUF(buf);
-		btree_pos_to_text(&out, c, b);
+		printbuf_reset(&buf);
+		btree_pos_to_text(&buf, c, b);
 		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
-				   bch2_blk_status_to_str(bio->bi_status), buf);
+				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
 		if (rb->have_ioref)
 			percpu_ref_put(&ca->io_ref);
 		rb->have_ioref = false;
@@ -1140,8 +1230,11 @@ start:
 			&failed, &rb->pick) > 0;
 
 		if (!bio->bi_status &&
-		    !bch2_btree_node_read_done(c, ca, b, can_retry))
+		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
+			if (retry)
+				bch_info(c, "retry success");
 			break;
+		}
 
 		saw_error = true;
 
@@ -1154,9 +1247,18 @@ start:
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
 			       rb->start_time);
 	bio_put(&rb->bio);
+	printbuf_exit(&buf);
+
+	if (saw_error && !btree_node_read_error(b)) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bpos_to_text(&buf, b->key.k.p);
+		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
+			 __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+		printbuf_exit(&buf);
 
-	if (saw_error && !btree_node_read_error(b))
 		bch2_btree_node_rewrite_async(c, b);
+	}
 
 	clear_btree_node_read_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
@@ -1170,6 +1272,7 @@ static void btree_node_read_endio(struct bio *bio)
 
 	if (rb->have_ioref) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
 		bch2_latency_acct(ca, rb->start_time, READ);
 	}
 
@@ -1183,7 +1286,7 @@ struct btree_node_read_all {
 	unsigned		nr;
 	void			*buf[BCH_REPLICAS_MAX];
 	struct bio		*bio[BCH_REPLICAS_MAX];
-	int			err[BCH_REPLICAS_MAX];
+	blk_status_t		err[BCH_REPLICAS_MAX];
 };
 
 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
@@ -1195,7 +1298,7 @@ static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
 	if (le64_to_cpu(bn->magic) != bset_magic(c))
 		return 0;
 
-	while (offset < c->opts.btree_node_size) {
+	while (offset < btree_sectors(c)) {
 		if (!offset) {
 			offset += vstruct_sectors(bn, c->block_bits);
 		} else {
@@ -1217,7 +1320,7 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
 	if (!offset)
 		return false;
 
-	while (offset < c->opts.btree_node_size) {
+	while (offset < btree_sectors(c)) {
 		bne = data + (offset << 9);
 		if (bne->keys.seq == bn->keys.seq)
 			return true;
@@ -1234,12 +1337,14 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
 		container_of(cl, struct btree_node_read_all, cl);
 	struct bch_fs *c = ra->c;
 	struct btree *b = ra->b;
+	struct printbuf buf = PRINTBUF;
 	bool dump_bset_maps = false;
 	bool have_retry = false;
 	int ret = 0, best = -1, write = READ;
-	unsigned i, written, written2;
+	unsigned i, written = 0, written2 = 0;
 	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
 		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq
 		: 0;
+	bool _saw_error = false, *saw_error = &_saw_error;
 
 	for (i = 0; i < ra->nr; i++) {
 		struct btree_node *bn = ra->buf[i];
@@ -1277,8 +1382,6 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
 fsck_err:
 	if (dump_bset_maps) {
 		for (i = 0; i < ra->nr; i++) {
-			char buf[200];
-			struct printbuf out = PBUF(buf);
 			struct btree_node *bn = ra->buf[i];
 			struct btree_node_entry *bne = NULL;
 			unsigned offset = 0, sectors;
@@ -1287,7 +1390,9 @@ fsck_err:
 			if (ra->err[i])
 				continue;
 
-			while (offset < c->opts.btree_node_size) {
+			printbuf_reset(&buf);
+
+			while (offset < btree_sectors(c)) {
 				if (!offset) {
 					sectors = vstruct_sectors(bn, c->block_bits);
 				} else {
@@ -1297,42 +1402,44 @@ fsck_err:
 					sectors = vstruct_sectors(bne, c->block_bits);
 				}
 
-				pr_buf(&out, " %u-%u", offset, offset + sectors);
+				prt_printf(&buf, " %u-%u", offset, offset + sectors);
 				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
-					pr_buf(&out, "*");
+					prt_printf(&buf, "*");
 				offset += sectors;
 			}
 
-			while (offset < c->opts.btree_node_size) {
+			while (offset < btree_sectors(c)) {
 				bne = ra->buf[i] + (offset << 9);
 				if (bne->keys.seq == bn->keys.seq) {
 					if (!gap)
-						pr_buf(&out, " GAP");
+						prt_printf(&buf, " GAP");
 					gap = true;
 
 					sectors = vstruct_sectors(bne, c->block_bits);
-					pr_buf(&out, " %u-%u", offset, offset + sectors);
+					prt_printf(&buf, " %u-%u", offset, offset + sectors);
 					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
-						pr_buf(&out, "*");
+						prt_printf(&buf, "*");
 				}
 				offset++;
 			}
 
-			bch_err(c, "replica %u:%s", i, buf);
+			bch_err(c, "replica %u:%s", i, buf.buf);
 		}
 	}
 
 	if (best >= 0) {
 		memcpy(b->data, ra->buf[best], btree_bytes(c));
-		ret = bch2_btree_node_read_done(c, NULL, b, false);
+		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
 	} else {
 		ret = -1;
 	}
 
 	if (ret)
 		set_btree_node_read_error(b);
+	else if (*saw_error)
+		bch2_btree_node_rewrite_async(c, b);
 
 	for (i = 0; i < ra->nr; i++) {
 		mempool_free(ra->buf[i], &c->btree_bounce_pool);
@@ -1341,6 +1448,7 @@ fsck_err:
 
 	closure_debug_destroy(&ra->cl);
 	kfree(ra);
+	printbuf_exit(&buf);
 
 	clear_btree_node_read_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
@@ -1355,6 +1463,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
 
 	if (rb->have_ioref) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
 		bch2_latency_acct(ca, rb->start_time, READ);
 	}
 
@@ -1377,7 +1486,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
 	if (!ra)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
 
 	closure_init(&ra->cl, NULL);
 	ra->c	= c;
@@ -1386,8 +1495,10 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	for (i = 0; i < ra->nr; i++) {
 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
-		ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
-								  btree_bytes(c)),
+		ra->bio[i] = bio_alloc_bioset(NULL,
+					      buf_pages(ra->buf[i], btree_bytes(c)),
+					      REQ_OP_READ|REQ_SYNC|REQ_META,
+					      GFP_NOFS,
 					      &c->btree_bio);
 	}
 
@@ -1403,7 +1514,6 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
 		rb->idx			= i;
 		rb->pick		= pick;
-		rb->bio.bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
 		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
@@ -1440,11 +1550,9 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	struct btree_read_bio *rb;
 	struct bch_dev *ca;
 	struct bio *bio;
-	char buf[200];
 	int ret;
 
-	btree_pos_to_text(&PBUF(buf), c, b);
-	trace_btree_read(c, b);
+	trace_and_count(c, btree_node_read, c, b);
 
 	if (bch2_verify_all_btree_replicas &&
 	    !btree_node_read_all_replicas(c, b, sync))
@@ -1452,17 +1560,30 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 
 	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
 					 NULL, &pick);
-	if (bch2_fs_fatal_err_on(ret <= 0, c,
-			"btree node read error: no device to read from\n"
-			" at %s", buf)) {
+
+	if (ret <= 0) {
+		struct printbuf buf = PRINTBUF;
+
+		prt_str(&buf, "btree node read error: no device to read from\n at ");
+		btree_pos_to_text(&buf, c, b);
+		bch_err(c, "%s", buf.buf);
+
+		if (test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
+			bch2_fatal_error(c);
+
 		set_btree_node_read_error(b);
+		clear_btree_node_read_in_flight(b);
+		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+		printbuf_exit(&buf);
 		return;
 	}
 
 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 
-	bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
-						   btree_bytes(c)),
+	bio = bio_alloc_bioset(NULL,
+			       buf_pages(b->data, btree_bytes(c)),
+			       REQ_OP_READ|REQ_SYNC|REQ_META,
+			       GFP_NOFS,
 			       &c->btree_bio);
 	rb = container_of(bio, struct btree_read_bio, bio);
 	rb->c			= c;
@@ -1472,7 +1593,6 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
 	rb->pick		= pick;
 	INIT_WORK(&rb->work, btree_node_read_work);
-	bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
 	bio->bi_end_io		= btree_node_read_endio;
 	bch2_bio_map(bio, b->data, btree_bytes(c));
@@ -1499,9 +1619,10 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	}
 }
 
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
-			 const struct bkey_i *k, unsigned level)
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
+				  const struct bkey_i *k, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
@@ -1513,7 +1634,7 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
 	bch2_btree_cache_cannibalize_unlock(c);
 
 	BUG_ON(IS_ERR(b));
@@ -1544,6 +1665,13 @@ err:
 	return ret;
 }
 
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+			 const struct bkey_i *k, unsigned level)
+{
+	return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
+
+}
+
 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 			      struct btree_write *w)
 {
@@ -1563,86 +1691,55 @@ void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 	bch2_journal_pin_drop(&c->journal, &w->journal);
 }
 
-static void btree_node_write_done(struct bch_fs *c, struct btree *b)
+static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
 	struct btree_write *w = btree_prev_write(b);
+	unsigned long old, new, v;
+	unsigned type = 0;
 
 	bch2_btree_complete_write(c, b, w);
-	bch2_btree_node_io_unlock(b);
-}
-
-static void bch2_btree_node_write_error(struct bch_fs *c,
-					struct btree_write_bio *wbio)
-{
-	struct btree *b		= wbio->wbio.bio.bi_private;
-	struct bkey_buf k;
-	struct bch_extent_ptr *ptr;
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	int ret;
-
-	bch2_bkey_buf_init(&k);
-	bch2_trans_init(&trans, c, 0, 0);
-
-	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
-					BTREE_MAX_DEPTH, b->c.level, 0);
-retry:
-	ret = bch2_btree_iter_traverse(iter);
-	if (ret)
-		goto err;
-
-	/* has node been freed? */
-	if (iter->l[b->c.level].b != b) {
-		/* node has been freed: */
-		BUG_ON(!btree_node_dying(b));
-		goto out;
-	}
-
-	BUG_ON(!btree_node_hashed(b));
-
-	bch2_bkey_buf_copy(&k, c, &b->key);
-
-	bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
-			    bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
+	v = READ_ONCE(b->flags);
+	do {
+		old = new = v;
 
-	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
-		goto err;
+		if ((old & (1U << BTREE_NODE_dirty)) &&
+		    (old & (1U << BTREE_NODE_need_write)) &&
+		    !(old & (1U << BTREE_NODE_never_write)) &&
+		    !(old & (1U << BTREE_NODE_write_blocked)) &&
+		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
+			new &= ~(1U << BTREE_NODE_dirty);
+			new &= ~(1U << BTREE_NODE_need_write);
+			new |=  (1U << BTREE_NODE_write_in_flight);
+			new |=  (1U << BTREE_NODE_write_in_flight_inner);
+			new |=  (1U << BTREE_NODE_just_written);
+			new ^=  (1U << BTREE_NODE_write_idx);
+
+			type = new & BTREE_WRITE_TYPE_MASK;
+			new &= ~BTREE_WRITE_TYPE_MASK;
+		} else {
+			new &= ~(1U << BTREE_NODE_write_in_flight);
+			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
+		}
+	} while ((v = cmpxchg(&b->flags, old, new)) != old);
 
-	ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
-	if (ret == -EINTR)
-		goto retry;
-	if (ret)
-		goto err;
-out:
-	bch2_trans_iter_put(&trans, iter);
-	bch2_trans_exit(&trans);
-	bch2_bkey_buf_exit(&k, c);
-	bio_put(&wbio->wbio.bio);
-	btree_node_write_done(c, b);
-	return;
-err:
-	set_btree_node_noevict(b);
-	bch2_fs_fatal_error(c, "fatal error writing btree node");
-	goto out;
+	if (new & (1U << BTREE_NODE_write_in_flight))
+		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
+	else
+		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
 }
 
-void bch2_btree_write_error_work(struct work_struct *work)
+static void btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
-	struct bch_fs *c	= container_of(work, struct bch_fs,
-					       btree_write_error_work);
-	struct bio *bio;
+	struct btree_trans trans;
 
-	while (1) {
-		spin_lock_irq(&c->btree_write_error_lock);
-		bio = bio_list_pop(&c->btree_write_error_list);
-		spin_unlock_irq(&c->btree_write_error_lock);
+	bch2_trans_init(&trans, c, 0, 0);
 
-		if (!bio)
-			break;
+	btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read);
+	__btree_node_write_done(c, b);
+	six_unlock_read(&b->c.lock);
 
-		bch2_btree_node_write_error(c,
-			container_of(bio, struct btree_write_bio, wbio.bio));
-	}
+	bch2_trans_exit(&trans);
 }
 
 static void btree_node_write_work(struct work_struct *work)
@@ -1651,25 +1748,44 @@ static void btree_node_write_work(struct work_struct *work)
 		container_of(work, struct btree_write_bio, work);
 	struct bch_fs *c	= wbio->wbio.c;
 	struct btree *b		= wbio->wbio.bio.bi_private;
+	struct bch_extent_ptr *ptr;
+	int ret = 0;
 
 	btree_bounce_free(c,
-		wbio->bytes,
+		wbio->data_bytes,
 		wbio->wbio.used_mempool,
 		wbio->data);
 
-	if (wbio->wbio.failed.nr) {
-		unsigned long flags;
+	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
+		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
 
-		spin_lock_irqsave(&c->btree_write_error_lock, flags);
-		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
-		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
+		goto err;
 
-		queue_work(c->btree_error_wq, &c->btree_write_error_work);
-		return;
-	}
+	if (wbio->wbio.first_btree_write) {
+		if (wbio->wbio.failed.nr) {
+		}
+	} else {
+		ret = bch2_trans_do(c, NULL, NULL, 0,
+			bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
+					BCH_WATERMARK_reclaim|
+					BTREE_INSERT_JOURNAL_RECLAIM|
+					BTREE_INSERT_NOFAIL|
+					BTREE_INSERT_NOCHECK_RW,
+					!wbio->wbio.failed.nr));
+		if (ret)
+			goto err;
+	}
+out:
 	bio_put(&wbio->wbio.bio);
 	btree_node_write_done(c, b);
+	return;
+err:
+	set_btree_node_noevict(b);
+	if (!bch2_err_matches(ret, EROFS))
+		bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
+	goto out;
 }
 
 static void btree_node_write_endio(struct bio *bio)
@@ -1677,7 +1793,9 @@ static void btree_node_write_endio(struct bio *bio)
 	struct bch_write_bio *wbio	= to_wbio(bio);
 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
 	struct bch_write_bio *orig	= parent ?: wbio;
+	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
 	struct bch_fs *c		= wbio->c;
+	struct btree *b			= wbio->bio.bi_private;
 	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
 	unsigned long flags;
 
@@ -1698,26 +1816,33 @@ static void btree_node_write_endio(struct bio *bio)
 	if (parent) {
 		bio_put(bio);
 		bio_endio(&parent->bio);
-	} else {
-		struct btree_write_bio *wb =
-			container_of(orig, struct btree_write_bio, wbio);
-
-		INIT_WORK(&wb->work, btree_node_write_work);
-		queue_work(c->io_complete_wq, &wb->work);
+		return;
 	}
+
+	clear_btree_node_write_in_flight_inner(b);
+	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
+	INIT_WORK(&wb->work, btree_node_write_work);
+	queue_work(c->btree_io_complete_wq, &wb->work);
 }
 
 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 				   struct bset *i, unsigned sectors)
 {
-	unsigned whiteout_u64s = 0;
+	struct printbuf buf = PRINTBUF;
+	bool saw_error;
 	int ret;
 
-	if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
-		return -1;
+	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
+				BKEY_TYPE_btree, WRITE, &buf);
+
+	if (ret)
+		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
+	printbuf_exit(&buf);
+	if (ret)
+		return ret;
 
-	ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
-		validate_bset(c, NULL, b, i, sectors, WRITE, false);
+	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
+		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
 	if (ret) {
 		bch2_inconsistent_error(c);
 		dump_stack();
@@ -1729,18 +1854,25 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 static void btree_write_submit(struct work_struct *work)
 {
 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+	struct bch_extent_ptr *ptr;
+	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+
+	bkey_copy(&tmp.k, &wbio->key);
+
+	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
+		ptr->offset += wbio->sector_offset;
 
-	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
+	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+				  &tmp.k, false);
 }
 
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 {
 	struct btree_write_bio *wbio;
 	struct bset_tree *t;
 	struct bset *i;
 	struct btree_node *bn = NULL;
 	struct btree_node_entry *bne = NULL;
-	struct bch_extent_ptr *ptr;
 	struct sort_iter sort_iter;
 	struct nonce nonce;
 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
@@ -1748,12 +1880,12 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 	bool used_mempool;
 	unsigned long old, new;
 	bool validate_before_checksum = false;
+	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
 	void *data;
+	int ret;
 
-	BUG_ON(btree_node_write_in_flight(b));
-
-	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
-		return;
+	if (flags & BTREE_WRITE_ALREADY_STARTED)
+		goto do_write;
 
 	/*
 	 * We may only have a read lock on the btree node - the dirty bit is our
@@ -1768,35 +1900,46 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 		if (!(old & (1 << BTREE_NODE_dirty)))
 			return;
 
-		if (!btree_node_may_write(b))
+		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
+		    !(old & (1 << BTREE_NODE_need_write)))
 			return;
 
-		if (old & (1 << BTREE_NODE_never_write))
+		if (old &
+		    ((1 << BTREE_NODE_never_write)|
+		     (1 << BTREE_NODE_write_blocked)))
 			return;
 
-		if (old & (1 << BTREE_NODE_write_in_flight)) {
-			/*
-			 * XXX waiting on btree writes with btree locks held -
-			 * this can deadlock, and we hit the write error path
-			 */
-			bch2_btree_node_wait_on_write(b);
-			continue;
-		}
+		if (b->written &&
+		    (old & (1 << BTREE_NODE_will_make_reachable)))
+			return;
+
+		if (old & (1 << BTREE_NODE_write_in_flight))
+			return;
+
+		if (flags & BTREE_WRITE_ONLY_IF_NEED)
+			type = new & BTREE_WRITE_TYPE_MASK;
+		new &= ~BTREE_WRITE_TYPE_MASK;
 
 		new &= ~(1 << BTREE_NODE_dirty);
 		new &= ~(1 << BTREE_NODE_need_write);
 		new |=  (1 << BTREE_NODE_write_in_flight);
+		new |=  (1 << BTREE_NODE_write_in_flight_inner);
 		new |=  (1 << BTREE_NODE_just_written);
 		new ^=  (1 << BTREE_NODE_write_idx);
 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
 
+	if (new & (1U << BTREE_NODE_need_write))
+		return;
+do_write:
+	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
 	atomic_dec(&c->btree_cache.dirty);
 
 	BUG_ON(btree_node_fake(b));
 	BUG_ON((b->will_make_reachable != 0) != !b->written);
 
-	BUG_ON(b->written >= c->opts.btree_node_size);
-	BUG_ON(b->written & (c->opts.block_size - 1));
+	BUG_ON(b->written >= btree_sectors(c));
+	BUG_ON(b->written & (block_sectors(c) - 1));
 	BUG_ON(bset_written(b, btree_bset_last(b)));
 	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
 	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
@@ -1857,6 +2000,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 	u64s = bch2_sort_keys(i->start, &sort_iter, false);
 	le16_add_cpu(&i->u64s, u64s);
 
+	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
+
 	set_needs_whiteout(i, false);
 
 	/* do we have data to write? */
@@ -1866,16 +2011,19 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 	bytes_to_write = vstruct_end(i) - data;
 	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
 
+	if (!b->written &&
+	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
+		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
+
 	memset(data + bytes_to_write, 0,
 	       (sectors_to_write << 9) - bytes_to_write);
 
-	BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
+	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
 	BUG_ON(i->seq != b->data->keys.seq);
 
-	i->version = c->sb.version < bcachefs_metadata_version_new_versioning
-		? cpu_to_le16(BCH_BSET_VERSION_OLD)
-		: cpu_to_le16(c->sb.version);
+	i->version = cpu_to_le16(c->sb.version);
+	SET_BSET_OFFSET(i, b->written);
 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
 
 	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
@@ -1890,7 +2038,10 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 	    validate_bset_for_write(c, b, i, sectors_to_write))
 		goto err;
 
-	bset_encrypt(c, i, b->written << 9);
+	ret = bset_encrypt(c, i, b->written << 9);
+	if (bch2_fs_fatal_err_on(ret, c,
+			"error encrypting btree node: %i\n", ret))
+		goto err;
 
 	nonce = btree_nonce(i, b->written << 9);
 
@@ -1926,47 +2077,36 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
 	    c->opts.nochanges)
 		goto err;
 
-	trace_btree_write(b, bytes_to_write, sectors_to_write);
+	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
 
-	wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+	wbio = container_of(bio_alloc_bioset(NULL,
 				buf_pages(data, sectors_to_write << 9),
+				REQ_OP_WRITE|REQ_META,
+				GFP_NOFS,
 				&c->btree_bio),
 			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
 	wbio->data			= data;
-	wbio->bytes			= bytes;
+	wbio->data_bytes		= bytes;
+	wbio->sector_offset		= b->written;
 	wbio->wbio.c			= c;
 	wbio->wbio.used_mempool		= used_mempool;
-	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META;
+	wbio->wbio.first_btree_write	= !b->written;
 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
 	wbio->wbio.bio.bi_private	= b;
 
 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
 
-	/*
-	 * If we're appending to a leaf node, we don't technically need FUA -
-	 * this write just needs to be persisted before the next journal write,
-	 * which will be marked FLUSH|FUA.
-	 *
-	 * Similarly if we're writing a new btree root - the pointer is going to
-	 * be in the next journal entry.
-	 *
-	 * But if we're writing a new btree node (that isn't a root) or
-	 * appending to a non leaf btree node, we need either FUA or a flush
-	 * when we write the parent with the new pointer. FUA is cheaper than a
-	 * flush, and writes appending to leaf nodes aren't blocking anything so
-	 * just make all btree node writes FUA to keep things sane.
-	 */
-
 	bkey_copy(&wbio->key, &b->key);
 
-	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&wbio->key)), ptr)
-		ptr->offset += b->written;
-
 	b->written += sectors_to_write;
 
-	atomic64_inc(&c->btree_writes_nr);
-	atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
+		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
+			cpu_to_le16(b->written);
+
+	atomic64_inc(&c->btree_write_stats[type].nr);
+	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
 
 	INIT_WORK(&wbio->work, btree_write_submit);
 	queue_work(c->io_complete_wq, &wbio->work);
@@ -1976,7 +2116,7 @@ err:
 	b->written += sectors_to_write;
 nowrite:
 	btree_bounce_free(c, bytes, used_mempool, data);
-	btree_node_write_done(c, b);
+	__btree_node_write_done(c, b);
 }
 
 /*
@@ -2039,12 +2179,13 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
  * Use this one if the node is intent locked:
  */
 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
-			   enum six_lock_type lock_type_held)
+			   enum six_lock_type lock_type_held,
+			   unsigned flags)
 {
 	if (lock_type_held == SIX_LOCK_intent ||
 	    (lock_type_held == SIX_LOCK_read &&
 	     six_lock_tryupgrade(&b->c.lock))) {
-		__bch2_btree_node_write(c, b);
+		__bch2_btree_node_write(c, b, flags);
 
 		/* don't cycle lock unnecessarily: */
 		if (btree_node_just_written(b) &&
@@ -2056,64 +2197,70 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 		if (lock_type_held == SIX_LOCK_read)
 			six_lock_downgrade(&b->c.lock);
 	} else {
-		__bch2_btree_node_write(c, b);
+		__bch2_btree_node_write(c, b, flags);
 		if (lock_type_held == SIX_LOCK_write &&
 		    btree_node_just_written(b))
 			bch2_btree_post_write_cleanup(c, b);
 	}
 }
 
-static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
+static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
 {
 	struct bucket_table *tbl;
 	struct rhash_head *pos;
 	struct btree *b;
 	unsigned i;
+	bool ret = false;
restart:
 	rcu_read_lock();
 	for_each_cached_btree(b, c, tbl, i, pos)
 		if (test_bit(flag, &b->flags)) {
 			rcu_read_unlock();
 			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
+			ret = true;
 			goto restart;
-
 		}
 	rcu_read_unlock();
-}
 
-void bch2_btree_flush_all_reads(struct bch_fs *c)
-{
-	__bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
+	return ret;
 }
 
-void bch2_btree_flush_all_writes(struct bch_fs *c)
+bool bch2_btree_flush_all_reads(struct bch_fs *c)
 {
-	__bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
 }
 
-void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
+bool bch2_btree_flush_all_writes(struct bch_fs *c)
 {
-	struct bucket_table *tbl;
-	struct rhash_head *pos;
-	struct btree *b;
-	unsigned i;
-
-	rcu_read_lock();
-	for_each_cached_btree(b, c, tbl, i, pos) {
-		unsigned long flags = READ_ONCE(b->flags);
+	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+}
 
-		if (!(flags & (1 << BTREE_NODE_dirty)))
-			continue;
+static const char * const bch2_btree_write_types[] = {
+#define x(t, n)	[n] = #t,
+	BCH_BTREE_WRITE_TYPES()
+	NULL
+};
 
-		pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
-		       b,
-		       (flags & (1 << BTREE_NODE_dirty)) != 0,
-		       (flags & (1 << BTREE_NODE_need_write)) != 0,
-		       b->c.level,
-		       b->written,
-		       !list_empty_careful(&b->write_blocked),
-		       b->will_make_reachable != 0,
-		       b->will_make_reachable & 1);
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	printbuf_tabstop_push(out, 20);
+	printbuf_tabstop_push(out, 10);
+
+	prt_tab(out);
+	prt_str(out, "nr");
+	prt_tab(out);
+	prt_str(out, "size");
+	prt_newline(out);
+
+	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
+		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);
+
+		prt_printf(out, "%s:", bch2_btree_write_types[i]);
+		prt_tab(out);
+		prt_u64(out, nr);
+		prt_tab(out);
+		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+		prt_newline(out);
 	}
-	rcu_read_unlock();
 }