X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libbcachefs%2Fbtree_io.c;h=86415701b824076f10fc79e8bee220ed7075e328;hb=HEAD;hp=29163b46ba5980033fdbc9da5a74db9019c5c063;hpb=b0c9ad15f4e5cee60973a8f5f6dc49acfeec9755;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index 29163b4..8641570 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -14,13 +14,14 @@
 #include "debug.h"
 #include "error.h"
 #include "extents.h"
-#include "io.h"
+#include "io_write.h"
 #include "journal_reclaim.h"
 #include "journal_seq_blacklist.h"
+#include "recovery.h"
 #include "super-io.h"
+#include "trace.h"
 
 #include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
 
 void bch2_btree_node_io_unlock(struct btree *b)
 {
@@ -102,7 +103,7 @@ static void btree_bounce_free(struct bch_fs *c, size_t size,
 	if (used_mempool)
 		mempool_free(p, &c->btree_bounce_pool);
 	else
-		vpfree(p, size);
+		kvfree(p);
 }
 
 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
@@ -111,13 +112,13 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	unsigned flags = memalloc_nofs_save();
 	void *p;
 
-	BUG_ON(size > btree_bytes(c));
+	BUG_ON(size > c->opts.btree_node_size);
 	*used_mempool = false;
-	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
-		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
 	}
 	memalloc_nofs_restore(flags);
 	return p;
@@ -173,8 +174,8 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
 
-	for (k = unwritten_whiteouts_start(c, b);
-	     k != unwritten_whiteouts_end(c, b);
+	for (k = unwritten_whiteouts_start(b);
+	     k != unwritten_whiteouts_end(b);
 	     k = bkey_p_next(k))
 		*--ptrs = k;
 
@@ -183,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 	k = new_whiteouts;
 
 	while (ptrs != ptrs_end) {
-		bkey_copy(k, *ptrs);
+		bkey_p_copy(k, *ptrs);
 		k = bkey_p_next(k);
 		ptrs++;
 	}
@@ -191,7 +192,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 	verify_no_dups(b, new_whiteouts,
 		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
 
-	memcpy_u64s(unwritten_whiteouts_start(c, b),
+	memcpy_u64s(unwritten_whiteouts_start(b),
 		    new_whiteouts, b->whiteout_u64s);
 
 	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
@@ -259,7 +260,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
 			n = bkey_p_next(k);
 
 			if (!bkey_deleted(k)) {
-				bkey_copy(out, k);
+				bkey_p_copy(out, k);
 				out = bkey_p_next(out);
 			} else {
 				BUG_ON(k->needs_whiteout);
@@ -291,7 +292,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
 			    bool filter_whiteouts)
 {
 	struct btree_node *out;
-	struct sort_iter sort_iter;
+	struct sort_iter_stack sort_iter;
 	struct bset_tree *t;
 	struct bset *start_bset = bset(b, &b->set[start_idx]);
 	bool used_mempool = false;
@@ -300,33 +301,33 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
 	bool sorting_entire_node = start_idx == 0 &&
 		end_idx == b->nsets;
 
-	sort_iter_init(&sort_iter, b);
+	sort_iter_stack_init(&sort_iter, b);
 
 	for (t = b->set + start_idx;
 	     t < b->set + end_idx;
 	     t++) {
 		u64s += le16_to_cpu(bset(b, t)->u64s);
-		sort_iter_add(&sort_iter,
+		sort_iter_add(&sort_iter.iter,
 			      btree_bkey_first(b, t),
 			      btree_bkey_last(b, t));
 	}
 
 	bytes = sorting_entire_node
-		? btree_bytes(c)
+		? btree_buf_bytes(b)
 		: __vstruct_bytes(struct btree_node, u64s);
 
 	out = btree_bounce_alloc(c, bytes, &used_mempool);
 
 	start_time = local_clock();
 
-	u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
+	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
 
 	out->keys.u64s = cpu_to_le16(u64s);
 
 	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
 
 	if (sorting_entire_node)
-		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+		time_stats_update(&c->times[BCH_TIME_btree_node_sort],
 				       start_time);
 
 	/* Make sure we preserve bset journal_seq: */
@@ -335,9 +336,9 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
 	start_bset->journal_seq = cpu_to_le64(seq);
 
 	if (sorting_entire_node) {
-		unsigned u64s = le16_to_cpu(out->keys.u64s);
+		u64s = le16_to_cpu(out->keys.u64s);
 
-		BUG_ON(bytes != btree_bytes(c));
+		BUG_ON(bytes != btree_buf_bytes(b));
 
 		/*
 		 * Our temporary buffer is the same size as the btree node's
@@ -396,7 +397,7 @@ void bch2_btree_sort_into(struct bch_fs *c,
 				&dst->format,
 				true);
 
-	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+	time_stats_update(&c->times[BCH_TIME_btree_node_sort],
 			       start_time);
 
 	set_btree_bset_end(dst, dst->set);
@@ -409,8 +410,6 @@ void bch2_btree_sort_into(struct bch_fs *c,
 	bch2_verify_btree_nr_keys(dst);
 }
 
-#define SORT_CRIT	(4096 / sizeof(u64))
-
 /*
  * We're about to add another bset to the btree node, so if there's currently
  * too many bsets - sort some of them together:
@@ -483,7 +482,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 	struct btree_node_entry *bne;
 	bool reinit_iter = false;
 
-	EBUG_ON(!(b->c.lock.state.seq & 1));
+	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
 	BUG_ON(bset_written(b, bset(b, &b->set[1])));
 	BUG_ON(btree_node_just_written(b));
 
@@ -503,7 +502,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 
 	bne = want_new_bset(c, b);
 	if (bne)
-		bch2_bset_init_next(c, b, bne);
+		bch2_bset_init_next(b, bne);
 
 	bch2_btree_build_aux_trees(b);
 
@@ -511,16 +510,6 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 		bch2_trans_node_reinit_iter(trans, b);
 }
 
-static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
-			      struct btree *b)
-{
-	prt_printf(out, "%s level %u/%u\n  ",
-		   bch2_btree_ids[b->c.btree_id],
-		   b->c.level,
-		   c->btree_roots[b->c.btree_id].level);
-	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
-}
-
 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 			  struct bch_dev *ca,
 			  struct btree *b, struct bset *i,
@@ -533,50 +522,28 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 	if (ca)
 		prt_printf(out, "on %s ", ca->name);
 	prt_printf(out, "at btree ");
-	btree_pos_to_text(out, c, b);
+	bch2_btree_pos_to_text(out, c, b);
 
-	prt_printf(out, "\n  node offset %u", b->written);
+	prt_printf(out, "\n  node offset %u/%u",
+		   b->written, btree_ptr_sectors_written(&b->key));
 	if (i)
 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
 	prt_str(out, ": ");
 }
 
-enum btree_err_type {
-	/*
-	 * We can repair this locally, and we're after the checksum check so
-	 * there's no need to try another replica:
-	 */
-	BTREE_ERR_FIXABLE,
-	/*
-	 * We can repair this if we have to, but we should try reading another
-	 * replica if we can:
-	 */
-	BTREE_ERR_WANT_RETRY,
-	/*
-	 * Read another replica if we have one, otherwise consider the whole
-	 * node bad:
-	 */
-	BTREE_ERR_MUST_RETRY,
-	BTREE_ERR_BAD_NODE,
-	BTREE_ERR_INCOMPATIBLE,
-};
-
-enum btree_validate_ret {
-	BTREE_RETRY_READ = 64,
-};
-
-static int __btree_err(enum btree_err_type type,
+__printf(9, 10)
+static int __btree_err(int ret,
 		       struct bch_fs *c,
 		       struct bch_dev *ca,
 		       struct btree *b,
 		       struct bset *i,
 		       int write,
 		       bool have_retry,
+		       enum bch_sb_error_id err_type,
 		       const char *fmt, ...)
 {
 	struct printbuf out = PRINTBUF;
 	va_list args;
-	int ret = -BCH_ERR_fsck_fix;
 
 	btree_err_msg(&out, c, ca, b, i, b->written, write);
@@ -592,27 +559,31 @@ static int __btree_err(enum btree_err_type type,
 		goto out;
 	}
 
-	if (!have_retry && type == BTREE_ERR_WANT_RETRY)
-		type = BTREE_ERR_FIXABLE;
-	if (!have_retry && type == BTREE_ERR_MUST_RETRY)
-		type = BTREE_ERR_BAD_NODE;
+	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
+		ret = -BCH_ERR_btree_node_read_err_fixable;
+	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
+		ret = -BCH_ERR_btree_node_read_err_bad_node;
+
+	if (ret != -BCH_ERR_btree_node_read_err_fixable)
+		bch2_sb_error_count(c, err_type);
 
-	switch (type) {
-	case BTREE_ERR_FIXABLE:
-		mustfix_fsck_err(c, "%s", out.buf);
+	switch (ret) {
+	case -BCH_ERR_btree_node_read_err_fixable:
+		ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
+		if (ret != -BCH_ERR_fsck_fix &&
+		    ret != -BCH_ERR_fsck_ignore)
+			goto fsck_err;
 		ret = -BCH_ERR_fsck_fix;
 		break;
-	case BTREE_ERR_WANT_RETRY:
-	case BTREE_ERR_MUST_RETRY:
+	case -BCH_ERR_btree_node_read_err_want_retry:
+	case -BCH_ERR_btree_node_read_err_must_retry:
 		bch2_print_string_as_lines(KERN_ERR, out.buf);
-		ret = BTREE_RETRY_READ;
 		break;
-	case BTREE_ERR_BAD_NODE:
+	case -BCH_ERR_btree_node_read_err_bad_node:
 		bch2_print_string_as_lines(KERN_ERR, out.buf);
-		bch2_topology_error(c);
-		ret = -BCH_ERR_need_topology_repair;
+		ret = bch2_topology_error(c);
 		break;
-	case BTREE_ERR_INCOMPATIBLE:
+	case -BCH_ERR_btree_node_read_err_incompatible:
 		bch2_print_string_as_lines(KERN_ERR, out.buf);
 		ret = -BCH_ERR_fsck_errors_not_fixed;
 		break;
@@ -625,12 +596,17 @@ fsck_err:
 	return ret;
 }
 
-#define btree_err(type, c, ca, b, i, msg, ...)				\
+#define btree_err(type, c, ca, b, i, _err_type, msg, ...)		\
 ({									\
-	int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
+	int _ret = __btree_err(type, c, ca, b, i, write, have_retry,	\
+			       BCH_FSCK_ERR_##_err_type,		\
+			       msg, ##__VA_ARGS__);			\
 									\
-	if (_ret != -BCH_ERR_fsck_fix)					\
+	if (_ret != -BCH_ERR_fsck_fix) {				\
+		ret = _ret;						\
 		goto fsck_err;						\
+	}								\
+									\
 	*saw_error = true;						\
 })
@@ -644,9 +620,6 @@ __cold
 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 {
 	struct bset_tree *t;
-	struct bkey_s_c k;
-	struct bkey unpacked;
-	struct btree_node_iter iter;
 
 	for_each_bset(b, t) {
 		struct bset *i = bset(b, t);
@@ -682,6 +655,9 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 	bch2_bset_set_no_aux_tree(b, b->set);
 	bch2_btree_build_aux_trees(b);
 
+	struct bkey_s_c k;
+	struct bkey unpacked;
+	struct btree_node_iter iter;
 	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
 		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
@@ -694,19 +670,22 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 int write, bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
-	const char *err;
 	struct printbuf buf1 = PRINTBUF;
 	struct printbuf buf2 = PRINTBUF;
 	int ret = 0;
 
-	btree_err_on((version != BCH_BSET_VERSION_OLD &&
-		      version < bcachefs_metadata_version_min) ||
-		     version >= bcachefs_metadata_version_max,
-		     BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
-		     "unsupported bset version");
+	btree_err_on(!bch2_version_compatible(version),
+		     -BCH_ERR_btree_node_read_err_incompatible,
+		     c, ca, b, i,
+		     btree_node_unsupported_version,
+		     "unsupported bset version %u.%u",
+		     BCH_VERSION_MAJOR(version),
+		     BCH_VERSION_MINOR(version));
 
 	if (btree_err_on(version < c->sb.version_min,
-			 BTREE_ERR_FIXABLE, c, NULL, b, i,
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, NULL, b, i,
+			 btree_node_bset_older_than_sb_min,
 			 "bset version %u older than superblock version_min %u",
 			 version, c->sb.version_min)) {
 		mutex_lock(&c->sb_lock);
@@ -715,8 +694,11 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 		mutex_unlock(&c->sb_lock);
 	}
 
-	if (btree_err_on(version > c->sb.version,
-			 BTREE_ERR_FIXABLE, c, NULL, b, i,
+	if (btree_err_on(BCH_VERSION_MAJOR(version) >
+			 BCH_VERSION_MAJOR(c->sb.version),
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, NULL, b, i,
+			 btree_node_bset_newer_than_sb,
 			 "bset version %u newer than superblock version %u",
 			 version, c->sb.version)) {
 		mutex_lock(&c->sb_lock);
@@ -726,11 +708,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 	}
 
 	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
-		     BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
+		     -BCH_ERR_btree_node_read_err_incompatible,
+		     c, ca, b, i,
+		     btree_node_unsupported_version,
 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
 
 	if (btree_err_on(offset + sectors > btree_sectors(c),
-			 BTREE_ERR_FIXABLE, c, ca, b, i,
+			 -BCH_ERR_btree_node_read_err_fixable,
+			 c, ca, b, i,
+			 bset_past_end_of_btree_node,
 			 "bset past end of btree node")) {
 		i->u64s = 0;
 		ret = 0;
@@ -738,12 +724,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 	}
 
 	btree_err_on(offset && !i->u64s,
-		     BTREE_ERR_FIXABLE, c, ca, b, i,
+		     -BCH_ERR_btree_node_read_err_fixable,
+		     c, ca, b, i,
+		     bset_empty,
 		     "empty bset");
 
-	btree_err_on(BSET_OFFSET(i) &&
-		     BSET_OFFSET(i) != offset,
-		     BTREE_ERR_WANT_RETRY, c, ca, b, i,
+	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
+		     -BCH_ERR_btree_node_read_err_want_retry,
+		     c, ca, b, i,
+		     bset_wrong_sector_offset,
 		     "bset at wrong sector offset");
 
 	if (!offset) {
@@ -757,16 +746,22 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
 		/* XXX endianness */
 		btree_err_on(bp->seq != bn->keys.seq,
-			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     bset_bad_seq,
 			     "incorrect sequence number (wrong btree node)");
 	}
 
 	btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
-		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
+		     -BCH_ERR_btree_node_read_err_must_retry,
+		     c, ca, b, i,
+		     btree_node_bad_btree,
 		     "incorrect btree id");
 
 	btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
-		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
+		     -BCH_ERR_btree_node_read_err_must_retry,
+		     c, ca, b, i,
+		     btree_node_bad_level,
 		     "incorrect level");
 
 	if (!write)
@@ -783,7 +778,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 		}
 
 		btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
-			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     btree_node_bad_min_key,
 			     "incorrect min_key: got %s should be %s",
 			     (printbuf_reset(&buf1),
 			      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
@@ -792,7 +789,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 	}
 
 	btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
-		     BTREE_ERR_MUST_RETRY, c, ca, b, i,
+		     -BCH_ERR_btree_node_read_err_must_retry,
+		     c, ca, b, i,
+		     btree_node_bad_max_key,
 		     "incorrect max key %s",
 		     (printbuf_reset(&buf1),
 		      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
@@ -801,10 +800,14 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 		compat_btree_node(b->c.level, b->c.btree_id, version,
 				  BSET_BIG_ENDIAN(i), write, bn);
 
-	err = bch2_bkey_format_validate(&bn->format);
-	btree_err_on(err,
-		     BTREE_ERR_BAD_NODE, c, ca, b, i,
-		     "invalid bkey format: %s", err);
+	btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
+		     -BCH_ERR_btree_node_read_err_bad_node,
+		     c, ca, b, i,
+		     btree_node_bad_format,
+		     "invalid bkey format: %s\n  %s", buf1.buf,
+		     (printbuf_reset(&buf2),
+		      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
+	printbuf_reset(&buf1);
 
 	compat_bformat(b->c.level, b->c.btree_id, version,
 		       BSET_BIG_ENDIAN(i), write,
@@ -823,10 +826,27 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
 			    struct printbuf *err)
 {
 	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
-		(!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
 		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
 }
 
+static bool __bkey_valid(struct bch_fs *c, struct btree *b,
+			 struct bset *i, struct bkey_packed *k)
+{
+	if (bkey_p_next(k) > vstruct_last(i))
+		return false;
+
+	if (k->format > KEY_FORMAT_CURRENT)
+		return false;
+
+	struct printbuf buf = PRINTBUF;
+	struct bkey tmp;
+	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
+	bool ret = !__bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
+	printbuf_exit(&buf);
+	return ret;
+}
+
 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			      struct bset *i, int write,
 			      bool have_retry, bool *saw_error)
@@ -842,22 +862,23 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 	     k != vstruct_last(i);) {
 		struct bkey_s u;
 		struct bkey tmp;
+		unsigned next_good_key;
 
 		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
-				 BTREE_ERR_FIXABLE, c, NULL, b, i,
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, i,
+				 btree_node_bkey_past_bset_end,
 				 "key extends past end of bset")) {
 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
 			break;
 		}
 
 		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
-				 BTREE_ERR_FIXABLE, c, NULL, b, i,
-				 "invalid bkey format %u", k->format)) {
-			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_p_next(k),
-					  (u64 *) vstruct_end(i) - (u64 *) k);
-			continue;
-		}
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, i,
+				 btree_node_bkey_bad_format,
+				 "invalid bkey format %u", k->format))
+			goto drop_this_key;
 
 		/* XXX: validate k->u64s */
 		if (!write)
@@ -870,17 +891,15 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 		printbuf_reset(&buf);
 		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
 			printbuf_reset(&buf);
-			prt_printf(&buf, "invalid bkey: ");
 			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
 			prt_printf(&buf, "\n  ");
 			bch2_bkey_val_to_text(&buf, c, u.s_c);
 
-			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
-
-			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_p_next(k),
-					  (u64 *) vstruct_end(i) - (u64 *) k);
-			continue;
+			btree_err(-BCH_ERR_btree_node_read_err_fixable,
+				  c, NULL, b, i,
+				  btree_node_bad_bkey,
+				  "invalid bkey: %s", buf.buf);
+			goto drop_this_key;
 		}
 
 		if (write)
@@ -897,18 +916,45 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			prt_printf(&buf, " > ");
 			bch2_bkey_to_text(&buf, u.k);
 
-			bch2_dump_bset(c, b, i, 0);
-
-			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
-				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-				memmove_u64s_down(k, bkey_p_next(k),
-						  (u64 *) vstruct_end(i) - (u64 *) k);
-				continue;
-			}
+			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
+				      c, NULL, b, i,
+				      btree_node_bkey_out_of_order,
+				      "%s", buf.buf))
+				goto drop_this_key;
 		}
 
 		prev = k;
 		k = bkey_p_next(k);
+		continue;
+drop_this_key:
+		next_good_key = k->u64s;
+
+		if (!next_good_key ||
+		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
+		     version >= bcachefs_metadata_version_snapshot)) {
+			/*
+			 * only do scanning if bch2_bkey_compat() has nothing to
+			 * do
+			 */
+
+			if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
+				for (next_good_key = 1;
+				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
+				     next_good_key++)
+					if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
+						goto got_good_key;
+
+			}
+
+			/*
+			 * didn't find a good key, have to truncate the rest of
+			 * the bset
+			 */
+			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
+		}
+got_good_key:
+		le16_add_cpu(&i->u64s, -next_good_key);
+		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
 	}
 fsck_err:
 	printbuf_exit(&buf);
@@ -922,68 +968,92 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	struct sort_iter *iter;
 	struct btree_node *sorted;
 	struct bkey_packed *k;
-	struct bch_extent_ptr *ptr;
 	struct bset *i;
 	bool used_mempool, blacklisted;
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned blacklisted_written, nonblacklisted_written = 0;
 	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
 	struct printbuf buf = PRINTBUF;
 	int ret = 0, retry_read = 0, write = READ;
+	u64 start_time = local_clock();
 
 	b->version_ondisk = U16_MAX;
 
 	/* We might get called multiple times on read retry: */
 	b->written = 0;
 
-	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
-	sort_iter_init(iter, b);
-	iter->size = (btree_blocks(c) + 1) * 2;
+	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
+	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
 
 	if (bch2_meta_read_fault("btree"))
-		btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
+			  c, ca, b, NULL,
+			  btree_node_fault_injected,
 			  "dynamic fault");
 
 	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
-		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+		     -BCH_ERR_btree_node_read_err_must_retry,
+		     c, ca, b, NULL,
+		     btree_node_bad_magic,
 		     "bad magic: want %llx, got %llx",
 		     bset_magic(c), le64_to_cpu(b->data->magic));
 
-	btree_err_on(!b->data->keys.seq,
-		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
-		     "bad btree header: seq 0");
-
 	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
 		struct bch_btree_ptr_v2 *bp =
 			&bkey_i_to_btree_ptr_v2(&b->key)->v;
 
+		bch2_bpos_to_text(&buf, b->data->min_key);
+		prt_str(&buf, "-");
+		bch2_bpos_to_text(&buf, b->data->max_key);
+
 		btree_err_on(b->data->keys.seq != bp->seq,
-			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
-			     "got wrong btree node (seq %llx want %llx)",
-			     b->data->keys.seq, bp->seq);
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     btree_node_bad_seq,
+			     "got wrong btree node (want %llx got %llx)\n"
+			     "got btree %s level %llu pos %s",
+			     bp->seq, b->data->keys.seq,
+			     bch2_btree_id_str(BTREE_NODE_ID(b->data)),
+			     BTREE_NODE_LEVEL(b->data),
+			     buf.buf);
+	} else {
+		btree_err_on(!b->data->keys.seq,
+			     -BCH_ERR_btree_node_read_err_must_retry,
+			     c, ca, b, NULL,
+			     btree_node_bad_seq,
+			     "bad btree header: seq 0");
 	}
 
 	while (b->written < (ptr_written ?: btree_sectors(c))) {
 		unsigned sectors;
 		struct nonce nonce;
-		struct bch_csum csum;
 		bool first = !b->written;
+		bool csum_bad;
 
 		if (!b->written) {
 			i = &b->data->keys;
 
 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
-				     "unknown checksum type %llu",
-				     BSET_CSUM_TYPE(i));
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_unknown_csum,
+				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
 			nonce = btree_nonce(i, b->written << 9);
-			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
 
-			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
-				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
-				     "invalid checksum");
+			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
+			csum_bad = bch2_crc_cmp(b->data->csum, csum);
+			if (csum_bad)
+				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+			btree_err_on(csum_bad,
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_bad_csum,
+				     "%s",
+				     (printbuf_reset(&buf),
+				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
+				      buf.buf));
 
 			ret = bset_encrypt(c, i, b->written << 9);
 			if (bch2_fs_fatal_err_on(ret, c,
@@ -992,7 +1062,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
 				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
-				     BTREE_ERR_INCOMPATIBLE, c, NULL, b, NULL,
+				     -BCH_ERR_btree_node_read_err_incompatible,
+				     c, NULL, b, NULL,
+				     btree_node_unsupported_version,
 				     "btree node does not have NEW_EXTENT_OVERWRITE set");
 
 			sectors = vstruct_sectors(b->data, c->block_bits);
@@ -1004,16 +1076,25 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 				break;
 
 			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
-				     "unknown checksum type %llu",
-				     BSET_CSUM_TYPE(i));
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_unknown_csum,
+				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
 			nonce = btree_nonce(i, b->written << 9);
-			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
-
-			btree_err_on(bch2_crc_cmp(csum, bne->csum),
-				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
-				     "invalid checksum");
+			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
+			csum_bad = bch2_crc_cmp(bne->csum, csum);
+			if (csum_bad)
+				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+			btree_err_on(csum_bad,
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, i,
+				     bset_bad_csum,
+				     "%s",
+				     (printbuf_reset(&buf),
+				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
+				      buf.buf));
 
 			ret = bset_encrypt(c, i, b->written << 9);
 			if (bch2_fs_fatal_err_on(ret, c,
@@ -1045,12 +1126,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 					true);
 
 		btree_err_on(blacklisted && first,
-			     BTREE_ERR_FIXABLE, c, ca, b, i,
+			     -BCH_ERR_btree_node_read_err_fixable,
+			     c, ca, b, i,
+			     bset_blacklisted_journal_seq,
 			     "first btree node bset has blacklisted journal seq (%llu)",
 			     le64_to_cpu(i->journal_seq));
 
 		btree_err_on(blacklisted && ptr_written,
-			     BTREE_ERR_FIXABLE, c, ca, b, i,
+			     -BCH_ERR_btree_node_read_err_fixable,
+			     c, ca, b, i,
+			     first_bset_blacklisted_journal_seq,
 			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
 			     le64_to_cpu(i->journal_seq),
 			     b->written, b->written + sectors, ptr_written);
@@ -1063,40 +1148,30 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		sort_iter_add(iter,
 			      vstruct_idx(i, 0),
 			      vstruct_last(i));
-
-		nonblacklisted_written = b->written;
 	}
 
 	if (ptr_written) {
 		btree_err_on(b->written < ptr_written,
-			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+			     -BCH_ERR_btree_node_read_err_want_retry,
+			     c, ca, b, NULL,
+			     btree_node_data_missing,
 			     "btree node data missing: expected %u sectors, found %u",
 			     ptr_written, b->written);
 	} else {
 		for (bne = write_block(b);
-		     bset_byte_offset(b, bne) < btree_bytes(c);
+		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
 		     bne = (void *) bne + block_bytes(c))
 			btree_err_on(bne->keys.seq == b->data->keys.seq &&
 				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
-				     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+				     -BCH_ERR_btree_node_read_err_want_retry,
+				     c, ca, b, NULL,
+				     btree_node_bset_after_end,
 				     "found bset signature after last bset");
-
-		/*
-		 * Blacklisted bsets are those that were written after the most recent
-		 * (flush) journal write. Since there wasn't a flush, they may not have
-		 * made it to all devices - which means we shouldn't write new bsets
-		 * after them, as that could leave a gap and then reads from that device
-		 * wouldn't find all the bsets in that btree node - which means it's
-		 * important that we start writing new bsets after the most recent _non_
-		 * blacklisted bset:
-		 */
-		blacklisted_written = b->written;
-		b->written = nonblacklisted_written;
 	}
 
-	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
+	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
 	sorted->keys.u64s = 0;
 
 	set_btree_bset(b, b->set, &b->data->keys);
@@ -1112,7 +1187,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 	BUG_ON(b->nr.live_u64s != u64s);
 
-	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
 
 	if (updated_range)
 		bch2_btree_node_drop_keys_outside_node(b);
@@ -1134,7 +1209,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			prt_printf(&buf, "\n  ");
 			bch2_bkey_val_to_text(&buf, c, u.s_c);
 
-			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
+			btree_err(-BCH_ERR_btree_node_read_err_fixable,
+				  c, NULL, b, i,
+				  btree_node_bad_bkey,
+				  "%s", buf.buf);
 
 			btree_keys_account_key_drop(&b->nr, 0, k);
@@ -1161,9 +1239,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	btree_node_reset_sib_u64s(b);
 
 	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
 
-		if (ca->mi.state != BCH_MEMBER_STATE_rw)
+		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
 			set_btree_node_need_rewrite(b);
 	}
@@ -1172,9 +1250,11 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 out:
 	mempool_free(iter, &c->fill_iter);
 	printbuf_exit(&buf);
+	time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
 	return retry_read;
 fsck_err:
-	if (ret == BTREE_RETRY_READ)
+	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
+	    ret == -BCH_ERR_btree_node_read_err_must_retry)
 		retry_read = 1;
 	else
 		set_btree_node_read_error(b);
@@ -1203,7 +1283,7 @@ static void btree_node_read_work(struct work_struct *work)
 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
 		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
-		bio->bi_iter.bi_size	= btree_bytes(c);
+		bio->bi_iter.bi_size	= btree_buf_bytes(b);
 
 		if (rb->have_ioref) {
 			bio_set_dev(bio, ca->disk_sb.bdev);
@@ -1213,8 +1293,9 @@ static void btree_node_read_work(struct work_struct *work)
 		}
 start:
 		printbuf_reset(&buf);
-		btree_pos_to_text(&buf, c, b);
-		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
+		bch2_btree_pos_to_text(&buf, c, b);
+		bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+				   "btree read error %s for %s",
 				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
 		if (rb->have_ioref)
 			percpu_ref_put(&ca->io_ref);
@@ -1241,22 +1322,20 @@ start:
 		}
 	}
 
-	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
+	time_stats_update(&c->times[BCH_TIME_btree_node_read],
 			       rb->start_time);
 	bio_put(&rb->bio);
-	printbuf_exit(&buf);
 
 	if (saw_error && !btree_node_read_error(b)) {
-		struct printbuf buf = PRINTBUF;
-
+		printbuf_reset(&buf);
 		bch2_bpos_to_text(&buf, b->key.k.p);
 		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
-			 __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
-		printbuf_exit(&buf);
+			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
 
 		bch2_btree_node_rewrite_async(c, b);
 	}
 
+	printbuf_exit(&buf);
 	clear_btree_node_read_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
 }
@@ -1283,7 +1362,7 @@ struct btree_node_read_all {
 	unsigned		nr;
 	void			*buf[BCH_REPLICAS_MAX];
 	struct bio		*bio[BCH_REPLICAS_MAX];
-	int			err[BCH_REPLICAS_MAX];
+	blk_status_t		err[BCH_REPLICAS_MAX];
 };
 
 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
@@ -1328,10 +1407,9 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
 	return offset;
 }
 
-static void btree_node_read_all_replicas_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
 {
-	struct btree_node_read_all *ra =
-		container_of(cl, struct btree_node_read_all, cl);
+	closure_type(ra, struct btree_node_read_all, cl);
 	struct bch_fs *c = ra->c;
 	struct btree *b = ra->b;
 	struct printbuf buf = PRINTBUF;
@@ -1360,14 +1438,20 @@
 		}
 
 		written2 = btree_node_sectors_written(c, ra->buf[i]);
-		if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, NULL,
+				 btree_node_replicas_sectors_written_mismatch,
 				 "btree node sectors written mismatch: %u != %u",
 				 written, written2) ||
 		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
-				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, NULL,
+				 btree_node_bset_after_end,
 				 "found bset signature after last bset") ||
 		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
-				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+				 -BCH_ERR_btree_node_read_err_fixable,
+				 c, NULL, b, NULL,
+				 btree_node_replicas_data_mismatch,
 				 "btree node replicas content mismatch"))
 			dump_bset_maps = true;
@@ -1427,7 +1511,7 @@ fsck_err:
 	}
 
 	if (best >= 0) {
-		memcpy(b->data, ra->buf[best], btree_bytes(c));
+		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
 		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
 	} else {
 		ret = -1;
@@ -1483,7 +1567,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
 	if (!ra)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
 
 	closure_init(&ra->cl, NULL);
 	ra->c	= c;
@@ -1493,7 +1577,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 	for (i = 0; i < ra->nr; i++) {
 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
 		ra->bio[i] = bio_alloc_bioset(NULL,
-					      buf_pages(ra->buf[i], btree_bytes(c)),
+					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
 					      REQ_OP_READ|REQ_SYNC|REQ_META,
 					      GFP_NOFS,
 					      &c->btree_bio);
@@ -1513,7 +1597,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 		rb->pick		= pick;
 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
-		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
 
 		if (rb->have_ioref) {
 			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -1531,7 +1615,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	if (sync) {
 		closure_sync(&ra->cl);
-		btree_node_read_all_replicas_done(&ra->cl);
+		btree_node_read_all_replicas_done(&ra->cl.work);
 	} else {
 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
 			    c->io_complete_wq);
@@ -1540,16 +1624,17 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 	return 0;
 }
 
-void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
+void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
 			  bool sync)
 {
+	struct bch_fs *c = trans->c;
 	struct extent_ptr_decoded pick;
 	struct btree_read_bio *rb;
 	struct bch_dev *ca;
 	struct bio *bio;
 	int ret;
 
-	trace_and_count(c, btree_node_read, c, b);
+	trace_and_count(c, btree_node_read, trans, b);
 
 	if (bch2_verify_all_btree_replicas &&
 	    !btree_node_read_all_replicas(c, b, sync))
@@ -1562,10 +1647,11 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 		struct printbuf buf = PRINTBUF;
 
 		prt_str(&buf, "btree node read error: no device to read from\n at ");
-		btree_pos_to_text(&buf, c, b);
+		bch2_btree_pos_to_text(&buf, c, b);
 		bch_err(c, "%s", buf.buf);
 
-		if (test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
+		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
 			bch2_fatal_error(c);
 
 		set_btree_node_read_error(b);
@@ -1578,9 +1664,9 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 
 	bio = bio_alloc_bioset(NULL,
-			       buf_pages(b->data, btree_bytes(c)),
+			       buf_pages(b->data, btree_buf_bytes(b)),
 			       REQ_OP_READ|REQ_SYNC|REQ_META,
-			       GFP_NOIO,
+			       GFP_NOFS,
 			       &c->btree_bio);
 	rb = container_of(bio, struct btree_read_bio, bio);
 	rb->c			= c;
@@ -1592,7 +1678,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	INIT_WORK(&rb->work, btree_node_read_work);
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
 	bio->bi_end_io		= btree_node_read_endio;
-	bch2_bio_map(bio, b->data, btree_bytes(c));
+	bch2_bio_map(bio, b->data, btree_buf_bytes(b));
 
 	if (rb->have_ioref) {
 		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -1601,7 +1687,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 
 	if (sync) {
 		submit_bio_wait(bio);
-
+		bch2_latency_acct(ca, rb->start_time, READ);
 		btree_node_read_work(&rb->work);
 	} else {
 		submit_bio(bio);
@@ -1627,12 +1713,12 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
 	closure_init_stack(&cl);
 
 	do {
-		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
 		closure_sync(&cl);
 	} while (ret);
 
 	b = bch2_btree_node_mem_alloc(trans, level != 0);
-	bch2_btree_cache_cannibalize_unlock(c);
+	bch2_btree_cache_cannibalize_unlock(trans);
 
 	BUG_ON(IS_ERR(b));
@@ -1641,7 +1727,7 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
 
 	set_btree_node_read_in_flight(b);
 
-	bch2_btree_node_read(c, b, true);
+	bch2_btree_node_read(trans, b, true);
 
 	if (btree_node_read_error(b)) {
 		bch2_btree_node_hash_remove(&c->btree_cache, b);
@@ -1650,7 +1736,7 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
 		list_move(&b->list, &c->btree_cache.freeable);
 		mutex_unlock(&c->btree_cache.lock);
 
-		ret = -EIO;
+		ret = -BCH_ERR_btree_node_read_error;
 		goto err;
 	}
@@ -1665,12 +1751,11 @@ err:
 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
 			 const struct bkey_i *k, unsigned level)
 {
-	return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
-
+	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
 }
 
-void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
-			       struct btree_write *w)
+static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
+				      struct btree_write *w)
 {
 	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
@@ -1728,15 +1813,13 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
 
 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
-	struct btree_trans trans;
-
-	bch2_trans_init(&trans, c, 0, 0);
+	struct btree_trans *trans = bch2_trans_get(c);
 
-	btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read);
+	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
 	__btree_node_write_done(c, b);
 	six_unlock_read(&b->c.lock);
 
-	bch2_trans_exit(&trans);
+	bch2_trans_put(trans);
 }
 
 static void btree_node_write_work(struct work_struct *work)
@@ -1746,7 +1829,7 @@ static void btree_node_write_work(struct work_struct *work)
 	struct bch_fs *c	= wbio->wbio.c;
 	struct btree *b		= wbio->wbio.bio.bi_private;
 	struct bch_extent_ptr *ptr;
-	int ret;
+	int ret = 0;
 
 	btree_bounce_free(c,
 		wbio->data_bytes,
@@ -1756,8 +1839,10 @@ static void btree_node_write_work(struct work_struct *work)
 	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
 
-	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
+	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
+		ret = -BCH_ERR_btree_node_write_all_failed;
 		goto err;
+	}
 
 	if (wbio->wbio.first_btree_write) {
 		if (wbio->wbio.failed.nr) {
@@ -1765,8 +1850,12 @@ static void btree_node_write_work(struct work_struct *work)
 		}
 	} else {
 		ret = bch2_trans_do(c, NULL, NULL, 0,
-			bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
-							    !wbio->wbio.failed.nr));
+			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
+					BCH_WATERMARK_reclaim|
+					BCH_TRANS_COMMIT_journal_reclaim|
+					BCH_TRANS_COMMIT_no_enospc|
+					BCH_TRANS_COMMIT_no_check_rw,
+					!wbio->wbio.failed.nr));
 		if (ret)
 			goto err;
 	}
@@ -1776,7 +1865,8 @@ out:
 	return;
 err:
 	set_btree_node_noevict(b);
-	bch2_fs_fatal_error(c, "fatal error writing btree node");
+	if (!bch2_err_matches(ret, EROFS))
+		bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
 	goto out;
 }
@@ -1794,7 +1884,8 @@ static void btree_node_write_endio(struct bio *bio)
 	if (wbio->have_ioref)
 		bch2_latency_acct(ca, wbio->submit_time, WRITE);
 
-	if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
+	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+			       "btree write error: %s",
 			       bch2_blk_status_to_str(bio->bi_status)) ||
 	    bch2_meta_write_fault("btree")) {
 		spin_lock_irqsave(&c->btree_write_error_lock, flags);
@@ -1846,7 +1937,6 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 static void btree_write_submit(struct work_struct *work)
 {
 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
-	struct bch_extent_ptr *ptr;
 	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 
 	bkey_copy(&tmp.k, &wbio->key);
@@ -1865,7 +1955,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 	struct bset *i;
 	struct btree_node *bn = NULL;
 	struct btree_node_entry *bne = NULL;
-	struct sort_iter sort_iter;
+	struct sort_iter_stack sort_iter;
 	struct nonce nonce;
 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
 	u64 seq = 0;
@@ -1938,7 +2028,7 @@ do_write:
 
 	bch2_sort_whiteouts(c, b);
 
-	sort_iter_init(&sort_iter, b);
+	sort_iter_stack_init(&sort_iter, b);
 
 	bytes = !b->written
 		? sizeof(struct btree_node)
 		: sizeof(struct btree_node_entry);
@@ -1953,7 +2043,7 @@ do_write:
 			continue;
 
 		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
-		sort_iter_add(&sort_iter,
+		sort_iter_add(&sort_iter.iter,
 			      btree_bkey_first(b, t),
 			      btree_bkey_last(b, t));
 		seq = max(seq, le64_to_cpu(i->journal_seq));
@@ -1982,14 +2072,14 @@ do_write:
 	i->journal_seq	= cpu_to_le64(seq);
 	i->u64s		= 0;
 
-	sort_iter_add(&sort_iter,
-		      unwritten_whiteouts_start(c, b),
-		      unwritten_whiteouts_end(c, b));
+	sort_iter_add(&sort_iter.iter,
+		      unwritten_whiteouts_start(b),
+		      unwritten_whiteouts_end(b));
 	SET_BSET_SEPARATE_WHITEOUTS(i, false);
 
 	b->whiteout_u64s = 0;
 
-	u64s = bch2_sort_keys(i->start, &sort_iter, false);
+	u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
 	le16_add_cpu(&i->u64s, u64s);
 
 	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
@@ -2014,9 +2104,7 @@ do_write:
 	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
 	BUG_ON(i->seq != b->data->keys.seq);
 
-	i->version = c->sb.version < bcachefs_metadata_version_bkey_renumber
-		? cpu_to_le16(BCH_BSET_VERSION_OLD)
-		: cpu_to_le16(c->sb.version);
+	i->version = cpu_to_le16(c->sb.version);
 	SET_BSET_OFFSET(i, b->written);
 	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
@@ -2076,7 +2164,7 @@ do_write:
 	wbio = container_of(bio_alloc_bioset(NULL,
 				buf_pages(data, sectors_to_write << 9),
 				REQ_OP_WRITE|REQ_META,
-				GFP_NOIO,
+				GFP_NOFS,
 				&c->btree_bio),
 			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
@@ -2162,7 +2250,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
 
 	bne = want_new_bset(c, b);
 	if (bne)
-		bch2_bset_init_next(c, b, bne);
+		bch2_bset_init_next(b, bne);
 
 	bch2_btree_build_aux_trees(b);
 
@@ -2229,7 +2317,7 @@ bool bch2_btree_flush_all_writes(struct bch_fs *c)
 	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
 }
 
-const char * const bch2_btree_write_types[] = {
+static const char * const bch2_btree_write_types[] = {
#define x(t, n)	[n] = #t,
 	BCH_BTREE_WRITE_TYPES()
 	NULL
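
---

Notes on selected changes. Three small sketches follow; each is a simplified
userspace model written for illustration only -- every type, constant and
helper in them is a made-up stand-in, not the real bcachefs or kernel API,
and details may differ from the actual implementation.

1) validate_bset_keys(): when a key fails validation, the new
drop_this_key/got_good_key path no longer assumes the key's own length field
is usable. It tries k->u64s first, and if that does not land on something
that looks like a key -- and bch2_bkey_compat() has nothing to do -- it scans
forward one u64 at a time for the next plausible key, truncating the rest of
the bset only when the scan finds nothing. A rough model:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy packed key: first byte = size in u64s, second byte = format */
struct skey {
	uint8_t u64s;
	uint8_t format;
	uint8_t pad[6];
};

#define TOY_FORMAT_MAX 1

/* stand-in for __bkey_valid(): could a key plausibly start at @p? */
static int key_plausible(const uint64_t *p, const uint64_t *end)
{
	const struct skey *k = (const void *) p;

	return p < end &&
	       k->u64s &&
	       p + k->u64s <= end &&
	       k->format <= TOY_FORMAT_MAX;
}

/* drop the corrupt key at @k; returns how many u64s were removed */
static unsigned drop_key(uint64_t *k, uint64_t *end)
{
	unsigned next_good_key = ((struct skey *) k)->u64s;

	if (!next_good_key || !key_plausible(k + next_good_key, end)) {
		/* scan forward for the next offset that parses as a key */
		for (next_good_key = 1; k + next_good_key < end; next_good_key++)
			if (key_plausible(k + next_good_key, end))
				goto got_good_key;
		next_good_key = end - k;	/* nothing found: truncate the rest */
	}
got_good_key:
	memmove(k, k + next_good_key, (end - (k + next_good_key)) * sizeof(*k));
	return next_good_key;	/* caller shrinks the bset by this many u64s */
}

int main(void)
{
	uint64_t buf[8] = { 0 };
	struct skey *a = (void *) &buf[0];
	struct skey *b = (void *) &buf[2];
	struct skey *c = (void *) &buf[5];

	a->u64s = 2; a->format = 1;
	b->u64s = 0; b->format = 0xff;	/* corrupt: zero length, bad format */
	c->u64s = 3; c->format = 1;

	unsigned dropped = drop_key(&buf[2], buf + 8);
	printf("dropped %u u64s, next key u64s=%u\n",
	       dropped, ((struct skey *) &buf[2])->u64s);
	return 0;
}

Running this drops the corrupt middle key and slides the last key down,
printing "dropped 3 u64s, next key u64s=3".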
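2) __btree_err(): the old enum btree_err_type/BTREE_RETRY_READ pair is
replaced by dedicated negative error codes, every non-fixable error now also
bumps a per-error-type superblock counter (bch2_sb_error_count()), and
btree_err() stores the verdict in ret and unwinds via goto fsck_err whenever
the error was not fixed, instead of encoding "retry" as a magic positive
value. The control-flow shape, modelled with made-up codes:

#include <stdio.h>

enum {
	ERR_FIXABLE	= -1,	/* stand-ins for -BCH_ERR_btree_node_read_err_* */
	ERR_WANT_RETRY	= -2,
	ERR_MUST_RETRY	= -3,
	FSCK_FIX	= -100,	/* stand-in for -BCH_ERR_fsck_fix */
};

static unsigned sb_error_counts[8];	/* stand-in for bch2_sb_error_count() */

static int handle_btree_err(int ret, unsigned err_type, const char *msg)
{
	if (ret != ERR_FIXABLE)
		sb_error_counts[err_type]++;
	fprintf(stderr, "btree node error: %s\n", msg);
	/* fixable errors are repaired in place and demoted to "fixed" */
	return ret == ERR_FIXABLE ? FSCK_FIX : ret;
}

/* same shape as the new btree_err(): anything not fixed unwinds to fsck_err */
#define btree_err_on(cond, _ret, _err_type, msg)			\
do {									\
	if (cond) {							\
		int _r = handle_btree_err(_ret, _err_type, msg);	\
		if (_r != FSCK_FIX) {					\
			ret = _r;					\
			goto fsck_err;					\
		}							\
	}								\
} while (0)

static int validate(int bad_magic, int bad_csum)
{
	int ret = 0;

	btree_err_on(bad_magic, ERR_MUST_RETRY, 0, "bad magic");
	btree_err_on(bad_csum, ERR_FIXABLE, 1, "invalid checksum");
	return 0;	/* fixable errors were repaired, keep going */
fsck_err:
	return ret;	/* caller tries another replica on *_RETRY codes */
}

int main(void)
{
	printf("%d\n", validate(0, 1));	/* 0: checksum error fixed up */
	printf("%d\n", validate(1, 0));	/* -3: must retry another replica */
	return 0;
}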
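3) btree_node_read_all_replicas_done() is converted to the newer
CLOSURE_CALLBACK()/closure_type() helpers, which is why the synchronous
caller now passes &ra->cl.work rather than &ra->cl: the callback's public
signature takes the embedded work_struct, and the macros recover the
containing object. Roughly (simplified; the real macros live in the kernel's
closure library and do more):

#include <stddef.h>
#include <stdio.h>

struct work_struct { int pending; };
struct closure { struct work_struct work; };

/* simplified stand-ins for the kernel's CLOSURE_CALLBACK()/closure_type() */
#define CLOSURE_CALLBACK(name)	void name(struct work_struct *work)
#define closure_type(name, type, member)				\
	type *name = (type *) ((char *) work - offsetof(type, member.work))

struct btree_node_read_all {
	int nr;
	struct closure cl;
};

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);

	printf("done: nr=%d\n", ra->nr);
}

int main(void)
{
	struct btree_node_read_all ra = { .nr = 2 };

	/* matches the sync path in the patch: call on &cl->work */
	btree_node_read_all_replicas_done(&ra.cl.work);
	return 0;
}

A nearby, unrelated tightening in the same hunk area: the err[] array in
struct btree_node_read_all switches from int to blk_status_t, so bio
completion status is kept in its own type rather than a bare int.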