#include "journal_seq_blacklist.h"
#include "super-io.h"
+#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
+void bch2_btree_node_io_unlock(struct btree *b)
+{
+ EBUG_ON(!btree_node_write_in_flight(b));
+
+ clear_btree_node_write_in_flight_inner(b);
+ clear_btree_node_write_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+}
+
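+/*
+ * BTREE_NODE_write_in_flight doubles as a bit lock here: io_lock sleeps (in
+ * I/O wait state) until the bit is clear, then takes it; io_unlock clears it
+ * and wakes any waiters:
+ */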
+void bch2_btree_node_io_lock(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_read(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_write(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_read(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_write(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
static void verify_no_dups(struct btree *b,
struct bkey_packed *start,
- struct bkey_packed *end,
- bool extents)
+ struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
struct bkey_packed *k, *p;
if (start == end)
return;
- for (p = start, k = bkey_next_skip_noops(start, end);
+ for (p = start, k = bkey_p_next(start);
k != end;
- p = k, k = bkey_next_skip_noops(k, end)) {
+ p = k, k = bkey_p_next(k)) {
struct bkey l = bkey_unpack_key(b, p);
struct bkey r = bkey_unpack_key(b, k);
- BUG_ON(extents
- ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
- : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
- //BUG_ON(bkey_cmp_packed(&b->format, p, k) >= 0);
+ BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
}
#endif
}
{
struct bkey_packed *k;
- for (k = i->start;
- k != vstruct_last(i);
- k = bkey_next_skip_noops(k, vstruct_last(i)))
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
k->needs_whiteout = v;
}
-static void btree_bounce_free(struct bch_fs *c, unsigned order,
+static void btree_bounce_free(struct bch_fs *c, size_t size,
bool used_mempool, void *p)
{
if (used_mempool)
mempool_free(p, &c->btree_bounce_pool);
else
- vpfree(p, PAGE_SIZE << order);
+ vpfree(p, size);
}
-static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
+static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
bool *used_mempool)
{
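+ /*
+ * vmalloc doesn't honor GFP_NOFS for its internal allocations, so use the
+ * memalloc_nofs scope API instead to avoid recursing into fs reclaim:
+ */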
+ unsigned flags = memalloc_nofs_save();
void *p;
- BUG_ON(order > btree_page_order(c));
+ BUG_ON(size > btree_bytes(c));
*used_mempool = false;
- p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
- if (p)
- return p;
-
- *used_mempool = true;
- return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+ p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+ if (!p) {
+ *used_mempool = true;
+ p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+ }
+ memalloc_nofs_restore(flags);
+ return p;
}
static void sort_bkey_ptrs(const struct btree *bt,
break;
for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
- b = bkey_cmp_packed(bt,
+ b = bch2_bkey_cmp_packed(bt,
ptrs[c],
ptrs[d]) >= 0 ? c : d;
if (d == n)
b = c;
while (b != a &&
- bkey_cmp_packed(bt,
+ bch2_bkey_cmp_packed(bt,
ptrs[a],
ptrs[b]) >= 0)
b = (b - 1) / 2;
{
struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
bool used_mempool = false;
- unsigned order;
+ size_t bytes = b->whiteout_u64s * sizeof(u64);
if (!b->whiteout_u64s)
return;
- order = get_order(b->whiteout_u64s * sizeof(u64));
-
- new_whiteouts = btree_bounce_alloc(c, order, &used_mempool);
+ new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
- ptrs = ptrs_end = ((void *) new_whiteouts + (PAGE_SIZE << order));
+ ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
for (k = unwritten_whiteouts_start(c, b);
k != unwritten_whiteouts_end(c, b);
- k = bkey_next(k))
+ k = bkey_p_next(k))
*--ptrs = k;
sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
while (ptrs != ptrs_end) {
bkey_copy(k, *ptrs);
- k = bkey_next(k);
+ k = bkey_p_next(k);
ptrs++;
}
verify_no_dups(b, new_whiteouts,
- (void *) ((u64 *) new_whiteouts + b->whiteout_u64s),
- btree_node_old_extent_overwrite(b));
+ (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
memcpy_u64s(unwritten_whiteouts_start(c, b),
new_whiteouts, b->whiteout_u64s);
- btree_bounce_free(c, order, used_mempool, new_whiteouts);
+ btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}
static bool should_compact_bset(struct btree *b, struct bset_tree *t,
}
}
-static bool bch2_compact_extent_whiteouts(struct bch_fs *c,
- struct btree *b,
- enum compact_mode mode)
-{
- const struct bkey_format *f = &b->format;
- struct bset_tree *t;
- struct bkey_packed *whiteouts = NULL;
- struct bkey_packed *u_start, *u_pos;
- struct sort_iter sort_iter;
- unsigned order, whiteout_u64s = 0, u64s;
- bool used_mempool, compacting = false;
-
- BUG_ON(!btree_node_is_extents(b));
-
- for_each_bset(b, t)
- if (should_compact_bset(b, t, whiteout_u64s != 0, mode))
- whiteout_u64s += bset_dead_u64s(b, t);
-
- if (!whiteout_u64s)
- return false;
-
- bch2_sort_whiteouts(c, b);
-
- sort_iter_init(&sort_iter, b);
-
- whiteout_u64s += b->whiteout_u64s;
- order = get_order(whiteout_u64s * sizeof(u64));
-
- whiteouts = btree_bounce_alloc(c, order, &used_mempool);
- u_start = u_pos = whiteouts;
-
- memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
- b->whiteout_u64s);
- u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
-
- sort_iter_add(&sort_iter, u_start, u_pos);
-
- for_each_bset(b, t) {
- struct bset *i = bset(b, t);
- struct bkey_packed *k, *n, *out, *start, *end;
- struct btree_node_entry *src = NULL, *dst = NULL;
-
- if (t != b->set && !bset_written(b, i)) {
- src = container_of(i, struct btree_node_entry, keys);
- dst = max(write_block(b),
- (void *) btree_bkey_last(b, t - 1));
- }
-
- if (src != dst)
- compacting = true;
-
- if (!should_compact_bset(b, t, compacting, mode)) {
- if (src != dst) {
- memmove(dst, src, sizeof(*src) +
- le16_to_cpu(src->keys.u64s) *
- sizeof(u64));
- i = &dst->keys;
- set_btree_bset(b, t, i);
- }
- continue;
- }
-
- compacting = true;
- u_start = u_pos;
- start = i->start;
- end = vstruct_last(i);
-
- if (src != dst) {
- memmove(dst, src, sizeof(*src));
- i = &dst->keys;
- set_btree_bset(b, t, i);
- }
-
- out = i->start;
-
- for (k = start; k != end; k = n) {
- n = bkey_next_skip_noops(k, end);
-
- if (bkey_deleted(k))
- continue;
-
- BUG_ON(bkey_whiteout(k) &&
- k->needs_whiteout &&
- bkey_written(b, k));
-
- if (bkey_whiteout(k) && !k->needs_whiteout)
- continue;
-
- if (bkey_whiteout(k)) {
- memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
- set_bkeyp_val_u64s(f, u_pos, 0);
- u_pos = bkey_next(u_pos);
- } else {
- bkey_copy(out, k);
- out = bkey_next(out);
- }
- }
-
- sort_iter_add(&sort_iter, u_start, u_pos);
-
- i->u64s = cpu_to_le16((u64 *) out - i->_data);
- set_btree_bset_end(b, t);
- bch2_bset_set_no_aux_tree(b, t);
- }
-
- b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
-
- BUG_ON((void *) unwritten_whiteouts_start(c, b) <
- (void *) btree_bkey_last(b, bset_tree_last(b)));
-
- u64s = bch2_sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
- &sort_iter);
-
- BUG_ON(u64s > b->whiteout_u64s);
- BUG_ON(u_pos != whiteouts && !u64s);
-
- if (u64s != b->whiteout_u64s) {
- void *src = unwritten_whiteouts_start(c, b);
-
- b->whiteout_u64s = u64s;
- memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
- }
-
- verify_no_dups(b,
- unwritten_whiteouts_start(c, b),
- unwritten_whiteouts_end(c, b),
- true);
-
- btree_bounce_free(c, order, used_mempool, whiteouts);
-
- bch2_btree_build_aux_trees(b);
-
- bch_btree_keys_u64s_remaining(c, b);
- bch2_verify_btree_nr_keys(b);
-
- return true;
-}
-
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
struct bset_tree *t;
out = i->start;
for (k = start; k != end; k = n) {
- n = bkey_next_skip_noops(k, end);
+ n = bkey_p_next(k);
- if (!bkey_whiteout(k)) {
+ if (!bkey_deleted(k)) {
bkey_copy(out, k);
- out = bkey_next(out);
+ out = bkey_p_next(out);
} else {
BUG_ON(k->needs_whiteout);
}
bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
enum compact_mode mode)
{
- return !btree_node_old_extent_overwrite(b)
- ? bch2_drop_whiteouts(b, mode)
- : bch2_compact_extent_whiteouts(c, b, mode);
+ return bch2_drop_whiteouts(b, mode);
}
static void btree_node_sort(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter,
unsigned start_idx,
unsigned end_idx,
bool filter_whiteouts)
struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
u64 start_time, seq = 0;
- unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
+ unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
bool sorting_entire_node = start_idx == 0 &&
end_idx == b->nsets;
btree_bkey_last(b, t));
}
- order = sorting_entire_node
- ? btree_page_order(c)
- : get_order(__vstruct_bytes(struct btree_node, u64s));
+ bytes = sorting_entire_node
+ ? btree_bytes(c)
+ : __vstruct_bytes(struct btree_node, u64s);
- out = btree_bounce_alloc(c, order, &used_mempool);
+ out = btree_bounce_alloc(c, bytes, &used_mempool);
start_time = local_clock();
- if (btree_node_old_extent_overwrite(b))
- filter_whiteouts = bset_written(b, start_bset);
-
- u64s = (btree_node_old_extent_overwrite(b)
- ? bch2_sort_extents
- : bch2_sort_keys)(out->keys.start,
- &sort_iter,
- filter_whiteouts);
+ u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
out->keys.u64s = cpu_to_le16(u64s);
- BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
+ BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
if (sorting_entire_node)
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
if (sorting_entire_node) {
unsigned u64s = le16_to_cpu(out->keys.u64s);
- BUG_ON(order != btree_page_order(c));
+ BUG_ON(bytes != btree_bytes(c));
/*
* Our temporary buffer is the same size as the btree node's
set_btree_bset_end(b, &b->set[start_idx]);
bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
- btree_bounce_free(c, order, used_mempool, out);
+ btree_bounce_free(c, bytes, used_mempool, out);
bch2_verify_btree_nr_keys(b);
}
bch2_btree_node_iter_init_from_start(&src_iter, src);
- if (btree_node_is_extents(src))
- nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
- src, &src_iter,
- &dst->format,
- true);
- else
- nr = bch2_sort_repack(btree_bset_first(dst),
- src, &src_iter,
- &dst->format,
- true);
+ nr = bch2_sort_repack(btree_bset_first(dst),
+ src, &src_iter,
+ &dst->format,
+ true);
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
start_time);
* We're about to add another bset to the btree node, so if there's currently
* too many bsets - sort some of them together:
*/
-static bool btree_node_compact(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
unsigned unwritten_idx;
bool ret = false;
break;
if (b->nsets - unwritten_idx > 1) {
- btree_node_sort(c, b, iter, unwritten_idx,
+ btree_node_sort(c, b, unwritten_idx,
b->nsets, false);
ret = true;
}
if (unwritten_idx > 1) {
- btree_node_sort(c, b, iter, 0, unwritten_idx, false);
+ btree_node_sort(c, b, 0, unwritten_idx, false);
ret = true;
}
t == bset_tree_last(b));
}
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
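+ *
+ * (In log2 terms the geometric mean is just the average of the exponents:
+ * e.g. if the first bset holds on the order of 2^16 u64s and the last on the
+ * order of 2^9, the middle bset should stay under roughly 2^12 u64s. Numbers
+ * illustrative, not the actual tunables.)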
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+ unsigned mid_u64s_bits =
+ (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+ return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
+
/*
* @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
* inserted into
*
* If we sorted (i.e. invalidated iterators), this reinitializes them via
* bch2_trans_node_reinit_iter()
*/
-void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
+ struct bch_fs *c = trans->c;
struct btree_node_entry *bne;
- bool did_sort;
+ bool reinit_iter = false;
+
+ EBUG_ON(!(b->c.lock.state.seq & 1));
+ BUG_ON(bset_written(b, bset(b, &b->set[1])));
+ BUG_ON(btree_node_just_written(b));
+
+ if (b->nsets == MAX_BSETS &&
+ !btree_node_write_in_flight(b) &&
+ should_compact_all(c, b)) {
+ bch2_btree_node_write(c, b, SIX_LOCK_write,
+ BTREE_WRITE_init_next_bset);
+ reinit_iter = true;
+ }
- EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->l[b->level].b != b);
+ if (b->nsets == MAX_BSETS &&
+ btree_node_compact(c, b))
+ reinit_iter = true;
- did_sort = btree_node_compact(c, b, iter);
+ BUG_ON(b->nsets >= MAX_BSETS);
bne = want_new_bset(c, b);
if (bne)
bch2_btree_build_aux_trees(b);
- if (iter && did_sort)
- bch2_btree_iter_reinit_node(iter, b);
-}
-
-static struct nonce btree_nonce(struct bset *i, unsigned offset)
-{
- return (struct nonce) {{
- [0] = cpu_to_le32(offset),
- [1] = ((__le32 *) &i->seq)[0],
- [2] = ((__le32 *) &i->seq)[1],
- [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
- }};
+ if (reinit_iter)
+ bch2_trans_node_reinit_iter(trans, b);
}
-static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
+static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
+ struct btree *b)
{
- struct nonce nonce = btree_nonce(i, offset);
-
- if (!offset) {
- struct btree_node *bn = container_of(i, struct btree_node, keys);
- unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
-
- bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
- bytes);
-
- nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
- }
-
- bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
- vstruct_end(i) - (void *) i->_data);
+ prt_printf(out, "%s level %u/%u\n ",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level,
+ c->btree_roots[b->c.btree_id].level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
+ struct bch_dev *ca,
struct btree *b, struct bset *i,
unsigned offset, int write)
{
- pr_buf(out, "error validating btree node %s"
- "at btree %u level %u/%u\n"
- "pos %llu:%llu node offset %u",
- write ? "before write " : "",
- b->btree_id, b->level,
- c->btree_roots[b->btree_id].level,
- b->key.k.p.inode, b->key.k.p.offset,
- b->written);
+ prt_printf(out, bch2_log_msg(c, "%s"),
+ write == READ
+ ? "error validating btree node "
+ : "corrupt btree node before write ");
+ if (ca)
+ prt_printf(out, "on %s ", ca->name);
+ prt_printf(out, "at btree ");
+ btree_pos_to_text(out, c, b);
+
+ prt_printf(out, "\n node offset %u", b->written);
if (i)
- pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+ prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+ prt_str(out, ": ");
}
enum btree_err_type {
+ /*
+ * We can repair this locally, and we're after the checksum check so
+ * there's no need to try another replica:
+ */
BTREE_ERR_FIXABLE,
+ /*
+ * We can repair this if we have to, but we should try reading another
+ * replica if we can:
+ */
BTREE_ERR_WANT_RETRY,
+ /*
+ * Read another replica if we have one, otherwise consider the whole
+ * node bad:
+ */
BTREE_ERR_MUST_RETRY,
- BTREE_ERR_FATAL,
+ BTREE_ERR_BAD_NODE,
+ BTREE_ERR_INCOMPATIBLE,
};
enum btree_validate_ret {
BTREE_RETRY_READ = 64,
};
-#define btree_err(type, c, b, i, msg, ...) \
+static int __btree_err(enum btree_err_type type,
+ struct bch_fs *c,
+ struct bch_dev *ca,
+ struct btree *b,
+ struct bset *i,
+ int write,
+ bool have_retry,
+ const char *fmt, ...)
+{
+ struct printbuf out = PRINTBUF;
+ va_list args;
+ int ret = -BCH_ERR_fsck_fix;
+
+ btree_err_msg(&out, c, ca, b, i, b->written, write);
+
+ va_start(args, fmt);
+ prt_vprintf(&out, fmt, args);
+ va_end(args);
+
+ if (write == WRITE) {
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ ret = c->opts.errors == BCH_ON_ERROR_continue
+ ? 0
+ : -BCH_ERR_fsck_errors_not_fixed;
+ goto out;
+ }
+
+ if (!have_retry && type == BTREE_ERR_WANT_RETRY)
+ type = BTREE_ERR_FIXABLE;
+ if (!have_retry && type == BTREE_ERR_MUST_RETRY)
+ type = BTREE_ERR_BAD_NODE;
+
+ switch (type) {
+ case BTREE_ERR_FIXABLE:
+ mustfix_fsck_err(c, "%s", out.buf);
+ ret = -BCH_ERR_fsck_fix;
+ break;
+ case BTREE_ERR_WANT_RETRY:
+ case BTREE_ERR_MUST_RETRY:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ ret = BTREE_RETRY_READ;
+ break;
+ case BTREE_ERR_BAD_NODE:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ bch2_topology_error(c);
+ ret = -BCH_ERR_need_topology_repair;
+ break;
+ case BTREE_ERR_INCOMPATIBLE:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ ret = -BCH_ERR_fsck_errors_not_fixed;
+ break;
+ default:
+ BUG();
+ }
+out:
+fsck_err:
+ printbuf_exit(&out);
+ return ret;
+}
+
+#define btree_err(type, c, ca, b, i, msg, ...) \
({ \
- __label__ out; \
- char _buf[300]; \
- struct printbuf out = PBUF(_buf); \
- \
- btree_err_msg(&out, c, b, i, b->written, write); \
- pr_buf(&out, ": " msg, ##__VA_ARGS__); \
- \
- if (type == BTREE_ERR_FIXABLE && \
- write == READ && \
- !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
- mustfix_fsck_err(c, "%s", _buf); \
- goto out; \
- } \
- \
- switch (write) { \
- case READ: \
- bch_err(c, "%s", _buf); \
+ int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
\
- switch (type) { \
- case BTREE_ERR_FIXABLE: \
- ret = BCH_FSCK_ERRORS_NOT_FIXED; \
- goto fsck_err; \
- case BTREE_ERR_WANT_RETRY: \
- if (have_retry) { \
- ret = BTREE_RETRY_READ; \
- goto fsck_err; \
- } \
- break; \
- case BTREE_ERR_MUST_RETRY: \
- ret = BTREE_RETRY_READ; \
- goto fsck_err; \
- case BTREE_ERR_FATAL: \
- ret = BCH_FSCK_ERRORS_NOT_FIXED; \
- goto fsck_err; \
- } \
- break; \
- case WRITE: \
- bch_err(c, "corrupt metadata before write: %s", _buf); \
- \
- if (bch2_fs_inconsistent(c)) { \
- ret = BCH_FSCK_ERRORS_NOT_FIXED; \
- goto fsck_err; \
- } \
- break; \
- } \
-out: \
- true; \
+ if (_ret != -BCH_ERR_fsck_fix) \
+ goto fsck_err; \
+ *saw_error = true; \
})
#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
-static int validate_bset(struct bch_fs *c, struct btree *b,
- struct bset *i, unsigned sectors,
- unsigned *whiteout_u64s, int write,
- bool have_retry)
+/*
+ * When btree topology repair changes the start or end of a node, that might
+ * mean we have to drop keys that are no longer inside the node:
+ */
+__cold
+void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
- struct bkey_packed *k, *prev = NULL;
- bool seen_non_whiteout = false;
- unsigned version;
- const char *err;
- int ret = 0;
+ struct bset_tree *t;
+ struct bkey_s_c k;
+ struct bkey unpacked;
+ struct btree_node_iter iter;
- if (!b->written) {
- /* These indicate that we read the wrong btree node: */
- btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
- BTREE_ERR_MUST_RETRY, c, b, i,
- "incorrect btree id");
+ for_each_bset(b, t) {
+ struct bset *i = bset(b, t);
+ struct bkey_packed *k;
- btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
- BTREE_ERR_MUST_RETRY, c, b, i,
- "incorrect level");
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
+ break;
- if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
- u64 *p = (u64 *) &b->data->ptr;
+ if (k != i->start) {
+ unsigned shift = (u64 *) k - (u64 *) i->start;
- *p = swab64(*p);
- bch2_bpos_swab(&b->data->min_key);
- bch2_bpos_swab(&b->data->max_key);
+ memmove_u64s_down(i->start, k,
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
+ set_btree_bset_end(b, t);
}
- if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
- struct bch_btree_ptr_v2 *bp =
- &bkey_i_to_btree_ptr_v2(&b->key)->v;
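+ /* Truncate at the first key past the new max_key: */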
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
+ break;
- btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
- BTREE_ERR_MUST_RETRY, c, b, NULL,
- "incorrect min_key");
+ if (k != vstruct_last(i)) {
+ i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
+ set_btree_bset_end(b, t);
}
+ }
- btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
- BTREE_ERR_MUST_RETRY, c, b, i,
- "incorrect max key");
+ /*
+ * Always rebuild search trees: eytzinger search tree nodes directly
+ * depend on the values of min/max key:
+ */
+ bch2_bset_set_no_aux_tree(b, b->set);
+ bch2_btree_build_aux_trees(b);
- /* XXX: ideally we would be validating min_key too */
-#if 0
- /*
- * not correct anymore, due to btree node write error
- * handling
- *
- * need to add b->data->seq to btree keys and verify
- * against that
- */
- btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
- b->data->ptr),
- BTREE_ERR_FATAL, c, b, i,
- "incorrect backpointer");
-#endif
- err = bch2_bkey_format_validate(&b->data->format);
- btree_err_on(err,
- BTREE_ERR_FATAL, c, b, i,
- "invalid bkey format: %s", err);
+ for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
}
+}
+
+static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
+ struct btree *b, struct bset *i,
+ unsigned offset, unsigned sectors,
+ int write, bool have_retry, bool *saw_error)
+{
+ unsigned version = le16_to_cpu(i->version);
+ const char *err;
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ int ret = 0;
- version = le16_to_cpu(i->version);
btree_err_on((version != BCH_BSET_VERSION_OLD &&
version < bcachefs_metadata_version_min) ||
version >= bcachefs_metadata_version_max,
- BTREE_ERR_FATAL, c, b, i,
+ BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
"unsupported bset version");
- if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
- BTREE_ERR_FIXABLE, c, b, i,
+ if (btree_err_on(version < c->sb.version_min,
+ BTREE_ERR_FIXABLE, c, NULL, b, i,
+ "bset version %u older than superblock version_min %u",
+ version, c->sb.version_min)) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version_min = cpu_to_le16(version);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (btree_err_on(version > c->sb.version,
+ BTREE_ERR_FIXABLE, c, NULL, b, i,
+ "bset version %u newer than superblock version %u",
+ version, c->sb.version)) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version = cpu_to_le16(version);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
+ BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
+ "BSET_SEPARATE_WHITEOUTS no longer supported");
+
+ if (btree_err_on(offset + sectors > btree_sectors(c),
+ BTREE_ERR_FIXABLE, c, ca, b, i,
"bset past end of btree node")) {
i->u64s = 0;
- return 0;
+ ret = 0;
+ goto out;
}
- btree_err_on(b->written && !i->u64s,
- BTREE_ERR_FIXABLE, c, b, i,
+ btree_err_on(offset && !i->u64s,
+ BTREE_ERR_FIXABLE, c, ca, b, i,
"empty bset");
- if (!BSET_SEPARATE_WHITEOUTS(i)) {
- seen_non_whiteout = true;
- *whiteout_u64s = 0;
+ btree_err_on(BSET_OFFSET(i) &&
+ BSET_OFFSET(i) != offset,
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
+ "bset at wrong sector offset");
+
+ if (!offset) {
+ struct btree_node *bn =
+ container_of(i, struct btree_node, keys);
+ /* These indicate that we read the wrong btree node: */
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ /* XXX endianness */
+ btree_err_on(bp->seq != bn->keys.seq,
+ BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+ "incorrect sequence number (wrong btree node)");
+ }
+
+ btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
+ BTREE_ERR_MUST_RETRY, c, ca, b, i,
+ "incorrect btree id");
+
+ btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
+ BTREE_ERR_MUST_RETRY, c, ca, b, i,
+ "incorrect level");
+
+ if (!write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ if (BTREE_PTR_RANGE_UPDATED(bp)) {
+ b->data->min_key = bp->min_key;
+ b->data->max_key = b->key.k.p;
+ }
+
+ btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
+ BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+ "incorrect min_key: got %s should be %s",
+ (printbuf_reset(&buf1),
+ bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
+ (printbuf_reset(&buf2),
+ bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
+ }
+
+ btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
+ BTREE_ERR_MUST_RETRY, c, ca, b, i,
+ "incorrect max key %s",
+ (printbuf_reset(&buf1),
+ bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
+
+ if (write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
+ err = bch2_bkey_format_validate(&bn->format);
+ btree_err_on(err,
+ BTREE_ERR_BAD_NODE, c, ca, b, i,
+ "invalid bkey format: %s", err);
+
+ compat_bformat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &bn->format);
}
+out:
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+static int bset_key_invalid(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k,
+ bool updated_range, int rw,
+ struct printbuf *err)
+{
+ return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
+ (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+ (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
+}
+
+static int validate_bset_keys(struct bch_fs *c, struct btree *b,
+ struct bset *i, int write,
+ bool have_retry, bool *saw_error)
+{
+ unsigned version = le16_to_cpu(i->version);
+ struct bkey_packed *k, *prev = NULL;
+ struct printbuf buf = PRINTBUF;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
+ int ret = 0;
for (k = i->start;
k != vstruct_last(i);) {
struct bkey_s u;
struct bkey tmp;
- const char *invalid;
- if (btree_err_on(bkey_next(k) > vstruct_last(i),
- BTREE_ERR_FIXABLE, c, b, i,
+ if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
+ BTREE_ERR_FIXABLE, c, NULL, b, i,
"key extends past end of bset")) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
break;
}
if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
- BTREE_ERR_FIXABLE, c, b, i,
+ BTREE_ERR_FIXABLE, c, NULL, b, i,
"invalid bkey format %u", k->format)) {
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
- memmove_u64s_down(k, bkey_next(k),
+ memmove_u64s_down(k, bkey_p_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
continue;
}
- if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
- bch2_bkey_swab_key(&b->format, k);
-
- if (!write &&
- version < bcachefs_metadata_version_bkey_renumber)
- bch2_bkey_renumber(btree_node_type(b), k, write);
+ /* XXX: validate k->u64s */
+ if (!write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
u = __bkey_disassemble(b, k, &tmp);
- if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
- bch2_bkey_swab_val(u);
+ printbuf_reset(&buf);
+ if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "invalid bkey: ");
+ bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, u.s_c);
- invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
- bch2_bkey_in_btree_node(b, u.s_c) ?:
- (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
- if (invalid) {
- char buf[160];
-
- bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
- btree_err(BTREE_ERR_FIXABLE, c, b, i,
- "invalid bkey:\n%s\n%s", invalid, buf);
+ btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
- memmove_u64s_down(k, bkey_next(k),
+ memmove_u64s_down(k, bkey_p_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
continue;
}
- if (write &&
- version < bcachefs_metadata_version_bkey_renumber)
- bch2_bkey_renumber(btree_node_type(b), k, write);
-
- /*
- * with the separate whiteouts thing (used for extents), the
- * second set of keys actually can have whiteouts too, so we
- * can't solely go off bkey_whiteout()...
- */
+ if (write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
- if (!seen_non_whiteout &&
- (!bkey_whiteout(k) ||
- (prev && bkey_iter_cmp(b, prev, k) > 0))) {
- *whiteout_u64s = k->_data - i->_data;
- seen_non_whiteout = true;
- } else if (prev && bkey_iter_cmp(b, prev, k) > 0) {
- char buf1[80];
- char buf2[80];
+ if (prev && bkey_iter_cmp(b, prev, k) > 0) {
struct bkey up = bkey_unpack_key(b, prev);
- bch2_bkey_to_text(&PBUF(buf1), &up);
- bch2_bkey_to_text(&PBUF(buf2), u.k);
+ printbuf_reset(&buf);
+ prt_printf(&buf, "keys out of order: ");
+ bch2_bkey_to_text(&buf, &up);
+ prt_printf(&buf, " > ");
+ bch2_bkey_to_text(&buf, u.k);
- bch2_dump_bset(b, i, 0);
- btree_err(BTREE_ERR_FATAL, c, b, i,
- "keys out of order: %s > %s",
- buf1, buf2);
- /* XXX: repair this */
+ bch2_dump_bset(c, b, i, 0);
+
+ if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_p_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ continue;
+ }
}
prev = k;
- k = bkey_next_skip_noops(k, vstruct_last(i));
+ k = bkey_p_next(k);
}
-
- SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
fsck_err:
+ printbuf_exit(&buf);
return ret;
}
-int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
+int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
+ struct btree *b, bool have_retry, bool *saw_error)
{
struct btree_node_entry *bne;
struct sort_iter *iter;
struct btree_node *sorted;
struct bkey_packed *k;
+ struct bch_extent_ptr *ptr;
struct bset *i;
bool used_mempool, blacklisted;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
unsigned u64s;
- int ret, retry_read = 0, write = READ;
+ unsigned blacklisted_written, nonblacklisted_written = 0;
+ unsigned ptr_written = btree_ptr_sectors_written(&b->key);
+ struct printbuf buf = PRINTBUF;
+ int ret = 0, retry_read = 0, write = READ;
+
+ b->version_ondisk = U16_MAX;
+ /* We might get called multiple times on read retry: */
+ b->written = 0;
iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
sort_iter_init(iter, b);
iter->size = (btree_blocks(c) + 1) * 2;
if (bch2_meta_read_fault("btree"))
- btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
+ btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
"dynamic fault");
btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
- BTREE_ERR_MUST_RETRY, c, b, NULL,
- "bad magic");
+ BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+ "bad magic: want %llx, got %llx",
+ bset_magic(c), le64_to_cpu(b->data->magic));
btree_err_on(!b->data->keys.seq,
- BTREE_ERR_MUST_RETRY, c, b, NULL,
- "bad btree header");
+ BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+ "bad btree header: seq 0");
if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
struct bch_btree_ptr_v2 *bp =
&bkey_i_to_btree_ptr_v2(&b->key)->v;
btree_err_on(b->data->keys.seq != bp->seq,
- BTREE_ERR_MUST_RETRY, c, b, NULL,
- "got wrong btree node");
+ BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
+ "got wrong btree node (seq %llx want %llx)",
+ b->data->keys.seq, bp->seq);
}
- while (b->written < c->opts.btree_node_size) {
- unsigned sectors, whiteout_u64s = 0;
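+ /*
+ * If the pointer records how many sectors were written (btree_ptr_v2), read
+ * exactly that much; otherwise scan to the end of the node:
+ */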
+ while (b->written < (ptr_written ?: btree_sectors(c))) {
+ unsigned sectors;
struct nonce nonce;
struct bch_csum csum;
bool first = !b->written;
i = &b->data->keys;
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- BTREE_ERR_WANT_RETRY, c, b, i,
- "unknown checksum type");
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
+ "unknown checksum type %llu",
+ BSET_CSUM_TYPE(i));
nonce = btree_nonce(i, b->written << 9);
csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
btree_err_on(bch2_crc_cmp(csum, b->data->csum),
- BTREE_ERR_WANT_RETRY, c, b, i,
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
"invalid checksum");
- bset_encrypt(c, i, b->written << 9);
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error decrypting btree node: %i", ret))
+ goto fsck_err;
- if (btree_node_is_extents(b) &&
- !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data))
- set_btree_node_old_extent_overwrite(b);
+ btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
+ !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
+ BTREE_ERR_INCOMPATIBLE, c, NULL, b, NULL,
+ "btree node does not have NEW_EXTENT_OVERWRITE set");
sectors = vstruct_sectors(b->data, c->block_bits);
-
- btree_node_set_format(b, b->data->format);
} else {
bne = write_block(b);
i = &bne->keys;
break;
btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
- BTREE_ERR_WANT_RETRY, c, b, i,
- "unknown checksum type");
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
+ "unknown checksum type %llu",
+ BSET_CSUM_TYPE(i));
nonce = btree_nonce(i, b->written << 9);
csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
btree_err_on(bch2_crc_cmp(csum, bne->csum),
- BTREE_ERR_WANT_RETRY, c, b, i,
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
"invalid checksum");
- bset_encrypt(c, i, b->written << 9);
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error decrypting btree node: %i\n", ret))
+ goto fsck_err;
sectors = vstruct_sectors(bne, c->block_bits);
}
- ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
- READ, have_retry);
+ b->version_ondisk = min(b->version_ondisk,
+ le16_to_cpu(i->version));
+
+ ret = validate_bset(c, ca, b, i, b->written, sectors,
+ READ, have_retry, saw_error);
if (ret)
goto fsck_err;
- b->written += sectors;
+ if (!b->written)
+ btree_node_set_format(b, b->data->format);
+
+ ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
+ if (ret)
+ goto fsck_err;
+
+ SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
blacklisted = bch2_journal_seq_is_blacklisted(c,
le64_to_cpu(i->journal_seq),
true);
btree_err_on(blacklisted && first,
- BTREE_ERR_FIXABLE, c, b, i,
- "first btree node bset has blacklisted journal seq");
+ BTREE_ERR_FIXABLE, c, ca, b, i,
+ "first btree node bset has blacklisted journal seq (%llu)",
+ le64_to_cpu(i->journal_seq));
+
+ btree_err_on(blacklisted && ptr_written,
+ BTREE_ERR_FIXABLE, c, ca, b, i,
+ "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
+ le64_to_cpu(i->journal_seq),
+ b->written, b->written + sectors, ptr_written);
+
+ b->written += sectors;
+
if (blacklisted && !first)
continue;
- sort_iter_add(iter, i->start,
- vstruct_idx(i, whiteout_u64s));
-
sort_iter_add(iter,
- vstruct_idx(i, whiteout_u64s),
+ vstruct_idx(i, 0),
vstruct_last(i));
+
+ nonblacklisted_written = b->written;
}
- for (bne = write_block(b);
- bset_byte_offset(b, bne) < btree_bytes(c);
- bne = (void *) bne + block_bytes(c))
- btree_err_on(bne->keys.seq == b->data->keys.seq,
- BTREE_ERR_WANT_RETRY, c, b, NULL,
- "found bset signature after last bset");
+ if (ptr_written) {
+ btree_err_on(b->written < ptr_written,
+ BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+ "btree node data missing: expected %u sectors, found %u",
+ ptr_written, b->written);
+ } else {
+ for (bne = write_block(b);
+ bset_byte_offset(b, bne) < btree_bytes(c);
+ bne = (void *) bne + block_bytes(c))
+ btree_err_on(bne->keys.seq == b->data->keys.seq &&
+ !bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq),
+ true),
+ BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+ "found bset signature after last bset");
- sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
+ /*
+ * Blacklisted bsets are those that were written after the most recent
+ * (flush) journal write. Since there wasn't a flush, they may not have
+ * made it to all devices - which means we shouldn't write new bsets
+ * after them, as that could leave a gap and then reads from that device
+ * wouldn't find all the bsets in that btree node - which means it's
+ * important that we start writing new bsets after the most recent _non_
+ * blacklisted bset:
+ */
+ blacklisted_written = b->written;
+ b->written = nonblacklisted_written;
+ }
+
+ sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
sorted->keys.u64s = 0;
set_btree_bset(b, b->set, &b->data->keys);
- b->nr = (btree_node_old_extent_overwrite(b)
- ? bch2_extent_sort_fix_overlapping
- : bch2_key_sort_fix_overlapping)(c, &sorted->keys, iter);
+ b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data;
BUG_ON(b->nr.live_u64s != u64s);
- btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
+ btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+
+ if (updated_range)
+ bch2_btree_node_drop_keys_outside_node(b);
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
struct bkey tmp;
struct bkey_s u = __bkey_disassemble(b, k, &tmp);
- const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
- if (invalid ||
- (inject_invalid_keys(c) &&
+ printbuf_reset(&buf);
+
+ if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
+ (bch2_inject_invalid_keys &&
!bversion_cmp(u.k->version, MAX_VERSION))) {
- char buf[160];
+ printbuf_reset(&buf);
+
+ prt_printf(&buf, "invalid bkey: ");
+ bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, u.s_c);
- bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
- btree_err(BTREE_ERR_FIXABLE, c, b, i,
- "invalid bkey %s: %s", buf, invalid);
+ btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
btree_keys_account_key_drop(&b->nr, 0, k);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
- memmove_u64s_down(k, bkey_next(k),
+ memmove_u64s_down(k, bkey_p_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
set_btree_bset_end(b, b->set);
continue;
bp.v->mem_ptr = 0;
}
- k = bkey_next_skip_noops(k, vstruct_last(i));
+ k = bkey_p_next(k);
}
bch2_bset_build_aux_tree(b, b->set, false);
set_needs_whiteout(btree_bset_first(b), true);
btree_node_reset_sib_u64s(b);
+
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ if (ca->mi.state != BCH_MEMBER_STATE_rw)
+ set_btree_node_need_rewrite(b);
+ }
+
+ if (!ptr_written)
+ set_btree_node_need_rewrite(b);
out:
mempool_free(iter, &c->fill_iter);
+ printbuf_exit(&buf);
return retry_read;
fsck_err:
- if (ret == BTREE_RETRY_READ) {
+ if (ret == BTREE_RETRY_READ)
retry_read = 1;
- } else {
- bch2_inconsistent_error(c);
+ else
set_btree_node_read_error(b);
- }
goto out;
}
struct btree_read_bio *rb =
container_of(work, struct btree_read_bio, work);
struct bch_fs *c = rb->c;
+ struct btree *b = rb->b;
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
- struct btree *b = rb->bio.bi_private;
struct bio *bio = &rb->bio;
struct bch_io_failures failed = { .nr = 0 };
+ struct printbuf buf = PRINTBUF;
+ bool saw_error = false;
+ bool retry = false;
bool can_retry;
goto start;
while (1) {
+ retry = true;
bch_info(c, "retrying read");
ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
- bio_reset(bio);
- bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
+ bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
bio->bi_iter.bi_size = btree_bytes(c);
bio->bi_status = BLK_STS_REMOVED;
}
start:
- bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
+ printbuf_reset(&buf);
+ btree_pos_to_text(&buf, c, b);
+ bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
+ bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
percpu_ref_put(&ca->io_ref);
rb->have_ioref = false;
&failed, &rb->pick) > 0;
if (!bio->bi_status &&
- !bch2_btree_node_read_done(c, b, can_retry))
+ !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
+ if (retry)
+ bch_info(c, "retry success");
break;
+ }
+
+ saw_error = true;
if (!can_retry) {
set_btree_node_read_error(b);
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
rb->start_time);
bio_put(&rb->bio);
+ printbuf_exit(&buf);
+
+ if (saw_error && !btree_node_read_error(b)) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, b->key.k.p);
+ bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
+ __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+ printbuf_exit(&buf);
+
+ bch2_btree_node_rewrite_async(c, b);
+ }
+
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
+ bch2_latency_acct(ca, rb->start_time, READ);
+ }
+
+ queue_work(c->io_complete_wq, &rb->work);
+}
+
+struct btree_node_read_all {
+ struct closure cl;
+ struct bch_fs *c;
+ struct btree *b;
+ unsigned nr;
+ void *buf[BCH_REPLICAS_MAX];
+ struct bio *bio[BCH_REPLICAS_MAX];
+ int err[BCH_REPLICAS_MAX];
+};
+
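+/*
+ * Returns the number of sectors of contiguous bsets belonging to this node,
+ * or 0 if the header doesn't match:
+ */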
+static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+ unsigned offset = 0;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c))
+ return 0;
+
+ while (offset < btree_sectors(c)) {
+ if (!offset) {
+ offset += vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = data + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ offset += vstruct_sectors(bne, c->block_bits);
+ }
+ }
+
+ return offset;
+}
+
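+/*
+ * True if a bset with this node's seq exists past @offset - i.e. there's
+ * leftover data beyond the last good bset:
+ */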
+static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+
+ if (!offset)
+ return false;
+
+ while (offset < btree_sectors(c)) {
+ bne = data + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq)
+ return true;
+ offset++;
+ }
+
+ return false;
+}
+
+static void btree_node_read_all_replicas_done(struct closure *cl)
+{
+ struct btree_node_read_all *ra =
+ container_of(cl, struct btree_node_read_all, cl);
+ struct bch_fs *c = ra->c;
+ struct btree *b = ra->b;
+ struct printbuf buf = PRINTBUF;
+ bool dump_bset_maps = false;
+ bool have_retry = false;
+ int ret = 0, best = -1, write = READ;
+ unsigned i, written = 0, written2 = 0;
+ __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
+ ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
+ bool _saw_error = false, *saw_error = &_saw_error;
+
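+ /*
+ * Compare the replicas we read: pick the one with the most data written,
+ * flagging any mismatch between them for repair:
+ */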
+ for (i = 0; i < ra->nr; i++) {
+ struct btree_node *bn = ra->buf[i];
+
+ if (ra->err[i])
+ continue;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c) ||
+ (seq && seq != bn->keys.seq))
+ continue;
+
+ if (best < 0) {
+ best = i;
+ written = btree_node_sectors_written(c, bn);
+ continue;
+ }
+
+ written2 = btree_node_sectors_written(c, ra->buf[i]);
+ if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "btree node sectors written mismatch: %u != %u",
+ written, written2) ||
+ btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
+ BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "found bset signature after last bset") ||
+ btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
+ BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "btree node replicas content mismatch"))
+ dump_bset_maps = true;
+
+ if (written2 > written) {
+ written = written2;
+ best = i;
+ }
+ }
+fsck_err:
+ if (dump_bset_maps) {
+ for (i = 0; i < ra->nr; i++) {
+ struct btree_node *bn = ra->buf[i];
+ struct btree_node_entry *bne = NULL;
+ unsigned offset = 0, sectors;
+ bool gap = false;
+
+ if (ra->err[i])
+ continue;
+
+ printbuf_reset(&buf);
+
+ while (offset < btree_sectors(c)) {
+ if (!offset) {
+ sectors = vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ sectors = vstruct_sectors(bne, c->block_bits);
+ }
+
+ prt_printf(&buf, " %u-%u", offset, offset + sectors);
+ if (bne && bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ prt_printf(&buf, "*");
+ offset += sectors;
+ }
+
+ while (offset < btree_sectors(c)) {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq) {
+ if (!gap)
+ prt_printf(&buf, " GAP");
+ gap = true;
+
+ sectors = vstruct_sectors(bne, c->block_bits);
+ prt_printf(&buf, " %u-%u", offset, offset + sectors);
+ if (bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ prt_printf(&buf, "*");
+ }
+ offset++;
+ }
+
+ bch_err(c, "replica %u:%s", i, buf.buf);
+ }
+ }
+
+ if (best >= 0) {
+ memcpy(b->data, ra->buf[best], btree_bytes(c));
+ ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
+ } else {
+ ret = -1;
+ }
+
+ if (ret)
+ set_btree_node_read_error(b);
+ else if (*saw_error)
+ bch2_btree_node_rewrite_async(c, b);
+
+ for (i = 0; i < ra->nr; i++) {
+ mempool_free(ra->buf[i], &c->btree_bounce_pool);
+ bio_put(ra->bio[i]);
+ }
+
+ closure_debug_destroy(&ra->cl);
+ kfree(ra);
+ printbuf_exit(&buf);
+
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+}
+
+static void btree_node_read_all_replicas_endio(struct bio *bio)
+{
+ struct btree_read_bio *rb =
+ container_of(bio, struct btree_read_bio, bio);
+ struct bch_fs *c = rb->c;
+ struct btree_node_read_all *ra = rb->ra;
+
+ if (rb->have_ioref) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
bch2_latency_acct(ca, rb->start_time, READ);
}
- queue_work(system_unbound_wq, &rb->work);
+ ra->err[rb->idx] = bio->bi_status;
+ closure_put(&ra->cl);
+}
+
+/*
+ * XXX This allocates multiple times from the same mempools, and can deadlock
+ * under sufficient memory pressure (but is only a debug path)
+ */
+static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
+{
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded pick;
+ struct btree_node_read_all *ra;
+ unsigned i;
+
+ ra = kzalloc(sizeof(*ra), GFP_NOFS);
+ if (!ra)
+ return -ENOMEM;
+
+ closure_init(&ra->cl, NULL);
+ ra->c = c;
+ ra->b = b;
+ ra->nr = bch2_bkey_nr_ptrs(k);
+
+ for (i = 0; i < ra->nr; i++) {
+ ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+ ra->bio[i] = bio_alloc_bioset(NULL,
+ buf_pages(ra->buf[i], btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio);
+ }
+
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct btree_read_bio *rb =
+ container_of(ra->bio[i], struct btree_read_bio, bio);
+ rb->c = c;
+ rb->b = b;
+ rb->ra = ra;
+ rb->start_time = local_clock();
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->idx = i;
+ rb->pick = pick;
+ rb->bio.bi_iter.bi_sector = pick.ptr.offset;
+ rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
+ bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+
+ if (rb->have_ioref) {
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
+ bio_sectors(&rb->bio));
+ bio_set_dev(&rb->bio, ca->disk_sb.bdev);
+
+ closure_get(&ra->cl);
+ submit_bio(&rb->bio);
+ } else {
+ ra->err[i] = BLK_STS_REMOVED;
+ }
+
+ i++;
+ }
+
+ if (sync) {
+ closure_sync(&ra->cl);
+ btree_node_read_all_replicas_done(&ra->cl);
+ } else {
+ continue_at(&ra->cl, btree_node_read_all_replicas_done,
+ c->io_complete_wq);
+ }
+
+ return 0;
}
void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
struct bio *bio;
int ret;
- trace_btree_read(c, b);
+ trace_and_count(c, btree_node_read, c, b);
+
+ if (bch2_verify_all_btree_replicas &&
+ !btree_node_read_all_replicas(c, b, sync))
+ return;
ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
NULL, &pick);
- if (bch2_fs_fatal_err_on(ret <= 0, c,
- "btree node read error: no device to read from")) {
+
+ if (ret <= 0) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "btree node read error: no device to read from\n at ");
+ btree_pos_to_text(&buf, c, b);
+ bch_err(c, "%s", buf.buf);
+
+ if (test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
+ bch2_fatal_error(c);
+
set_btree_node_read_error(b);
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+ printbuf_exit(&buf);
return;
}
ca = bch_dev_bkey_exists(c, pick.ptr.dev);
- bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
- btree_bytes(c)),
+ bio = bio_alloc_bioset(NULL,
+ buf_pages(b->data, btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOIO,
&c->btree_bio);
rb = container_of(bio, struct btree_read_bio, bio);
rb->c = c;
+ rb->b = b;
+ rb->ra = NULL;
rb->start_time = local_clock();
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
rb->pick = pick;
INIT_WORK(&rb->work, btree_node_read_work);
- bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_end_io = btree_node_read_endio;
- bio->bi_private = b;
bch2_bio_map(bio, b->data, btree_bytes(c));
- set_btree_node_read_in_flight(b);
-
if (rb->have_ioref) {
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
bio_sectors(bio));
bio_set_dev(bio, ca->disk_sb.bdev);
if (sync) {
submit_bio_wait(bio);
- bio->bi_private = b;
btree_node_read_work(&rb->work);
} else {
submit_bio(bio);
if (sync)
btree_node_read_work(&rb->work);
else
- queue_work(system_unbound_wq, &rb->work);
-
+ queue_work(c->io_complete_wq, &rb->work);
}
}
-int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
- const struct bkey_i *k, unsigned level)
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct closure cl;
struct btree *b;
int ret;
closure_sync(&cl);
} while (ret);
- b = bch2_btree_node_mem_alloc(c);
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
bch2_btree_cache_cannibalize_unlock(c);
BUG_ON(IS_ERR(b));
bkey_copy(&b->key, k);
BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
+ set_btree_node_read_in_flight(b);
+
bch2_btree_node_read(c, b, true);
if (btree_node_read_error(b)) {
bch2_btree_set_root_for_read(c, b);
err:
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return ret;
}
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
+{
+ return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
+}
+
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
struct btree_write *w)
{
bch2_journal_pin_drop(&c->journal, &w->journal);
}
-static void btree_node_write_done(struct bch_fs *c, struct btree *b)
+static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
struct btree_write *w = btree_prev_write(b);
+ unsigned long old, new, v;
+ unsigned type = 0;
bch2_btree_complete_write(c, b, w);
- btree_node_io_unlock(b);
-}
-
-static void bch2_btree_node_write_error(struct bch_fs *c,
- struct btree_write_bio *wbio)
-{
- struct btree *b = wbio->wbio.bio.bi_private;
- __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
- struct bch_extent_ptr *ptr;
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->level, 0);
-retry:
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- goto err;
-
- /* has node been freed? */
- if (iter->l[b->level].b != b) {
- /* node has been freed: */
- BUG_ON(!btree_node_dying(b));
- goto out;
- }
-
- BUG_ON(!btree_node_hashed(b));
-
- bkey_copy(&tmp.k, &b->key);
- bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
- bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&tmp.k)))
- goto err;
+ if ((old & (1U << BTREE_NODE_dirty)) &&
+ (old & (1U << BTREE_NODE_need_write)) &&
+ !(old & (1U << BTREE_NODE_never_write)) &&
+ !(old & (1U << BTREE_NODE_write_blocked)) &&
+ !(old & (1U << BTREE_NODE_will_make_reachable))) {
+ new &= ~(1U << BTREE_NODE_dirty);
+ new &= ~(1U << BTREE_NODE_need_write);
+ new |= (1U << BTREE_NODE_write_in_flight);
+ new |= (1U << BTREE_NODE_write_in_flight_inner);
+ new |= (1U << BTREE_NODE_just_written);
+ new ^= (1U << BTREE_NODE_write_idx);
+
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ } else {
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ new &= ~(1U << BTREE_NODE_write_in_flight_inner);
+ }
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
- ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
- if (ret == -EINTR)
- goto retry;
- if (ret)
- goto err;
-out:
- bch2_trans_exit(&trans);
- bio_put(&wbio->wbio.bio);
- btree_node_write_done(c, b);
- return;
-err:
- set_btree_node_noevict(b);
- bch2_fs_fatal_error(c, "fatal error writing btree node");
- goto out;
+ if (new & (1U << BTREE_NODE_write_in_flight))
+ __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
+ else
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
-void bch2_btree_write_error_work(struct work_struct *work)
+static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
- struct bch_fs *c = container_of(work, struct bch_fs,
- btree_write_error_work);
- struct bio *bio;
+ struct btree_trans trans;
- while (1) {
- spin_lock_irq(&c->btree_write_error_lock);
- bio = bio_list_pop(&c->btree_write_error_list);
- spin_unlock_irq(&c->btree_write_error_lock);
+ bch2_trans_init(&trans, c, 0, 0);
- if (!bio)
- break;
+ btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read);
+ __btree_node_write_done(c, b);
+ six_unlock_read(&b->c.lock);
- bch2_btree_node_write_error(c,
- container_of(bio, struct btree_write_bio, wbio.bio));
- }
+ bch2_trans_exit(&trans);
}
static void btree_node_write_work(struct work_struct *work)
container_of(work, struct btree_write_bio, work);
struct bch_fs *c = wbio->wbio.c;
struct btree *b = wbio->wbio.bio.bi_private;
+ struct bch_extent_ptr *ptr;
+ int ret;
btree_bounce_free(c,
- wbio->wbio.order,
+ wbio->data_bytes,
wbio->wbio.used_mempool,
wbio->data);
- if (wbio->wbio.failed.nr) {
- unsigned long flags;
+ bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
+ bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
- spin_lock_irqsave(&c->btree_write_error_lock, flags);
- bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
- spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
+ goto err;
- queue_work(c->wq, &c->btree_write_error_work);
- return;
- }
+ if (wbio->wbio.first_btree_write) {
+ if (wbio->wbio.failed.nr) {
+ }
+ } else {
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
+ !wbio->wbio.failed.nr));
+ if (ret)
+ goto err;
+ }
+out:
bio_put(&wbio->wbio.bio);
btree_node_write_done(c, b);
+ return;
+err:
+ set_btree_node_noevict(b);
+ bch2_fs_fatal_error(c, "fatal error writing btree node");
+ goto out;
}
static void btree_node_write_endio(struct bio *bio)
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
struct bch_write_bio *orig = parent ?: wbio;
+ struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
struct bch_fs *c = wbio->c;
+ struct btree *b = wbio->bio.bi_private;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
unsigned long flags;
if (wbio->have_ioref)
bch2_latency_acct(ca, wbio->submit_time, WRITE);
- if (bio->bi_status == BLK_STS_REMOVED ||
- bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
+ bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("btree")) {
spin_lock_irqsave(&c->btree_write_error_lock, flags);
bch2_dev_list_add_dev(&orig->failed, wbio->dev);
if (parent) {
bio_put(bio);
bio_endio(&parent->bio);
- } else {
- struct btree_write_bio *wb =
- container_of(orig, struct btree_write_bio, wbio);
-
- INIT_WORK(&wb->work, btree_node_write_work);
- queue_work(system_unbound_wq, &wb->work);
+ return;
}
+
+ clear_btree_node_write_in_flight_inner(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
+ INIT_WORK(&wb->work, btree_node_write_work);
+ queue_work(c->btree_io_complete_wq, &wb->work);
}
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors)
{
- unsigned whiteout_u64s = 0;
+ struct printbuf buf = PRINTBUF;
+ bool saw_error;
int ret;
- if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
- return -1;
+ ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
+ BKEY_TYPE_btree, WRITE, &buf);
+ if (ret)
+ bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
+ printbuf_exit(&buf);
+ if (ret)
+ return ret;
+
- ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
- if (ret)
+ ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
+ validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
+ if (ret) {
bch2_inconsistent_error(c);
+ dump_stack();
+ }
return ret;
}
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held)
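+/*
+ * Btree node writes are submitted via this work item rather than directly
+ * from __bch2_btree_node_write() (which queues it below), so that the
+ * actual IO submission doesn't happen while btree node locks are held:
+ */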
+static void btree_write_submit(struct work_struct *work)
+{
+ struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+ struct bch_extent_ptr *ptr;
+ BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+
+ bkey_copy(&tmp.k, &wbio->key);
+
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
+ ptr->offset += wbio->sector_offset;
+
+ bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+ &tmp.k, false);
+}
+
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
struct btree_write_bio *wbio;
struct bset_tree *t;
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
- BKEY_PADDED(key) k;
- struct bch_extent_ptr *ptr;
struct sort_iter sort_iter;
struct nonce nonce;
- unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
+ unsigned bytes_to_write, sectors_to_write, bytes, u64s;
u64 seq = 0;
bool used_mempool;
unsigned long old, new;
bool validate_before_checksum = false;
+ enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
void *data;
+ int ret;
- if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
- return;
+ if (flags & BTREE_WRITE_ALREADY_STARTED)
+ goto do_write;
/*
* We may only have a read lock on the btree node - the dirty bit is our
if (!(old & (1 << BTREE_NODE_dirty)))
return;
- if (!btree_node_may_write(b))
+ if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
+ !(old & (1 << BTREE_NODE_need_write)))
return;
- if (old & (1 << BTREE_NODE_write_in_flight)) {
- btree_node_wait_on_io(b);
- continue;
- }
+ if (old &
+ ((1 << BTREE_NODE_never_write)|
+ (1 << BTREE_NODE_write_blocked)))
+ return;
+
+ if (b->written &&
+ (old & (1 << BTREE_NODE_will_make_reachable)))
+ return;
+
+ if (old & (1 << BTREE_NODE_write_in_flight))
+ return;
+
+ if (flags & BTREE_WRITE_ONLY_IF_NEED)
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
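+ /*
+ * b->flags also encodes the write type (what triggered this
+ * write), masked by BTREE_WRITE_TYPE_MASK - extracted above and
+ * consumed by the btree_write_stats accounting further down.
+ */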
new &= ~(1 << BTREE_NODE_dirty);
new &= ~(1 << BTREE_NODE_need_write);
new |= (1 << BTREE_NODE_write_in_flight);
+ new |= (1 << BTREE_NODE_write_in_flight_inner);
new |= (1 << BTREE_NODE_just_written);
new ^= (1 << BTREE_NODE_write_idx);
} while (cmpxchg_acquire(&b->flags, old, new) != old);
+ if (new & (1U << BTREE_NODE_need_write))
+ return;
+do_write:
+ BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
+ atomic_dec(&c->btree_cache.dirty);
+
BUG_ON(btree_node_fake(b));
BUG_ON((b->will_make_reachable != 0) != !b->written);
- BUG_ON(b->written >= c->opts.btree_node_size);
- BUG_ON(b->written & (c->opts.block_size - 1));
+ BUG_ON(b->written >= btree_sectors(c));
+ BUG_ON(b->written & (block_sectors(c) - 1));
BUG_ON(bset_written(b, btree_bset_last(b)));
BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
seq = max(seq, le64_to_cpu(i->journal_seq));
}
- order = get_order(bytes);
- data = btree_bounce_alloc(c, order, &used_mempool);
+ BUG_ON(b->written && !seq);
+
+ /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
+ bytes += 8;
+
+ /* buffer must be a multiple of the block size */
+ bytes = round_up(bytes, block_bytes(c));
+
+ data = btree_bounce_alloc(c, bytes, &used_mempool);
if (!b->written) {
bn = data;
i->journal_seq = cpu_to_le64(seq);
i->u64s = 0;
- if (!btree_node_old_extent_overwrite(b)) {
- sort_iter_add(&sort_iter,
- unwritten_whiteouts_start(c, b),
- unwritten_whiteouts_end(c, b));
- SET_BSET_SEPARATE_WHITEOUTS(i, false);
- } else {
- memcpy_u64s(i->start,
- unwritten_whiteouts_start(c, b),
- b->whiteout_u64s);
- i->u64s = cpu_to_le16(b->whiteout_u64s);
- SET_BSET_SEPARATE_WHITEOUTS(i, true);
- }
+ sort_iter_add(&sort_iter,
+ unwritten_whiteouts_start(c, b),
+ unwritten_whiteouts_end(c, b));
+ SET_BSET_SEPARATE_WHITEOUTS(i, false);
b->whiteout_u64s = 0;
- u64s = btree_node_old_extent_overwrite(b)
- ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
- : bch2_sort_keys(i->start, &sort_iter, false);
+ u64s = bch2_sort_keys(i->start, &sort_iter, false);
le16_add_cpu(&i->u64s, u64s);
+ BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
+
set_needs_whiteout(i, false);
/* do we have data to write? */
bytes_to_write = vstruct_end(i) - data;
sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
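+ /*
+ * Worked example (illustrative): with 4096-byte blocks,
+ * bytes_to_write = 4100 rounds up to 8192 bytes, i.e.
+ * sectors_to_write = 8192 >> 9 = 16 512-byte sectors.
+ */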
+ if (!b->written &&
+ b->key.k.type == KEY_TYPE_btree_ptr_v2)
+ BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
+
memset(data + bytes_to_write, 0,
(sectors_to_write << 9) - bytes_to_write);
- BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
+ BUG_ON(b->written + sectors_to_write > btree_sectors(c));
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
BUG_ON(i->seq != b->data->keys.seq);
- i->version = c->sb.version < bcachefs_metadata_version_new_versioning
+ i->version = c->sb.version < bcachefs_metadata_version_bkey_renumber
? cpu_to_le16(BCH_BSET_VERSION_OLD)
: cpu_to_le16(c->sb.version);
+ SET_BSET_OFFSET(i, b->written);
SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
validate_before_checksum = true;
/* validate_bset will be modifying: */
- if (le16_to_cpu(i->version) <
- bcachefs_metadata_version_bkey_renumber)
+ if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
validate_before_checksum = true;
/* if we're going to be encrypting, check metadata validity first: */
if (validate_before_checksum &&
validate_bset_for_write(c, b, i, sectors_to_write))
goto err;
- bset_encrypt(c, i, b->written << 9);
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error encrypting btree node: %i\n", ret))
+ goto err;
nonce = btree_nonce(i, b->written << 9);
* reflect that those writes were done and the data flushed from the
* journal:
*
+ * Also on journal error, the pending write may have updates that were
+ * never journalled (interior nodes, see btree_update_nodes_written()) -
+ * it's critical that we don't do the write in that case otherwise we
+ * will have updates visible that weren't in the journal:
+ *
* Make sure to update b->written so bch2_btree_init_next() doesn't
* break:
*/
if (bch2_journal_error(&c->journal) ||
c->opts.nochanges)
goto err;
- trace_btree_write(b, bytes_to_write, sectors_to_write);
+ trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
- wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+ wbio = container_of(bio_alloc_bioset(NULL,
buf_pages(data, sectors_to_write << 9),
+ REQ_OP_WRITE|REQ_META,
+ GFP_NOIO,
&c->btree_bio),
struct btree_write_bio, wbio.bio);
wbio_init(&wbio->wbio.bio);
wbio->data = data;
- wbio->wbio.order = order;
+ wbio->data_bytes = bytes;
+ wbio->sector_offset = b->written;
+ wbio->wbio.c = c;
wbio->wbio.used_mempool = used_mempool;
- wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
+ wbio->wbio.first_btree_write = !b->written;
wbio->wbio.bio.bi_end_io = btree_node_write_endio;
wbio->wbio.bio.bi_private = b;
bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
- /*
- * If we're appending to a leaf node, we don't technically need FUA -
- * this write just needs to be persisted before the next journal write,
- * which will be marked FLUSH|FUA.
- *
- * Similarly if we're writing a new btree root - the pointer is going to
- * be in the next journal entry.
- *
- * But if we're writing a new btree node (that isn't a root) or
- * appending to a non leaf btree node, we need either FUA or a flush
- * when we write the parent with the new pointer. FUA is cheaper than a
- * flush, and writes appending to leaf nodes aren't blocking anything so
- * just make all btree node writes FUA to keep things sane.
- */
- bkey_copy(&k.key, &b->key);
- bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
- ptr->offset += b->written;
- b->written += sectors_to_write;
- /* XXX: submitting IO with btree locks held: */
- bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
+ bkey_copy(&wbio->key, &b->key);
+ b->written += sectors_to_write;
+ if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
+ cpu_to_le16(b->written);
+ atomic64_inc(&c->btree_write_stats[type].nr);
+ atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
+ INIT_WORK(&wbio->work, btree_write_submit);
+ queue_work(c->io_complete_wq, &wbio->work);
return;
err:
set_btree_node_noevict(b);
b->written += sectors_to_write;
nowrite:
- btree_bounce_free(c, order, used_mempool, data);
- btree_node_write_done(c, b);
+ btree_bounce_free(c, bytes, used_mempool, data);
+ __btree_node_write_done(c, b);
}
/*
* single bset:
*/
if (b->nsets > 1) {
- btree_node_sort(c, b, NULL, 0, b->nsets, true);
+ btree_node_sort(c, b, 0, b->nsets, true);
invalidated_iter = true;
} else {
invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
* Use this one if the node is intent locked:
*/
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held)
+ enum six_lock_type lock_type_held,
+ unsigned flags)
{
- BUG_ON(lock_type_held == SIX_LOCK_write);
-
if (lock_type_held == SIX_LOCK_intent ||
- six_lock_tryupgrade(&b->lock)) {
- __bch2_btree_node_write(c, b, SIX_LOCK_intent);
+ (lock_type_held == SIX_LOCK_read &&
+ six_lock_tryupgrade(&b->c.lock))) {
+ __bch2_btree_node_write(c, b, flags);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
- six_trylock_write(&b->lock)) {
+ six_trylock_write(&b->c.lock)) {
bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
if (lock_type_held == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
} else {
- __bch2_btree_node_write(c, b, SIX_LOCK_read);
+ __bch2_btree_node_write(c, b, flags);
+ if (lock_type_held == SIX_LOCK_write &&
+ btree_node_just_written(b))
+ bch2_btree_post_write_cleanup(c, b);
}
}
-static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
+static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
struct bucket_table *tbl;
struct rhash_head *pos;
struct btree *b;
unsigned i;
+ bool ret = false;
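+ /*
+ * Scan the btree node cache under RCU; we can't sleep inside the
+ * RCU read section, so when we find a node to wait on we drop the
+ * lock, sleep, and rescan from the top.
+ */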
restart:
rcu_read_lock();
for_each_cached_btree(b, c, tbl, i, pos)
if (test_bit(flag, &b->flags)) {
rcu_read_unlock();
wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
+ ret = true;
goto restart;
-
}
rcu_read_unlock();
-}
-void bch2_btree_flush_all_reads(struct bch_fs *c)
-{
- __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
+ return ret;
}
-void bch2_btree_flush_all_writes(struct bch_fs *c)
+bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
- __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+ return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}
-void bch2_btree_verify_flushed(struct bch_fs *c)
+bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- unsigned i;
-
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos) {
- unsigned long flags = READ_ONCE(b->flags);
-
- BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
- (flags & (1 << BTREE_NODE_write_in_flight)));
- }
- rcu_read_unlock();
+ return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
-ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
-{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- unsigned i;
-
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos) {
- unsigned long flags = READ_ONCE(b->flags);
-
- if (!(flags & (1 << BTREE_NODE_dirty)))
- continue;
+const char * const bch2_btree_write_types[] = {
+#define x(t, n) [n] = #t,
+ BCH_BTREE_WRITE_TYPES()
+ NULL
+};
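+
+/*
+ * BCH_BTREE_WRITE_TYPES() is an x-macro list of x(name, nr) entries (e.g.
+ * BTREE_WRITE_initial, checked against b->written above), so this expands
+ * to an array mapping each write type to its name for the stats output:
+ */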
- pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
- b,
- (flags & (1 << BTREE_NODE_dirty)) != 0,
- (flags & (1 << BTREE_NODE_need_write)) != 0,
- b->level,
- b->written,
- !list_empty_careful(&b->write_blocked),
- b->will_make_reachable != 0,
- b->will_make_reachable & 1);
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ printbuf_tabstop_push(out, 20);
+ printbuf_tabstop_push(out, 10);
+
+ prt_tab(out);
+ prt_str(out, "nr");
+ prt_tab(out);
+ prt_str(out, "size");
+ prt_newline(out);
+
+ for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+ u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
+ u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
+
+ prt_printf(out, "%s:", bch2_btree_write_types[i]);
+ prt_tab(out);
+ prt_u64(out, nr);
+ prt_tab(out);
+ prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+ prt_newline(out);
}
- rcu_read_unlock();
-
- return out.pos - buf;
}
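+
+/*
+ * Example output from bch2_btree_write_stats_to_text() (values illustrative;
+ * type names come from BCH_BTREE_WRITE_TYPES()):
+ *
+ *                     nr      size
+ * initial:            37      4.0k
+ * ...
+ */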