#include "bcachefs.h"
#include "bkey_methods.h"
+#include "bkey_sort.h"
#include "btree_cache.h"
-#include "btree_update.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
-#include "journal.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
#include "super-io.h"
#include <trace/events/bcachefs.h>
static void btree_bounce_free(struct bch_fs *c, unsigned order,
			      bool used_mempool, void *p)
{
if (used_mempool)
- mempool_free(virt_to_page(p), &c->btree_bounce_pool);
+ mempool_free(p, &c->btree_bounce_pool);
else
- free_pages((unsigned long) p, order);
+ vpfree(p, PAGE_SIZE << order);
}
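+/*
+ * Allocate a bounce buffer for sorting: first try a cheap GFP_NOWAIT page
+ * allocation, which may fail under memory pressure, then fall back to the
+ * preallocated btree_bounce_pool mempool, which can always make forward
+ * progress:
+ */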
static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
				bool *used_mempool)
{
void *p;
- BUG_ON(1 << order > btree_pages(c));
+ BUG_ON(order > btree_page_order(c));
*used_mempool = false;
p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
	if (p)
		return p;
*used_mempool = true;
- return page_address(mempool_alloc(&c->btree_bounce_pool, GFP_NOIO));
-}
-
-typedef int (*sort_cmp_fn)(struct btree *,
- struct bkey_packed *,
- struct bkey_packed *);
-
-struct sort_iter {
- struct btree *b;
- unsigned used;
-
- struct sort_iter_set {
- struct bkey_packed *k, *end;
- } data[MAX_BSETS + 1];
-};
-
-static void sort_iter_init(struct sort_iter *iter, struct btree *b)
-{
- memset(iter, 0, sizeof(*iter));
- iter->b = b;
-}
-
-static inline void __sort_iter_sift(struct sort_iter *iter,
- unsigned from,
- sort_cmp_fn cmp)
-{
- unsigned i;
-
- for (i = from;
- i + 1 < iter->used &&
- cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
- i++)
- swap(iter->data[i], iter->data[i + 1]);
-}
-
-static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
-{
-
- __sort_iter_sift(iter, 0, cmp);
-}
-
-static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- unsigned i = iter->used;
-
- while (i--)
- __sort_iter_sift(iter, i, cmp);
-}
-
-static void sort_iter_add(struct sort_iter *iter,
- struct bkey_packed *k,
- struct bkey_packed *end)
-{
- BUG_ON(iter->used >= ARRAY_SIZE(iter->data));
-
- if (k != end)
- iter->data[iter->used++] = (struct sort_iter_set) { k, end };
-}
-
-static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
-{
- return iter->used ? iter->data->k : NULL;
-}
-
-static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- iter->data->k = bkey_next(iter->data->k);
-
- BUG_ON(iter->data->k > iter->data->end);
-
- if (iter->data->k == iter->data->end)
- memmove(&iter->data[0],
- &iter->data[1],
- sizeof(iter->data[0]) * --iter->used);
- else
- sort_iter_sift(iter, cmp);
-}
-
-static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
- sort_cmp_fn cmp)
-{
- struct bkey_packed *ret = sort_iter_peek(iter);
-
- if (ret)
- sort_iter_advance(iter, cmp);
-
- return ret;
-}
-
-static inline int sort_key_whiteouts_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r);
-}
-
-static unsigned sort_key_whiteouts(struct bkey_packed *dst,
- struct sort_iter *iter)
-{
- struct bkey_packed *in, *out = dst;
-
- sort_iter_sort(iter, sort_key_whiteouts_cmp);
-
- while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
- bkey_copy(out, in);
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
-
-static inline int sort_extent_whiteouts_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- struct bkey ul = bkey_unpack_key(b, l);
- struct bkey ur = bkey_unpack_key(b, r);
-
- return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
-}
-
-static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
- struct sort_iter *iter)
-{
- const struct bkey_format *f = &iter->b->format;
- struct bkey_packed *in, *out = dst;
- struct bkey_i l, r;
- bool prev = false, l_packed = false;
- u64 max_packed_size = bkey_field_max(f, BKEY_FIELD_SIZE);
- u64 max_packed_offset = bkey_field_max(f, BKEY_FIELD_OFFSET);
- u64 new_size;
-
- max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);
-
- sort_iter_sort(iter, sort_extent_whiteouts_cmp);
-
- while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
- EBUG_ON(bkeyp_val_u64s(f, in));
- EBUG_ON(in->type != KEY_TYPE_DISCARD);
-
- r.k = bkey_unpack_key(iter->b, in);
-
- if (prev &&
- bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
- if (bkey_cmp(l.k.p, r.k.p) >= 0)
- continue;
-
- new_size = l_packed
- ? min(max_packed_size, max_packed_offset -
- bkey_start_offset(&l.k))
- : KEY_SIZE_MAX;
-
- new_size = min(new_size, r.k.p.offset -
- bkey_start_offset(&l.k));
-
- BUG_ON(new_size < l.k.size);
-
- bch2_key_resize(&l.k, new_size);
-
- if (bkey_cmp(l.k.p, r.k.p) >= 0)
- continue;
-
- bch2_cut_front(l.k.p, &r);
- }
-
- if (prev) {
- if (!bch2_bkey_pack(out, &l, f)) {
- BUG_ON(l_packed);
- bkey_copy(out, &l);
- }
- out = bkey_next(out);
- }
-
- l = r;
- prev = true;
- l_packed = bkey_packed(in);
- }
-
- if (prev) {
- if (!bch2_bkey_pack(out, &l, f)) {
- BUG_ON(l_packed);
- bkey_copy(out, &l);
- }
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
+ return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
}
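+/*
+ * Returns the number of dead u64s in a bset if it's worth compacting in
+ * the given mode, 0 otherwise: COMPACT_LAZY only compacts bsets with
+ * enough dead keys (or unwritten bsets, if we're compacting anyway),
+ * while COMPACT_WRITTEN only compacts bsets already written to disk:
+ */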
static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
bool compacting,
enum compact_mode mode)
{
- unsigned live_u64s = b->nr.bset_u64s[t - b->set];
unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
-
- if (live_u64s == bset_u64s)
- return 0;
+ unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
if (mode == COMPACT_LAZY) {
- if (live_u64s * 4 < bset_u64s * 3 ||
- (compacting && bset_unwritten(b, bset(b, t))))
- return bset_u64s - live_u64s;
+ if (should_compact_bset_lazy(b, t) ||
+ (compacting && !bset_written(b, bset(b, t))))
+ return dead_u64s;
} else {
if (bset_written(b, bset(b, t)))
- return bset_u64s - live_u64s;
+ return dead_u64s;
}
return 0;
struct bkey_packed *k, *n, *out, *start, *end;
struct btree_node_entry *src = NULL, *dst = NULL;
- if (t != b->set && bset_unwritten(b, i)) {
+ if (t != b->set && !bset_written(b, i)) {
src = container_of(i, struct btree_node_entry, keys);
dst = max(write_block(b),
			  (void *) btree_bkey_last(b, t - 1));
continue;
if (bkey_whiteout(k)) {
- unreserve_whiteout(b, t, k);
+ unreserve_whiteout(b, k);
memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
set_bkeyp_val_u64s(f, u_pos, 0);
u_pos = bkey_next(u_pos);
BUG_ON((void *) unwritten_whiteouts_start(c, b) <
(void *) btree_bkey_last(b, bset_tree_last(b)));
- u64s = btree_node_is_extents(b)
- ? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
- &sort_iter)
- : sort_key_whiteouts(unwritten_whiteouts_start(c, b),
- &sort_iter);
+ u64s = (btree_node_is_extents(b)
+ ? bch2_sort_extent_whiteouts
+ : bch2_sort_key_whiteouts)(unwritten_whiteouts_start(c, b),
+ &sort_iter);
BUG_ON(u64s > b->whiteout_u64s);
BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
struct bset *i = bset(b, t);
struct bkey_packed *k, *n, *out, *start, *end;
- if (!should_compact_bset(b, t, true, true))
+ if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
continue;
start = btree_bkey_first(b, t);
end = btree_bkey_last(b, t);
- if (bset_unwritten(b, i) &&
+ if (!bset_written(b, i) &&
t != b->set) {
struct bset *dst =
max_t(struct bset *, write_block(b),
return ret;
}
-static inline int sort_keys_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r) ?:
- (int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
- (int) l->needs_whiteout - (int) r->needs_whiteout;
-}
-
-static unsigned sort_keys(struct bkey_packed *dst,
- struct sort_iter *iter,
- bool filter_whiteouts)
-{
- const struct bkey_format *f = &iter->b->format;
- struct bkey_packed *in, *next, *out = dst;
-
- sort_iter_sort(iter, sort_keys_cmp);
-
- while ((in = sort_iter_next(iter, sort_keys_cmp))) {
- if (bkey_whiteout(in) &&
- (filter_whiteouts || !in->needs_whiteout))
- continue;
-
- if (bkey_whiteout(in) &&
- (next = sort_iter_peek(iter)) &&
- !bkey_cmp_packed(iter->b, in, next)) {
- BUG_ON(in->needs_whiteout &&
- next->needs_whiteout);
- /*
- * XXX racy, called with read lock from write path
- *
- * leads to spurious BUG_ON() in bkey_unpack_key() in
- * debug mode
- */
- next->needs_whiteout |= in->needs_whiteout;
- continue;
- }
-
- if (bkey_whiteout(in)) {
- memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
- set_bkeyp_val_u64s(f, out, 0);
- } else {
- bkey_copy(out, in);
- }
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
-
-static inline int sort_extents_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r) ?:
- (int) bkey_deleted(l) - (int) bkey_deleted(r);
-}
-
-static unsigned sort_extents(struct bkey_packed *dst,
- struct sort_iter *iter,
- bool filter_whiteouts)
-{
- struct bkey_packed *in, *out = dst;
-
- sort_iter_sort(iter, sort_extents_cmp);
-
- while ((in = sort_iter_next(iter, sort_extents_cmp))) {
- if (bkey_deleted(in))
- continue;
-
- if (bkey_whiteout(in) &&
- (filter_whiteouts || !in->needs_whiteout))
- continue;
-
- bkey_copy(out, in);
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
-
static void btree_node_sort(struct bch_fs *c, struct btree *b,
struct btree_iter *iter,
unsigned start_idx,
struct bset_tree *t;
struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
- u64 start_time;
+ u64 start_time, seq = 0;
unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
bool sorting_entire_node = start_idx == 0 &&
end_idx == b->nsets;
if (btree_node_is_extents(b))
filter_whiteouts = bset_written(b, start_bset);
- u64s = btree_node_is_extents(b)
- ? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
- : sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
+ u64s = (btree_node_is_extents(b)
+ ? bch2_sort_extents
+ : bch2_sort_keys)(out->keys.start,
+ &sort_iter,
+ filter_whiteouts);
out->keys.u64s = cpu_to_le16(u64s);
BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
if (sorting_entire_node)
- bch2_time_stats_update(&c->btree_sort_time, start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_sort],
+ start_time);
/* Make sure we preserve bset journal_seq: */
- for (t = b->set + start_idx + 1;
- t < b->set + end_idx;
- t++)
- start_bset->journal_seq =
- max(start_bset->journal_seq,
- bset(b, t)->journal_seq);
+ for (t = b->set + start_idx; t < b->set + end_idx; t++)
+ seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
+ start_bset->journal_seq = cpu_to_le64(seq);
if (sorting_entire_node) {
unsigned u64s = le16_to_cpu(out->keys.u64s);
bch2_verify_btree_nr_keys(b);
}
-/* Sort + repack in a new format: */
-static struct btree_nr_keys sort_repack(struct bset *dst,
- struct btree *src,
- struct btree_node_iter *src_iter,
- struct bkey_format *out_f,
- bool filter_whiteouts)
-{
- struct bkey_format *in_f = &src->format;
- struct bkey_packed *in, *out = vstruct_last(dst);
- struct btree_nr_keys nr;
-
- memset(&nr, 0, sizeof(nr));
-
- while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
- if (filter_whiteouts && bkey_whiteout(in))
- continue;
-
- if (bch2_bkey_transform(out_f, out, bkey_packed(in)
- ? in_f : &bch2_bkey_format_current, in))
- out->format = KEY_FORMAT_LOCAL_BTREE;
- else
- bch2_bkey_unpack(src, (void *) out, in);
-
- btree_keys_account_key_add(&nr, 0, out);
- out = bkey_next(out);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
-/* Sort, repack, and merge: */
-static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
- struct bset *dst,
- struct btree *src,
- struct btree_node_iter *iter,
- struct bkey_format *out_f,
- bool filter_whiteouts,
- key_filter_fn filter,
- key_merge_fn merge)
-{
- struct bkey_packed *k, *prev = NULL, *out;
- struct btree_nr_keys nr;
- BKEY_PADDED(k) tmp;
-
- memset(&nr, 0, sizeof(nr));
-
- while ((k = bch2_btree_node_iter_next_all(iter, src))) {
- if (filter_whiteouts && bkey_whiteout(k))
- continue;
-
- /*
- * The filter might modify pointers, so we have to unpack the
- * key and values to &tmp.k:
- */
- bch2_bkey_unpack(src, &tmp.k, k);
-
- if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
- continue;
-
- /* prev is always unpacked, for key merging: */
-
- if (prev &&
- merge &&
- merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
- continue;
-
- /*
- * the current key becomes the new prev: advance prev, then
- * copy the current key - but first pack prev (in place):
- */
- if (prev) {
- bch2_bkey_pack(prev, (void *) prev, out_f);
-
- btree_keys_account_key_add(&nr, 0, prev);
- prev = bkey_next(prev);
- } else {
- prev = vstruct_last(dst);
- }
-
- bkey_copy(prev, &tmp.k);
- }
-
- if (prev) {
- bch2_bkey_pack(prev, (void *) prev, out_f);
- btree_keys_account_key_add(&nr, 0, prev);
- out = bkey_next(prev);
- } else {
- out = vstruct_last(dst);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
void bch2_btree_sort_into(struct bch_fs *c,
struct btree *dst,
struct btree *src)
bch2_bset_set_no_aux_tree(dst, dst->set);
- bch2_btree_node_iter_init_from_start(&src_iter, src,
- btree_node_is_extents(src));
+ bch2_btree_node_iter_init_from_start(&src_iter, src);
- if (btree_node_ops(src)->key_normalize ||
- btree_node_ops(src)->key_merge)
- nr = sort_repack_merge(c, btree_bset_first(dst),
+ if (btree_node_is_extents(src))
+ nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
src, &src_iter,
&dst->format,
- true,
- btree_node_ops(src)->key_normalize,
- btree_node_ops(src)->key_merge);
+ true);
else
- nr = sort_repack(btree_bset_first(dst),
+ nr = bch2_sort_repack(btree_bset_first(dst),
src, &src_iter,
&dst->format,
true);
- bch2_time_stats_update(&c->btree_sort_time, start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_sort], start_time);
set_btree_bset_end(dst, dst->set);
for (unwritten_idx = 0;
unwritten_idx < b->nsets;
unwritten_idx++)
- if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
+ if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
break;
if (b->nsets - unwritten_idx > 1) {
for_each_bset(b, t)
bch2_bset_build_aux_tree(b, t,
- bset_unwritten(b, bset(b, t)) &&
+ !bset_written(b, bset(b, t)) &&
t == bset_tree_last(b));
}
 * Returns true if we sorted (i.e. invalidated iterators)
*/
void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+ struct btree_iter *iter)
{
struct btree_node_entry *bne;
bool did_sort;
EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->nodes[b->level] != b);
+ EBUG_ON(iter && iter->l[b->level].b != b);
did_sort = btree_node_compact(c, b, iter);
bne = want_new_bset(c, b);
if (bne)
- bch2_bset_init_next(b, &bne->keys);
+ bch2_bset_init_next(c, b, bne);
bch2_btree_build_aux_trees(b);
bch2_btree_iter_reinit_node(iter, b);
}
-static struct nonce btree_nonce(struct btree *b,
- struct bset *i,
- unsigned offset)
+static struct nonce btree_nonce(struct bset *i, unsigned offset)
{
return (struct nonce) {{
[0] = cpu_to_le32(offset),
}};
}
-static void bset_encrypt(struct bch_fs *c, struct bset *i, struct nonce nonce)
+static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
{
+ struct nonce nonce = btree_nonce(i, offset);
+
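+	/*
+	 * The first bset shares its buffer with the btree node header:
+	 * encrypt the header fields (from ->flags up to the start of the
+	 * keys) first, then advance the nonce past them so the keys get a
+	 * distinct keystream:
+	 */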
+ if (!offset) {
+ struct btree_node *bn = container_of(i, struct btree_node, keys);
+ unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+ bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
+ bytes);
+
+ nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
+ }
+
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
- vstruct_end(i) - (void *) i->_data);
+ vstruct_end(i) - (void *) i->_data);
}
-#define btree_node_error(b, c, ptr, fmt, ...) \
- bch2_fs_inconsistent(c, \
- "btree node error at btree %u level %u/%u bucket %zu block %u u64s %u: " fmt,\
- (b)->btree_id, (b)->level, btree_node_root(c, b) \
- ? btree_node_root(c, b)->level : -1, \
- PTR_BUCKET_NR(ca, ptr), (b)->written, \
- le16_to_cpu((i)->u64s), ##__VA_ARGS__)
-
-static const char *validate_bset(struct bch_fs *c, struct btree *b,
- struct bch_dev *ca,
- const struct bch_extent_ptr *ptr,
- struct bset *i, unsigned sectors,
- unsigned *whiteout_u64s)
+static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
+ struct btree *b, struct bset *i,
+ unsigned offset, int write)
+{
+ pr_buf(out, "error validating btree node %s"
+ "at btree %u level %u/%u\n"
+ "pos %llu:%llu node offset %u",
+ write ? "before write " : "",
+ b->btree_id, b->level,
+ c->btree_roots[b->btree_id].level,
+ b->key.k.p.inode, b->key.k.p.offset,
+ b->written);
+ if (i)
+ pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+}
+
+enum btree_err_type {
+ BTREE_ERR_FIXABLE,
+ BTREE_ERR_WANT_RETRY,
+ BTREE_ERR_MUST_RETRY,
+ BTREE_ERR_FATAL,
+};
+
+enum btree_validate_ret {
+ BTREE_RETRY_READ = 64,
+};
+
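+/*
+ * btree_err() reports a validation error and picks a recovery strategy
+ * from the error type and whether we're in the read or write path:
+ * FIXABLE errors hit before initial GC are repaired via fsck; MUST_RETRY
+ * always, and WANT_RETRY when another replica is available, bail out with
+ * ret = BTREE_RETRY_READ; everything else fails with
+ * BCH_FSCK_ERRORS_NOT_FIXED. It expects 'write', 'have_retry' and 'ret',
+ * plus an fsck_err label, to exist at the call site - e.g.:
+ *
+ *	btree_err_on(!i->u64s, BTREE_ERR_FIXABLE, c, b, i, "empty bset");
+ */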
+#define btree_err(type, c, b, i, msg, ...) \
+({ \
+ __label__ out; \
+ char _buf[300]; \
+ struct printbuf out = PBUF(_buf); \
+ \
+ btree_err_msg(&out, c, b, i, b->written, write); \
+ pr_buf(&out, ": " msg, ##__VA_ARGS__); \
+ \
+ if (type == BTREE_ERR_FIXABLE && \
+ write == READ && \
+ !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
+ mustfix_fsck_err(c, "%s", _buf); \
+ goto out; \
+ } \
+ \
+ switch (write) { \
+ case READ: \
+ bch_err(c, "%s", _buf); \
+ \
+ switch (type) { \
+ case BTREE_ERR_FIXABLE: \
+ ret = BCH_FSCK_ERRORS_NOT_FIXED; \
+ goto fsck_err; \
+ case BTREE_ERR_WANT_RETRY: \
+ if (have_retry) { \
+ ret = BTREE_RETRY_READ; \
+ goto fsck_err; \
+ } \
+ break; \
+ case BTREE_ERR_MUST_RETRY: \
+ ret = BTREE_RETRY_READ; \
+ goto fsck_err; \
+ case BTREE_ERR_FATAL: \
+ ret = BCH_FSCK_ERRORS_NOT_FIXED; \
+ goto fsck_err; \
+ } \
+ break; \
+ case WRITE: \
+ bch_err(c, "corrupt metadata before write: %s", _buf); \
+ \
+ if (bch2_fs_inconsistent(c)) { \
+ ret = BCH_FSCK_ERRORS_NOT_FIXED; \
+ goto fsck_err; \
+ } \
+ break; \
+ } \
+out: \
+ true; \
+})
+
+#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
+
+static int validate_bset(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned sectors,
+ unsigned *whiteout_u64s, int write,
+ bool have_retry)
{
struct bkey_packed *k, *prev = NULL;
struct bpos prev_pos = POS_MIN;
bool seen_non_whiteout = false;
+ unsigned version;
+ const char *err;
+ int ret = 0;
- if (le16_to_cpu(i->version) != BCACHE_BSET_VERSION)
- return "unsupported bset version";
+ if (i == &b->data->keys) {
+ /* These indicate that we read the wrong btree node: */
+ btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
+ BTREE_ERR_MUST_RETRY, c, b, i,
+ "incorrect btree id");
- if (b->written + sectors > c->sb.btree_node_size)
- return "bset past end of btree node";
+ btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
+ BTREE_ERR_MUST_RETRY, c, b, i,
+ "incorrect level");
- if (i != &b->data->keys && !i->u64s)
- btree_node_error(b, c, ptr, "empty set");
+ if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
+ u64 *p = (u64 *) &b->data->ptr;
+
+ *p = swab64(*p);
+ bch2_bpos_swab(&b->data->min_key);
+ bch2_bpos_swab(&b->data->max_key);
+ }
+
+ btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
+ BTREE_ERR_MUST_RETRY, c, b, i,
+ "incorrect max key");
+
+ /* XXX: ideally we would be validating min_key too */
+#if 0
+ /*
+ * not correct anymore, due to btree node write error
+ * handling
+ *
+ * need to add b->data->seq to btree keys and verify
+ * against that
+ */
+ btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
+ b->data->ptr),
+ BTREE_ERR_FATAL, c, b, i,
+ "incorrect backpointer");
+#endif
+ err = bch2_bkey_format_validate(&b->data->format);
+ btree_err_on(err,
+ BTREE_ERR_FATAL, c, b, i,
+ "invalid bkey format: %s", err);
+ }
+
+ version = le16_to_cpu(i->version);
+ btree_err_on((version != BCH_BSET_VERSION_OLD &&
+ version < bcachefs_metadata_version_min) ||
+ version >= bcachefs_metadata_version_max,
+ BTREE_ERR_FATAL, c, b, i,
+ "unsupported bset version");
+
+ if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "bset past end of btree node")) {
+ i->u64s = 0;
+ return 0;
+ }
+
+ btree_err_on(b->written && !i->u64s,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "empty bset");
if (!BSET_SEPARATE_WHITEOUTS(i)) {
seen_non_whiteout = true;
- whiteout_u64s = 0;
+ *whiteout_u64s = 0;
}
for (k = i->start;
struct bkey tmp;
const char *invalid;
- if (!k->u64s) {
- btree_node_error(b, c, ptr,
- "KEY_U64s 0: %zu bytes of metadata lost",
- vstruct_end(i) - (void *) k);
-
+ if (btree_err_on(!k->u64s,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "KEY_U64s 0: %zu bytes of metadata lost",
+ vstruct_end(i) - (void *) k)) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
break;
}
- if (bkey_next(k) > vstruct_last(i)) {
- btree_node_error(b, c, ptr,
- "key extends past end of bset");
-
+ if (btree_err_on(bkey_next(k) > vstruct_last(i),
+ BTREE_ERR_FIXABLE, c, b, i,
+ "key extends past end of bset")) {
i->u64s = cpu_to_le16((u64 *) k - i->_data);
break;
}
- if (k->format > KEY_FORMAT_CURRENT) {
- btree_node_error(b, c, ptr,
- "invalid bkey format %u", k->format);
-
+ if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "invalid bkey format %u", k->format)) {
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
}
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
- bch2_bkey_swab(btree_node_type(b), &b->format, k);
+ bch2_bkey_swab(&b->format, k);
+
+ if (!write &&
+ version < bcachefs_metadata_version_bkey_renumber)
+ bch2_bkey_renumber(btree_node_type(b), k, write);
u = bkey_disassemble(b, k, &tmp);
- invalid = bch2_btree_bkey_invalid(c, b, u);
+ invalid = __bch2_bkey_invalid(c, u, btree_node_type(b)) ?:
+ bch2_bkey_in_btree_node(b, u) ?:
+ (write ? bch2_bkey_val_invalid(c, u) : NULL);
if (invalid) {
char buf[160];
- bch2_bkey_val_to_text(c, btree_node_type(b),
- buf, sizeof(buf), u);
- btree_node_error(b, c, ptr,
- "invalid bkey %s: %s", buf, invalid);
+ bch2_bkey_val_to_text(&PBUF(buf), c, u);
+ btree_err(BTREE_ERR_FIXABLE, c, b, i,
+ "invalid bkey:\n%s\n%s", invalid, buf);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_next(k),
continue;
}
+ if (write &&
+ version < bcachefs_metadata_version_bkey_renumber)
+ bch2_bkey_renumber(btree_node_type(b), k, write);
+
/*
* with the separate whiteouts thing (used for extents), the
* second set of keys actually can have whiteouts too, so we
*whiteout_u64s = k->_data - i->_data;
seen_non_whiteout = true;
} else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
- btree_node_error(b, c, ptr,
- "keys out of order: %llu:%llu > %llu:%llu",
- prev_pos.inode,
- prev_pos.offset,
- u.k->p.inode,
- bkey_start_offset(u.k));
+ btree_err(BTREE_ERR_FATAL, c, b, i,
+ "keys out of order: %llu:%llu > %llu:%llu",
+ prev_pos.inode,
+ prev_pos.offset,
+ u.k->p.inode,
+ bkey_start_offset(u.k));
/* XXX: repair this */
}
}
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
- return NULL;
-}
-
-static bool extent_contains_ptr(struct bkey_s_c_extent e,
- struct bch_extent_ptr match)
-{
- const struct bch_extent_ptr *ptr;
-
- extent_for_each_ptr(e, ptr)
- if (!memcmp(ptr, &match, sizeof(*ptr)))
- return true;
-
- return false;
+fsck_err:
+ return ret;
}
-void bch2_btree_node_read_done(struct bch_fs *c, struct btree *b,
- struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
+int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
{
struct btree_node_entry *bne;
- struct bset *i = &b->data->keys;
- struct btree_node_iter *iter;
+ struct btree_node_iter_large *iter;
struct btree_node *sorted;
+ struct bkey_packed *k;
+ struct bset *i;
bool used_mempool;
unsigned u64s;
- const char *err;
- struct bch_csum csum;
- struct nonce nonce;
- int ret;
+ int ret, retry_read = 0, write = READ;
iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
- __bch2_btree_node_iter_init(iter, btree_node_is_extents(b));
+ iter->used = 0;
- err = "dynamic fault";
if (bch2_meta_read_fault("btree"))
- goto err;
+ btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "dynamic fault");
+
+ btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
+ BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "bad magic");
+
+ btree_err_on(!b->data->keys.seq,
+ BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "bad btree header");
- while (b->written < c->sb.btree_node_size) {
+ while (b->written < c->opts.btree_node_size) {
unsigned sectors, whiteout_u64s = 0;
+ struct nonce nonce;
+ struct bch_csum csum;
+ bool first = !b->written;
if (!b->written) {
i = &b->data->keys;
- err = "bad magic";
- if (le64_to_cpu(b->data->magic) != bset_magic(c))
- goto err;
-
- err = "bad btree header";
- if (!b->data->keys.seq)
- goto err;
-
- err = "unknown checksum type";
- if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
- goto err;
-
- /* XXX: retry checksum errors */
+ btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+ BTREE_ERR_WANT_RETRY, c, b, i,
+ "unknown checksum type");
- nonce = btree_nonce(b, i, b->written << 9);
+ nonce = btree_nonce(i, b->written << 9);
csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
- err = "bad checksum";
- if (bch2_crc_cmp(csum, b->data->csum))
- goto err;
+ btree_err_on(bch2_crc_cmp(csum, b->data->csum),
+ BTREE_ERR_WANT_RETRY, c, b, i,
+ "invalid checksum");
- bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
- &b->data->flags,
- (void *) &b->data->keys -
- (void *) &b->data->flags);
- nonce = nonce_add(nonce,
- round_up((void *) &b->data->keys -
- (void *) &b->data->flags,
- CHACHA20_BLOCK_SIZE));
- bset_encrypt(c, i, nonce);
+ bset_encrypt(c, i, b->written << 9);
sectors = vstruct_sectors(b->data, c->block_bits);
- if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
- u64 *p = (u64 *) &b->data->ptr;
-
- *p = swab64(*p);
- bch2_bpos_swab(&b->data->min_key);
- bch2_bpos_swab(&b->data->max_key);
- }
-
- err = "incorrect btree id";
- if (BTREE_NODE_ID(b->data) != b->btree_id)
- goto err;
-
- err = "incorrect level";
- if (BTREE_NODE_LEVEL(b->data) != b->level)
- goto err;
-
- err = "incorrect max key";
- if (bkey_cmp(b->data->max_key, b->key.k.p))
- goto err;
-
- err = "incorrect backpointer";
- if (!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
- b->data->ptr))
- goto err;
-
- err = bch2_bkey_format_validate(&b->data->format);
- if (err)
- goto err;
-
- set_btree_bset(b, b->set, &b->data->keys);
-
btree_node_set_format(b, b->data->format);
} else {
bne = write_block(b);
if (i->seq != b->data->keys.seq)
break;
- err = "unknown checksum type";
- if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
- goto err;
+ btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+ BTREE_ERR_WANT_RETRY, c, b, i,
+ "unknown checksum type");
- nonce = btree_nonce(b, i, b->written << 9);
+ nonce = btree_nonce(i, b->written << 9);
csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
- err = "bad checksum";
- if (memcmp(&csum, &bne->csum, sizeof(csum)))
- goto err;
+ btree_err_on(bch2_crc_cmp(csum, bne->csum),
+ BTREE_ERR_WANT_RETRY, c, b, i,
+ "invalid checksum");
- bset_encrypt(c, i, nonce);
+ bset_encrypt(c, i, b->written << 9);
sectors = vstruct_sectors(bne, c->block_bits);
}
- err = validate_bset(c, b, ca, ptr, i, sectors, &whiteout_u64s);
- if (err)
- goto err;
+ ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
+ READ, have_retry);
+ if (ret)
+ goto fsck_err;
b->written += sectors;
- err = "insufficient memory";
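+		/*
+		 * A positive return means this bset's journal sequence
+		 * number has been blacklisted, so its keys must be dropped -
+		 * but the first bset also carries the node header, so there
+		 * it's only a fixable error:
+		 */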
ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
- if (ret < 0)
+ if (ret < 0) {
+ btree_err(BTREE_ERR_FATAL, c, b, i,
+ "insufficient memory");
goto err;
+ }
- if (ret)
- continue;
+ if (ret) {
+ btree_err_on(first,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "first btree node bset has blacklisted journal seq");
+ if (!first)
+ continue;
+ }
- __bch2_btree_node_iter_push(iter, b,
+ bch2_btree_node_iter_large_push(iter, b,
i->start,
vstruct_idx(i, whiteout_u64s));
- __bch2_btree_node_iter_push(iter, b,
+ bch2_btree_node_iter_large_push(iter, b,
vstruct_idx(i, whiteout_u64s),
vstruct_last(i));
}
- err = "corrupted btree";
for (bne = write_block(b);
bset_byte_offset(b, bne) < btree_bytes(c);
bne = (void *) bne + block_bytes(c))
- if (bne->keys.seq == b->data->keys.seq)
- goto err;
+ btree_err_on(bne->keys.seq == b->data->keys.seq,
+ BTREE_ERR_WANT_RETRY, c, b, NULL,
+ "found bset signature after last bset");
- sorted = btree_bounce_alloc(c, ilog2(btree_pages(c)), &used_mempool);
+ sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
sorted->keys.u64s = 0;
+ set_btree_bset(b, b->set, &b->data->keys);
+
b->nr = btree_node_is_extents(b)
? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
: bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
BUG_ON(b->nr.live_u64s != u64s);
- btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted);
+ btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
+
+ i = &b->data->keys;
+ for (k = i->start; k != vstruct_last(i);) {
+ struct bkey tmp;
+ struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
+ const char *invalid = bch2_bkey_val_invalid(c, u);
+
+ if (invalid ||
+ (inject_invalid_keys(c) &&
+ !bversion_cmp(u.k->version, MAX_VERSION))) {
+ char buf[160];
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, u);
+ btree_err(BTREE_ERR_FIXABLE, c, b, i,
+ "invalid bkey %s: %s", buf, invalid);
+
+ btree_keys_account_key_drop(&b->nr, 0, k);
+
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ set_btree_bset_end(b, b->set);
+ continue;
+ }
+
+ k = bkey_next(k);
+ }
bch2_bset_build_aux_tree(b, b->set, false);
btree_node_reset_sib_u64s(b);
out:
mempool_free(iter, &c->fill_iter);
- return;
+ return retry_read;
err:
- set_btree_node_read_error(b);
- btree_node_error(b, c, ptr, "%s", err);
+fsck_err:
+ if (ret == BTREE_RETRY_READ) {
+ retry_read = 1;
+ } else {
+ bch2_inconsistent_error(c);
+ set_btree_node_read_error(b);
+ }
goto out;
}
-void bch2_btree_node_read(struct bch_fs *c, struct btree *b)
+static void btree_node_read_work(struct work_struct *work)
{
- uint64_t start_time = local_clock();
+ struct btree_read_bio *rb =
+ container_of(work, struct btree_read_bio, work);
+ struct bch_fs *c = rb->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ struct btree *b = rb->bio.bi_private;
+ struct bio *bio = &rb->bio;
+ struct bch_io_failures failed = { .nr = 0 };
+ bool can_retry;
+
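+	/*
+	 * The first read was already submitted by bch2_btree_node_read() -
+	 * jump into the middle of the retry loop to validate it, and only
+	 * come back around to resubmit on a retryable failure:
+	 */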
+ goto start;
+ while (1) {
+ bch_info(c, "retrying read");
+ ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ bio_reset(bio);
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_sector = rb->pick.ptr.offset;
+ bio->bi_iter.bi_size = btree_bytes(c);
+
+ if (rb->have_ioref) {
+ bio_set_dev(bio, ca->disk_sb.bdev);
+ submit_bio_wait(bio);
+ } else {
+ bio->bi_status = BLK_STS_REMOVED;
+ }
+start:
+ bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
+ if (rb->have_ioref)
+ percpu_ref_put(&ca->io_ref);
+ rb->have_ioref = false;
+
+ bch2_mark_io_failure(&failed, &rb->pick);
+
+ can_retry = bch2_bkey_pick_read_device(c,
+ bkey_i_to_s_c(&b->key),
+ &failed, &rb->pick) > 0;
+
+ if (!bio->bi_status &&
+ !bch2_btree_node_read_done(c, b, can_retry))
+ break;
+
+ if (!can_retry) {
+ set_btree_node_read_error(b);
+ break;
+ }
+ }
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_read], rb->start_time);
+ bio_put(&rb->bio);
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+}
+
+static void btree_node_read_endio(struct bio *bio)
+{
+ struct btree_read_bio *rb =
+ container_of(bio, struct btree_read_bio, bio);
+ struct bch_fs *c = rb->c;
+
+ if (rb->have_ioref) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ bch2_latency_acct(ca, rb->start_time, READ);
+ }
+
+ queue_work(system_unbound_wq, &rb->work);
+}
+
+void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
+ bool sync)
+{
+ struct extent_ptr_decoded pick;
+ struct btree_read_bio *rb;
+ struct bch_dev *ca;
struct bio *bio;
- struct extent_pick_ptr pick;
+ int ret;
trace_btree_read(c, b);
- pick = bch2_btree_pick_ptr(c, b);
- if (bch2_fs_fatal_err_on(!pick.ca, c,
+ ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
+ NULL, &pick);
+ if (bch2_fs_fatal_err_on(ret <= 0, c,
"btree node read error: no device to read from")) {
set_btree_node_read_error(b);
return;
}
- bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_read_bio);
- bio->bi_bdev = pick.ca->disk_sb.bdev;
+ ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+
+ bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
+ btree_bytes(c)),
+ &c->btree_bio);
+ rb = container_of(bio, struct btree_read_bio, bio);
+ rb->c = c;
+ rb->start_time = local_clock();
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->pick = pick;
+ INIT_WORK(&rb->work, btree_node_read_work);
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_iter.bi_size = btree_bytes(c);
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+ bio->bi_end_io = btree_node_read_endio;
+ bio->bi_private = b;
bch2_bio_map(bio, b->data);
- submit_bio_wait(bio);
+ set_btree_node_read_in_flight(b);
- if (bch2_dev_fatal_io_err_on(bio->bi_error,
- pick.ca, "IO error reading bucket %zu",
- PTR_BUCKET_NR(pick.ca, &pick.ptr)) ||
- bch2_meta_read_fault("btree")) {
- set_btree_node_read_error(b);
- goto out;
- }
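+	/*
+	 * With an io ref we can submit the read, synchronously or not;
+	 * without one the device is gone, so fail the bio with
+	 * BLK_STS_REMOVED and let btree_node_read_work() retry from another
+	 * replica:
+	 */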
+ if (rb->have_ioref) {
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
+ bio_sectors(bio));
+ bio_set_dev(bio, ca->disk_sb.bdev);
- bch2_btree_node_read_done(c, b, pick.ca, &pick.ptr);
- bch2_time_stats_update(&c->btree_read_time, start_time);
-out:
- bio_put(bio);
- percpu_ref_put(&pick.ca->io_ref);
+ if (sync) {
+ submit_bio_wait(bio);
+
+ bio->bi_private = b;
+ btree_node_read_work(&rb->work);
+ } else {
+ submit_bio(bio);
+ }
+ } else {
+ bio->bi_status = BLK_STS_REMOVED;
+
+ if (sync)
+ btree_node_read_work(&rb->work);
+ else
+ queue_work(system_unbound_wq, &rb->work);
+
+ }
}
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
closure_init_stack(&cl);
do {
- ret = bch2_btree_node_cannibalize_lock(c, &cl);
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
closure_sync(&cl);
} while (ret);
b = bch2_btree_node_mem_alloc(c);
- bch2_btree_node_cannibalize_unlock(c);
+ bch2_btree_cache_cannibalize_unlock(c);
BUG_ON(IS_ERR(b));
bkey_copy(&b->key, k);
- BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
+ BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
- bch2_btree_node_read(c, b);
- six_unlock_write(&b->lock);
+ bch2_btree_node_read(c, b, true);
if (btree_node_read_error(b)) {
- six_unlock_intent(&b->lock);
- return -EIO;
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&b->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
+
+ ret = -EIO;
+ goto err;
}
- bch2_btree_set_root_initial(c, b, NULL);
+ bch2_btree_set_root_for_read(c, b);
+err:
+ six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
- return 0;
+ return ret;
}
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
struct btree_write *w)
{
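+	/*
+	 * will_make_reachable is a tagged pointer to the btree_update that
+	 * will make this node reachable; if bit 0 is set, that update still
+	 * holds a closure ref on our behalf - clear the bit and drop the
+	 * ref now that the write has completed:
+	 */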
+ unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
+
+ do {
+ old = new = v;
+ if (!(old & 1))
+ break;
+
+ new &= ~1UL;
+ } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
+
+ if (old & 1)
+ closure_put(&((struct btree_update *) new)->cl);
+
bch2_journal_pin_drop(&c->journal, &w->journal);
closure_wake_up(&w->wait);
}
{
struct btree_write *w = btree_prev_write(b);
- /*
- * Before calling bch2_btree_complete_write() - if the write errored, we
- * have to halt new journal writes before they see this btree node
- * write as completed:
- */
- if (btree_node_write_error(b))
- bch2_journal_halt(&c->journal);
-
bch2_btree_complete_write(c, b, w);
btree_node_io_unlock(b);
}
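+/*
+ * A btree node write failed on one or more replicas: drop the pointers to
+ * the failed device(s) from the node's key and rewrite the key, so the
+ * node stays reachable through its remaining good replicas - or fail
+ * fatally if none are left:
+ */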
+static void bch2_btree_node_write_error(struct bch_fs *c,
+ struct btree_write_bio *wbio)
+{
+ struct btree *b = wbio->wbio.bio.bi_private;
+ __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+ struct bkey_i_btree_ptr *new_key;
+ struct bkey_s_btree_ptr bp;
+ struct bch_extent_ptr *ptr;
+ struct btree_iter iter;
+ int ret;
+
+ __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH,
+ b->level, BTREE_ITER_NODES);
+retry:
+ ret = bch2_btree_iter_traverse(&iter);
+ if (ret)
+ goto err;
+
+	/* has node been freed? */
+	if (iter.l[b->level].b != b) {
+ BUG_ON(!btree_node_dying(b));
+ goto out;
+ }
+
+ BUG_ON(!btree_node_hashed(b));
+
+ bkey_copy(&tmp.k, &b->key);
+
+ new_key = bkey_i_to_btree_ptr(&tmp.k);
+ bp = btree_ptr_i_to_s(new_key);
+
+ bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
+ bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
+
+ if (!bch2_bkey_nr_ptrs(bp.s_c))
+ goto err;
+
+ ret = bch2_btree_node_update_key(c, &iter, b, new_key);
+ if (ret == -EINTR)
+ goto retry;
+ if (ret)
+ goto err;
+out:
+ bch2_btree_iter_unlock(&iter);
+ bio_put(&wbio->wbio.bio);
+ btree_node_write_done(c, b);
+ return;
+err:
+ set_btree_node_noevict(b);
+ bch2_fs_fatal_error(c, "fatal error writing btree node");
+ goto out;
+}
+
+void bch2_btree_write_error_work(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs,
+ btree_write_error_work);
+ struct bio *bio;
+
+ while (1) {
+ spin_lock_irq(&c->btree_write_error_lock);
+ bio = bio_list_pop(&c->btree_write_error_list);
+ spin_unlock_irq(&c->btree_write_error_lock);
+
+ if (!bio)
+ break;
+
+ bch2_btree_node_write_error(c,
+ container_of(bio, struct btree_write_bio, wbio.bio));
+ }
+}
+
+static void btree_node_write_work(struct work_struct *work)
+{
+ struct btree_write_bio *wbio =
+ container_of(work, struct btree_write_bio, work);
+ struct bch_fs *c = wbio->wbio.c;
+ struct btree *b = wbio->wbio.bio.bi_private;
+
+ btree_bounce_free(c,
+ wbio->wbio.order,
+ wbio->wbio.used_mempool,
+ wbio->data);
+
+ if (wbio->wbio.failed.nr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->btree_write_error_lock, flags);
+ bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
+ spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+
+ queue_work(c->wq, &c->btree_write_error_work);
+ return;
+ }
+
+ bio_put(&wbio->wbio.bio);
+ btree_node_write_done(c, b);
+}
+
static void btree_node_write_endio(struct bio *bio)
{
- struct btree *b = bio->bi_private;
- struct bch_write_bio *wbio = to_wbio(bio);
- struct bch_fs *c = wbio->c;
- struct bio *orig = wbio->split ? wbio->orig : NULL;
- struct closure *cl = !wbio->split ? wbio->cl : NULL;
- struct bch_dev *ca = wbio->ca;
-
- if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
- bch2_meta_write_fault("btree"))
- set_btree_node_write_error(b);
-
- if (wbio->bounce)
- btree_bounce_free(c,
- wbio->order,
- wbio->used_mempool,
- page_address(bio->bi_io_vec[0].bv_page));
-
- if (wbio->put_bio)
- bio_put(bio);
+ struct bch_write_bio *wbio = to_wbio(bio);
+ struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
+ struct bch_write_bio *orig = parent ?: wbio;
+ struct bch_fs *c = wbio->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+ unsigned long flags;
+
+ if (wbio->have_ioref)
+ bch2_latency_acct(ca, wbio->submit_time, WRITE);
+
+ if (bio->bi_status == BLK_STS_REMOVED ||
+ bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
+ bch2_meta_write_fault("btree")) {
+ spin_lock_irqsave(&c->btree_write_error_lock, flags);
+ bch2_dev_list_add_dev(&orig->failed, wbio->dev);
+ spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+ }
+
+ if (wbio->have_ioref)
+ percpu_ref_put(&ca->io_ref);
- if (orig) {
- bio_endio(orig);
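+	/*
+	 * Split bios complete into their parent; the final completion punts
+	 * to btree_node_write_work(), since freeing the bounce buffer and
+	 * handling write errors can't be done from interrupt context:
+	 */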
+ if (parent) {
+ bio_put(bio);
+ bio_endio(&parent->bio);
} else {
- btree_node_write_done(c, b);
- if (cl)
- closure_put(cl);
+ struct btree_write_bio *wb =
+ container_of(orig, struct btree_write_bio, wbio);
+
+ INIT_WORK(&wb->work, btree_node_write_work);
+ queue_work(system_unbound_wq, &wb->work);
}
+}
- if (wbio->have_io_ref)
- percpu_ref_put(&ca->io_ref);
+static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned sectors)
+{
+ unsigned whiteout_u64s = 0;
+ int ret;
+
+ if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
+ return -1;
+
+ ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
+ if (ret)
+ bch2_inconsistent_error(c);
+
+ return ret;
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- struct closure *parent,
- enum six_lock_type lock_type_held,
- int idx_to_write)
+ enum six_lock_type lock_type_held)
{
- struct bio *bio;
- struct bch_write_bio *wbio;
+ struct btree_write_bio *wbio;
struct bset_tree *t;
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
BKEY_PADDED(key) k;
- struct bkey_s_extent e;
struct bch_extent_ptr *ptr;
struct sort_iter sort_iter;
struct nonce nonce;
u64 seq = 0;
bool used_mempool;
unsigned long old, new;
+ bool validate_before_checksum = false;
void *data;
+ if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+ return;
+
/*
* We may only have a read lock on the btree node - the dirty bit is our
* "lock" against racing with other threads that may be trying to start
if (!(old & (1 << BTREE_NODE_dirty)))
return;
- if (idx_to_write >= 0 &&
- idx_to_write != !!(old & (1 << BTREE_NODE_write_idx)))
+ if (b->written &&
+ !btree_node_may_write(b))
return;
if (old & (1 << BTREE_NODE_write_in_flight)) {
- wait_on_bit_io(&b->flags,
- BTREE_NODE_write_in_flight,
- TASK_UNINTERRUPTIBLE);
+ btree_node_wait_on_io(b);
continue;
}
new &= ~(1 << BTREE_NODE_dirty);
+ new &= ~(1 << BTREE_NODE_need_write);
new |= (1 << BTREE_NODE_write_in_flight);
new |= (1 << BTREE_NODE_just_written);
new ^= (1 << BTREE_NODE_write_idx);
} while (cmpxchg_acquire(&b->flags, old, new) != old);
+ BUG_ON(btree_node_fake(b));
BUG_ON(!list_empty(&b->write_blocked));
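+	/* a node still waiting on a btree_update to make it reachable must
+	 * never have been written, and vice versa: */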
+ BUG_ON((b->will_make_reachable != 0) != !b->written);
- BUG_ON(b->written >= c->sb.btree_node_size);
+ BUG_ON(b->written >= c->opts.btree_node_size);
+ BUG_ON(b->written & (c->opts.block_size - 1));
BUG_ON(bset_written(b, btree_bset_last(b)));
BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
- if (lock_type_held == SIX_LOCK_intent) {
- six_lock_write(&b->lock);
+ /*
+ * We can't block on six_lock_write() here; another thread might be
+ * trying to get a journal reservation with read locks held, and getting
+ * a journal reservation might be blocked on flushing the journal and
+ * doing btree writes:
+ */
+ if (lock_type_held == SIX_LOCK_intent &&
+ six_trylock_write(&b->lock)) {
__bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
six_unlock_write(&b->lock);
} else {
b->whiteout_u64s = 0;
u64s = btree_node_is_extents(b)
- ? sort_extents(vstruct_last(i), &sort_iter, false)
- : sort_keys(i->start, &sort_iter, false);
+ ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
+ : bch2_sort_keys(i->start, &sort_iter, false);
le16_add_cpu(&i->u64s, u64s);
clear_needs_whiteout(i);
- if (b->written && !i->u64s) {
- /* Nothing to write: */
- btree_bounce_free(c, order, used_mempool, data);
- btree_node_write_done(c, b);
- return;
- }
+ /* do we have data to write? */
+ if (b->written && !i->u64s)
+ goto nowrite;
+
+ bytes_to_write = vstruct_end(i) - data;
+ sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
+ memset(data + bytes_to_write, 0,
+ (sectors_to_write << 9) - bytes_to_write);
+
+ BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
BUG_ON(i->seq != b->data->keys.seq);
- i->version = cpu_to_le16(BCACHE_BSET_VERSION);
+ i->version = c->sb.version < bcachefs_metadata_version_new_versioning
+ ? cpu_to_le16(BCH_BSET_VERSION_OLD)
+ : cpu_to_le16(c->sb.version);
SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
- nonce = btree_nonce(b, i, b->written << 9);
+ if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
+ validate_before_checksum = true;
- if (bn) {
- bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
- &bn->flags,
- (void *) &b->data->keys -
- (void *) &b->data->flags);
- nonce = nonce_add(nonce,
- round_up((void *) &b->data->keys -
- (void *) &b->data->flags,
- CHACHA20_BLOCK_SIZE));
- bset_encrypt(c, i, nonce);
+ /* validate_bset will be modifying: */
+ if (le16_to_cpu(i->version) <
+ bcachefs_metadata_version_bkey_renumber)
+ validate_before_checksum = true;
- nonce = btree_nonce(b, i, b->written << 9);
- bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
- } else {
- bset_encrypt(c, i, nonce);
-
- bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
- }
+ /* if we're going to be encrypting, check metadata validity first: */
+ if (validate_before_checksum &&
+ validate_bset_for_write(c, b, i, sectors_to_write))
+ goto err;
- bytes_to_write = vstruct_end(i) - data;
- sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
+ bset_encrypt(c, i, b->written << 9);
- memset(data + bytes_to_write, 0,
- (sectors_to_write << 9) - bytes_to_write);
+ nonce = btree_nonce(i, b->written << 9);
- BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size);
+ if (bn)
+ bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
+ else
+ bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
- trace_btree_write(b, bytes_to_write, sectors_to_write);
+ /* if we're not encrypting, check metadata after checksumming: */
+ if (!validate_before_checksum &&
+ validate_bset_for_write(c, b, i, sectors_to_write))
+ goto err;
/*
* We handle btree write errors by immediately halting the journal -
* break:
*/
if (bch2_journal_error(&c->journal) ||
- c->opts.nochanges) {
- set_btree_node_noevict(b);
- b->written += sectors_to_write;
-
- btree_bounce_free(c, order, used_mempool, data);
- btree_node_write_done(c, b);
- return;
- }
-
- bio = bio_alloc_bioset(GFP_NOIO, 1 << order, &c->bio_write);
-
- wbio = to_wbio(bio);
- wbio->cl = parent;
- wbio->bounce = true;
- wbio->put_bio = true;
- wbio->order = order;
- wbio->used_mempool = used_mempool;
- bio->bi_iter.bi_size = sectors_to_write << 9;
- bio->bi_end_io = btree_node_write_endio;
- bio->bi_private = b;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+ c->opts.nochanges)
+ goto err;
- if (parent)
- closure_get(parent);
+ trace_btree_write(b, bytes_to_write, sectors_to_write);
- bch2_bio_map(bio, data);
+ wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+ buf_pages(data, sectors_to_write << 9),
+ &c->btree_bio),
+ struct btree_write_bio, wbio.bio);
+ wbio_init(&wbio->wbio.bio);
+ wbio->data = data;
+ wbio->wbio.order = order;
+ wbio->wbio.used_mempool = used_mempool;
+ wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
+ wbio->wbio.bio.bi_iter.bi_size = sectors_to_write << 9;
+ wbio->wbio.bio.bi_end_io = btree_node_write_endio;
+ wbio->wbio.bio.bi_private = b;
+
+ bch2_bio_map(&wbio->wbio.bio, data);
/*
* If we're appending to a leaf node, we don't technically need FUA -
*/
bkey_copy(&k.key, &b->key);
- e = bkey_i_to_s_extent(&k.key);
- extent_for_each_ptr(e, ptr)
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
ptr->offset += b->written;
- extent_for_each_ptr(e, ptr)
- atomic64_add(sectors_to_write,
- &c->devs[ptr->dev]->btree_sectors_written);
-
b->written += sectors_to_write;
- bch2_submit_wbio_replicas(wbio, c, &k.key);
+ bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
+ return;
+err:
+ set_btree_node_noevict(b);
+ b->written += sectors_to_write;
+nowrite:
+ btree_bounce_free(c, order, used_mempool, data);
+ btree_node_write_done(c, b);
}
/*
clear_btree_node_just_written(b);
/*
- * Note: immediately after write, bset_unwritten()/bset_written() don't
- * work - the amount of data we had to write after compaction might have
- * been smaller than the offset of the last bset.
+ * Note: immediately after write, bset_written() doesn't work - the
+ * amount of data we had to write after compaction might have been
+ * smaller than the offset of the last bset.
*
* However, we know that all bsets have been written here, as long as
* we're still holding the write lock:
bne = want_new_bset(c, b);
if (bne)
- bch2_bset_init_next(b, &bne->keys);
+ bch2_bset_init_next(c, b, bne);
bch2_btree_build_aux_trees(b);
* Use this one if the node is intent locked:
*/
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- struct closure *parent,
- enum six_lock_type lock_type_held,
- int idx_to_write)
+ enum six_lock_type lock_type_held)
{
BUG_ON(lock_type_held == SIX_LOCK_write);
if (lock_type_held == SIX_LOCK_intent ||
- six_trylock_convert(&b->lock, SIX_LOCK_read,
- SIX_LOCK_intent)) {
- __bch2_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write);
-
- six_lock_write(&b->lock);
- bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->lock);
+ six_lock_tryupgrade(&b->lock)) {
+ __bch2_btree_node_write(c, b, SIX_LOCK_intent);
+
+ /* don't cycle lock unnecessarily: */
+ if (btree_node_just_written(b) &&
+ six_trylock_write(&b->lock)) {
+ bch2_btree_post_write_cleanup(c, b);
+ six_unlock_write(&b->lock);
+ }
if (lock_type_held == SIX_LOCK_read)
six_lock_downgrade(&b->lock);
} else {
- __bch2_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write);
+ __bch2_btree_node_write(c, b, SIX_LOCK_read);
}
}
-static void bch2_btree_node_write_dirty(struct bch_fs *c, struct btree *b,
- struct closure *parent)
+static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
- six_lock_read(&b->lock);
- BUG_ON(b->level);
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+ unsigned i;
+restart:
+ rcu_read_lock();
+ for_each_cached_btree(b, c, tbl, i, pos)
+ if (test_bit(flag, &b->flags)) {
+ rcu_read_unlock();
+ wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
+ goto restart;
- bch2_btree_node_write(c, b, parent, SIX_LOCK_read, -1);
- six_unlock_read(&b->lock);
+ }
+ rcu_read_unlock();
}
-/*
- * Write all dirty btree nodes to disk, including roots
- */
-void bch2_btree_flush(struct bch_fs *c)
+void bch2_btree_flush_all_reads(struct bch_fs *c)
+{
+ __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
+}
+
+void bch2_btree_flush_all_writes(struct bch_fs *c)
+{
+ __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+}
+
+void bch2_btree_verify_flushed(struct bch_fs *c)
{
- struct closure cl;
- struct btree *b;
struct bucket_table *tbl;
struct rhash_head *pos;
- bool dropped_lock;
+ struct btree *b;
unsigned i;
- closure_init_stack(&cl);
-
rcu_read_lock();
+ for_each_cached_btree(b, c, tbl, i, pos) {
+ unsigned long flags = READ_ONCE(b->flags);
- do {
- dropped_lock = false;
- i = 0;
-restart:
- tbl = rht_dereference_rcu(c->btree_cache_table.tbl,
- &c->btree_cache_table);
-
- for (; i < tbl->size; i++)
- rht_for_each_entry_rcu(b, pos, tbl, i, hash)
- /*
- * XXX - locking for b->level, when called from
- * bch2_journal_move()
- */
- if (!b->level && btree_node_dirty(b)) {
- rcu_read_unlock();
- bch2_btree_node_write_dirty(c, b, &cl);
- dropped_lock = true;
- rcu_read_lock();
- goto restart;
- }
- } while (dropped_lock);
-
+ BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
+ (flags & (1 << BTREE_NODE_write_in_flight)));
+ }
rcu_read_unlock();
-
- closure_sync(&cl);
}
-/**
- * bch_btree_node_flush_journal - flush any journal entries that contain keys
- * from this node
- *
- * The bset's journal sequence number is used for preserving ordering of index
- * updates across unclean shutdowns - it's used to ignore bsets newer than the
- * most recent journal entry.
- *
- * But when rewriting btree nodes we compact all the bsets in a btree node - and
- * if we compacted a bset that should be ignored with bsets we do need, that
- * would be bad. So to avoid that, prior to making the new node visible ensure
- * that the journal has been flushed so that all the bsets we compacted should
- * be visible.
- */
-void bch2_btree_node_flush_journal_entries(struct bch_fs *c,
- struct btree *b,
- struct closure *cl)
+ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
{
- int i = b->nsets;
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+ unsigned i;
- /*
- * Journal sequence numbers in the different bsets will always be in
- * ascending order, we only need to flush the highest - except that the
- * most recent bset might not have a journal sequence number yet, so we
- * need to loop:
- */
- while (i--) {
- u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);
+ rcu_read_lock();
+ for_each_cached_btree(b, c, tbl, i, pos) {
+ unsigned long flags = READ_ONCE(b->flags);
+ unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;
+
+ if (//!(flags & (1 << BTREE_NODE_dirty)) &&
+ !b->writes[0].wait.list.first &&
+ !b->writes[1].wait.list.first &&
+ !(b->will_make_reachable & 1))
+ continue;
- if (seq) {
- bch2_journal_flush_seq_async(&c->journal, seq, cl);
- break;
- }
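+		/*
+		 * One line per node: pointer, dirty bit, level, sectors
+		 * written, write_blocked, will_make_reachable
+		 * (pointer:pending bit), and whether each write's wait list
+		 * is non-empty:
+		 */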
+ pr_buf(&out, "%p d %u l %u w %u b %u r %u:%lu c %u p %u\n",
+ b,
+ (flags & (1 << BTREE_NODE_dirty)) != 0,
+ b->level,
+ b->written,
+ !list_empty_careful(&b->write_blocked),
+ b->will_make_reachable != 0,
+ b->will_make_reachable & 1,
+ b->writes[ idx].wait.list.first != NULL,
+ b->writes[!idx].wait.list.first != NULL);
}
+ rcu_read_unlock();
+
+ return out.pos - buf;
}