#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
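+/*
+ * Editor's note (inferred from the assertions below, not from the original
+ * patch description): the read_in_flight/write_in_flight flags double as
+ * bit-waitqueues. The bch2_* variants assert that no btree node locks are
+ * held, since sleeping on node IO while holding a node lock can deadlock;
+ * the __bch2_* variants are for callers that already hold b->c.lock and
+ * know the wait is safe.
+ */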
+void bch2_btree_node_io_unlock(struct btree *b)
+{
+ EBUG_ON(!btree_node_write_in_flight(b));
+
+ clear_btree_node_write_in_flight_inner(b);
+ clear_btree_node_write_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+}
+
+void bch2_btree_node_io_lock(struct btree *b)
+{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
+ wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_read(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_write(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_read(struct btree *b)
+{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_write(struct btree *b)
+{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
static void verify_no_dups(struct btree *b,
struct bkey_packed *start,
struct bkey_packed *end)
}
static void btree_node_sort(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter,
unsigned start_idx,
unsigned end_idx,
bool filter_whiteouts)
* We're about to add another bset to the btree node, so if there are currently
* too many bsets, sort some of them together:
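*
* (E.g., editor's illustration: a node with three written bsets and two
* unwritten ones ends up with two - the unwritten pair is sorted together
* first, then the three written bsets are sorted together.)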
*/
-static bool btree_node_compact(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
unsigned unwritten_idx;
bool ret = false;
break;
if (b->nsets - unwritten_idx > 1) {
- btree_node_sort(c, b, iter, unwritten_idx,
+ btree_node_sort(c, b, unwritten_idx,
b->nsets, false);
ret = true;
}
if (unwritten_idx > 1) {
- btree_node_sort(c, b, iter, 0, unwritten_idx, false);
+ btree_node_sort(c, b, 0, unwritten_idx, false);
ret = true;
}
*
* Returns true if we sorted (i.e. invalidated iterators)
*/
-void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
+ struct bch_fs *c = trans->c;
struct btree_node_entry *bne;
- bool did_sort;
+ bool reinit_iter = false;
EBUG_ON(!(b->c.lock.state.seq & 1));
- EBUG_ON(iter && iter->l[b->c.level].b != b);
+ BUG_ON(bset_written(b, bset(b, &b->set[1])));
+
+ if (b->nsets == MAX_BSETS &&
+ !btree_node_write_in_flight(b)) {
+ unsigned log_u64s[] = {
+ ilog2(bset_u64s(&b->set[0])),
+ ilog2(bset_u64s(&b->set[1])),
+ ilog2(bset_u64s(&b->set[2])),
+ };
+
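+ /*
+ * Editor's illustration (not from the original patch): with bsets of
+ * 1024, 256 and 16 u64s, log_u64s = { 10, 8, 4 }; 8 >= (10 + 4) / 2,
+ * i.e. the middle bset is already "big", so compacting in memory would
+ * be expensive - write the node out and start fresh instead.
+ */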
+ if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
+ bch2_btree_node_write(c, b, SIX_LOCK_write);
+ reinit_iter = true;
+ }
+ }
- did_sort = btree_node_compact(c, b, iter);
+ if (b->nsets == MAX_BSETS &&
+ btree_node_compact(c, b))
+ reinit_iter = true;
+
+ BUG_ON(b->nsets >= MAX_BSETS);
bne = want_new_bset(c, b);
if (bne)
bch2_btree_build_aux_trees(b);
- if (iter && did_sort)
- bch2_btree_iter_reinit_node(iter, b);
+ if (reinit_iter)
+ bch2_trans_node_reinit_iter(trans, b);
}
static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
\
switch (write) { \
case READ: \
- bch_err(c, "%s", _buf2); \
+ bch_err(c, "%s", _buf2); \
\
switch (type) { \
case BTREE_ERR_FIXABLE: \
#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
+/*
+ * When btree topology repair changes the start or end of a node, that might
+ * mean we have to drop keys that are no longer inside the node:
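+ *
+ * (Editor's example: if repair moved min_key forward from 0 to 10, keys at
+ * 3 and 7 are memmoved out of the front of each bset; keys past the new
+ * max_key are dropped by truncating i->u64s.)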
+ */
+void bch2_btree_node_drop_keys_outside_node(struct btree *b)
+{
+ struct bset_tree *t;
+ struct bkey_s_c k;
+ struct bkey unpacked;
+ struct btree_node_iter iter;
+
+ for_each_bset(b, t) {
+ struct bset *i = bset(b, t);
+ struct bkey_packed *k;
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
+ break;
+
+ if (k != i->start) {
+ unsigned shift = (u64 *) k - (u64 *) i->start;
+
+ memmove_u64s_down(i->start, k,
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
+ set_btree_bset_end(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
+ }
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
+ break;
+
+ if (k != vstruct_last(i)) {
+ i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
+ set_btree_bset_end(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
+ }
+ }
+
+ bch2_btree_build_aux_trees(b);
+
+ for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
+ BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+ }
+}
+
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
struct btree *b, struct bset *i,
- unsigned sectors, int write, bool have_retry)
+ unsigned offset, unsigned sectors,
+ int write, bool have_retry)
{
unsigned version = le16_to_cpu(i->version);
const char *err;
BTREE_ERR_FATAL, c, ca, b, i,
"BSET_SEPARATE_WHITEOUTS no longer supported");
- if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
+ if (btree_err_on(offset + sectors > c->opts.btree_node_size,
BTREE_ERR_FIXABLE, c, ca, b, i,
"bset past end of btree node")) {
i->u64s = 0;
return 0;
}
- btree_err_on(b->written && !i->u64s,
+ btree_err_on(offset && !i->u64s,
BTREE_ERR_FIXABLE, c, ca, b, i,
"empty bset");
- if (!b->written) {
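+ /*
+ * Editor's note: BSET_OFFSET() is recorded at write time (see
+ * SET_BSET_OFFSET() in the write path below); a mismatch here means
+ * the bset landed at the wrong sector - e.g. a misdirected or torn
+ * write - so retry from another replica:
+ */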
+ btree_err_on(BSET_OFFSET(i) &&
+ BSET_OFFSET(i) != offset,
+ BTREE_ERR_WANT_RETRY, c, ca, b, i,
+ "bset at wrong sector offset");
+
+ if (!offset) {
struct btree_node *bn =
container_of(i, struct btree_node, keys);
/* These indicate that we read the wrong btree node: */
{
unsigned version = le16_to_cpu(i->version);
struct bkey_packed *k, *prev = NULL;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
int ret = 0;
for (k = i->start;
u = __bkey_disassemble(b, k, &tmp);
invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
- bch2_bkey_in_btree_node(b, u.s_c) ?:
+ (!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?:
(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
if (invalid) {
char buf[160];
struct bch_extent_ptr *ptr;
struct bset *i;
bool used_mempool, blacklisted;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
unsigned u64s;
+ unsigned blacklisted_written, nonblacklisted_written = 0;
+ unsigned ptr_written = btree_ptr_sectors_written(&b->key);
int ret, retry_read = 0, write = READ;
b->version_ondisk = U16_MAX;
b->data->keys.seq, bp->seq);
}
- while (b->written < c->opts.btree_node_size) {
+ while (b->written < (ptr_written ?: c->opts.btree_node_size)) {
unsigned sectors, whiteout_u64s = 0;
struct nonce nonce;
struct bch_csum csum;
b->version_ondisk = min(b->version_ondisk,
le16_to_cpu(i->version));
- ret = validate_bset(c, ca, b, i, sectors,
+ ret = validate_bset(c, ca, b, i, b->written, sectors,
READ, have_retry);
if (ret)
goto fsck_err;
btree_err_on(blacklisted && first,
BTREE_ERR_FIXABLE, c, ca, b, i,
"first btree node bset has blacklisted journal seq");
+
+ btree_err_on(blacklisted && ptr_written,
+ BTREE_ERR_FIXABLE, c, ca, b, i,
+ "found blacklisted bset in btree node with sectors_written");
if (blacklisted && !first)
continue;
sort_iter_add(iter,
vstruct_idx(i, whiteout_u64s),
vstruct_last(i));
+
+ nonblacklisted_written = b->written;
}
- for (bne = write_block(b);
- bset_byte_offset(b, bne) < btree_bytes(c);
- bne = (void *) bne + block_bytes(c))
- btree_err_on(bne->keys.seq == b->data->keys.seq,
+ if (ptr_written) {
+ btree_err_on(b->written < ptr_written,
BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
- "found bset signature after last bset");
+ "btree node data missing: expected %u sectors, found %u",
+ ptr_written, b->written);
+ } else {
+ for (bne = write_block(b);
+ bset_byte_offset(b, bne) < btree_bytes(c);
+ bne = (void *) bne + block_bytes(c))
+ btree_err_on(bne->keys.seq == b->data->keys.seq &&
+ !bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq),
+ true),
+ BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+ "found bset signature after last bset");
+
+ /*
+ * Blacklisted bsets are those that were written after the most recent
+ * (flush) journal write. Since there wasn't a flush, they may not have
+ * made it to all devices - which means we shouldn't write new bsets
+ * after them, as that could leave a gap and then reads from that device
+ * wouldn't find all the bsets in that btree node - which means it's
+ * important that we start writing new bsets after the most recent _non_
+ * blacklisted bset:
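+ *
+ * (Editor's example: bsets at sectors 0-8 and 8-16 with 8-16 blacklisted:
+ * b->written is rewound from 16 to 8, so the next bset is written over the
+ * blacklisted one rather than after it.)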
+ */
+ blacklisted_written = b->written;
+ b->written = nonblacklisted_written;
+ }
sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
sorted->keys.u64s = 0;
btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+ if (updated_range)
+ bch2_btree_node_drop_keys_outside_node(b);
+
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
struct bkey tmp;
if (ca->mi.state != BCH_MEMBER_STATE_rw)
set_btree_node_need_rewrite(b);
}
+
+ if (!ptr_written)
+ set_btree_node_need_rewrite(b);
out:
mempool_free(iter, &c->fill_iter);
return retry_read;
struct btree_read_bio *rb =
container_of(work, struct btree_read_bio, work);
struct bch_fs *c = rb->c;
+ struct btree *b = rb->b;
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
- struct btree *b = rb->bio.bi_private;
struct bio *bio = &rb->bio;
struct bch_io_failures failed = { .nr = 0 };
char buf[200];
struct printbuf out;
+ bool saw_error = false;
bool can_retry;
goto start;
!bch2_btree_node_read_done(c, ca, b, can_retry))
break;
+ saw_error = true;
+
if (!can_retry) {
set_btree_node_read_error(b);
break;
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
rb->start_time);
bio_put(&rb->bio);
+
+ if (saw_error && !btree_node_read_error(b))
+ bch2_btree_node_rewrite_async(c, b);
+
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
bch2_latency_acct(ca, rb->start_time, READ);
}
- queue_work(system_unbound_wq, &rb->work);
+ queue_work(c->io_complete_wq, &rb->work);
+}
+
+struct btree_node_read_all {
+ struct closure cl;
+ struct bch_fs *c;
+ struct btree *b;
+ unsigned nr;
+ void *buf[BCH_REPLICAS_MAX];
+ struct bio *bio[BCH_REPLICAS_MAX];
+ int err[BCH_REPLICAS_MAX];
+};
+
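+/*
+ * Editor's note on the on-disk layout this walks (standard bcachefs node
+ * format): the first bset is embedded in struct btree_node at sector 0;
+ * each later bset has its own struct btree_node_entry header at a block
+ * boundary, and all entries belonging to the node share keys.seq.
+ */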
+static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+ unsigned offset = 0;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c))
+ return 0;
+
+ while (offset < c->opts.btree_node_size) {
+ if (!offset) {
+ offset += vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = data + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ offset += vstruct_sectors(bne, c->block_bits);
+ }
+ }
+
+ return offset;
+}
+
+static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+
+ if (!offset)
+ return false;
+
+ while (offset < c->opts.btree_node_size) {
+ bne = data + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq)
+ return true;
+ offset++;
+ }
+
+ return false;
+}
+
+static void btree_node_read_all_replicas_done(struct closure *cl)
+{
+ struct btree_node_read_all *ra =
+ container_of(cl, struct btree_node_read_all, cl);
+ struct bch_fs *c = ra->c;
+ struct btree *b = ra->b;
+ bool dump_bset_maps = false;
+ bool have_retry = false;
+ int ret = 0, best = -1, write = READ;
+ unsigned i, written = 0, written2 = 0;
+ __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
+ ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
+
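+ /*
+ * Editor's note: pick the replica with the most sectors written as
+ * "best"; any divergence between replicas (length, extra bsets, or
+ * content) is flagged as fixable and dumps the bset maps below.
+ */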
+ for (i = 0; i < ra->nr; i++) {
+ struct btree_node *bn = ra->buf[i];
+
+ if (ra->err[i])
+ continue;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c) ||
+ (seq && seq != bn->keys.seq))
+ continue;
+
+ if (best < 0) {
+ best = i;
+ written = btree_node_sectors_written(c, bn);
+ continue;
+ }
+
+ written2 = btree_node_sectors_written(c, ra->buf[i]);
+ if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "btree node sectors written mismatch: %u != %u",
+ written, written2) ||
+ btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
+ BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "found bset signature after last bset") ||
+ btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
+ BTREE_ERR_FIXABLE, c, NULL, b, NULL,
+ "btree node replicas content mismatch"))
+ dump_bset_maps = true;
+
+ if (written2 > written) {
+ written = written2;
+ best = i;
+ }
+ }
+fsck_err:
+ if (dump_bset_maps) {
+ for (i = 0; i < ra->nr; i++) {
+ char buf[200];
+ struct printbuf out = PBUF(buf);
+ struct btree_node *bn = ra->buf[i];
+ struct btree_node_entry *bne = NULL;
+ unsigned offset = 0, sectors;
+ bool gap = false;
+
+ if (ra->err[i])
+ continue;
+
+ while (offset < c->opts.btree_node_size) {
+ if (!offset) {
+ sectors = vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ sectors = vstruct_sectors(bne, c->block_bits);
+ }
+
+ pr_buf(&out, " %u-%u", offset, offset + sectors);
+ if (bne && bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ pr_buf(&out, "*");
+ offset += sectors;
+ }
+
+ while (offset < c->opts.btree_node_size) {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq) {
+ if (!gap)
+ pr_buf(&out, " GAP");
+ gap = true;
+
+ sectors = vstruct_sectors(bne, c->block_bits);
+ pr_buf(&out, " %u-%u", offset, offset + sectors);
+ if (bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ pr_buf(&out, "*");
+ }
+ offset++;
+ }
+
+ bch_err(c, "replica %u:%s", i, buf);
+ }
+ }
+
+ if (best >= 0) {
+ memcpy(b->data, ra->buf[best], btree_bytes(c));
+ ret = bch2_btree_node_read_done(c, NULL, b, false);
+ } else {
+ ret = -1;
+ }
+
+ if (ret)
+ set_btree_node_read_error(b);
+
+ for (i = 0; i < ra->nr; i++) {
+ mempool_free(ra->buf[i], &c->btree_bounce_pool);
+ bio_put(ra->bio[i]);
+ }
+
+ closure_debug_destroy(&ra->cl);
+ kfree(ra);
+
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+}
+
+static void btree_node_read_all_replicas_endio(struct bio *bio)
+{
+ struct btree_read_bio *rb =
+ container_of(bio, struct btree_read_bio, bio);
+ struct bch_fs *c = rb->c;
+ struct btree_node_read_all *ra = rb->ra;
+
+ if (rb->have_ioref) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ bch2_latency_acct(ca, rb->start_time, READ);
+ }
+
+ ra->err[rb->idx] = bio->bi_status;
+ closure_put(&ra->cl);
+}
+
+/*
+ * XXX This allocates multiple times from the same mempools, and can deadlock
+ * under sufficient memory pressure (but is only a debug path)
+ */
+static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
+{
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded pick;
+ struct btree_node_read_all *ra;
+ unsigned i;
+
+ ra = kzalloc(sizeof(*ra), GFP_NOFS);
+ if (!ra)
+ return -ENOMEM;
+
+ closure_init(&ra->cl, NULL);
+ ra->c = c;
+ ra->b = b;
+ ra->nr = bch2_bkey_nr_ptrs(k);
+
+ for (i = 0; i < ra->nr; i++) {
+ ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+ ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
+ btree_bytes(c)),
+ &c->btree_bio);
+ }
+
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct btree_read_bio *rb =
+ container_of(ra->bio[i], struct btree_read_bio, bio);
+ rb->c = c;
+ rb->b = b;
+ rb->ra = ra;
+ rb->start_time = local_clock();
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->idx = i;
+ rb->pick = pick;
+ rb->bio.bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
+ rb->bio.bi_iter.bi_sector = pick.ptr.offset;
+ rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
+ bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+
+ if (rb->have_ioref) {
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
+ bio_sectors(&rb->bio));
+ bio_set_dev(&rb->bio, ca->disk_sb.bdev);
+
+ closure_get(&ra->cl);
+ submit_bio(&rb->bio);
+ } else {
+ ra->err[i] = BLK_STS_REMOVED;
+ }
+
+ i++;
+ }
+
+ if (sync) {
+ closure_sync(&ra->cl);
+ btree_node_read_all_replicas_done(&ra->cl);
+ } else {
+ continue_at(&ra->cl, btree_node_read_all_replicas_done,
+ c->io_complete_wq);
+ }
+
+ return 0;
}
void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
struct btree_read_bio *rb;
struct bch_dev *ca;
struct bio *bio;
+ char buf[200];
int ret;
+ btree_pos_to_text(&PBUF(buf), c, b);
trace_btree_read(c, b);
+ if (bch2_verify_all_btree_replicas &&
+ !btree_node_read_all_replicas(c, b, sync))
+ return;
+
ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
NULL, &pick);
if (bch2_fs_fatal_err_on(ret <= 0, c,
- "btree node read error: no device to read from")) {
+ "btree node read error: no device to read from\n"
+ " at %s", buf)) {
set_btree_node_read_error(b);
return;
}
&c->btree_bio);
rb = container_of(bio, struct btree_read_bio, bio);
rb->c = c;
+ rb->b = b;
+ rb->ra = NULL;
rb->start_time = local_clock();
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
rb->pick = pick;
bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_end_io = btree_node_read_endio;
- bio->bi_private = b;
bch2_bio_map(bio, b->data, btree_bytes(c));
- set_btree_node_read_in_flight(b);
-
if (rb->have_ioref) {
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
bio_sectors(bio));
if (sync) {
submit_bio_wait(bio);
- bio->bi_private = b;
btree_node_read_work(&rb->work);
} else {
submit_bio(bio);
if (sync)
btree_node_read_work(&rb->work);
else
- queue_work(system_unbound_wq, &rb->work);
-
+ queue_work(c->io_complete_wq, &rb->work);
}
}
bkey_copy(&b->key, k);
BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
+ set_btree_node_read_in_flight(b);
+
bch2_btree_node_read(c, b, true);
if (btree_node_read_error(b)) {
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
struct btree_write *w = btree_prev_write(b);
+ unsigned long old, new, v;
bch2_btree_complete_write(c, b, w);
- btree_node_io_unlock(b);
-}
-
-static void bch2_btree_node_write_error(struct bch_fs *c,
- struct btree_write_bio *wbio)
-{
- struct btree *b = wbio->wbio.bio.bi_private;
- struct bkey_buf k;
- struct bch_extent_ptr *ptr;
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret;
-
- bch2_bkey_buf_init(&k);
- bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->c.level, 0);
-retry:
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- goto err;
-
- /* has node been freed? */
- if (iter->l[b->c.level].b != b) {
- /* node has been freed: */
- BUG_ON(!btree_node_dying(b));
- goto out;
- }
-
- BUG_ON(!btree_node_hashed(b));
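+ /*
+ * Editor's note: lockless flag update - clear write_in_flight with
+ * cmpxchg so we don't race with a concurrent thread setting
+ * need_write; if need_write was set while this write was in flight,
+ * fall through to do_write and start the next write instead.
+ */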
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
- bch2_bkey_buf_copy(&k, c, &b->key);
+ if (old & (1U << BTREE_NODE_need_write))
+ goto do_write;
- bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
- bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ new &= ~(1U << BTREE_NODE_write_in_flight_inner);
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
- goto err;
-
- ret = bch2_btree_node_update_key(c, iter, b, k.k);
- if (ret == -EINTR)
- goto retry;
- if (ret)
- goto err;
-out:
- bch2_trans_iter_put(&trans, iter);
- bch2_trans_exit(&trans);
- bch2_bkey_buf_exit(&k, c);
- bio_put(&wbio->wbio.bio);
- btree_node_write_done(c, b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
return;
-err:
- set_btree_node_noevict(b);
- bch2_fs_fatal_error(c, "fatal error writing btree node");
- goto out;
-}
-void bch2_btree_write_error_work(struct work_struct *work)
-{
- struct bch_fs *c = container_of(work, struct bch_fs,
- btree_write_error_work);
- struct bio *bio;
+do_write:
+ six_lock_read(&b->c.lock, NULL, NULL);
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
- while (1) {
- spin_lock_irq(&c->btree_write_error_lock);
- bio = bio_list_pop(&c->btree_write_error_list);
- spin_unlock_irq(&c->btree_write_error_lock);
+ if ((old & (1U << BTREE_NODE_dirty)) &&
+ (old & (1U << BTREE_NODE_need_write)) &&
+ !(old & (1U << BTREE_NODE_never_write)) &&
+ btree_node_may_write(b)) {
+ new &= ~(1U << BTREE_NODE_dirty);
+ new &= ~(1U << BTREE_NODE_need_write);
+ new |= (1U << BTREE_NODE_write_in_flight);
+ new |= (1U << BTREE_NODE_write_in_flight_inner);
+ new |= (1U << BTREE_NODE_just_written);
+ new ^= (1U << BTREE_NODE_write_idx);
+ } else {
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ new &= ~(1U << BTREE_NODE_write_in_flight_inner);
+ }
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
- if (!bio)
- break;
+ if (new & (1U << BTREE_NODE_write_in_flight))
+ __bch2_btree_node_write(c, b, true);
- bch2_btree_node_write_error(c,
- container_of(bio, struct btree_write_bio, wbio.bio));
- }
+ six_unlock_read(&b->c.lock);
}
static void btree_node_write_work(struct work_struct *work)
container_of(work, struct btree_write_bio, work);
struct bch_fs *c = wbio->wbio.c;
struct btree *b = wbio->wbio.bio.bi_private;
+ struct bch_extent_ptr *ptr;
+ int ret;
btree_bounce_free(c,
- wbio->bytes,
+ wbio->data_bytes,
wbio->wbio.used_mempool,
wbio->data);
- if (wbio->wbio.failed.nr) {
- unsigned long flags;
+ bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
+ bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
- spin_lock_irqsave(&c->btree_write_error_lock, flags);
- bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
- spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
+ goto err;
- queue_work(c->wq, &c->btree_write_error_work);
- return;
- }
+ if (wbio->wbio.first_btree_write) {
+ if (wbio->wbio.failed.nr) {
+ /* XXX: a failed first write is currently not handled here */
+ }
+ } else {
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
+ !wbio->wbio.failed.nr));
+ if (ret)
+ goto err;
+ }
+out:
bio_put(&wbio->wbio.bio);
btree_node_write_done(c, b);
+ return;
+err:
+ set_btree_node_noevict(b);
+ bch2_fs_fatal_error(c, "fatal error writing btree node");
+ goto out;
}
static void btree_node_write_endio(struct bio *bio)
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
struct bch_write_bio *orig = parent ?: wbio;
+ struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
struct bch_fs *c = wbio->c;
+ struct btree *b = wbio->bio.bi_private;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
unsigned long flags;
if (parent) {
bio_put(bio);
bio_endio(&parent->bio);
- } else {
- struct btree_write_bio *wb =
- container_of(orig, struct btree_write_bio, wbio);
-
- INIT_WORK(&wb->work, btree_node_write_work);
- queue_work(system_unbound_wq, &wb->work);
+ return;
}
+
+ clear_btree_node_write_in_flight_inner(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
+ INIT_WORK(&wb->work, btree_node_write_work);
+ queue_work(c->btree_io_complete_wq, &wb->work);
}
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
return -1;
ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
- validate_bset(c, NULL, b, i, sectors, WRITE, false);
+ validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false);
if (ret) {
bch2_inconsistent_error(c);
dump_stack();
return ret;
}
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held)
+static void btree_write_submit(struct work_struct *work)
+{
+ struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+ struct bch_extent_ptr *ptr;
+ __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+
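+ /*
+ * Editor's note: the pointers in the key give the start of the node;
+ * this bset is being appended sector_offset sectors in, so offset each
+ * pointer before submitting (e.g. a node at sector 1024 with 8 sectors
+ * already written submits this bio at sector 1032).
+ */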
+ bkey_copy(&tmp.k, &wbio->key);
+
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
+ ptr->offset += wbio->sector_offset;
+
+ bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
+}
+
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
{
struct btree_write_bio *wbio;
struct bset_tree *t;
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
- struct bkey_buf k;
- struct bch_extent_ptr *ptr;
struct sort_iter sort_iter;
struct nonce nonce;
unsigned bytes_to_write, sectors_to_write, bytes, u64s;
bool validate_before_checksum = false;
void *data;
- bch2_bkey_buf_init(&k);
+ if (already_started)
+ goto do_write;
if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
return;
if (old & (1 << BTREE_NODE_never_write))
return;
- if (old & (1 << BTREE_NODE_write_in_flight)) {
- btree_node_wait_on_io(b);
- continue;
- }
+ BUG_ON(old & (1 << BTREE_NODE_write_in_flight));
new &= ~(1 << BTREE_NODE_dirty);
new &= ~(1 << BTREE_NODE_need_write);
new |= (1 << BTREE_NODE_write_in_flight);
+ new |= (1 << BTREE_NODE_write_in_flight_inner);
new |= (1 << BTREE_NODE_just_written);
new ^= (1 << BTREE_NODE_write_idx);
} while (cmpxchg_acquire(&b->flags, old, new) != old);
+ if (new & (1U << BTREE_NODE_need_write))
+ return;
+do_write:
atomic_dec(&c->btree_cache.dirty);
BUG_ON(btree_node_fake(b));
/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
bytes += 8;
+ /* buffer must be a multiple of the block size */
+ bytes = round_up(bytes, block_bytes(c));
+
data = btree_bounce_alloc(c, bytes, &used_mempool);
if (!b->written) {
i->version = c->sb.version < bcachefs_metadata_version_new_versioning
? cpu_to_le16(BCH_BSET_VERSION_OLD)
: cpu_to_le16(c->sb.version);
+ SET_BSET_OFFSET(i, b->written);
SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
struct btree_write_bio, wbio.bio);
wbio_init(&wbio->wbio.bio);
wbio->data = data;
- wbio->bytes = bytes;
+ wbio->data_bytes = bytes;
+ wbio->sector_offset = b->written;
+ wbio->wbio.c = c;
wbio->wbio.used_mempool = used_mempool;
+ wbio->wbio.first_btree_write = !b->written;
wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
wbio->wbio.bio.bi_end_io = btree_node_write_endio;
wbio->wbio.bio.bi_private = b;
bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
- /*
- * If we're appending to a leaf node, we don't technically need FUA -
- * this write just needs to be persisted before the next journal write,
- * which will be marked FLUSH|FUA.
- *
- * Similarly if we're writing a new btree root - the pointer is going to
- * be in the next journal entry.
- *
- * But if we're writing a new btree node (that isn't a root) or
- * appending to a non leaf btree node, we need either FUA or a flush
- * when we write the parent with the new pointer. FUA is cheaper than a
- * flush, and writes appending to leaf nodes aren't blocking anything so
- * just make all btree node writes FUA to keep things sane.
- */
+ bkey_copy(&wbio->key, &b->key);
- bch2_bkey_buf_copy(&k, c, &b->key);
+ b->written += sectors_to_write;
- bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr)
- ptr->offset += b->written;
+ if (wbio->wbio.first_btree_write &&
+ b->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+ cpu_to_le16(b->written);
- b->written += sectors_to_write;
+ if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
+ cpu_to_le16(b->written);
atomic64_inc(&c->btree_writes_nr);
atomic64_add(sectors_to_write, &c->btree_writes_sectors);
- /* XXX: submitting IO with btree locks held: */
- bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
- bch2_bkey_buf_exit(&k, c);
+ INIT_WORK(&wbio->work, btree_write_submit);
+ queue_work(c->io_complete_wq, &wbio->work);
return;
err:
set_btree_node_noevict(b);
+ if (!b->written &&
+ b->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+ cpu_to_le16(sectors_to_write);
b->written += sectors_to_write;
nowrite:
btree_bounce_free(c, bytes, used_mempool, data);
* single bset:
*/
if (b->nsets > 1) {
- btree_node_sort(c, b, NULL, 0, b->nsets, true);
+ btree_node_sort(c, b, 0, b->nsets, true);
invalidated_iter = true;
} else {
invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
* Use this one if the node is intent locked:
*/
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
- enum six_lock_type lock_type_held)
+ enum six_lock_type lock_type_held)
{
- BUG_ON(lock_type_held == SIX_LOCK_write);
-
if (lock_type_held == SIX_LOCK_intent ||
- six_lock_tryupgrade(&b->c.lock)) {
- __bch2_btree_node_write(c, b, SIX_LOCK_intent);
+ (lock_type_held == SIX_LOCK_read &&
+ six_lock_tryupgrade(&b->c.lock))) {
+ __bch2_btree_node_write(c, b, false);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
if (lock_type_held == SIX_LOCK_read)
six_lock_downgrade(&b->c.lock);
} else {
- __bch2_btree_node_write(c, b, SIX_LOCK_read);
+ __bch2_btree_node_write(c, b, false);
+ if (lock_type_held == SIX_LOCK_write &&
+ btree_node_just_written(b))
+ bch2_btree_post_write_cleanup(c, b);
}
}