X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_gc.c;h=9f27cb3ea5633c355dc07a936408908389ac10d7;hb=1f79cf3825e94fcb146d417b6dda9b94c93c7a53;hp=3dd1094d10c9cdf61ef233ac9c94616141ce4286;hpb=700d013b5280b72a1fb3830d8f70ecce5decb0ab;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index 3dd1094..9f27cb3 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -9,6 +9,8 @@
 #include "alloc_foreground.h"
 #include "bkey_methods.h"
 #include "bkey_buf.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update_interior.h"
 #include "btree_io.h"
@@ -26,6 +28,7 @@
 #include "reflink.h"
 #include "replicas.h"
 #include "super-io.h"
+#include "trace.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -34,11 +37,16 @@
 #include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <linux/sched/task.h>
-#include <trace/events/bcachefs.h>
 
 #define DROP_THIS_NODE		10
 #define DROP_PREV_NODE		11
 
+static bool should_restart_for_topology_repair(struct bch_fs *c)
+{
+	return c->opts.fix_errors != FSCK_FIX_no &&
+		!(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
+}
+
 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
 {
 	preempt_disable();
@@ -69,65 +77,72 @@ static int bch2_gc_check_topology(struct bch_fs *c,
 	struct bpos expected_start = bkey_deleted(&prev->k->k)
 		? node_start
 		: bpos_successor(prev->k->k.p);
-	char buf1[200], buf2[200];
+	struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
 	int ret = 0;
 
 	if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
 		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
 
-		if (bkey_deleted(&prev->k->k)) {
-			struct printbuf out = PBUF(buf1);
-			pr_buf(&out, "start of node: ");
-			bch2_bpos_to_text(&out, node_start);
-		} else {
-			bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
-		}
-
-		if (bpos_cmp(expected_start, bp->v.min_key)) {
+		if (!bpos_eq(expected_start, bp->v.min_key)) {
 			bch2_topology_error(c);
 
+			if (bkey_deleted(&prev->k->k)) {
+				prt_printf(&buf1, "start of node: ");
+				bch2_bpos_to_text(&buf1, node_start);
+			} else {
+				bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
+			}
+			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
+
 			if (__fsck_err(c,
-				  FSCK_CAN_FIX|
-				  FSCK_CAN_IGNORE|
-				  FSCK_NO_RATELIMIT,
-				  "btree node with incorrect min_key at btree %s level %u:\n"
-				  "  prev %s\n"
-				  "  cur %s",
-				  bch2_btree_ids[b->c.btree_id], b->c.level,
-				  buf1,
-				  (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) &&
-			    !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+				       FSCK_CAN_FIX|
+				       FSCK_CAN_IGNORE|
+				       FSCK_NO_RATELIMIT,
+				       btree_node_topology_bad_min_key,
+				       "btree node with incorrect min_key at btree %s level %u:\n"
+				       "  prev %s\n"
+				       "  cur %s",
+				       bch2_btree_id_str(b->c.btree_id), b->c.level,
+				       buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
 				bch_info(c, "Halting mark and sweep to start topology repair pass");
-				return FSCK_ERR_START_TOPOLOGY_REPAIR;
+				ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+				goto err;
 			} else {
-				set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+				set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
 			}
 		}
 	}
 
-	if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+	if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
 		bch2_topology_error(c);
 
-		if (__fsck_err(c,
-			  FSCK_CAN_FIX|
-			  FSCK_CAN_IGNORE|
-			  FSCK_NO_RATELIMIT,
+		printbuf_reset(&buf1);
+		printbuf_reset(&buf2);
+
+		bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
+		bch2_bpos_to_text(&buf2, node_end);
+
+		if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
+
btree_node_topology_bad_max_key, "btree node with incorrect max_key at btree %s level %u:\n" " %s\n" " expected %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1), - (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) && - !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) { + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf) && + should_restart_for_topology_repair(c)) { bch_info(c, "Halting mark and sweep to start topology repair pass"); - return FSCK_ERR_START_TOPOLOGY_REPAIR; + ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); + goto err; } else { - set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); + set_bit(BCH_FS_initial_gc_unfixed, &c->flags); } } bch2_bkey_buf_copy(prev, c, cur.k); +err: fsck_err: + printbuf_exit(&buf2); + printbuf_exit(&buf1); return ret; } @@ -155,28 +170,57 @@ static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst) } } +static void bch2_btree_node_update_key_early(struct btree_trans *trans, + enum btree_id btree, unsigned level, + struct bkey_s_c old, struct bkey_i *new) +{ + struct bch_fs *c = trans->c; + struct btree *b; + struct bkey_buf tmp; + int ret; + + bch2_bkey_buf_init(&tmp); + bch2_bkey_buf_reassemble(&tmp, c, old); + + b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true); + if (!IS_ERR_OR_NULL(b)) { + mutex_lock(&c->btree_cache.lock); + + bch2_btree_node_hash_remove(&c->btree_cache, b); + + bkey_copy(&b->key, new); + ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); + BUG_ON(ret); + + mutex_unlock(&c->btree_cache.lock); + six_unlock_read(&b->c.lock); + } + + bch2_bkey_buf_exit(&tmp, c); +} + static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min) { struct bkey_i_btree_ptr_v2 *new; int ret; - new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL); + new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL); if (!new) - return -ENOMEM; + return -BCH_ERR_ENOMEM_gc_repair_key; btree_ptr_to_v2(b, new); b->data->min_key = new_min; new->v.min_key = new_min; SET_BTREE_PTR_RANGE_UPDATED(&new->v, true); - ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i); + ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i); if (ret) { kfree(new); return ret; } bch2_btree_node_drop_keys_outside_node(b); - + bkey_copy(&b->key, &new->k_i); return 0; } @@ -189,16 +233,16 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max) if (ret) return ret; - new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL); + new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL); if (!new) - return -ENOMEM; + return -BCH_ERR_ENOMEM_gc_repair_key; btree_ptr_to_v2(b, new); b->data->max_key = new_max; new->k.p = new_max; SET_BTREE_PTR_RANGE_UPDATED(&new->v, true); - ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i); + ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i); if (ret) { kfree(new); return ret; @@ -222,95 +266,112 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b, struct bpos expected_start = !prev ? 
b->data->min_key : bpos_successor(prev->key.k.p); - char buf1[200], buf2[200]; + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; int ret = 0; if (!prev) { - struct printbuf out = PBUF(buf1); - pr_buf(&out, "start of node: "); - bch2_bpos_to_text(&out, b->data->min_key); + prt_printf(&buf1, "start of node: "); + bch2_bpos_to_text(&buf1, b->data->min_key); } else { - bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key)); + bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key)); } - bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key)); + bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key)); if (prev && - bpos_cmp(expected_start, cur->data->min_key) > 0 && + bpos_gt(expected_start, cur->data->min_key) && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev: */ - if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key, - cur->data->min_key) >= 0, c, + if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key, + cur->data->min_key), c, + btree_node_topology_overwritten_by_next_node, "btree node overwritten by next node at btree %s level %u:\n" " node %s\n" " next %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - buf1, buf2)) - return DROP_PREV_NODE; + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf)) { + ret = DROP_PREV_NODE; + goto out; + } - if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p, + if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p, bpos_predecessor(cur->data->min_key)), c, + btree_node_topology_bad_max_key, "btree node with incorrect max_key at btree %s level %u:\n" " node %s\n" " next %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - buf1, buf2)) + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf)) ret = set_node_max(c, prev, bpos_predecessor(cur->data->min_key)); } else { /* prev overwrites cur: */ - if (mustfix_fsck_err_on(bpos_cmp(expected_start, - cur->data->max_key) >= 0, c, + if (mustfix_fsck_err_on(bpos_ge(expected_start, + cur->data->max_key), c, + btree_node_topology_overwritten_by_prev_node, "btree node overwritten by prev node at btree %s level %u:\n" " prev %s\n" " node %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - buf1, buf2)) - return DROP_THIS_NODE; + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf)) { + ret = DROP_THIS_NODE; + goto out; + } - if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c, + if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c, + btree_node_topology_bad_min_key, "btree node with incorrect min_key at btree %s level %u:\n" " prev %s\n" " node %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - buf1, buf2)) - ret = set_node_min(c, cur, expected_start); + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf)) + ret = set_node_min(c, cur, expected_start); } +out: fsck_err: + printbuf_exit(&buf2); + printbuf_exit(&buf1); return ret; } static int btree_repair_node_end(struct bch_fs *c, struct btree *b, struct btree *child) { - char buf1[200], buf2[200]; + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; int ret = 0; - if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c, + bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key)); + bch2_bpos_to_text(&buf2, b->key.k.p); + + if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c, + btree_node_topology_bad_max_key, "btree node with incorrect max_key at btree %s level %u:\n" " %s\n" " expected %s", - bch2_btree_ids[b->c.btree_id], b->c.level, - (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1), 
- (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) { + bch2_btree_id_str(b->c.btree_id), b->c.level, + buf1.buf, buf2.buf)) { ret = set_node_max(c, child, b->key.k.p); if (ret) - return ret; + goto err; } +err: fsck_err: + printbuf_exit(&buf2); + printbuf_exit(&buf1); return ret; } -static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b) +static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b) { + struct bch_fs *c = trans->c; struct btree_and_journal_iter iter; struct bkey_s_c k; struct bkey_buf prev_k, cur_k; struct btree *prev = NULL, *cur = NULL; bool have_child, dropped_children = false; - char buf[200]; + struct printbuf buf = PRINTBUF; int ret = 0; if (!b->c.level) @@ -323,44 +384,48 @@ again: bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { - BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); - BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + BUG_ON(bpos_lt(k.k->p, b->data->min_key)); + BUG_ON(bpos_gt(k.k->p, b->data->max_key)); bch2_btree_and_journal_iter_advance(&iter); bch2_bkey_buf_reassemble(&cur_k, c, k); - cur = bch2_btree_node_get_noiter(c, cur_k.k, + cur = bch2_btree_node_get_noiter(trans, cur_k.k, b->c.btree_id, b->c.level - 1, false); ret = PTR_ERR_OR_ZERO(cur); + printbuf_reset(&buf); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k)); + if (mustfix_fsck_err_on(ret == -EIO, c, - "Unreadable btree node at btree %s level %u:\n" + btree_node_unreadable, + "Topology repair: unreadable btree node at btree %s level %u:\n" " %s", - bch2_btree_ids[b->c.btree_id], + bch2_btree_id_str(b->c.btree_id), b->c.level - 1, - (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) { - bch2_btree_node_evict(c, cur_k.k); + buf.buf)) { + bch2_btree_node_evict(trans, cur_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level, cur_k.k->k.p); + cur = NULL; if (ret) break; continue; } - if (ret) { - bch_err(c, "%s: error %i getting btree node", - __func__, ret); + bch_err_msg(c, ret, "getting btree node"); + if (ret) break; - } ret = btree_repair_node_boundaries(c, b, prev, cur); if (ret == DROP_THIS_NODE) { six_unlock_read(&cur->c.lock); - bch2_btree_node_evict(c, cur_k.k); + bch2_btree_node_evict(trans, cur_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level, cur_k.k->k.p); + cur = NULL; if (ret) break; continue; @@ -371,7 +436,7 @@ again: prev = NULL; if (ret == DROP_PREV_NODE) { - bch2_btree_node_evict(c, prev_k.k); + bch2_btree_node_evict(trans, prev_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level, prev_k.k->k.p); if (ret) @@ -411,23 +476,21 @@ again: bch2_bkey_buf_reassemble(&cur_k, c, k); bch2_btree_and_journal_iter_advance(&iter); - cur = bch2_btree_node_get_noiter(c, cur_k.k, + cur = bch2_btree_node_get_noiter(trans, cur_k.k, b->c.btree_id, b->c.level - 1, false); ret = PTR_ERR_OR_ZERO(cur); - if (ret) { - bch_err(c, "%s: error %i getting btree node", - __func__, ret); + bch_err_msg(c, ret, "getting btree node"); + if (ret) goto err; - } - ret = bch2_btree_repair_topology_recurse(c, cur); + ret = bch2_btree_repair_topology_recurse(trans, cur); six_unlock_read(&cur->c.lock); cur = NULL; if (ret == DROP_THIS_NODE) { - bch2_btree_node_evict(c, cur_k.k); + bch2_btree_node_evict(trans, cur_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level, cur_k.k->k.p); dropped_children = true; @@ -439,12 +502,15 @@ again: have_child = true; } + printbuf_reset(&buf); + bch2_bkey_val_to_text(&buf, c, 
bkey_i_to_s_c(&b->key)); + if (mustfix_fsck_err_on(!have_child, c, + btree_node_topology_interior_node_empty, "empty interior btree node at btree %s level %u\n" " %s", - bch2_btree_ids[b->c.btree_id], - b->c.level, - (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf))) + bch2_btree_id_str(b->c.btree_id), + b->c.level, buf.buf)) ret = DROP_THIS_NODE; err: fsck_err: @@ -460,127 +526,167 @@ fsck_err: if (!ret && dropped_children) goto again; + printbuf_exit(&buf); return ret; } -static int bch2_repair_topology(struct bch_fs *c) +int bch2_check_topology(struct bch_fs *c) { + struct btree_trans *trans = bch2_trans_get(c); struct btree *b; unsigned i; int ret = 0; - for (i = 0; i < BTREE_ID_NR && !ret; i++) { - b = c->btree_roots[i].b; + for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) { + struct btree_root *r = bch2_btree_id_root(c, i); + + if (!r->alive) + continue; + + b = r->b; if (btree_node_fake(b)) continue; - six_lock_read(&b->c.lock, NULL, NULL); - ret = bch2_btree_repair_topology_recurse(c, b); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); + ret = bch2_btree_repair_topology_recurse(trans, b); six_unlock_read(&b->c.lock); if (ret == DROP_THIS_NODE) { bch_err(c, "empty btree root - repair unimplemented"); - ret = FSCK_ERR_EXIT; + ret = -BCH_ERR_fsck_repair_unimplemented; } } + bch2_trans_put(trans); + return ret; } -static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, +static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id, unsigned level, bool is_root, struct bkey_s_c *k) { - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k); - const union bch_extent_entry *entry; + struct bch_fs *c = trans->c; + struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k); + const union bch_extent_entry *entry_c; struct extent_ptr_decoded p = { 0 }; bool do_update = false; - char buf[200]; + struct printbuf buf = PRINTBUF; int ret = 0; - bkey_for_each_ptr_decode(k->k, ptrs, p, entry) { + /* + * XXX + * use check_bucket_ref here + */ + bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) { struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev); - struct bucket *g = PTR_BUCKET(ca, &p.ptr, true); - struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false); - enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr); - - if (fsck_err_on(g->mark.data_type && - g->mark.data_type != data_type, c, - "bucket %u:%zu different types of data in same bucket: %s, %s\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_types[g->mark.data_type], - bch2_data_types[data_type], - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { - if (data_type == BCH_DATA_btree) { - g2->_mark.data_type = g->_mark.data_type = data_type; - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); + struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); + enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr); + + if (!g->gen_valid && + (c->opts.reconstruct_alloc || + fsck_err(c, ptr_to_missing_alloc_key, + "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_types[ptr_data_type(k->k, &p.ptr)], + p.ptr.gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) { + if (!p.ptr.cached) { + g->gen_valid = true; + g->gen = p.ptr.gen; } else { do_update = true; } } - if (fsck_err_on(!g->gen_valid, c, - "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - 
bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen, - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { + if (gen_cmp(p.ptr.gen, g->gen) > 0 && + (c->opts.reconstruct_alloc || + fsck_err(c, ptr_gen_newer_than_bucket_gen, + "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_types[ptr_data_type(k->k, &p.ptr)], + p.ptr.gen, g->gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) { if (!p.ptr.cached) { - g2->_mark.gen = g->_mark.gen = p.ptr.gen; - g2->gen_valid = g->gen_valid = true; - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); + g->gen_valid = true; + g->gen = p.ptr.gen; + g->data_type = 0; + g->dirty_sectors = 0; + g->cached_sectors = 0; + set_bit(BCH_FS_need_another_gc, &c->flags); } else { do_update = true; } } - if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c, - "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" + if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX && + (c->opts.reconstruct_alloc || + fsck_err(c, ptr_gen_newer_than_bucket_gen, + "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, + bch2_data_types[ptr_data_type(k->k, &p.ptr)], + p.ptr.gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) + do_update = true; + + if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 && + (c->opts.reconstruct_alloc || + fsck_err(c, stale_dirty_ptr, + "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_types[ptr_data_type(k->k, &p.ptr)], + p.ptr.gen, g->gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) + do_update = true; + + if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) + continue; + + if (fsck_err_on(bucket_data_type(g->data_type) && + bucket_data_type(g->data_type) != data_type, c, + ptr_bucket_data_type_mismatch, + "bucket %u:%zu different types of data in same bucket: %s, %s\n" "while marking %s", p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen, g->mark.gen, - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) { - if (!p.ptr.cached) { - g2->_mark.gen = g->_mark.gen = p.ptr.gen; - g2->gen_valid = g->gen_valid = true; - g2->_mark.data_type = 0; - g2->_mark.dirty_sectors = 0; - g2->_mark.cached_sectors = 0; - set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags); - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); + bch2_data_types[g->data_type], + bch2_data_types[data_type], + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) { + if (data_type == BCH_DATA_btree) { + g->data_type = data_type; + set_bit(BCH_FS_need_another_gc, &c->flags); } else { do_update = true; } } - if (fsck_err_on(!p.ptr.cached && - gen_cmp(p.ptr.gen, g->mark.gen) < 0, c, - "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_types[ptr_data_type(k->k, &p.ptr)], - p.ptr.gen, g->mark.gen, - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) - do_update = true; - if (p.has_ec) { - struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx); + struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); if (fsck_err_on(!m || !m->alive, c, + ptr_to_missing_stripe, "pointer to nonexistent stripe %llu\n" "while marking %s", (u64) p.ec.idx, - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) + (printbuf_reset(&buf), + 
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) do_update = true; - if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c, + if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, + ptr_to_incorrect_stripe, "pointer does not match stripe %llu\n" "while marking %s", (u64) p.ec.idx, - (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) do_update = true; } } @@ -593,13 +699,15 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, if (is_root) { bch_err(c, "cannot update btree roots yet"); - return -EINVAL; + ret = -EINVAL; + goto err; } new = kmalloc(bkey_bytes(k->k), GFP_KERNEL); if (!new) { - bch_err(c, "%s: error allocating new key", __func__); - return -ENOMEM; + ret = -BCH_ERR_ENOMEM_gc_repair_key; + bch_err_msg(c, ret, "allocating new key"); + goto err; } bkey_reassemble(new, *k); @@ -613,28 +721,29 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id, ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); bkey_for_each_ptr(ptrs, ptr) { struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - struct bucket *g = PTR_BUCKET(ca, ptr, true); + struct bucket *g = PTR_GC_BUCKET(ca, ptr); - ptr->gen = g->mark.gen; + ptr->gen = g->gen; } } else { bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - struct bucket *g = PTR_BUCKET(ca, ptr, true); + struct bucket *g = PTR_GC_BUCKET(ca, ptr); enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr); (ptr->cached && - (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) || + (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) || (!ptr->cached && - gen_cmp(ptr->gen, g->mark.gen) < 0) || - (g->mark.data_type && - g->mark.data_type != data_type); + gen_cmp(ptr->gen, g->gen) < 0) || + gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX || + (g->data_type && + g->data_type != data_type); })); again: ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); bkey_extent_entry_for_each(ptrs, entry) { if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) { - struct stripe *m = genradix_ptr(&c->stripes[true], + struct gc_stripe *m = genradix_ptr(&c->gc_stripes, entry->stripe_ptr.idx); union bch_extent_entry *next_ptr; @@ -659,87 +768,83 @@ found: } } - ret = bch2_journal_key_insert(c, btree_id, level, new); - if (ret) + ret = bch2_journal_key_insert_take(c, btree_id, level, new); + if (ret) { kfree(new); - else - *k = bkey_i_to_s_c(new); + goto err; + } + + if (level) + bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new); + + if (0) { + printbuf_reset(&buf); + bch2_bkey_val_to_text(&buf, c, *k); + bch_info(c, "updated %s", buf.buf); + + printbuf_reset(&buf); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new)); + bch_info(c, "new key %s", buf.buf); + } + + *k = bkey_i_to_s_c(new); } +err: fsck_err: + printbuf_exit(&buf); return ret; } /* marking of btree keys/nodes: */ -static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id, +static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, unsigned level, bool is_root, struct bkey_s_c *k, - u8 *max_stale, bool initial) + bool initial) { - struct bkey_ptrs_c ptrs; - const struct bch_extent_ptr *ptr; + struct bch_fs *c = trans->c; + struct bkey deleted = KEY(0, 0, 0); + struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL }; unsigned flags = - BTREE_TRIGGER_INSERT| BTREE_TRIGGER_GC| (initial ? 
BTREE_TRIGGER_NOATOMIC : 0); int ret = 0; + deleted.p = k->k->p; + if (initial) { BUG_ON(bch2_journal_seq_verify && - k->k->version.lo > journal_cur_seq(&c->journal)); + k->k->version.lo > atomic64_read(&c->journal.seq)); - ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k); + ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k); if (ret) goto err; if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c, + bkey_version_in_future, "key version number higher than recorded: %llu > %llu", k->k->version.lo, atomic64_read(&c->key_version))) atomic64_set(&c->key_version, k->k->version.lo); - - if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || - fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c, - "superblock not marked as containing replicas (type %u)", - k->k->type)) { - ret = bch2_mark_bkey_replicas(c, *k); - if (ret) { - bch_err(c, "error marking bkey replicas: %i", ret); - goto err; - } - } } - ptrs = bch2_bkey_ptrs_c(*k); - bkey_for_each_ptr(ptrs, ptr) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - struct bucket *g = PTR_BUCKET(ca, ptr, true); - - if (gen_after(g->oldest_gen, ptr->gen)) - g->oldest_gen = ptr->gen; - - *max_stale = max(*max_stale, ptr_stale(ca, ptr)); - } - - bch2_mark_key(c, *k, flags); + ret = commit_do(trans, NULL, NULL, 0, + bch2_mark_key(trans, btree_id, level, old, *k, flags)); fsck_err: err: - if (ret) - bch_err(c, "%s: ret %i", __func__, ret); + bch_err_fn(c, ret); return ret; } -static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, - bool initial) +static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial) { + struct bch_fs *c = trans->c; struct btree_node_iter iter; struct bkey unpacked; struct bkey_s_c k; struct bkey_buf prev, cur; int ret = 0; - *max_stale = 0; - if (!btree_node_type_needs_gc(btree_node_type(b))) return 0; @@ -749,8 +854,8 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, bkey_init(&prev.k->k); while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) { - ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false, - &k, max_stale, initial); + ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false, + &k, initial); if (ret) break; @@ -771,62 +876,39 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale, return ret; } -static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, +static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id, bool initial, bool metadata_only) { - struct btree_trans trans; - struct btree_iter *iter; + struct bch_fs *c = trans->c; + struct btree_iter iter; struct btree *b; - unsigned depth = metadata_only ? 1 - : bch2_expensive_debug_checks ? 0 - : !btree_node_type_needs_gc(btree_id) ? 1 - : 0; - u8 max_stale = 0; + unsigned depth = metadata_only ? 
1 : 0; int ret = 0; - bch2_trans_init(&trans, c, 0, 0); - gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0)); - __for_each_btree_node(&trans, iter, btree_id, POS_MIN, - 0, depth, BTREE_ITER_PREFETCH, b) { + __for_each_btree_node(trans, iter, btree_id, POS_MIN, + 0, depth, BTREE_ITER_PREFETCH, b, ret) { bch2_verify_btree_nr_keys(b); gc_pos_set(c, gc_pos_btree_node(b)); - ret = btree_gc_mark_node(c, b, &max_stale, initial); + ret = btree_gc_mark_node(trans, b, initial); if (ret) break; - - if (!initial) { - if (max_stale > 64) - bch2_btree_node_rewrite(&trans, iter, - b->data->keys.seq, - BTREE_INSERT_NOWAIT| - BTREE_INSERT_GC_LOCK_HELD); - else if (!bch2_btree_gc_rewrite_disabled && - (bch2_btree_gc_always_rewrite || max_stale > 16)) - bch2_btree_node_rewrite(&trans, iter, - b->data->keys.seq, - BTREE_INSERT_NOWAIT| - BTREE_INSERT_GC_LOCK_HELD); - } - - bch2_trans_cond_resched(&trans); } - bch2_trans_iter_put(&trans, iter); + bch2_trans_iter_exit(trans, &iter); - ret = bch2_trans_exit(&trans) ?: ret; if (ret) return ret; mutex_lock(&c->btree_root_lock); - b = c->btree_roots[btree_id].b; + b = bch2_btree_id_root(c, btree_id)->b; if (!btree_node_fake(b)) { struct bkey_s_c k = bkey_i_to_s_c(&b->key); - ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true, - &k, &max_stale, initial); + ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, + true, &k, initial); } gc_pos_set(c, gc_pos_btree_root(b->c.btree_id)); mutex_unlock(&c->btree_root_lock); @@ -834,14 +916,14 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, return ret; } -static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, +static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b, unsigned target_depth) { + struct bch_fs *c = trans->c; struct btree_and_journal_iter iter; struct bkey_s_c k; struct bkey_buf cur, prev; - u8 max_stale = 0; - char buf[200]; + struct printbuf buf = PRINTBUF; int ret = 0; bch2_btree_and_journal_iter_init_node_iter(&iter, c, b); @@ -850,15 +932,13 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, bkey_init(&prev.k->k); while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { - BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0); - BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0); + BUG_ON(bpos_lt(k.k->p, b->data->min_key)); + BUG_ON(bpos_gt(k.k->p, b->data->max_key)); - ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false, - &k, &max_stale, true); - if (ret) { - bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret); + ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, + false, &k, true); + if (ret) goto fsck_err; - } if (b->c.level) { bch2_bkey_buf_reassemble(&cur, c, k); @@ -886,7 +966,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, bch2_bkey_buf_reassemble(&cur, c, k); bch2_btree_and_journal_iter_advance(&iter); - child = bch2_btree_node_get_noiter(c, cur.k, + child = bch2_btree_node_get_noiter(trans, cur.k, b->c.btree_id, b->c.level - 1, false); ret = PTR_ERR_OR_ZERO(child); @@ -898,29 +978,30 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b, FSCK_CAN_FIX| FSCK_CAN_IGNORE| FSCK_NO_RATELIMIT, + btree_node_read_error, "Unreadable btree node at btree %s level %u:\n" " %s", - bch2_btree_ids[b->c.btree_id], + bch2_btree_id_str(b->c.btree_id), b->c.level - 1, - (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) && - !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) { - ret = FSCK_ERR_START_TOPOLOGY_REPAIR; + 
(printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) && + should_restart_for_topology_repair(c)) { bch_info(c, "Halting mark and sweep to start topology repair pass"); + ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); goto fsck_err; } else { /* Continue marking when opted to not * fix the error: */ ret = 0; - set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags); + set_bit(BCH_FS_initial_gc_unfixed, &c->flags); continue; } } else if (ret) { - bch_err(c, "%s: error %i getting btree node", - __func__, ret); + bch_err_msg(c, ret, "getting btree node"); break; } - ret = bch2_gc_btree_init_recurse(c, child, + ret = bch2_gc_btree_init_recurse(trans, child, target_depth); six_unlock_read(&child->c.lock); @@ -932,58 +1013,60 @@ fsck_err: bch2_bkey_buf_exit(&cur, c); bch2_bkey_buf_exit(&prev, c); bch2_btree_and_journal_iter_exit(&iter); + printbuf_exit(&buf); return ret; } -static int bch2_gc_btree_init(struct bch_fs *c, +static int bch2_gc_btree_init(struct btree_trans *trans, enum btree_id btree_id, bool metadata_only) { + struct bch_fs *c = trans->c; struct btree *b; - unsigned target_depth = metadata_only ? 1 - : bch2_expensive_debug_checks ? 0 - : !btree_node_type_needs_gc(btree_id) ? 1 - : 0; - u8 max_stale = 0; - char buf[100]; + unsigned target_depth = metadata_only ? 1 : 0; + struct printbuf buf = PRINTBUF; int ret = 0; - b = c->btree_roots[btree_id].b; + b = bch2_btree_id_root(c, btree_id)->b; if (btree_node_fake(b)) return 0; six_lock_read(&b->c.lock, NULL, NULL); - if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c, - "btree root with incorrect min_key: %s", - (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) { + printbuf_reset(&buf); + bch2_bpos_to_text(&buf, b->data->min_key); + if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c, + btree_root_bad_min_key, + "btree root with incorrect min_key: %s", buf.buf)) { bch_err(c, "repair unimplemented"); - ret = FSCK_ERR_EXIT; + ret = -BCH_ERR_fsck_repair_unimplemented; goto fsck_err; } - if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c, - "btree root with incorrect max_key: %s", - (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) { + printbuf_reset(&buf); + bch2_bpos_to_text(&buf, b->data->max_key); + if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c, + btree_root_bad_max_key, + "btree root with incorrect max_key: %s", buf.buf)) { bch_err(c, "repair unimplemented"); - ret = FSCK_ERR_EXIT; + ret = -BCH_ERR_fsck_repair_unimplemented; goto fsck_err; } if (b->c.level >= target_depth) - ret = bch2_gc_btree_init_recurse(c, b, target_depth); + ret = bch2_gc_btree_init_recurse(trans, b, target_depth); if (!ret) { struct bkey_s_c k = bkey_i_to_s_c(&b->key); - ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true, - &k, &max_stale, true); + ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true, + &k, true); } fsck_err: six_unlock_read(&b->c.lock); - if (ret < 0) - bch_err(c, "%s: ret %i", __func__, ret); + bch_err_fn(c, ret); + printbuf_exit(&buf); return ret; } @@ -995,6 +1078,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r) static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only) { + struct btree_trans *trans = bch2_trans_get(c); enum btree_id ids[BTREE_ID_NR]; unsigned i; int ret = 0; @@ -1005,11 +1089,20 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only) for (i = 0; i < BTREE_ID_NR && !ret; i++) ret = initial - ? 
bch2_gc_btree_init(c, ids[i], metadata_only) - : bch2_gc_btree(c, ids[i], initial, metadata_only); + ? bch2_gc_btree_init(trans, ids[i], metadata_only) + : bch2_gc_btree(trans, ids[i], initial, metadata_only); - if (ret < 0) - bch_err(c, "%s: ret %i", __func__, ret); + for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) { + if (!bch2_btree_id_root(c, i)->alive) + continue; + + ret = initial + ? bch2_gc_btree_init(trans, i, metadata_only) + : bch2_gc_btree(trans, i, initial, metadata_only); + } + + bch2_trans_put(trans); + bch_err_fn(c, ret); return ret; } @@ -1031,23 +1124,13 @@ static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca, } while (start < end); } -void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca, - unsigned flags) +static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca, + unsigned flags) { struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; unsigned i; u64 b; - /* - * This conditional is kind of gross, but we may be called from the - * device add path, before the new device has actually been added to the - * running filesystem: - */ - if (c) { - lockdep_assert_held(&c->sb_lock); - percpu_down_read(&c->mark_lock); - } - for (i = 0; i < layout->nr_superblocks; i++) { u64 offset = le64_to_cpu(layout->sb_offset[i]); @@ -1066,20 +1149,14 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca, ca->mi.bucket_size, gc_phase(GC_PHASE_SB), flags); } - - if (c) - percpu_up_read(&c->mark_lock); } static void bch2_mark_superblocks(struct bch_fs *c) { - struct bch_dev *ca; - unsigned i; - mutex_lock(&c->sb_lock); gc_pos_set(c, gc_phase(GC_PHASE_SB)); - for_each_online_member(ca, c, i) + for_each_online_member(c, ca) bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC); mutex_unlock(&c->sb_lock); } @@ -1096,8 +1173,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c) for_each_pending_btree_node_free(c, as, d) if (d->index_update_done) - bch2_mark_key(c, bkey_i_to_s_c(&d->key), - BTREE_TRIGGER_INSERT|BTREE_TRIGGER_GC); + bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC); mutex_unlock(&c->btree_interior_update_lock); } @@ -1105,16 +1181,14 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c) static void bch2_gc_free(struct bch_fs *c) { - struct bch_dev *ca; - unsigned i; - - genradix_free(&c->stripes[1]); + genradix_free(&c->reflink_gc_table); + genradix_free(&c->gc_stripes); - for_each_member_device(ca, c, i) { - kvpfree(rcu_dereference_protected(ca->buckets[1], 1), + for_each_member_device(c, ca) { + kvpfree(rcu_dereference_protected(ca->buckets_gc, 1), sizeof(struct bucket_array) + ca->mi.nbuckets * sizeof(struct bucket)); - ca->buckets[1] = NULL; + ca->buckets_gc = NULL; free_percpu(ca->usage_gc); ca->usage_gc = NULL; @@ -1128,249 +1202,369 @@ static int bch2_gc_done(struct bch_fs *c, bool initial, bool metadata_only) { struct bch_dev *ca = NULL; - bool verify = !metadata_only && (!initial || - (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info))); - unsigned i, dev; + struct printbuf buf = PRINTBUF; + bool verify = !metadata_only && + !c->opts.reconstruct_alloc && + (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info))); + unsigned i; int ret = 0; -#define copy_field(_f, _msg, ...) \ - if (dst->_f != src->_f) { \ - if (verify) \ - fsck_err(c, _msg ": got %llu, should be %llu" \ - , ##__VA_ARGS__, dst->_f, src->_f); \ - dst->_f = src->_f; \ - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \ - } -#define copy_stripe_field(_f, _msg, ...) 
\ - if (dst->_f != src->_f) { \ - if (verify) \ - fsck_err(c, "stripe %zu has wrong "_msg \ - ": got %u, should be %u", \ - iter.pos, ##__VA_ARGS__, \ - dst->_f, src->_f); \ - dst->_f = src->_f; \ - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \ - } -#define copy_bucket_field(_f) \ - if (dst->b[b].mark._f != src->b[b].mark._f) { \ - if (verify) \ - fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \ - ": got %u, should be %u", dev, b, \ - dst->b[b].mark.gen, \ - bch2_data_types[dst->b[b].mark.data_type],\ - dst->b[b].mark._f, src->b[b].mark._f); \ - dst->b[b]._mark._f = src->b[b].mark._f; \ - set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \ - } -#define copy_dev_field(_f, _msg, ...) \ - copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__) -#define copy_fs_field(_f, _msg, ...) \ - copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__) - - if (!metadata_only) { - struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0); - struct stripe *dst, *src; - - while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) { - dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL); - - if (dst->alive != src->alive || - dst->sectors != src->sectors || - dst->algorithm != src->algorithm || - dst->nr_blocks != src->nr_blocks || - dst->nr_redundant != src->nr_redundant) { - bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused"); - ret = -EINVAL; - goto fsck_err; - } - - for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++) - copy_stripe_field(block_sectors[i], - "block_sectors[%u]", i); - - dst->blocks_nonempty = 0; - for (i = 0; i < dst->nr_blocks; i++) - dst->blocks_nonempty += dst->block_sectors[i] != 0; + percpu_down_write(&c->mark_lock); - genradix_iter_advance(&iter, &c->stripes[1]); - } - } +#define copy_field(_err, _f, _msg, ...) \ + if (dst->_f != src->_f && \ + (!verify || \ + fsck_err(c, _err, _msg ": got %llu, should be %llu" \ + , ##__VA_ARGS__, dst->_f, src->_f))) \ + dst->_f = src->_f +#define copy_dev_field(_err, _f, _msg, ...) \ + copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__) +#define copy_fs_field(_err, _f, _msg, ...) 
\ + copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__) for (i = 0; i < ARRAY_SIZE(c->usage); i++) bch2_fs_usage_acc_to_base(c, i); - for_each_member_device(ca, c, dev) { - struct bucket_array *dst = __bucket_array(ca, 0); - struct bucket_array *src = __bucket_array(ca, 1); - size_t b; - - for (b = 0; b < src->nbuckets; b++) { - copy_bucket_field(gen); - copy_bucket_field(data_type); - copy_bucket_field(stripe); - copy_bucket_field(dirty_sectors); - copy_bucket_field(cached_sectors); - - dst->b[b].oldest_gen = src->b[b].oldest_gen; - } - - { - struct bch_dev_usage *dst = ca->usage_base; - struct bch_dev_usage *src = (void *) - bch2_acc_percpu_u64s((void *) ca->usage_gc, - dev_usage_u64s()); - - copy_dev_field(buckets_ec, "buckets_ec"); - copy_dev_field(buckets_unavailable, "buckets_unavailable"); - - for (i = 0; i < BCH_DATA_NR; i++) { - copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]); - copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]); - copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]); - } + __for_each_member_device(c, ca) { + struct bch_dev_usage *dst = ca->usage_base; + struct bch_dev_usage *src = (void *) + bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc, + dev_usage_u64s()); + + for (i = 0; i < BCH_DATA_NR; i++) { + copy_dev_field(dev_usage_buckets_wrong, + d[i].buckets, "%s buckets", bch2_data_types[i]); + copy_dev_field(dev_usage_sectors_wrong, + d[i].sectors, "%s sectors", bch2_data_types[i]); + copy_dev_field(dev_usage_fragmented_wrong, + d[i].fragmented, "%s fragmented", bch2_data_types[i]); } - }; + } { unsigned nr = fs_usage_u64s(c); struct bch_fs_usage *dst = c->usage_base; struct bch_fs_usage *src = (void *) - bch2_acc_percpu_u64s((void *) c->usage_gc, nr); + bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr); - copy_fs_field(hidden, "hidden"); - copy_fs_field(btree, "btree"); + copy_fs_field(fs_usage_hidden_wrong, + hidden, "hidden"); + copy_fs_field(fs_usage_btree_wrong, + btree, "btree"); if (!metadata_only) { - copy_fs_field(data, "data"); - copy_fs_field(cached, "cached"); - copy_fs_field(reserved, "reserved"); - copy_fs_field(nr_inodes,"nr_inodes"); + copy_fs_field(fs_usage_data_wrong, + data, "data"); + copy_fs_field(fs_usage_cached_wrong, + cached, "cached"); + copy_fs_field(fs_usage_reserved_wrong, + reserved, "reserved"); + copy_fs_field(fs_usage_nr_inodes_wrong, + nr_inodes,"nr_inodes"); for (i = 0; i < BCH_REPLICAS_MAX; i++) - copy_fs_field(persistent_reserved[i], + copy_fs_field(fs_usage_persistent_reserved_wrong, + persistent_reserved[i], "persistent_reserved[%i]", i); } for (i = 0; i < c->replicas.nr; i++) { - struct bch_replicas_entry *e = + struct bch_replicas_entry_v1 *e = cpu_replicas_entry(&c->replicas, i); - char buf[80]; if (metadata_only && (e->data_type == BCH_DATA_user || e->data_type == BCH_DATA_cached)) continue; - bch2_replicas_entry_to_text(&PBUF(buf), e); + printbuf_reset(&buf); + bch2_replicas_entry_to_text(&buf, e); - copy_fs_field(replicas[i], "%s", buf); + copy_fs_field(fs_usage_replicas_wrong, + replicas[i], "%s", buf.buf); } } #undef copy_fs_field #undef copy_dev_field -#undef copy_bucket_field #undef copy_stripe_field #undef copy_field fsck_err: if (ca) percpu_ref_put(&ca->ref); - if (ret) - bch_err(c, "%s: ret %i", __func__, ret); + bch_err_fn(c, ret); + + percpu_up_write(&c->mark_lock); + printbuf_exit(&buf); return ret; } -static int bch2_gc_start(struct bch_fs *c, - bool metadata_only) +static int bch2_gc_start(struct bch_fs *c) { - struct bch_dev *ca = NULL; - unsigned i; 
- int ret; - BUG_ON(c->usage_gc); c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64), sizeof(u64), GFP_KERNEL); if (!c->usage_gc) { bch_err(c, "error allocating c->usage_gc"); - return -ENOMEM; + return -BCH_ERR_ENOMEM_gc_start; } - for_each_member_device(ca, c, i) { - BUG_ON(ca->buckets[1]); + for_each_member_device(c, ca) { BUG_ON(ca->usage_gc); - ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) + - ca->mi.nbuckets * sizeof(struct bucket), - GFP_KERNEL|__GFP_ZERO); - if (!ca->buckets[1]) { - percpu_ref_put(&ca->ref); - bch_err(c, "error allocating ca->buckets[gc]"); - return -ENOMEM; - } - ca->usage_gc = alloc_percpu(struct bch_dev_usage); if (!ca->usage_gc) { bch_err(c, "error allocating ca->usage_gc"); percpu_ref_put(&ca->ref); - return -ENOMEM; + return -BCH_ERR_ENOMEM_gc_start; } + + this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets, + ca->mi.nbuckets - ca->mi.first_bucket); } - ret = bch2_ec_mem_alloc(c, true); - if (ret) { - bch_err(c, "error allocating ec gc mem"); - return ret; + return 0; +} + +static int bch2_gc_reset(struct bch_fs *c) +{ + for_each_member_device(c, ca) { + free_percpu(ca->usage_gc); + ca->usage_gc = NULL; } - percpu_down_write(&c->mark_lock); + free_percpu(c->usage_gc); + c->usage_gc = NULL; + + return bch2_gc_start(c); +} + +/* returns true if not equal */ +static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l, + struct bch_alloc_v4 r) +{ + return l.gen != r.gen || + l.oldest_gen != r.oldest_gen || + l.data_type != r.data_type || + l.dirty_sectors != r.dirty_sectors || + l.cached_sectors != r.cached_sectors || + l.stripe_redundancy != r.stripe_redundancy || + l.stripe != r.stripe; +} + +static int bch2_alloc_write_key(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k, + bool metadata_only) +{ + struct bch_fs *c = trans->c; + struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode); + struct bucket gc, *b; + struct bkey_i_alloc_v4 *a; + struct bch_alloc_v4 old_convert, new; + const struct bch_alloc_v4 *old; + enum bch_data_type type; + int ret; + + old = bch2_alloc_to_v4(k, &old_convert); + new = *old; + + percpu_down_read(&c->mark_lock); + b = gc_bucket(ca, iter->pos.offset); /* - * indicate to stripe code that we need to allocate for the gc stripes - * radix tree, too + * b->data_type doesn't yet include need_discard & need_gc_gen states - + * fix that here: */ - gc_pos_set(c, gc_phase(GC_PHASE_START)); + type = __alloc_data_type(b->dirty_sectors, + b->cached_sectors, + b->stripe, + *old, + b->data_type); + if (b->data_type != type) { + struct bch_dev_usage *u; + + preempt_disable(); + u = this_cpu_ptr(ca->usage_gc); + u->d[b->data_type].buckets--; + b->data_type = type; + u->d[b->data_type].buckets++; + preempt_enable(); + } + + gc = *b; + percpu_up_read(&c->mark_lock); - for_each_member_device(ca, c, i) { - struct bucket_array *dst = __bucket_array(ca, 1); - struct bucket_array *src = __bucket_array(ca, 0); - size_t b; + if (metadata_only && + gc.data_type != BCH_DATA_sb && + gc.data_type != BCH_DATA_journal && + gc.data_type != BCH_DATA_btree) + return 0; - dst->first_bucket = src->first_bucket; - dst->nbuckets = src->nbuckets; + if (gen_after(old->gen, gc.gen)) + return 0; - for (b = 0; b < src->nbuckets; b++) { - struct bucket *d = &dst->b[b]; - struct bucket *s = &src->b[b]; + if (c->opts.reconstruct_alloc || + fsck_err_on(new.data_type != gc.data_type, c, + alloc_key_data_type_wrong, + "bucket %llu:%llu gen %u has wrong data_type" + ": got %s, should be %s", + iter->pos.inode, iter->pos.offset, + 
gc.gen, + bch2_data_types[new.data_type], + bch2_data_types[gc.data_type])) + new.data_type = gc.data_type; + +#define copy_bucket_field(_errtype, _f) \ + if (c->opts.reconstruct_alloc || \ + fsck_err_on(new._f != gc._f, c, _errtype, \ + "bucket %llu:%llu gen %u data type %s has wrong " #_f \ + ": got %u, should be %u", \ + iter->pos.inode, iter->pos.offset, \ + gc.gen, \ + bch2_data_types[gc.data_type], \ + new._f, gc._f)) \ + new._f = gc._f; \ + + copy_bucket_field(alloc_key_gen_wrong, + gen); + copy_bucket_field(alloc_key_dirty_sectors_wrong, + dirty_sectors); + copy_bucket_field(alloc_key_cached_sectors_wrong, + cached_sectors); + copy_bucket_field(alloc_key_stripe_wrong, + stripe); + copy_bucket_field(alloc_key_stripe_redundancy_wrong, + stripe_redundancy); +#undef copy_bucket_field - d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen; - d->gen_valid = s->gen_valid; + if (!bch2_alloc_v4_cmp(*old, new)) + return 0; - if (metadata_only && - (s->mark.data_type == BCH_DATA_user || - s->mark.data_type == BCH_DATA_cached)) - d->_mark = s->mark; + a = bch2_alloc_to_v4_mut(trans, k); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + return ret; + + a->v = new; + + /* + * The trigger normally makes sure this is set, but we're not running + * triggers: + */ + if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ]) + a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now)); + + ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN); +fsck_err: + return ret; +} + +static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only) +{ + int ret = 0; + + for_each_member_device(c, ca) { + ret = bch2_trans_run(c, + for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc, + POS(ca->dev_idx, ca->mi.first_bucket), + POS(ca->dev_idx, ca->mi.nbuckets - 1), + BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k, + NULL, NULL, BCH_TRANS_COMMIT_lazy_rw, + bch2_alloc_write_key(trans, &iter, k, metadata_only))); + if (ret) { + percpu_ref_put(&ca->ref); + break; } - }; + } - percpu_up_write(&c->mark_lock); + bch_err_fn(c, ret); + return ret; +} - return 0; +static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only) +{ + for_each_member_device(c, ca) { + struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) + + ca->mi.nbuckets * sizeof(struct bucket), + GFP_KERNEL|__GFP_ZERO); + if (!buckets) { + percpu_ref_put(&ca->ref); + bch_err(c, "error allocating ca->buckets[gc]"); + return -BCH_ERR_ENOMEM_gc_alloc_start; + } + + buckets->first_bucket = ca->mi.first_bucket; + buckets->nbuckets = ca->mi.nbuckets; + rcu_assign_pointer(ca->buckets_gc, buckets); + } + + int ret = bch2_trans_run(c, + for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN, + BTREE_ITER_PREFETCH, k, ({ + struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode); + struct bucket *g = gc_bucket(ca, k.k->p.offset); + + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); + + g->gen_valid = 1; + g->gen = a->gen; + + if (metadata_only && + (a->data_type == BCH_DATA_user || + a->data_type == BCH_DATA_cached || + a->data_type == BCH_DATA_parity)) { + g->data_type = a->data_type; + g->dirty_sectors = a->dirty_sectors; + g->cached_sectors = a->cached_sectors; + g->stripe = a->stripe; + g->stripe_redundancy = a->stripe_redundancy; + } + + 0; + }))); + bch_err_fn(c, ret); + return ret; } -static int bch2_gc_reflink_done_initial_fn(struct bch_fs *c, struct bkey_s_c k) +static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only) { - struct reflink_gc *r; + 
for_each_member_device(c, ca) {
+		struct bucket_array *buckets = gc_bucket_array(ca);
+		struct bucket *g;
+
+		for_each_bucket(g, buckets) {
+			if (metadata_only &&
+			    (g->data_type == BCH_DATA_user ||
+			     g->data_type == BCH_DATA_cached ||
+			     g->data_type == BCH_DATA_parity))
+				continue;
+			g->data_type = 0;
+			g->dirty_sectors = 0;
+			g->cached_sectors = 0;
+		}
+	}
+}
+
+static int bch2_gc_write_reflink_key(struct btree_trans *trans,
+				     struct btree_iter *iter,
+				     struct bkey_s_c k,
+				     size_t *idx)
+{
+	struct bch_fs *c = trans->c;
 	const __le64 *refcount = bkey_refcount_c(k);
-	char buf[200];
+	struct printbuf buf = PRINTBUF;
+	struct reflink_gc *r;
 	int ret = 0;
 
 	if (!refcount)
 		return 0;
 
-	r = genradix_ptr(&c->reflink_gc_table, c->reflink_gc_idx++);
-	if (!r)
-		return -ENOMEM;
+	while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
+	       r->offset < k.k->p.offset)
+		++*idx;
 
 	if (!r ||
 	    r->offset != k.k->p.offset ||
@@ -1380,182 +1574,168 @@ static int bch2_gc_reflink_done_initial_fn(struct bch_fs *c, struct bkey_s_c k)
 	}
 
 	if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+			reflink_v_refcount_wrong,
 			"reflink key has wrong refcount:\n"
 			"  %s\n"
 			"  should be %u",
-			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
 			r->refcount)) {
-		struct bkey_i *new;
-
-		new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
-		if (!new) {
-			ret = -ENOMEM;
-			goto fsck_err;
-		}
+		struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
 
-		bkey_reassemble(new, k);
+		ret = PTR_ERR_OR_ZERO(new);
+		if (ret)
+			return ret;
 
-		if (!r->refcount) {
+		if (!r->refcount)
 			new->k.type = KEY_TYPE_deleted;
-			new->k.size = 0;
-		} else {
+		else
 			*bkey_refcount(new) = cpu_to_le64(r->refcount);
-		}
-
-		ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new);
-		if (ret)
-			kfree(new);
 	}
 fsck_err:
+	printbuf_exit(&buf);
 	return ret;
 }
 
-static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
-				bool metadata_only)
+static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bkey_s_c k;
-	struct reflink_gc *r;
 	size_t idx = 0;
-	char buf[200];
-	int ret = 0;
 
 	if (metadata_only)
 		return 0;
 
-	if (initial) {
-		c->reflink_gc_idx = 0;
+	int ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
+				BTREE_ID_reflink, POS_MIN,
+				BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+			bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
+	c->reflink_gc_nr = 0;
+	return ret;
+}
 
-		ret = bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
-				bch2_gc_reflink_done_initial_fn);
-		goto out;
-	}
+static int bch2_gc_reflink_start(struct bch_fs *c,
+				 bool metadata_only)
+{
 
-	bch2_trans_init(&trans, c, 0, 0);
+	if (metadata_only)
+		return 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		const __le64 *refcount = bkey_refcount_c(k);
+	c->reflink_gc_nr = 0;
 
-		if (!refcount)
-			continue;
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
+				   BTREE_ITER_PREFETCH, k, ({
+			const __le64 *refcount = bkey_refcount_c(k);
 
-		r = genradix_ptr(&c->reflink_gc_table, idx);
-		if (!r ||
-		    r->offset != k.k->p.offset ||
-		    r->size != k.k->size) {
-			bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
-			ret = -EINVAL;
-			break;
-		}
+			if (!refcount)
+				continue;
 
-		if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
-				"reflink key has wrong refcount:\n"
-				"  %s\n"
-				"  should be %u",
-				(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
-				r->refcount)) {
-			struct bkey_i *new;
-
-			new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
+			struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
+							c->reflink_gc_nr++, GFP_KERNEL);
+			if (!r) {
+				ret = -BCH_ERR_ENOMEM_gc_reflink_start;
 				break;
 			}
 
-			bkey_reassemble(new, k);
-
-			if (!r->refcount)
-				new->k.type = KEY_TYPE_deleted;
-			else
-				*bkey_refcount(new) = cpu_to_le64(r->refcount);
+			r->offset = k.k->p.offset;
+			r->size = k.k->size;
+			r->refcount = 0;
+			0;
+		})));
 
-			ret = __bch2_trans_do(&trans, NULL, NULL, 0,
-					__bch2_btree_insert(&trans, BTREE_ID_reflink, new));
-			kfree(new);
-
-			if (ret)
-				break;
-		}
-	}
-fsck_err:
-	bch2_trans_iter_put(&trans, iter);
-	bch2_trans_exit(&trans);
-out:
-	genradix_free(&c->reflink_gc_table);
-	c->reflink_gc_nr = 0;
+	bch_err_fn(c, ret);
 	return ret;
 }
 
-static int bch2_gc_reflink_start_initial_fn(struct bch_fs *c, struct bkey_s_c k)
+static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
 {
-
+	struct genradix_iter iter;
 	struct reflink_gc *r;
-	const __le64 *refcount = bkey_refcount_c(k);
 
-	if (!refcount)
-		return 0;
-
-	r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
-			       GFP_KERNEL);
-	if (!r)
-		return -ENOMEM;
-
-	r->offset = k.k->p.offset;
-	r->size = k.k->size;
-	r->refcount = 0;
-	return 0;
+	genradix_for_each(&c->reflink_gc_table, iter, r)
+		r->refcount = 0;
 }
 
-static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
-				 bool metadata_only)
+static int bch2_gc_write_stripes_key(struct btree_trans *trans,
+				     struct btree_iter *iter,
+				     struct bkey_s_c k)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bkey_s_c k;
-	struct reflink_gc *r;
-	int ret;
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+	const struct bch_stripe *s;
+	struct gc_stripe *m;
+	bool bad = false;
+	unsigned i;
+	int ret = 0;
 
-	if (metadata_only)
+	if (k.k->type != KEY_TYPE_stripe)
 		return 0;
 
-	genradix_free(&c->reflink_gc_table);
-	c->reflink_gc_nr = 0;
+	s = bkey_s_c_to_stripe(k).v;
+	m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
 
-	if (initial)
-		return bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
-				bch2_gc_reflink_start_initial_fn);
+	for (i = 0; i < s->nr_blocks; i++) {
+		u32 old = stripe_blockcount_get(s, i);
+		u32 new = (m ? m->block_sectors[i] : 0);
 
-	bch2_trans_init(&trans, c, 0, 0);
+		if (old != new) {
+			prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
+				   i, old, new);
+			bad = true;
+		}
+	}
 
-	for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		const __le64 *refcount = bkey_refcount_c(k);
+	if (bad)
+		bch2_bkey_val_to_text(&buf, c, k);
 
-		if (!refcount)
-			continue;
+	if (fsck_err_on(bad, c, stripe_sector_count_wrong,
+			"%s", buf.buf)) {
+		struct bkey_i_stripe *new;
 
-		r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
-				       GFP_KERNEL);
-		if (!r) {
-			ret = -ENOMEM;
-			break;
-		}
+		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+		ret = PTR_ERR_OR_ZERO(new);
+		if (ret)
+			return ret;
+
+		bkey_reassemble(&new->k_i, k);
 
-		r->offset = k.k->p.offset;
-		r->size = k.k->size;
-		r->refcount = 0;
+		for (i = 0; i < new->v.nr_blocks; i++)
+			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
+
+		ret = bch2_trans_update(trans, iter, &new->k_i, 0);
 	}
-	bch2_trans_iter_put(&trans, iter);
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
 
-	bch2_trans_exit(&trans);
-	return 0;
+static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+{
+	if (metadata_only)
+		return 0;
+
+	return bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
+				BTREE_ID_stripes, POS_MIN,
+				BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+			bch2_gc_write_stripes_key(trans, &iter, k)));
+}
+
+static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
+{
+	genradix_free(&c->gc_stripes);
 }
 
 /**
  * bch2_gc - walk _all_ references to buckets, and recompute them:
  *
+ * @c:			filesystem object
+ * @initial:		are we in recovery?
+ * @metadata_only:	are we just checking metadata references, or everything?
+ *
+ * Returns: 0 on success, or standard errcode on failure
+ *
  * Order matters here:
  *  - Concurrent GC relies on the fact that we have a total ordering for
  *    everything that GC walks - see gc_will_visit_node(),
@@ -1573,51 +1753,27 @@ static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
  */
 int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
 {
-	struct bch_dev *ca;
-	u64 start_time = local_clock();
-	unsigned i, iter = 0;
+	unsigned iter = 0;
 	int ret;
 
 	lockdep_assert_held(&c->state_lock);
-	trace_gc_start(c);
 
 	down_write(&c->gc_lock);
 
-	/* flush interior btree updates: */
-	closure_wait_event(&c->btree_interior_update_wait,
-			   !bch2_btree_interior_updates_nr_pending(c));
-again:
-	ret   = bch2_gc_start(c, metadata_only) ?:
-		bch2_gc_reflink_start(c, initial, metadata_only);
+	bch2_btree_interior_updates_flush(c);
+
+	ret   = bch2_gc_start(c) ?:
+		bch2_gc_alloc_start(c, metadata_only) ?:
+		bch2_gc_reflink_start(c, metadata_only);
 	if (ret)
 		goto out;
+again:
+	gc_pos_set(c, gc_phase(GC_PHASE_START));
 
 	bch2_mark_superblocks(c);
 
-	if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags) &&
-	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
-	    c->opts.fix_errors != FSCK_OPT_NO) {
-		bch_info(c, "starting topology repair pass");
-		ret = bch2_repair_topology(c);
-		if (ret)
-			goto out;
-		bch_info(c, "topology repair pass done");
-
-		set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
-	}
-
 	ret = bch2_gc_btrees(c, initial, metadata_only);
 
-	if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR &&
-	    !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
-	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
-		set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
-		ret = 0;
-	}
-
-	if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR)
-		ret = FSCK_ERR_EXIT;
-
 	if (ret)
 		goto out;
 
@@ -1626,41 +1782,45 @@ again:
 #endif
 	c->gc_count++;
 
-	if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+	if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
 	    (!iter && bch2_test_restart_gc)) {
+		if (iter++ > 2) {
+			bch_info(c, "Unable to fix bucket gens, looping");
+			ret = -EINVAL;
+			goto out;
+		}
+
 		/*
 		 * XXX: make sure gens we fixed got saved
 		 */
-		if (iter++ <= 2) {
-			bch_info(c, "Second GC pass needed, restarting:");
-			clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
-			__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
-
-			percpu_down_write(&c->mark_lock);
-			bch2_gc_free(c);
-			percpu_up_write(&c->mark_lock);
-			/* flush fsck errors, reset counters */
-			bch2_flush_fsck_errs(c);
-
-			goto again;
-		}
+		bch_info(c, "Second GC pass needed, restarting:");
+		clear_bit(BCH_FS_need_another_gc, &c->flags);
+		__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+
+		bch2_gc_stripes_reset(c, metadata_only);
+		bch2_gc_alloc_reset(c, metadata_only);
+		bch2_gc_reflink_reset(c, metadata_only);
+		ret = bch2_gc_reset(c);
+		if (ret)
+			goto out;
 
-		bch_info(c, "Unable to fix bucket gens, looping");
-		ret = -EINVAL;
+		/* flush fsck errors, reset counters */
+		bch2_flush_fsck_errs(c);
+		goto again;
 	}
 out:
 	if (!ret) {
 		bch2_journal_block(&c->journal);
 
-		percpu_down_write(&c->mark_lock);
-		ret   = bch2_gc_reflink_done(c, initial, metadata_only) ?:
+		ret   = bch2_gc_stripes_done(c, metadata_only) ?:
+			bch2_gc_reflink_done(c, metadata_only) ?:
+			bch2_gc_alloc_done(c, metadata_only) ?:
 			bch2_gc_done(c, initial, metadata_only);
 
 		bch2_journal_unblock(&c->journal);
-	} else {
-		percpu_down_write(&c->mark_lock);
 	}
 
+	percpu_down_write(&c->mark_lock);
 	/* Indicates that gc is no longer in progress: */
 	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
 
@@ -1669,108 +1829,79 @@ out:
 
 	up_write(&c->gc_lock);
 
-	trace_gc_end(c);
-	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
-
-	/*
-	 * Wake up allocator in case it was waiting for buckets
-	 * because of not being able to inc gens
-	 */
-	for_each_member_device(ca, c, i)
-		bch2_wake_allocator(ca);
-
 	/*
 	 * At startup, allocations can happen directly instead of via the
 	 * allocator thread - issue wakeup in case they blocked on gc_lock:
 	 */
 	closure_wake_up(&c->freelist_wait);
+	bch_err_fn(c, ret);
 	return ret;
 }
 
-static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
+static int gc_btree_gens_key(struct btree_trans *trans,
+			     struct btree_iter *iter,
+			     struct bkey_s_c k)
 {
+	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const struct bch_extent_ptr *ptr;
+	struct bkey_i *u;
+	int ret;
 
 	percpu_down_read(&c->mark_lock);
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		struct bucket *g = PTR_BUCKET(ca, ptr, false);
 
-		if (gen_after(g->mark.gen, ptr->gen) > 16) {
+		if (ptr_stale(ca, ptr) > 16) {
 			percpu_up_read(&c->mark_lock);
-			return true;
+			goto update;
 		}
 	}
 
 	bkey_for_each_ptr(ptrs, ptr) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-		struct bucket *g = PTR_BUCKET(ca, ptr, false);
+		u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
 
-		if (gen_after(g->gc_gen, ptr->gen))
-			g->gc_gen = ptr->gen;
+		if (gen_after(*gen, ptr->gen))
+			*gen = ptr->gen;
 	}
 	percpu_up_read(&c->mark_lock);
+	return 0;
+update:
+	u = bch2_bkey_make_mut(trans, iter, &k, 0);
+	ret = PTR_ERR_OR_ZERO(u);
+	if (ret)
+		return ret;
 
-	return false;
+	bch2_extent_normalize(c, bkey_i_to_s(u));
+	return 0;
 }
 
-/*
- * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
- * node pointers currently never have cached pointers that can become stale:
- */
-static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
+				       struct bkey_s_c k)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bkey_s_c k;
-	struct bkey_buf sk;
-	int ret = 0, commit_err = 0;
-
-	bch2_bkey_buf_init(&sk);
-	bch2_trans_init(&trans, c, 0, 0);
-
-	iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
-				   BTREE_ITER_PREFETCH|
-				   BTREE_ITER_NOT_EXTENTS|
-				   BTREE_ITER_ALL_SNAPSHOTS);
-
-	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = bkey_err(k))) {
-		c->gc_gens_pos = iter->pos;
-
-		if (gc_btree_gens_key(c, k) && !commit_err) {
-			bch2_bkey_buf_reassemble(&sk, c, k);
-			bch2_extent_normalize(c, bkey_i_to_s(sk.k));
-
+	struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+	struct bkey_i_alloc_v4 *a_mut;
+	int ret;
 
-			commit_err =
-				bch2_trans_update(&trans, iter, sk.k, 0) ?:
-				bch2_trans_commit(&trans, NULL, NULL,
-						  BTREE_INSERT_NOWAIT|
-						  BTREE_INSERT_NOFAIL);
-			if (commit_err == -EINTR) {
-				commit_err = 0;
-				continue;
-			}
-		}
+	if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
+		return 0;
 
-		bch2_btree_iter_advance(iter);
-	}
-	bch2_trans_iter_put(&trans, iter);
+	a_mut = bch2_alloc_to_v4_mut(trans, k);
+	ret = PTR_ERR_OR_ZERO(a_mut);
+	if (ret)
+		return ret;
 
-	bch2_trans_exit(&trans);
-	bch2_bkey_buf_exit(&sk, c);
+	a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
+	a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);
 
-	return ret;
+	return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
 }
 
 int bch2_gc_gens(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	struct bucket_array *buckets;
-	struct bucket *g;
-	unsigned i;
+	u64 b, start_time = local_clock();
 	int ret;
 
 	/*
@@ -1778,43 +1909,74 @@ int bch2_gc_gens(struct bch_fs *c)
 	 * introduces a deadlock in the RO path - we currently take the state
 	 * lock at the start of going RO, thus the gc thread may get stuck:
 	 */
+	if (!mutex_trylock(&c->gc_gens_lock))
+		return 0;
+
+	trace_and_count(c, gc_gens_start, c);
 	down_read(&c->gc_lock);
 
-	for_each_member_device(ca, c, i) {
-		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
+	for_each_member_device(c, ca) {
+		struct bucket_gens *gens = bucket_gens(ca);
+
+		BUG_ON(ca->oldest_gen);
 
-		for_each_bucket(g, buckets)
-			g->gc_gen = g->mark.gen;
-		up_read(&ca->bucket_lock);
+		ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
+		if (!ca->oldest_gen) {
+			percpu_ref_put(&ca->ref);
+			ret = -BCH_ERR_ENOMEM_gc_gens;
+			goto err;
+		}
+
+		for (b = gens->first_bucket;
+		     b < gens->nbuckets; b++)
+			ca->oldest_gen[b] = gens->b[b];
 	}
 
-	for (i = 0; i < BTREE_ID_NR; i++)
-		if ((1 << i) & BTREE_ID_HAS_PTRS) {
+	for (unsigned i = 0; i < BTREE_ID_NR; i++)
+		if (btree_type_has_ptrs(i)) {
 			c->gc_gens_btree = i;
 			c->gc_gens_pos = POS_MIN;
-			ret = bch2_gc_btree_gens(c, i);
-			if (ret) {
-				bch_err(c, "error recalculating oldest_gen: %i", ret);
+
+			ret = bch2_trans_run(c,
+				for_each_btree_key_commit(trans, iter, i,
+						POS_MIN,
+						BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+						k,
+						NULL, NULL,
+						BCH_TRANS_COMMIT_no_enospc,
+					gc_btree_gens_key(trans, &iter, k)));
+			if (ret)
 				goto err;
-			}
 		}
 
-	for_each_member_device(ca, c, i) {
-		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
-
-		for_each_bucket(g, buckets)
-			g->oldest_gen = g->gc_gen;
-		up_read(&ca->bucket_lock);
-	}
+	ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+				POS_MIN,
+				BTREE_ITER_PREFETCH,
+				k,
+				NULL, NULL,
+				BCH_TRANS_COMMIT_no_enospc,
+			bch2_alloc_write_oldest_gen(trans, &iter, k)));
+	if (ret)
+		goto err;
 
 	c->gc_gens_btree = 0;
 	c->gc_gens_pos = POS_MIN;
 
 	c->gc_count++;
+
+	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
+	trace_and_count(c, gc_gens_end, c);
 err:
+	for_each_member_device(c, ca) {
+		kvfree(ca->oldest_gen);
+		ca->oldest_gen = NULL;
+	}
+
 	up_read(&c->gc_lock);
+	mutex_unlock(&c->gc_gens_lock);
+	if (!bch2_err_matches(ret, EROFS))
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1824,7 +1986,6 @@ static int bch2_gc_thread(void *arg)
 	struct io_clock *clock = &c->io_clock[WRITE];
 	unsigned long last = atomic64_read(&clock->now);
 	unsigned last_kick = atomic_read(&c->kick_gc);
-	int ret;
 
 	set_freezable();
 
@@ -1864,11 +2025,8 @@ static int bch2_gc_thread(void *arg)
 #if 0
 		ret = bch2_gc(c, false, false);
 #else
-		ret = bch2_gc_gens(c);
+		bch2_gc_gens(c);
 #endif
-		if (ret < 0)
-			bch_err(c, "btree gc failed: %i", ret);
-
 		debug_check_no_locks_held();
 	}
 
@@ -1897,7 +2055,7 @@ int bch2_gc_thread_start(struct bch_fs *c)
 
 	p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
 	if (IS_ERR(p)) {
-		bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
+		bch_err_fn(c, PTR_ERR(p));
 		return PTR_ERR(p);
 	}
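
Note on the recurring pattern in this diff: the conversions above replace open-coded
btree iteration (bch2_trans_get_iter()/bch2_btree_iter_peek() loops with manual
bch2_trans_commit() calls) with for_each_btree_key_commit(), which evaluates a
per-key expression inside a transaction and commits after each key that queued an
update. A minimal sketch of a caller, using only names that appear in this diff --
fix_one_key() and repair_example() are hypothetical, for illustration only:

	/*
	 * Per-key callback: bch2_bkey_make_mut() allocates a mutable,
	 * transaction-owned copy of @k and queues it as an update; the
	 * iteration macro commits it. (Sketch, not code from this patch.)
	 */
	static int fix_one_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
	{
		struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
		int ret = PTR_ERR_OR_ZERO(new);

		if (ret)
			return ret;

		/*
		 * ... modify @new here, as bch2_gc_write_reflink_key() and
		 * bch2_gc_write_stripes_key() do above ...
		 */
		return 0;
	}

	static int repair_example(struct bch_fs *c)
	{
		/*
		 * bch2_trans_run() wraps the walk in a btree_trans;
		 * BCH_TRANS_COMMIT_no_enospc matches the commit flags
		 * used by the GC paths above.
		 */
		return bch2_trans_run(c,
			for_each_btree_key_commit(trans, iter,
					BTREE_ID_reflink, POS_MIN,
					BTREE_ITER_PREFETCH, k,
					NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
				fix_one_key(trans, &iter, k)));
	}

This is also why the done/start helpers no longer manage iterators or key-copy
memory themselves: allocations are transaction-scoped (bch2_trans_kmalloc(),
bch2_bkey_make_mut()) and are released when the transaction is torn down, so the
explicit kmalloc()/kfree() and bch2_trans_iter_put() bookkeeping disappears.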