X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_gc.c;h=6c52f116098f7d24723771d79e5374880d743a56;hb=e773e86495534ede8efc0f3059cf0939c374a925;hp=ae880661fddbae0aede6623f231dea358ec1cecc;hpb=b6740e5392a0e0a5fddc5ad4ffac0567078e114a;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index ae88066..6c52f11 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -41,6 +41,14 @@
 #define DROP_THIS_NODE		10
 #define DROP_PREV_NODE		11
 
+static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
+{
+	return (struct bkey_s) {{{
+		(struct bkey *) k.k,
+		(struct bch_val *) k.v
+	}}};
+}
+
 static bool should_restart_for_topology_repair(struct bch_fs *c)
 {
 	return c->opts.fix_errors != FSCK_FIX_no &&
@@ -381,7 +389,8 @@ again:
 	have_child = dropped_children = false;
 	bch2_bkey_buf_init(&prev_k);
 	bch2_bkey_buf_init(&cur_k);
-	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
+	iter.prefetch = true;
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
@@ -398,7 +407,7 @@ again:
 		printbuf_reset(&buf);
 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
 
-		if (mustfix_fsck_err_on(ret == -EIO, c,
+		if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), c,
 				btree_node_unreadable,
 				"Topology repair: unreadable btree node at btree %s level %u:\n"
 				"  %s",
@@ -414,10 +423,9 @@ again:
 			continue;
 		}
 
-		if (ret) {
-			bch_err_msg(c, ret, "getting btree node");
+		bch_err_msg(c, ret, "getting btree node");
+		if (ret)
 			break;
-		}
 
 		ret = btree_repair_node_boundaries(c, b, prev, cur);
 
@@ -471,7 +479,8 @@ again:
 		goto err;
 
 	bch2_btree_and_journal_iter_exit(&iter);
-	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
+	iter.prefetch = true;
 
 	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 		bch2_bkey_buf_reassemble(&cur_k, c, k);
@@ -482,10 +491,9 @@ again:
 						false);
 		ret = PTR_ERR_OR_ZERO(cur);
 
-		if (ret) {
-			bch_err_msg(c, ret, "getting btree node");
+		bch_err_msg(c, ret, "getting btree node");
+		if (ret)
 			goto err;
-		}
 
 		ret = bch2_btree_repair_topology_recurse(trans, cur);
 		six_unlock_read(&cur->c.lock);
@@ -591,7 +599,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
 				p.ptr.gen,
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -609,7 +617,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
 				p.ptr.gen, g->gen,
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -631,7 +639,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
-				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
 				p.ptr.gen,
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -643,7 +651,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-				bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
 				p.ptr.gen, g->gen,
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -658,8 +666,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				"bucket %u:%zu different types of data in same bucket: %s, %s\n"
 				"while marking %s",
 				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-				bch2_data_types[g->data_type],
-				bch2_data_types[data_type],
+				bch2_data_type_str(g->data_type),
+				bch2_data_type_str(data_type),
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
 			if (data_type == BCH_DATA_btree) {
@@ -707,8 +715,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
 		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
 		if (!new) {
-			bch_err_msg(c, ret, "allocating new key");
 			ret = -BCH_ERR_ENOMEM_gc_repair_key;
+			bch_err_msg(c, ret, "allocating new key");
 			goto err;
 		}
 
@@ -807,9 +815,6 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 	struct bch_fs *c = trans->c;
 	struct bkey deleted = KEY(0, 0, 0);
 	struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
-	unsigned flags =
-		BTREE_TRIGGER_GC|
-		(initial ? BTREE_TRIGGER_NOATOMIC : 0);
 	int ret = 0;
 
 	deleted.p = k->k->p;
@@ -831,11 +836,10 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 	}
 
 	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_mark_key(trans, btree_id, level, old, *k, flags));
+			bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
 fsck_err:
 err:
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -929,7 +933,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
 	bch2_bkey_buf_init(&prev);
 	bch2_bkey_buf_init(&cur);
 	bkey_init(&prev.k->k);
@@ -961,7 +965,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 
 	if (b->c.level > target_depth) {
 		bch2_btree_and_journal_iter_exit(&iter);
-		bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+		bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
+		iter.prefetch = true;
 
 		while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 			struct btree *child;
@@ -974,7 +979,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 					false);
 			ret = PTR_ERR_OR_ZERO(child);
 
-			if (ret == -EIO) {
+			if (bch2_err_matches(ret, EIO)) {
 				bch2_topology_error(c);
 
 				if (__fsck_err(c,
@@ -1068,8 +1073,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 fsck_err:
 	six_unlock_read(&b->c.lock);
 
-	if (ret < 0)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	printbuf_exit(&buf);
 	return ret;
 }
@@ -1105,10 +1109,8 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
 			: bch2_gc_btree(trans, i, initial, metadata_only);
 	}
 
-	if (ret < 0)
-		bch_err_fn(c, ret);
-
 	bch2_trans_put(trans);
+	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1159,13 +1161,10 @@ static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 
 static void bch2_mark_superblocks(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	mutex_lock(&c->sb_lock);
 	gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
 	mutex_unlock(&c->sb_lock);
 }
@@ -1190,16 +1189,11 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
 static void bch2_gc_free(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	genradix_free(&c->reflink_gc_table);
 	genradix_free(&c->gc_stripes);
 
-	for_each_member_device(ca, c, i) {
-		kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
-			sizeof(struct bucket_array) +
-			ca->mi.nbuckets * sizeof(struct bucket));
+	for_each_member_device(c, ca) {
+		kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
 		ca->buckets_gc = NULL;
 
 		free_percpu(ca->usage_gc);
@@ -1218,7 +1212,7 @@ static int bch2_gc_done(struct bch_fs *c,
 	bool verify = !metadata_only &&
 		!c->opts.reconstruct_alloc &&
 		(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
-	unsigned i, dev;
+	unsigned i;
 	int ret = 0;
 
 	percpu_down_write(&c->mark_lock);
@@ -1230,14 +1224,14 @@ static int bch2_gc_done(struct bch_fs *c,
 			, ##__VA_ARGS__, dst->_f, src->_f)))		\
 		dst->_f = src->_f
 #define copy_dev_field(_err, _f, _msg, ...)				\
-	copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+	copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
 #define copy_fs_field(_err, _f, _msg, ...)				\
 	copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
 
 	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
 		bch2_fs_usage_acc_to_base(c, i);
 
-	for_each_member_device(ca, c, dev) {
+	__for_each_member_device(c, ca) {
 		struct bch_dev_usage *dst = ca->usage_base;
 		struct bch_dev_usage *src = (void *)
 			bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
@@ -1245,11 +1239,11 @@ static int bch2_gc_done(struct bch_fs *c,
 
 		for (i = 0; i < BCH_DATA_NR; i++) {
 			copy_dev_field(dev_usage_buckets_wrong,
-				       d[i].buckets,	"%s buckets", bch2_data_types[i]);
+				       d[i].buckets,	"%s buckets", bch2_data_type_str(i));
 			copy_dev_field(dev_usage_sectors_wrong,
-				       d[i].sectors,	"%s sectors", bch2_data_types[i]);
+				       d[i].sectors,	"%s sectors", bch2_data_type_str(i));
 			copy_dev_field(dev_usage_fragmented_wrong,
-				       d[i].fragmented,	"%s fragmented", bch2_data_types[i]);
+				       d[i].fragmented,	"%s fragmented", bch2_data_type_str(i));
 		}
 	}
 
@@ -1260,19 +1254,19 @@ static int bch2_gc_done(struct bch_fs *c,
 		bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
 
 	copy_fs_field(fs_usage_hidden_wrong,
-		      hidden,		"hidden");
+		      b.hidden,		"hidden");
 	copy_fs_field(fs_usage_btree_wrong,
-		      btree,		"btree");
+		      b.btree,		"btree");
 
 	if (!metadata_only) {
 		copy_fs_field(fs_usage_data_wrong,
-			      data,	"data");
+			      b.data,	"data");
 		copy_fs_field(fs_usage_cached_wrong,
-			      cached,	"cached");
+			      b.cached,	"cached");
 		copy_fs_field(fs_usage_reserved_wrong,
-			      reserved,	"reserved");
+			      b.reserved,	"reserved");
 		copy_fs_field(fs_usage_nr_inodes_wrong,
-			      nr_inodes,"nr_inodes");
+			      b.nr_inodes,"nr_inodes");
 
 		for (i = 0; i < BCH_REPLICAS_MAX; i++)
 			copy_fs_field(fs_usage_persistent_reserved_wrong,
@@ -1304,8 +1298,7 @@ static int bch2_gc_done(struct bch_fs *c,
 fsck_err:
 	if (ca)
 		percpu_ref_put(&ca->ref);
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 
 	percpu_up_write(&c->mark_lock);
 	printbuf_exit(&buf);
@@ -1314,9 +1307,6 @@ fsck_err:
 
 static int bch2_gc_start(struct bch_fs *c)
 {
-	struct bch_dev *ca = NULL;
-	unsigned i;
-
 	BUG_ON(c->usage_gc);
 
 	c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
@@ -1326,7 +1316,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		return -BCH_ERR_ENOMEM_gc_start;
 	}
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		BUG_ON(ca->usage_gc);
 
 		ca->usage_gc = alloc_percpu(struct bch_dev_usage);
@@ -1345,10 +1335,7 @@
 
 static int bch2_gc_reset(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		free_percpu(ca->usage_gc);
 		ca->usage_gc = NULL;
 	}
@@ -1386,9 +1373,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	enum bch_data_type type;
 	int ret;
 
-	if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
-		return 1;
-
 	old = bch2_alloc_to_v4(k, &old_convert);
 	new = *old;
 
@@ -1434,8 +1418,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 			": got %s, should be %s",
 			iter->pos.inode, iter->pos.offset,
 			gc.gen,
-			bch2_data_types[new.data_type],
-			bch2_data_types[gc.data_type]))
+			bch2_data_type_str(new.data_type),
+			bch2_data_type_str(gc.data_type)))
 		new.data_type = gc.data_type;
 
 #define copy_bucket_field(_errtype, _f)					\
@@ -1445,7 +1429,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 			": got %u, should be %u",			\
 			iter->pos.inode, iter->pos.offset,		\
 			gc.gen,						\
-			bch2_data_types[gc.data_type],			\
+			bch2_data_type_str(gc.data_type),		\
 			new._f, gc._f))					\
 		new._f = gc._f;						\
 
@@ -1485,52 +1469,36 @@ fsck_err:
 
 static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;
 
-	for_each_member_device(ca, c, i) {
-		ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-				POS(ca->dev_idx, ca->mi.first_bucket),
-				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
-				NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
-				bch2_alloc_write_key(trans, &iter, k, metadata_only));
-
-		if (ret < 0) {
-			bch_err_fn(c, ret);
+	for_each_member_device(c, ca) {
+		ret = bch2_trans_run(c,
+			for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
+					POS(ca->dev_idx, ca->mi.first_bucket),
+					POS(ca->dev_idx, ca->mi.nbuckets - 1),
+					BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+					NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+					bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+		if (ret) {
 			percpu_ref_put(&ca->ref);
 			break;
 		}
 	}
 
-	bch2_trans_put(trans);
-	return ret < 0 ? ret : 0;
+	bch_err_fn(c, ret);
+	return ret;
 }
 
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bucket *g;
-	struct bch_alloc_v4 a_convert;
-	const struct bch_alloc_v4 *a;
-	unsigned i;
-	int ret;
-
-	for_each_member_device(ca, c, i) {
-		struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
+	for_each_member_device(c, ca) {
+		struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
 				ca->mi.nbuckets * sizeof(struct bucket),
 				GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
 			percpu_ref_put(&ca->ref);
 			bch_err(c, "error allocating ca->buckets[gc]");
-			ret = -BCH_ERR_ENOMEM_gc_alloc_start;
-			goto err;
+			return -BCH_ERR_ENOMEM_gc_alloc_start;
 		}
 
 		buckets->first_bucket	= ca->mi.first_bucket;
@@ -1538,42 +1506,38 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		rcu_assign_pointer(ca->buckets_gc, buckets);
 	}
 
-	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-				 BTREE_ITER_PREFETCH, k, ({
-		ca = bch_dev_bkey_exists(c, k.k->p.inode);
-		g = gc_bucket(ca, k.k->p.offset);
-
-		a = bch2_alloc_to_v4(k, &a_convert);
-
-		g->gen_valid	= 1;
-		g->gen		= a->gen;
-
-		if (metadata_only &&
-		    (a->data_type == BCH_DATA_user ||
-		     a->data_type == BCH_DATA_cached ||
-		     a->data_type == BCH_DATA_parity)) {
-			g->data_type		= a->data_type;
-			g->dirty_sectors	= a->dirty_sectors;
-			g->cached_sectors	= a->cached_sectors;
-			g->stripe		= a->stripe;
-			g->stripe_redundancy	= a->stripe_redundancy;
-		}
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+					 BTREE_ITER_PREFETCH, k, ({
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bucket *g = gc_bucket(ca, k.k->p.offset);
 
-		0;
-	}));
-err:
-	bch2_trans_put(trans);
-	if (ret)
-		bch_err_fn(c, ret);
+			struct bch_alloc_v4 a_convert;
+			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+			g->gen_valid	= 1;
+			g->gen		= a->gen;
+
+			if (metadata_only &&
+			    (a->data_type == BCH_DATA_user ||
+			     a->data_type == BCH_DATA_cached ||
+			     a->data_type == BCH_DATA_parity)) {
+				g->data_type		= a->data_type;
+				g->dirty_sectors	= a->dirty_sectors;
+				g->cached_sectors	= a->cached_sectors;
+				g->stripe		= a->stripe;
+				g->stripe_redundancy	= a->stripe_redundancy;
+			}
+
+			0;
+		})));
+	bch_err_fn(c, ret);
 	return ret;
 }
 
 static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_array *buckets = gc_bucket_array(ca);
 		struct bucket *g;
 
@@ -1631,7 +1595,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
 		if (!r->refcount)
 			new->k.type = KEY_TYPE_deleted;
 		else
-			*bkey_refcount(new) = cpu_to_le64(r->refcount);
+			*bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
 	}
 fsck_err:
 	printbuf_exit(&buf);
@@ -1640,42 +1604,31 @@ fsck_err:
 
 static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	size_t idx = 0;
-	int ret = 0;
 
 	if (metadata_only)
 		return 0;
 
-	trans = bch2_trans_get(c);
-
-	ret = for_each_btree_key_commit(trans, iter,
-			BTREE_ID_reflink, POS_MIN,
-			BTREE_ITER_PREFETCH, k,
-			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-			bch2_gc_write_reflink_key(trans, &iter, k, &idx));
-
+	int ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
+				BTREE_ID_reflink, POS_MIN,
+				BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+			bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
 	c->reflink_gc_nr = 0;
-	bch2_trans_put(trans);
 	return ret;
 }
 
 static int bch2_gc_reflink_start(struct bch_fs *c,
 				 bool metadata_only)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct reflink_gc *r;
-	int ret = 0;
 
 	if (metadata_only)
 		return 0;
 
 	c->reflink_gc_nr = 0;
 
-	ret = bch2_trans_run(c,
+	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
 				   BTREE_ITER_PREFETCH, k, ({
 			const __le64 *refcount = bkey_refcount_c(k);
@@ -1683,8 +1636,8 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
 			if (!refcount)
 				continue;
 
-			r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
-					       GFP_KERNEL);
+			struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
+							c->reflink_gc_nr++, GFP_KERNEL);
 			if (!r) {
 				ret = -BCH_ERR_ENOMEM_gc_reflink_start;
 				break;
@@ -1764,24 +1717,15 @@ fsck_err:
 
 static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret = 0;
-
 	if (metadata_only)
 		return 0;
 
-	trans = bch2_trans_get(c);
-
-	ret = for_each_btree_key_commit(trans, iter,
-			BTREE_ID_stripes, POS_MIN,
-			BTREE_ITER_PREFETCH, k,
-			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-			bch2_gc_write_stripes_key(trans, &iter, k));
-
-	bch2_trans_put(trans);
-	return ret;
+	return bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
+				BTREE_ID_stripes, POS_MIN,
+				BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+				bch2_gc_write_stripes_key(trans, &iter, k)));
 }
 
 static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
@@ -1896,9 +1840,7 @@ out:
 	 * allocator thread - issue wakeup in case they blocked on gc_lock:
 	 */
 	closure_wake_up(&c->freelist_wait);
-
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1908,7 +1850,6 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const struct bch_extent_ptr *ptr;
 	struct bkey_i *u;
 	int ret;
 
@@ -1966,12 +1907,7 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
 
 int bch2_gc_gens(struct bch_fs *c)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_dev *ca;
 	u64 b, start_time = local_clock();
-	unsigned i;
 	int ret;
 
 	/*
@@ -1984,9 +1920,8 @@ int bch2_gc_gens(struct bch_fs *c)
 	trace_and_count(c, gc_gens_start, c);
 	down_read(&c->gc_lock);
-	trans = bch2_trans_get(c);
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_gens *gens = bucket_gens(ca);
 
 		BUG_ON(ca->oldest_gen);
@@ -2003,33 +1938,31 @@
 			ca->oldest_gen[b] = gens->b[b];
 	}
 
-	for (i = 0; i < BTREE_ID_NR; i++)
+	for (unsigned i = 0; i < BTREE_ID_NR; i++)
 		if (btree_type_has_ptrs(i)) {
 			c->gc_gens_btree = i;
 			c->gc_gens_pos = POS_MIN;
 
-			ret = for_each_btree_key_commit(trans, iter, i,
-					POS_MIN,
-					BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
-					k,
-					NULL, NULL,
-					BCH_TRANS_COMMIT_no_enospc,
-					gc_btree_gens_key(trans, &iter, k));
-			if (ret && !bch2_err_matches(ret, EROFS))
-				bch_err_fn(c, ret);
+			ret = bch2_trans_run(c,
+				for_each_btree_key_commit(trans, iter, i,
+						POS_MIN,
+						BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+						k,
+						NULL, NULL,
+						BCH_TRANS_COMMIT_no_enospc,
+						gc_btree_gens_key(trans, &iter, k)));
			if (ret)
 				goto err;
 		}
 
-	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-			POS_MIN,
-			BTREE_ITER_PREFETCH,
-			k,
-			NULL, NULL,
-			BCH_TRANS_COMMIT_no_enospc,
-			bch2_alloc_write_oldest_gen(trans, &iter, k));
-	if (ret && !bch2_err_matches(ret, EROFS))
-		bch_err_fn(c, ret);
+	ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+				POS_MIN,
+				BTREE_ITER_PREFETCH,
+				k,
+				NULL, NULL,
+				BCH_TRANS_COMMIT_no_enospc,
+				bch2_alloc_write_oldest_gen(trans, &iter, k)));
 	if (ret)
 		goto err;
 
@@ -2038,17 +1971,18 @@
 
 	c->gc_count++;
 
-	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
+	time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
 	trace_and_count(c, gc_gens_end, c);
 err:
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		kvfree(ca->oldest_gen);
 		ca->oldest_gen = NULL;
 	}
 
-	bch2_trans_put(trans);
 	up_read(&c->gc_lock);
 	mutex_unlock(&c->gc_gens_lock);
+	if (!bch2_err_matches(ret, EROFS))
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -2058,7 +1992,6 @@ static int bch2_gc_thread(void *arg)
 {
 	struct io_clock *clock = &c->io_clock[WRITE];
 	unsigned long last = atomic64_read(&clock->now);
 	unsigned last_kick = atomic_read(&c->kick_gc);
-	int ret;
 
 	set_freezable();
 
@@ -2098,11 +2031,8 @@
 #if 0
 		ret = bch2_gc(c, false, false);
 #else
-		ret = bch2_gc_gens(c);
+		bch2_gc_gens(c);
 #endif
-		if (ret < 0)
-			bch_err_fn(c, ret);
-
 		debug_check_no_locks_held();
 	}
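
Notes on the three patterns this diff applies throughout. First, the helper added at the top, unsafe_bkey_s_c_to_s(): GC holds a read-only key view (bkey_s_c) but the unified trigger entry point bch2_key_trigger() takes the mutable view (bkey_s), so the helper casts the const away; GC never writes through the result. The following standalone sketch uses simplified stand-in types (the real definitions in bkey_types.h carry more fields and a packed representation) to show what the cast does and why the triple-brace initializer matches the nested anonymous union/struct:

#include <stdio.h>

/* Simplified stand-ins; the real bkey/bch_val definitions are richer. */
struct bkey    { unsigned size; };
struct bch_val { int unused; };

/* Const view of a key + value, as handed out by iterators. */
struct bkey_s_c {
	const struct bkey	*k;
	const struct bch_val	*v;
};

/* Mutable view, as taken by the unified trigger path. */
struct bkey_s {
	union {
	struct {
		struct bkey	*k;
		struct bch_val	*v;
	};
	};
};

/* Mirrors the helper this diff adds: dropping const is safe because the
 * GC path only reads through the result; the "unsafe_" prefix flags that
 * the compiler no longer checks this.  The {{{ }}} initializer steps
 * through struct -> union -> anonymous struct. */
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
	return (struct bkey_s) {{{
		(struct bkey *) k.k,
		(struct bch_val *) k.v
	}}};
}

int main(void)
{
	struct bkey key = { .size = 8 };
	struct bkey_s_c view = { .k = &key, .v = NULL };
	struct bkey_s mut = unsafe_bkey_s_c_to_s(view);

	printf("key size seen through mutable view: %u\n", mut.k->size);
	return 0;
}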
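Second, every for_each_member_device(ca, c, i) loop becomes for_each_member_device(c, ca), and the caller-declared device pointer and index disappear. A minimal model of the new-style iterator is below; next_dev() is a hypothetical stand-in for the real ref-counted lookup (the upstream macro also manages percpu refs, which this sketch omits):

#include <stdio.h>

struct bch_dev { unsigned dev_idx; };

struct bch_fs {
	struct bch_dev	*devs[4];
	unsigned	nr_devs;
};

/* Hypothetical stand-in for the real helper: return the member after
 * *prev*, or NULL when iteration is done. */
static struct bch_dev *next_dev(struct bch_fs *c, struct bch_dev *prev)
{
	unsigned i = prev ? prev->dev_idx + 1 : 0;

	return i < c->nr_devs ? c->devs[i] : NULL;
}

/* Model of the new-style macro: it declares the loop variable itself,
 * which is what lets this diff delete all the ca/i declarations. */
#define for_each_member_device_model(_c, _ca)				\
	for (struct bch_dev *_ca = NULL; (_ca = next_dev(_c, _ca)); )

int main(void)
{
	struct bch_dev d0 = { 0 }, d1 = { 1 };
	struct bch_fs c = { .devs = { &d0, &d1 }, .nr_devs = 2 };

	for_each_member_device_model(&c, ca)
		printf("device %u\n", ca->dev_idx);
	return 0;
}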
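Third, the conversions of bch2_gc_alloc_done(), bch2_gc_reflink_done(), bch2_gc_stripes_done() and bch2_gc_gens() all share one shape: an explicit bch2_trans_get()/bch2_trans_put() pair, plus an err: unwind label, collapses into a single bch2_trans_run(c, ...) expression. A runnable model of that macro, under the assumption that it simply brackets the body with get/put and yields the body's return value (it also binds the name "trans" so the body can use it). This uses a GNU statement expression, as kernel code does:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real types; bch2_trans_run() itself lives in the
 * btree iterator headers. */
struct bch_fs { int readonly; };
struct btree_trans { struct bch_fs *c; };

static struct btree_trans *trans_get(struct bch_fs *c)
{
	struct btree_trans *trans = calloc(1, sizeof(*trans));

	trans->c = c;
	return trans;
}

static void trans_put(struct btree_trans *trans)
{
	free(trans);
}

/* Model of bch2_trans_run(): acquire a transaction named "trans",
 * evaluate the body, always release - which is why the diff can drop
 * the explicit get/put pairs and the "err:" unwind paths. */
#define trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = trans_get(_c);			\
	int _ret = (_do);						\
	trans_put(trans);						\
	_ret;								\
})

static int walk_keys(struct btree_trans *trans)
{
	return trans->c->readonly ? -1 : 0;	/* pretend btree work */
}

int main(void)
{
	struct bch_fs c = { .readonly = 0 };

	printf("ret = %d\n", trans_run(&c, walk_keys(trans)));
	return 0;
}

The same diff also makes the error-logging helpers unconditional (bch_err_fn()/bch_err_msg() are called without an "if (ret)" guard), which is consistent with those helpers checking the error code internally.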