bch2_topology_error(c);
if (bkey_deleted(&prev->k->k)) {
- pr_buf(&buf1, "start of node: ");
+ prt_printf(&buf1, "start of node: ");
bch2_bpos_to_text(&buf1, node_start);
} else {
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
}
bch2_btree_node_drop_keys_outside_node(b);
-
+ bkey_copy(&b->key, &new->k_i);
return 0;
}
int ret = 0;
if (!prev) {
- pr_buf(&buf1, "start of node: ");
+ prt_printf(&buf1, "start of node: ");
bch2_bpos_to_text(&buf1, b->data->min_key);
} else {
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
struct bkey_buf prev_k, cur_k;
struct btree *prev = NULL, *cur = NULL;
bool have_child, dropped_children = false;
- struct printbuf buf;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
if (!b->c.level)
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
if (mustfix_fsck_err_on(ret == -EIO, c,
- "Unreadable btree node at btree %s level %u:\n"
+ "Topology repair: unreadable btree node at btree %s level %u:\n"
" %s",
bch2_btree_ids[b->c.btree_id],
b->c.level - 1,
bch2_btree_node_evict(c, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
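+ /*
+ * Editor's note (inference from the surrounding code): after the evict above,
+ * cur must not be used again, so it is cleared before the loop continues:
+ */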
+ cur = NULL;
if (ret)
break;
continue;
bch2_btree_node_evict(c, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
+ cur = NULL;
if (ret)
break;
continue;
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
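+ /*
+ * Editor's note: with the reconstruct_alloc option set, the fsck_err_on()
+ * prompt below is short-circuited and the repair branch is taken
+ * unconditionally:
+ */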
- if (fsck_err_on(!g->gen_valid, c,
+ if (c->opts.reconstruct_alloc ||
+ fsck_err_on(!g->gen_valid, c,
"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (!p.ptr.cached) {
- g->_mark.gen = p.ptr.gen;
g->gen_valid = true;
+ g->gen = p.ptr.gen;
} else {
do_update = true;
}
}
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, c,
"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->mark.gen,
+ p.ptr.gen, g->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (!p.ptr.cached) {
- g->_mark.gen = p.ptr.gen;
g->gen_valid = true;
- g->_mark.data_type = 0;
- g->_mark.dirty_sectors = 0;
- g->_mark.cached_sectors = 0;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
} else {
do_update = true;
}
}
- if (fsck_err_on(gen_cmp(g->mark.gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
+ if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
p.ptr.gen,
(printbuf_reset(&buf),
do_update = true;
if (fsck_err_on(!p.ptr.cached &&
- gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
+ gen_cmp(p.ptr.gen, g->gen) < 0, c,
"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->mark.gen,
+ p.ptr.gen, g->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
do_update = true;
- if (data_type != BCH_DATA_btree && p.ptr.gen != g->mark.gen)
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
continue;
- if (fsck_err_on(g->mark.data_type &&
- g->mark.data_type != data_type, c,
+ if (fsck_err_on(g->data_type &&
+ g->data_type != data_type, c,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[g->mark.data_type],
+ bch2_data_types[g->data_type],
bch2_data_types[data_type],
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (data_type == BCH_DATA_btree) {
- g->_mark.data_type = data_type;
+ g->data_type = data_type;
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
} else {
do_update = true;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
- ptr->gen = g->mark.gen;
+ ptr->gen = g->gen;
}
} else {
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
(ptr->cached &&
- (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
+ (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
(!ptr->cached &&
- gen_cmp(ptr->gen, g->mark.gen) < 0) ||
- gen_cmp(g->mark.gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
- (g->mark.data_type &&
- g->mark.data_type != data_type);
+ gen_cmp(ptr->gen, g->gen) < 0) ||
+ gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
+ (g->data_type &&
+ g->data_type != data_type);
}));
again:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
if (level)
bch2_btree_node_update_key_early(c, btree_id, level - 1, *k, new);
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, *k);
- bch_info(c, "updated %s", buf.buf);
+ if (c->opts.verbose) {
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, *k);
+ bch_info(c, "updated %s", buf.buf);
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
- bch_info(c, "new key %s", buf.buf);
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
+ bch_info(c, "new key %s", buf.buf);
+ }
*k = bkey_i_to_s_c(new);
}
atomic64_set(&c->key_version, k->k->version.lo);
}
- ret = __bch2_trans_do(trans, NULL, NULL, 0,
+ ret = commit_do(trans, NULL, NULL, 0,
bch2_mark_key(trans, old, *k, flags));
fsck_err:
err:
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
- unsigned depth = metadata_only ? 1
- : bch2_expensive_debug_checks ? 0
- : !btree_node_type_needs_gc(btree_id) ? 1
- : 0;
+ unsigned depth = metadata_only ? 1 : 0;
int ret = 0;
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
{
struct bch_fs *c = trans->c;
struct btree *b;
- unsigned target_depth = metadata_only ? 1
- : bch2_expensive_debug_checks ? 0
- : !btree_node_type_needs_gc(btree_id) ? 1
- : 0;
+ unsigned target_depth = metadata_only ? 1 : 0;
struct printbuf buf = PRINTBUF;
int ret = 0;
genradix_free(&c->gc_stripes);
for_each_member_device(ca, c, i) {
- kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
+ kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket));
- ca->buckets[1] = NULL;
+ ca->buckets_gc = NULL;
free_percpu(ca->usage_gc);
ca->usage_gc = NULL;
{
struct bch_dev *ca = NULL;
struct printbuf buf = PRINTBUF;
- bool verify = !metadata_only && (!initial ||
- (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
+ bool verify = !metadata_only &&
+ !c->opts.reconstruct_alloc &&
+ (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
unsigned i, dev;
int ret = 0;
percpu_down_write(&c->mark_lock);
#define copy_field(_f, _msg, ...) \
- if (dst->_f != src->_f) { \
- if (verify) \
- fsck_err(c, _msg ": got %llu, should be %llu" \
- , ##__VA_ARGS__, dst->_f, src->_f); \
- dst->_f = src->_f; \
- }
+ if (dst->_f != src->_f && \
+ (!verify || \
+ fsck_err(c, _msg ": got %llu, should be %llu" \
+ , ##__VA_ARGS__, dst->_f, src->_f))) \
+ dst->_f = src->_f
#define copy_stripe_field(_f, _msg, ...) \
- if (dst->_f != src->_f) { \
- if (verify) \
- fsck_err(c, "stripe %zu has wrong "_msg \
- ": got %u, should be %u", \
- iter.pos, ##__VA_ARGS__, \
- dst->_f, src->_f); \
- dst->_f = src->_f; \
- }
+ if (dst->_f != src->_f && \
+ (!verify || \
+ fsck_err(c, "stripe %zu has wrong "_msg \
+ ": got %u, should be %u", \
+ iter.pos, ##__VA_ARGS__, \
+ dst->_f, src->_f))) \
+ dst->_f = src->_f
#define copy_dev_field(_f, _msg, ...) \
copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
#define copy_fs_field(_f, _msg, ...) \
dev_usage_u64s());
copy_dev_field(buckets_ec, "buckets_ec");
- copy_dev_field(buckets_unavailable, "buckets_unavailable");
for (i = 0; i < BCH_DATA_NR; i++) {
copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
}
for_each_member_device(ca, c, i) {
- BUG_ON(ca->buckets[1]);
+ BUG_ON(ca->buckets_gc);
BUG_ON(ca->usage_gc);
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
percpu_ref_put(&ca->ref);
return -ENOMEM;
}
+
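+ /*
+ * Editor's note (assumption): gc usage counters start out with every usable
+ * bucket accounted as BCH_DATA_free; marking then moves buckets to their
+ * actual data types:
+ */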
+ this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
+ ca->mi.nbuckets - ca->mi.first_bucket);
}
return 0;
}
+/* returns true if not equal */
+static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
+ struct bch_alloc_v4 r)
+{
+ return l.gen != r.gen ||
+ l.oldest_gen != r.oldest_gen ||
+ l.data_type != r.data_type ||
+ l.dirty_sectors != r.dirty_sectors ||
+ l.cached_sectors != r.cached_sectors ||
+ l.stripe_redundancy != r.stripe_redundancy ||
+ l.stripe != r.stripe;
+}
+
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
bool metadata_only)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
- struct bucket *g;
+ struct bucket gc, *b;
struct bkey_s_c k;
- struct bkey_alloc_unpacked old_u, new_u, gc_u;
- struct bkey_alloc_buf *a;
+ struct bkey_i_alloc_v4 *a;
+ struct bch_alloc_v4 old, new;
+ enum bch_data_type type;
int ret;
k = bch2_btree_iter_peek_slot(iter);
if (ret)
return ret;
- old_u = new_u = bch2_alloc_unpack(k);
+ bch2_alloc_to_v4(k, &old);
+ new = old;
percpu_down_read(&c->mark_lock);
- g = gc_bucket(ca, iter->pos.offset);
- gc_u = (struct bkey_alloc_unpacked) {
- .dev = iter->pos.inode,
- .bucket = iter->pos.offset,
- .gen = g->mark.gen,
- .data_type = g->mark.data_type,
- .dirty_sectors = g->mark.dirty_sectors,
- .cached_sectors = g->mark.cached_sectors,
- .read_time = g->io_time[READ],
- .write_time = g->io_time[WRITE],
- .stripe = g->stripe,
- .stripe_redundancy = g->stripe_redundancy,
- };
+ b = gc_bucket(ca, iter->pos.offset);
+
+ /*
+ * b->data_type doesn't yet include need_discard & need_gc_gen states -
+ * fix that here:
+ */
+ type = __alloc_data_type(b->dirty_sectors,
+ b->cached_sectors,
+ b->stripe,
+ old,
+ b->data_type);
+ if (b->data_type != type) {
+ struct bch_dev_usage *u;
+
+ preempt_disable();
+ u = this_cpu_ptr(ca->usage_gc);
+ u->d[b->data_type].buckets--;
+ b->data_type = type;
+ u->d[b->data_type].buckets++;
+ preempt_enable();
+ }
+
+ gc = *b;
percpu_up_read(&c->mark_lock);
if (metadata_only &&
- gc_u.data_type != BCH_DATA_sb &&
- gc_u.data_type != BCH_DATA_journal &&
- gc_u.data_type != BCH_DATA_btree)
+ gc.data_type != BCH_DATA_sb &&
+ gc.data_type != BCH_DATA_journal &&
+ gc.data_type != BCH_DATA_btree)
return 0;
- if (gen_after(old_u.gen, gc_u.gen))
+ if (gen_after(old.gen, gc.gen))
return 0;
#define copy_bucket_field(_f) \
- if (fsck_err_on(new_u._f != gc_u._f, c, \
+ if (c->opts.reconstruct_alloc || \
+ fsck_err_on(new._f != gc._f, c, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
- new_u.gen, \
- bch2_data_types[new_u.data_type], \
- new_u._f, gc_u._f)) \
- new_u._f = gc_u._f; \
+ gc.gen, \
+ bch2_data_types[gc.data_type], \
+ new._f, gc._f)) \
+ new._f = gc._f; \
copy_bucket_field(gen);
copy_bucket_field(data_type);
- copy_bucket_field(stripe);
copy_bucket_field(dirty_sectors);
copy_bucket_field(cached_sectors);
copy_bucket_field(stripe_redundancy);
copy_bucket_field(stripe);
#undef copy_bucket_field
- if (!bkey_alloc_unpacked_cmp(old_u, new_u))
+ if (!bch2_alloc_v4_cmp(old, new))
return 0;
- a = bch2_alloc_pack(trans, new_u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
+
+ a->v = new;
+
+ /*
+ * The trigger normally makes sure this is set, but we're not running
+ * triggers:
+ */
+ if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
+ a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- ret = bch2_trans_update(trans, iter, &a->k, BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
return ret;
}
if (bkey_cmp(iter.pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
break;
- ret = __bch2_trans_do(&trans, NULL, NULL,
+ ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW,
bch2_alloc_write_key(&trans, &iter,
metadata_only));
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
struct bch_dev *ca;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bucket *g;
+ struct bch_alloc_v4 a;
unsigned i;
+ int ret;
for_each_member_device(ca, c, i) {
struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
GFP_KERNEL|__GFP_ZERO);
if (!buckets) {
percpu_ref_put(&ca->ref);
- percpu_up_write(&c->mark_lock);
bch_err(c, "error allocating ca->buckets[gc]");
return -ENOMEM;
}
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = ca->mi.nbuckets;
- rcu_assign_pointer(ca->buckets[1], buckets);
+ rcu_assign_pointer(ca->buckets_gc, buckets);
};
- return bch2_alloc_read(c, true, metadata_only);
+ bch2_trans_init(&trans, c, 0, 0);
+
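+ /*
+ * Editor's note: instead of calling bch2_alloc_read() (removed above), gc now
+ * seeds its in-memory bucket state directly from the alloc btree:
+ */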
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ g = gc_bucket(ca, k.k->p.offset);
+
+ bch2_alloc_to_v4(k, &a);
+
+ g->gen_valid = 1;
+ g->gen = a.gen;
+
+ if (metadata_only &&
+ (a.data_type == BCH_DATA_user ||
+ a.data_type == BCH_DATA_cached ||
+ a.data_type == BCH_DATA_parity)) {
+ g->data_type = a.data_type;
+ g->dirty_sectors = a.dirty_sectors;
+ g->cached_sectors = a.cached_sectors;
+ g->stripe = a.stripe;
+ g->stripe_redundancy = a.stripe_redundancy;
+ }
+ }
+ bch2_trans_iter_exit(&trans, &iter);
+
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ bch_err(c, "error reading alloc info at gc start: %i", ret);
+
+ return ret;
}
static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
unsigned i;
for_each_member_device(ca, c, i) {
- struct bucket_array *buckets = __bucket_array(ca, true);
+ struct bucket_array *buckets = gc_bucket_array(ca);
struct bucket *g;
for_each_bucket(g, buckets) {
if (metadata_only &&
- (g->mark.data_type == BCH_DATA_user ||
- g->mark.data_type == BCH_DATA_cached ||
- g->mark.data_type == BCH_DATA_parity))
+ (g->data_type == BCH_DATA_user ||
+ g->data_type == BCH_DATA_cached ||
+ g->data_type == BCH_DATA_parity))
continue;
- g->_mark.dirty_sectors = 0;
- g->_mark.cached_sectors = 0;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
}
};
}
else
*bkey_refcount(new) = cpu_to_le64(r->refcount);
- ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+ ret = commit_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_reflink, new));
kfree(new);
for (i = 0; i < new->v.nr_blocks; i++)
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
- ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+ ret = commit_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_reflink, &new->k_i));
kfree(new);
}
*/
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
- struct bch_dev *ca;
- u64 start_time = local_clock();
- unsigned i, iter = 0;
+ unsigned iter = 0;
int ret;
lockdep_assert_held(&c->state_lock);
- trace_gc_start(c);
down_write(&c->gc_lock);
- /* flush interior btree updates: */
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c));
+ bch2_btree_interior_updates_flush(c);
ret = bch2_gc_start(c, metadata_only) ?:
bch2_gc_alloc_start(c, metadata_only) ?:
if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
c->opts.fix_errors != FSCK_OPT_NO) {
- bch_info(c, "starting topology repair pass");
+ bch_info(c, "Starting topology repair pass");
ret = bch2_repair_topology(c);
if (ret)
goto out;
- bch_info(c, "topology repair pass done");
+ bch_info(c, "Topology repair pass done");
set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
}
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
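+ /*
+ * Editor's note: recording the error in the superblock means the repair pass
+ * gated on BCH_SB_HAS_TOPOLOGY_ERRORS (above) will run when gc is retried:
+ */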
+ SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, true);
ret = 0;
}
up_write(&c->gc_lock);
- trace_gc_end(c);
- bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
-
- /*
- * Wake up allocator in case it was waiting for buckets
- * because of not being able to inc gens
- */
- for_each_member_device(ca, c, i)
- bch2_wake_allocator(ca);
-
/*
* At startup, allocations can happen directly instead of via the
* allocator thread - issue wakeup in case they blocked on gc_lock:
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bkey_s_c k;
- struct bkey_alloc_unpacked u;
+ struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a_mut;
int ret;
k = bch2_btree_iter_peek_slot(iter);
if (ret)
return ret;
- u = bch2_alloc_unpack(k);
+ bch2_alloc_to_v4(k, &a);
- if (u.oldest_gen == ca->oldest_gen[iter->pos.offset])
+ if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
return 0;
- u.oldest_gen = ca->oldest_gen[iter->pos.offset];
+ a_mut = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ return ret;
+
+ a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
+ a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);
- return bch2_alloc_write(trans, iter, &u, BTREE_TRIGGER_NORUN);
+ return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}
int bch2_gc_gens(struct bch_fs *c)
if (!mutex_trylock(&c->gc_gens_lock))
return 0;
+ trace_gc_gens_start(c);
down_read(&c->gc_lock);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
- ret = __bch2_trans_do(&trans, NULL, NULL,
+ ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL,
bch2_alloc_write_oldest_gen(&trans, &iter));
if (ret) {
c->gc_count++;
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
+ trace_gc_gens_end(c);
err:
for_each_member_device(ca, c, i) {
kvfree(ca->oldest_gen);