struct bpos expected_start = bkey_deleted(&prev->k->k)
? node_start
: bpos_successor(prev->k->k.p);
- char buf1[200], buf2[200];
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
int ret = 0;
if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
- if (bkey_deleted(&prev->k->k)) {
- struct printbuf out = PBUF(buf1);
- pr_buf(&out, "start of node: ");
- bch2_bpos_to_text(&out, node_start);
- } else {
- bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
- }
-
if (bpos_cmp(expected_start, bp->v.min_key)) {
bch2_topology_error(c);
+ if (bkey_deleted(&prev->k->k)) {
+ pr_buf(&buf1, "start of node: ");
+ bch2_bpos_to_text(&buf1, node_start);
+ } else {
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
+ }
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
+
if (__fsck_err(c,
FSCK_CAN_FIX|
FSCK_CAN_IGNORE|
" prev %s\n"
" cur %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1,
- (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) &&
+ buf1.buf, buf2.buf) &&
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- return FSCK_ERR_START_TOPOLOGY_REPAIR;
+ ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
+ goto err;
} else {
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
}
if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
bch2_topology_error(c);
+ printbuf_reset(&buf1);
+ printbuf_reset(&buf2);
+
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
+ bch2_bpos_to_text(&buf2, node_end);
+
if (__fsck_err(c,
FSCK_CAN_FIX|
FSCK_CAN_IGNORE|
" %s\n"
" expected %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
- (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) &&
+ buf1.buf, buf2.buf) &&
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- return FSCK_ERR_START_TOPOLOGY_REPAIR;
+ ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
+ goto err;
} else {
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
}
}
bch2_bkey_buf_copy(prev, c, cur.k);
+err:
fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
return ret;
}
}
}
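+/*
+ * Update a btree node's key in the btree node cache during initial GC, before
+ * transactions are running: look the node up by its old key, then rehash it
+ * under the new one.
+ */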
+static void bch2_btree_node_update_key_early(struct bch_fs *c,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new)
+{
+ struct btree *b;
+ struct bkey_buf tmp;
+ int ret;
+
+ bch2_bkey_buf_init(&tmp);
+ bch2_bkey_buf_reassemble(&tmp, c, old);
+
+ b = bch2_btree_node_get_noiter(c, tmp.k, btree, level, true);
+ if (!IS_ERR_OR_NULL(b)) {
+ mutex_lock(&c->btree_cache.lock);
+
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, new);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+
+ mutex_unlock(&c->btree_cache.lock);
+ six_unlock_read(&b->c.lock);
+ }
+
+ bch2_bkey_buf_exit(&tmp, c);
+}
+
static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
struct bkey_i_btree_ptr_v2 *new;
struct bpos expected_start = !prev
? b->data->min_key
: bpos_successor(prev->key.k.p);
- char buf1[200], buf2[200];
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
int ret = 0;
if (!prev) {
- struct printbuf out = PBUF(buf1);
- pr_buf(&out, "start of node: ");
- bch2_bpos_to_text(&out, b->data->min_key);
+ pr_buf(&buf1, "start of node: ");
+ bch2_bpos_to_text(&buf1, b->data->min_key);
} else {
- bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
}
- bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key));
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
if (prev &&
bpos_cmp(expected_start, cur->data->min_key) > 0 &&
" node %s\n"
" next %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1, buf2))
- return DROP_PREV_NODE;
+ buf1.buf, buf2.buf)) {
+ ret = DROP_PREV_NODE;
+ goto out;
+ }
if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
bpos_predecessor(cur->data->min_key)), c,
" node %s\n"
" next %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1, buf2))
+ buf1.buf, buf2.buf))
ret = set_node_max(c, prev,
bpos_predecessor(cur->data->min_key));
} else {
" prev %s\n"
" node %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1, buf2))
- return DROP_THIS_NODE;
+ buf1.buf, buf2.buf)) {
+ ret = DROP_THIS_NODE;
+ goto out;
+ }
if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" node %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1, buf2))
+ buf1.buf, buf2.buf))
ret = set_node_min(c, cur, expected_start);
}
+out:
fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
return ret;
}
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
struct btree *child)
{
- char buf1[200], buf2[200];
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
int ret = 0;
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
+ bch2_bpos_to_text(&buf2, b->key.k.p);
+
if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
- (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1),
- (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) {
+ buf1.buf, buf2.buf)) {
ret = set_node_max(c, child, b->key.k.p);
if (ret)
- return ret;
+ goto err;
}
+err:
fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
return ret;
}
struct bkey_buf prev_k, cur_k;
struct btree *prev = NULL, *cur = NULL;
bool have_child, dropped_children = false;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
if (!b->c.level)
false);
ret = PTR_ERR_OR_ZERO(cur);
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
+
if (mustfix_fsck_err_on(ret == -EIO, c,
"Unreadable btree node at btree %s level %u:\n"
" %s",
bch2_btree_ids[b->c.btree_id],
b->c.level - 1,
- (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) {
+ buf.buf)) {
bch2_btree_node_evict(c, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
have_child = true;
}
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
if (mustfix_fsck_err_on(!have_child, c,
"empty interior btree node at btree %s level %u\n"
" %s",
bch2_btree_ids[b->c.btree_id],
- b->c.level,
- (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf)))
+ b->c.level, buf.buf))
ret = DROP_THIS_NODE;
err:
fsck_err:
if (!ret && dropped_children)
goto again;
+ printbuf_exit(&buf);
return ret;
}
const union bch_extent_entry *entry;
struct extent_ptr_decoded p = { 0 };
bool do_update = false;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
/*
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
p.ptr.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (!p.ptr.cached) {
- g->_mark.gen = p.ptr.gen;
g->gen_valid = true;
+ g->gen = p.ptr.gen;
} else {
do_update = true;
}
}
- if (fsck_err_on(data_type == BCH_DATA_btree &&
- g->mark.gen != p.ptr.gen, c,
- "bucket %u:%zu data type %s has metadata but wrong gen: %u != %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->mark.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
- g->_mark.data_type = data_type;
- g->gen_valid = true;
- }
-
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, c,
"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->mark.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (!p.ptr.cached) {
- g->_mark.gen = p.ptr.gen;
g->gen_valid = true;
- g->_mark.data_type = 0;
- g->_mark.dirty_sectors = 0;
- g->_mark.cached_sectors = 0;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
} else {
do_update = true;
}
}
- if (fsck_err_on(gen_cmp(g->mark.gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
+ if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
p.ptr.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
do_update = true;
if (fsck_err_on(!p.ptr.cached &&
- gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
+ gen_cmp(p.ptr.gen, g->gen) < 0, c,
"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->mark.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
do_update = true;
- if (p.ptr.gen != g->mark.gen)
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
continue;
- if (fsck_err_on(g->mark.data_type &&
- g->mark.data_type != data_type, c,
+ if (fsck_err_on(g->data_type &&
+ g->data_type != data_type, c,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[g->mark.data_type],
+ bch2_data_types[g->data_type],
bch2_data_types[data_type],
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (data_type == BCH_DATA_btree) {
- g->_mark.data_type = data_type;
- g->gen_valid = true;
+ g->data_type = data_type;
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
} else {
do_update = true;
}
"pointer to nonexistent stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
do_update = true;
if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
"pointer does not match stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
- (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
do_update = true;
}
}
if (is_root) {
bch_err(c, "cannot update btree roots yet");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
bch_err(c, "%s: error allocating new key", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
bkey_reassemble(new, *k);
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
- ptr->gen = g->mark.gen;
+ ptr->gen = g->gen;
}
} else {
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
(ptr->cached &&
- (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
+ (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
(!ptr->cached &&
- gen_cmp(ptr->gen, g->mark.gen) < 0) ||
- gen_cmp(g->mark.gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
- (g->mark.data_type &&
- g->mark.data_type != data_type);
+ gen_cmp(ptr->gen, g->gen) < 0) ||
+ gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
+ (g->data_type &&
+ g->data_type != data_type);
}));
again:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
}
ret = bch2_journal_key_insert_take(c, btree_id, level, new);
-
- if (ret)
+ if (ret) {
kfree(new);
- else {
- bch2_bkey_val_to_text(&PBUF(buf), c, *k);
- bch_info(c, "updated %s", buf);
- bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(new));
- bch_info(c, "new key %s", buf);
- *k = bkey_i_to_s_c(new);
+ goto err;
}
+
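+ /* If the key points to a btree node, keep the cached node's key in sync: */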
+ if (level)
+ bch2_btree_node_update_key_early(c, btree_id, level - 1, *k, new);
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, *k);
+ bch_info(c, "updated %s", buf.buf);
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
+ bch_info(c, "new key %s", buf.buf);
+
+ *k = bkey_i_to_s_c(new);
}
+err:
fsck_err:
+ printbuf_exit(&buf);
return ret;
}
static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
unsigned level, bool is_root,
struct bkey_s_c *k,
- u8 *max_stale, bool initial)
+ bool initial)
{
struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs;
- const struct bch_extent_ptr *ptr;
struct bkey deleted = KEY(0, 0, 0);
struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
unsigned flags =
if (initial) {
BUG_ON(bch2_journal_seq_verify &&
- k->k->version.lo > journal_cur_seq(&c->journal));
+ k->k->version.lo > atomic64_read(&c->journal.seq));
ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k);
if (ret)
atomic64_set(&c->key_version, k->k->version.lo);
}
- ptrs = bch2_bkey_ptrs_c(*k);
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_GC_BUCKET(ca, ptr);
-
- if (gen_after(g->oldest_gen, ptr->gen))
- g->oldest_gen = ptr->gen;
-
- *max_stale = max(*max_stale, ptr_stale(ca, ptr));
- }
-
- ret = bch2_mark_key(trans, old, *k, flags);
+ ret = __bch2_trans_do(trans, NULL, NULL, 0,
+ bch2_mark_key(trans, old, *k, flags));
fsck_err:
err:
if (ret)
return ret;
}
-static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, u8 *max_stale,
- bool initial)
+static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey_buf prev, cur;
int ret = 0;
- *max_stale = 0;
-
if (!btree_node_type_needs_gc(btree_node_type(b)))
return 0;
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
- &k, max_stale, initial);
+ &k, initial);
if (ret)
break;
: bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
- u8 max_stale = 0;
int ret = 0;
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
gc_pos_set(c, gc_pos_btree_node(b));
- ret = btree_gc_mark_node(trans, b, &max_stale, initial);
+ ret = btree_gc_mark_node(trans, b, initial);
if (ret)
break;
-
- if (!initial) {
- if (max_stale > 64)
- bch2_btree_node_rewrite(trans, &iter, b,
- BTREE_INSERT_NOWAIT|
- BTREE_INSERT_GC_LOCK_HELD);
- else if (!bch2_btree_gc_rewrite_disabled &&
- (bch2_btree_gc_always_rewrite || max_stale > 16))
- bch2_btree_node_rewrite(trans, &iter,
- b, BTREE_INSERT_NOWAIT|
- BTREE_INSERT_GC_LOCK_HELD);
- }
}
bch2_trans_iter_exit(trans, &iter);
if (!btree_node_fake(b)) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
- &k, &max_stale, initial);
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
+ true, &k, initial);
}
gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
mutex_unlock(&c->btree_root_lock);
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
- u8 max_stale = 0;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
- &k, &max_stale, true);
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
+ false, &k, true);
if (ret) {
bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
goto fsck_err;
" %s",
bch2_btree_ids[b->c.btree_id],
b->c.level - 1,
- (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) &&
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
bch_info(c, "Halting mark and sweep to start topology repair pass");
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
bch2_btree_and_journal_iter_exit(&iter);
+ printbuf_exit(&buf);
return ret;
}
: bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
- u8 max_stale = 0;
- char buf[100];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
b = c->btree_roots[btree_id].b;
return 0;
six_lock_read(&b->c.lock, NULL, NULL);
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->min_key);
if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
- "btree root with incorrect min_key: %s",
- (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
+ "btree root with incorrect min_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = FSCK_ERR_EXIT;
goto fsck_err;
}
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->max_key);
if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
- "btree root with incorrect max_key: %s",
- (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
+ "btree root with incorrect max_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = FSCK_ERR_EXIT;
goto fsck_err;
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
- &k, &max_stale, true);
+ &k, true);
}
fsck_err:
six_unlock_read(&b->c.lock);
if (ret < 0)
bch_err(c, "%s: ret %i", __func__, ret);
+ printbuf_exit(&buf);
return ret;
}
bch2_trans_init(&trans, c, 0, 0);
+ if (initial)
+ trans.is_initial_gc = true;
+
for (i = 0; i < BTREE_ID_NR; i++)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
genradix_free(&c->gc_stripes);
for_each_member_device(ca, c, i) {
- kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
+ kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket));
- ca->buckets[1] = NULL;
+ ca->buckets_gc = NULL;
free_percpu(ca->usage_gc);
ca->usage_gc = NULL;
bool initial, bool metadata_only)
{
struct bch_dev *ca = NULL;
+ struct printbuf buf = PRINTBUF;
bool verify = !metadata_only && (!initial ||
(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
unsigned i, dev;
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
- char buf[80];
if (metadata_only &&
(e->data_type == BCH_DATA_user ||
e->data_type == BCH_DATA_cached))
continue;
- bch2_replicas_entry_to_text(&PBUF(buf), e);
+ printbuf_reset(&buf);
+ bch2_replicas_entry_to_text(&buf, e);
- copy_fs_field(replicas[i], "%s", buf);
+ copy_fs_field(replicas[i], "%s", buf.buf);
}
}
bch_err(c, "%s: ret %i", __func__, ret);
percpu_up_write(&c->mark_lock);
+ printbuf_exit(&buf);
return ret;
}
}
for_each_member_device(ca, c, i) {
- BUG_ON(ca->buckets[1]);
+ BUG_ON(ca->buckets_gc);
BUG_ON(ca->usage_gc);
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
return 0;
}
+/* returns true if not equal */
+static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
+ struct bch_alloc_v4 r)
+{
+ return l.gen != r.gen ||
+ l.oldest_gen != r.oldest_gen ||
+ l.data_type != r.data_type ||
+ l.dirty_sectors != r.dirty_sectors ||
+ l.cached_sectors != r.cached_sectors ||
+ l.stripe_redundancy != r.stripe_redundancy ||
+ l.stripe != r.stripe;
+}
+
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
- bool initial, bool metadata_only)
+ bool metadata_only)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
- struct bucket *g;
+ struct bucket gc;
struct bkey_s_c k;
- struct bkey_alloc_unpacked old_u, new_u, gc_u;
- struct bkey_alloc_buf *a;
+ struct bkey_i_alloc_v4 *a;
+ struct bch_alloc_v4 old, new;
int ret;
- /*
- * For this to be correct at runtime, we'll need to figure out a way for
- * it to actually lock the key in the btree key cache:
- */
-
- if (!initial) {
- ret = bch2_btree_key_cache_flush(trans,
- BTREE_ID_alloc, iter->pos);
- if (ret)
- return ret;
- }
-
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
return ret;
- old_u = new_u = bch2_alloc_unpack(k);
+ bch2_alloc_to_v4(k, &old);
+ new = old;
percpu_down_read(&c->mark_lock);
- g = gc_bucket(ca, iter->pos.offset);
- gc_u = (struct bkey_alloc_unpacked) {
- .dev = iter->pos.inode,
- .bucket = iter->pos.offset,
- .gen = g->mark.gen,
- .oldest_gen = g->oldest_gen,
- .data_type = g->mark.data_type,
- .dirty_sectors = g->mark.dirty_sectors,
- .cached_sectors = g->mark.cached_sectors,
- .read_time = g->io_time[READ],
- .write_time = g->io_time[WRITE],
- .stripe = g->stripe,
- .stripe_redundancy = g->stripe_redundancy,
- };
+ gc = *gc_bucket(ca, iter->pos.offset);
percpu_up_read(&c->mark_lock);
if (metadata_only &&
- gc_u.data_type != BCH_DATA_sb &&
- gc_u.data_type != BCH_DATA_journal &&
- gc_u.data_type != BCH_DATA_btree)
+ gc.data_type != BCH_DATA_sb &&
+ gc.data_type != BCH_DATA_journal &&
+ gc.data_type != BCH_DATA_btree)
return 0;
- if (!bkey_alloc_unpacked_cmp(old_u, gc_u) ||
- gen_after(old_u.gen, gc_u.gen))
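+ /* The bucket gen advanced after GC sampled it; don't overwrite newer info: */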
+ if (gen_after(old.gen, gc.gen))
return 0;
#define copy_bucket_field(_f) \
- if (fsck_err_on(new_u._f != gc_u._f, c, \
+ if (fsck_err_on(new._f != gc._f, c, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
- new_u.gen, \
- bch2_data_types[new_u.data_type], \
- new_u._f, gc_u._f)) \
- new_u._f = gc_u._f; \
+ gc.gen, \
+ bch2_data_types[gc.data_type], \
+ new._f, gc._f)) \
+ new._f = gc._f; \
copy_bucket_field(gen);
copy_bucket_field(data_type);
- copy_bucket_field(stripe);
copy_bucket_field(dirty_sectors);
copy_bucket_field(cached_sectors);
copy_bucket_field(stripe_redundancy);
copy_bucket_field(stripe);
#undef copy_bucket_field
- new_u.oldest_gen = gc_u.oldest_gen;
-
- if (!bkey_alloc_unpacked_cmp(old_u, new_u))
+ if (!bch2_alloc_v4_cmp(old, new))
return 0;
- a = bch2_alloc_pack(trans, new_u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
+
+ a->v = new;
- ret = initial
- ? bch2_journal_key_insert(c, BTREE_ID_alloc, 0, &a->k)
- : bch2_trans_update(trans, iter, &a->k, BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
return ret;
}
-static int bch2_gc_alloc_done(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter iter;
ret = __bch2_trans_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW,
bch2_alloc_write_key(&trans, &iter,
- initial, metadata_only));
+ metadata_only));
if (ret)
break;
}
return ret;
}
-static int bch2_gc_alloc_start(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
struct bch_dev *ca;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bucket *g;
+ struct bch_alloc_v4 a;
unsigned i;
+ int ret;
for_each_member_device(ca, c, i) {
struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
GFP_KERNEL|__GFP_ZERO);
if (!buckets) {
percpu_ref_put(&ca->ref);
- percpu_up_write(&c->mark_lock);
bch_err(c, "error allocating ca->buckets[gc]");
return -ENOMEM;
}
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = ca->mi.nbuckets;
- rcu_assign_pointer(ca->buckets[1], buckets);
+ rcu_assign_pointer(ca->buckets_gc, buckets);
};
- return bch2_alloc_read(c, true, metadata_only);
+ bch2_trans_init(&trans, c, 0, 0);
+
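+ /*
+ * Seed GC's bucket gens from the alloc btree; in metadata-only mode
+ * user/cached/parity buckets won't be re-marked, so carry their
+ * counters forward:
+ */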
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ g = gc_bucket(ca, k.k->p.offset);
+
+ bch2_alloc_to_v4(k, &a);
+
+ g->gen_valid = 1;
+ g->gen = a.gen;
+
+ if (metadata_only &&
+ (a.data_type == BCH_DATA_user ||
+ a.data_type == BCH_DATA_cached ||
+ a.data_type == BCH_DATA_parity)) {
+ g->data_type = a.data_type;
+ g->dirty_sectors = a.dirty_sectors;
+ g->cached_sectors = a.cached_sectors;
+ g->stripe = a.stripe;
+ g->stripe_redundancy = a.stripe_redundancy;
+ }
+ }
+ bch2_trans_iter_exit(&trans, &iter);
+
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ bch_err(c, "error reading alloc info at gc start: %i", ret);
+
+ return ret;
}
-static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
- bool metadata_only)
+static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *buckets = gc_bucket_array(ca);
+ struct bucket *g;
+
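+ /*
+ * Zero the counters GC will recompute; in metadata-only mode user
+ * data buckets keep theirs, since they won't be re-marked:
+ */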
+ for_each_bucket(g, buckets) {
+ if (metadata_only &&
+ (g->data_type == BCH_DATA_user ||
+ g->data_type == BCH_DATA_cached ||
+ g->data_type == BCH_DATA_parity))
+ continue;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ }
+ }
+}
+
+static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
size_t idx = 0;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
if (metadata_only)
"reflink key has wrong refcount:\n"
" %s\n"
" should be %u",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
struct bkey_i *new;
else
*bkey_refcount(new) = cpu_to_le64(r->refcount);
- ret = initial
- ? bch2_journal_key_insert(c, BTREE_ID_stripes, 0, new)
- : __bch2_trans_do(&trans, NULL, NULL, 0,
- __bch2_btree_insert(&trans, BTREE_ID_reflink, new));
+ ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+ __bch2_btree_insert(&trans, BTREE_ID_reflink, new));
kfree(new);
if (ret)
bch2_trans_iter_exit(&trans, &iter);
c->reflink_gc_nr = 0;
bch2_trans_exit(&trans);
+ printbuf_exit(&buf);
return ret;
}
-static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
+static int bch2_gc_reflink_start(struct bch_fs *c,
bool metadata_only)
{
struct btree_trans trans;
return ret;
}
-static int bch2_gc_stripes_done(struct bch_fs *c, bool initial,
- bool metadata_only)
+static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
+{
+ struct genradix_iter iter;
+ struct reflink_gc *r;
+
+ genradix_for_each(&c->reflink_gc_table, iter, r)
+ r->refcount = 0;
+}
+
+static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct gc_stripe *m;
const struct bch_stripe *s;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
unsigned i;
int ret = 0;
"stripe has wrong block sector count %u:\n"
" %s\n"
" should be %u", i,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf),
m ? m->block_sectors[i] : 0)) {
struct bkey_i_stripe *new;
for (i = 0; i < new->v.nr_blocks; i++)
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
- ret = initial
- ? bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i)
- : __bch2_trans_do(&trans, NULL, NULL, 0,
- __bch2_btree_insert(&trans, BTREE_ID_reflink, &new->k_i));
+ ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+ __bch2_btree_insert(&trans, BTREE_ID_stripes, &new->k_i));
kfree(new);
}
}
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
+
+ printbuf_exit(&buf);
return ret;
}
+static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
+{
+ genradix_free(&c->gc_stripes);
+}
+
/**
* bch2_gc - walk _all_ references to buckets, and recompute them:
*
*/
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
- struct bch_dev *ca;
u64 start_time = local_clock();
- unsigned i, iter = 0;
+ unsigned iter = 0;
int ret;
lockdep_assert_held(&c->state_lock);
/* flush interior btree updates: */
closure_wait_event(&c->btree_interior_update_wait,
!bch2_btree_interior_updates_nr_pending(c));
-again:
+
ret = bch2_gc_start(c, metadata_only) ?:
- bch2_gc_alloc_start(c, initial, metadata_only) ?:
- bch2_gc_reflink_start(c, initial, metadata_only);
+ bch2_gc_alloc_start(c, metadata_only) ?:
+ bch2_gc_reflink_start(c, metadata_only);
if (ret)
goto out;
-
+again:
gc_pos_set(c, gc_phase(GC_PHASE_START));
bch2_mark_superblocks(c);
if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
(!iter && bch2_test_restart_gc)) {
+ if (iter++ > 2) {
+ bch_info(c, "Unable to fix bucket gens, looping");
+ ret = -EINVAL;
+ goto out;
+ }
+
/*
* XXX: make sure gens we fixed got saved
*/
- if (iter++ <= 2) {
- bch_info(c, "Second GC pass needed, restarting:");
- clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
- __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+ bch_info(c, "Second GC pass needed, restarting:");
+ clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
- percpu_down_write(&c->mark_lock);
- bch2_gc_free(c);
- percpu_up_write(&c->mark_lock);
- /* flush fsck errors, reset counters */
- bch2_flush_fsck_errs(c);
-
- goto again;
- }
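+ /* Reset in-memory GC state in place rather than freeing and restarting: */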
+ bch2_gc_stripes_reset(c, metadata_only);
+ bch2_gc_alloc_reset(c, metadata_only);
+ bch2_gc_reflink_reset(c, metadata_only);
- bch_info(c, "Unable to fix bucket gens, looping");
- ret = -EINVAL;
+ /* flush fsck errors, reset counters */
+ bch2_flush_fsck_errs(c);
+ goto again;
}
out:
if (!ret) {
bch2_journal_block(&c->journal);
- ret = bch2_gc_stripes_done(c, initial, metadata_only) ?:
- bch2_gc_reflink_done(c, initial, metadata_only) ?:
- bch2_gc_alloc_done(c, initial, metadata_only) ?:
+ ret = bch2_gc_stripes_done(c, metadata_only) ?:
+ bch2_gc_reflink_done(c, metadata_only) ?:
+ bch2_gc_alloc_done(c, metadata_only) ?:
bch2_gc_done(c, initial, metadata_only);
bch2_journal_unblock(&c->journal);
trace_gc_end(c);
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
- /*
- * Wake up allocator in case it was waiting for buckets
- * because of not being able to inc gens
- */
- for_each_member_device(ca, c, i)
- bch2_wake_allocator(ca);
-
/*
* At startup, allocations can happen directly instead of via the
* allocator thread - issue wakeup in case they blocked on gc_lock:
percpu_down_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr);
- if (gen_after(g->mark.gen, ptr->gen) > 16) {
+ if (ptr_stale(ca, ptr) > 16) {
percpu_up_read(&c->mark_lock);
return true;
}
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr);
+ u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
- if (gen_after(g->gc_gen, ptr->gen))
- g->gc_gen = ptr->gen;
+ if (gen_after(*gen, ptr->gen))
+ *gen = ptr->gen;
}
percpu_up_read(&c->mark_lock);
* For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
* node pointers currently never have cached pointers that can become stale:
*/
-static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_gc_btree_gens(struct btree_trans *trans, enum btree_id btree_id)
{
- struct btree_trans trans;
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf sk;
int ret = 0, commit_err = 0;
bch2_bkey_buf_init(&sk);
- bch2_trans_init(&trans, c, 0, 0);
- bch2_trans_iter_init(&trans, &iter, btree_id, POS_MIN,
+ bch2_trans_iter_init(trans, &iter, btree_id, POS_MIN,
BTREE_ITER_PREFETCH|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
- while ((bch2_trans_begin(&trans),
+ while ((bch2_trans_begin(trans),
k = bch2_btree_iter_peek(&iter)).k) {
ret = bkey_err(k);
bch2_extent_normalize(c, bkey_i_to_s(sk.k));
commit_err =
- bch2_trans_update(&trans, &iter, sk.k, 0) ?:
- bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_NOWAIT|
- BTREE_INSERT_NOFAIL);
+ bch2_trans_update(trans, &iter, sk.k, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_NOFAIL);
if (commit_err == -EINTR) {
commit_err = 0;
continue;
bch2_btree_iter_advance(&iter);
}
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
- bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
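+/*
+ * Write the recomputed oldest_gen back to the alloc btree, skipping buckets
+ * whose oldest_gen is already correct:
+ */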
+static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
+ struct bkey_s_c k;
+ struct bch_alloc_v4 a;
+ struct bkey_i_alloc_v4 *a_mut;
+ int ret;
+
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ bch2_alloc_to_v4(k, &a);
+
+ if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
+ return 0;
+
+ a_mut = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ return ret;
+
+ a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
+
+ return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
+}
+
int bch2_gc_gens(struct bch_fs *c)
{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
struct bch_dev *ca;
- struct bucket_array *buckets;
- struct bucket *g;
- u64 start_time = local_clock();
+ u64 b, start_time = local_clock();
unsigned i;
int ret;
* introduces a deadlock in the RO path - we currently take the state
* lock at the start of going RO, thus the gc thread may get stuck:
*/
+ if (!mutex_trylock(&c->gc_gens_lock))
+ return 0;
+
down_read(&c->gc_lock);
+ bch2_trans_init(&trans, c, 0, 0);
for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
+ struct bucket_gens *gens;
+
+ BUG_ON(ca->oldest_gen);
+
+ ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
+ if (!ca->oldest_gen) {
+ percpu_ref_put(&ca->ref);
+ ret = -ENOMEM;
+ goto err;
+ }
- for_each_bucket(g, buckets)
- g->gc_gen = g->mark.gen;
- up_read(&ca->bucket_lock);
+ gens = bucket_gens(ca);
+
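+ /* Start from the current gens; the pointer walk only lowers these: */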
+ for (b = gens->first_bucket;
+ b < gens->nbuckets; b++)
+ ca->oldest_gen[b] = gens->b[b];
}
for (i = 0; i < BTREE_ID_NR; i++)
if ((1 << i) & BTREE_ID_HAS_PTRS) {
c->gc_gens_btree = i;
c->gc_gens_pos = POS_MIN;
- ret = bch2_gc_btree_gens(c, i);
+ ret = bch2_gc_btree_gens(&trans, i);
if (ret) {
bch_err(c, "error recalculating oldest_gen: %i", ret);
goto err;
}
}
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for_each_bucket(g, buckets)
- g->oldest_gen = g->gc_gen;
- up_read(&ca->bucket_lock);
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ ret = __bch2_trans_do(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL,
+ bch2_alloc_write_oldest_gen(&trans, &iter));
+ if (ret) {
+ bch_err(c, "error writing oldest_gen: %i", ret);
+ break;
+ }
}
+ bch2_trans_iter_exit(&trans, &iter);
c->gc_gens_btree = 0;
c->gc_gens_pos = POS_MIN;
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
err:
+ for_each_member_device(ca, c, i) {
+ kvfree(ca->oldest_gen);
+ ca->oldest_gen = NULL;
+ }
+
+ bch2_trans_exit(&trans);
up_read(&c->gc_lock);
+ mutex_unlock(&c->gc_gens_lock);
return ret;
}