char buf[200];
int ret = 0;
+ /*
+ * XXX
+ * use check_bucket_ref here
+ */
bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
}
}
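+ /* bucket gen more than BUCKET_GC_GEN_MAX ahead of the ptr gen: ptr is too stale to trust */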
+ if (fsck_err_on(gen_cmp(g->mark.gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ do_update = true;
+
if (fsck_err_on(!p.ptr.cached &&
gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
}
if (p.has_ec) {
- struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
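+ /* m is NULL (or not alive) if GC found no such stripe */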
if (fsck_err_on(!m || !m->alive, c,
"pointer to nonexistent stripe %llu\n"
(!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
(!ptr->cached &&
gen_cmp(ptr->gen, g->mark.gen) < 0) ||
+ gen_cmp(g->mark.gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
(g->mark.data_type &&
g->mark.data_type != data_type);
}));
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_extent_entry_for_each(ptrs, entry) {
if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
- struct stripe *m = genradix_ptr(&c->stripes[true],
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
entry->stripe_ptr.idx);
union bch_extent_entry *next_ptr;
/* marking of btree keys/nodes: */
-static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id,
+static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
unsigned level, bool is_root,
struct bkey_s_c *k,
u8 *max_stale, bool initial)
{
+ struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs;
const struct bch_extent_ptr *ptr;
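+ /* GC has no previous version of the key: mark with a fake deleted "old" key */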
+ struct bkey deleted = KEY(0, 0, 0);
+ struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
unsigned flags =
- BTREE_TRIGGER_INSERT|
BTREE_TRIGGER_GC|
(initial ? BTREE_TRIGGER_NOATOMIC : 0);
int ret = 0;
+ deleted.p = k->k->p;
+
if (initial) {
BUG_ON(bch2_journal_seq_verify &&
k->k->version.lo > journal_cur_seq(&c->journal));
k->k->version.lo,
atomic64_read(&c->key_version)))
atomic64_set(&c->key_version, k->k->version.lo);
-
- if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c,
- "superblock not marked as containing replicas (type %u)",
- k->k->type)) {
- ret = bch2_mark_bkey_replicas(c, *k);
- if (ret) {
- bch_err(c, "error marking bkey replicas: %i", ret);
- goto err;
- }
- }
}
ptrs = bch2_bkey_ptrs_c(*k);
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
- bch2_mark_key(c, *k, flags);
+ ret = bch2_mark_key(trans, old, *k, flags);
fsck_err:
err:
if (ret)
bch_err(c, "%s: error %i", __func__, ret);
return ret;
}
-static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
+static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, u8 *max_stale,
bool initial)
{
+ struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, max_stale, initial);
if (ret)
break;
return ret;
}
-static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
+static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
bool initial, bool metadata_only)
{
- struct btree_trans trans;
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
unsigned depth = metadata_only ? 1
u8 max_stale = 0;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
- __for_each_btree_node(&trans, iter, btree_id, POS_MIN,
- 0, depth, BTREE_ITER_PREFETCH, b) {
+ __for_each_btree_node(trans, iter, btree_id, POS_MIN,
+ 0, depth, BTREE_ITER_PREFETCH, b, ret) {
bch2_verify_btree_nr_keys(b);
gc_pos_set(c, gc_pos_btree_node(b));
- ret = btree_gc_mark_node(c, b, &max_stale, initial);
+ ret = btree_gc_mark_node(trans, b, &max_stale, initial);
if (ret)
break;
if (!initial) {
if (max_stale > 64)
- bch2_btree_node_rewrite(&trans, &iter,
- b->data->keys.seq,
+ bch2_btree_node_rewrite(trans, &iter, b,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
else if (!bch2_btree_gc_rewrite_disabled &&
(bch2_btree_gc_always_rewrite || max_stale > 16))
- bch2_btree_node_rewrite(&trans, &iter,
- b->data->keys.seq,
- BTREE_INSERT_NOWAIT|
+ bch2_btree_node_rewrite(trans, &iter, b,
+ BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
}
-
- bch2_trans_cond_resched(&trans);
}
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
- ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
if (!btree_node_fake(b)) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
&k, &max_stale, initial);
}
gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
return ret;
}
-static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
+static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
unsigned target_depth)
{
+ struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, &max_stale, true);
if (ret) {
bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
break;
}
- ret = bch2_gc_btree_init_recurse(c, child,
+ ret = bch2_gc_btree_init_recurse(trans, child,
target_depth);
six_unlock_read(&child->c.lock);
return ret;
}
-static int bch2_gc_btree_init(struct bch_fs *c,
+static int bch2_gc_btree_init(struct btree_trans *trans,
enum btree_id btree_id,
bool metadata_only)
{
+ struct bch_fs *c = trans->c;
struct btree *b;
unsigned target_depth = metadata_only ? 1
: bch2_expensive_debug_checks ? 0
}
if (b->c.level >= target_depth)
- ret = bch2_gc_btree_init_recurse(c, b, target_depth);
+ ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
if (!ret) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
&k, &max_stale, true);
}
fsck_err:
static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
+ struct btree_trans trans;
enum btree_id ids[BTREE_ID_NR];
unsigned i;
int ret = 0;
+ bch2_trans_init(&trans, c, 0, 0);
+
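+ /* one transaction is shared across all the per-btree walks below */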
for (i = 0; i < BTREE_ID_NR; i++)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
for (i = 0; i < BTREE_ID_NR && !ret; i++)
ret = initial
- ? bch2_gc_btree_init(c, ids[i], metadata_only)
- : bch2_gc_btree(c, ids[i], initial, metadata_only);
+ ? bch2_gc_btree_init(&trans, ids[i], metadata_only)
+ : bch2_gc_btree(&trans, ids[i], initial, metadata_only);
if (ret < 0)
bch_err(c, "%s: ret %i", __func__, ret);
+
+ bch2_trans_exit(&trans);
return ret;
}
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_GC);
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
mutex_unlock(&c->btree_interior_update_lock);
}
struct bch_dev *ca;
unsigned i;
- genradix_free(&c->stripes[1]);
+ genradix_free(&c->reflink_gc_table);
+ genradix_free(&c->gc_stripes);
for_each_member_device(ca, c, i) {
kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
#define copy_bucket_field(_f) \
- if (dst->b[b].mark._f != src->b[b].mark._f) { \
+ if (dst->b[b]._f != src->b[b]._f) { \
if (verify) \
fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \
": got %u, should be %u", dev, b, \
dst->b[b].mark.gen, \
bch2_data_types[dst->b[b].mark.data_type],\
- dst->b[b].mark._f, src->b[b].mark._f); \
- dst->b[b]._mark._f = src->b[b].mark._f; \
+ dst->b[b]._f, src->b[b]._f); \
+ dst->b[b]._f = src->b[b]._f; \
set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
#define copy_dev_field(_f, _msg, ...) \
#define copy_fs_field(_f, _msg, ...) \
copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
- if (!metadata_only) {
- struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
- struct stripe *dst, *src;
-
- while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
- dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);
-
- if (dst->alive != src->alive ||
- dst->sectors != src->sectors ||
- dst->algorithm != src->algorithm ||
- dst->nr_blocks != src->nr_blocks ||
- dst->nr_redundant != src->nr_redundant) {
- bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
- ret = -EINVAL;
- goto fsck_err;
- }
-
- for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
- copy_stripe_field(block_sectors[i],
- "block_sectors[%u]", i);
-
- dst->blocks_nonempty = 0;
- for (i = 0; i < dst->nr_blocks; i++)
- dst->blocks_nonempty += dst->block_sectors[i] != 0;
-
- genradix_iter_advance(&iter, &c->stripes[1]);
- }
- }
-
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
size_t b;
for (b = 0; b < src->nbuckets; b++) {
- copy_bucket_field(gen);
- copy_bucket_field(data_type);
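+ /* compare and repair every field of the bucket mark, not just gen and data type */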
+ copy_bucket_field(_mark.gen);
+ copy_bucket_field(_mark.data_type);
+ copy_bucket_field(_mark.stripe);
+ copy_bucket_field(_mark.dirty_sectors);
+ copy_bucket_field(_mark.cached_sectors);
+ copy_bucket_field(stripe_redundancy);
copy_bucket_field(stripe);
- copy_bucket_field(dirty_sectors);
- copy_bucket_field(cached_sectors);
dst->b[b].oldest_gen = src->b[b].oldest_gen;
}
return 0;
}
-static int bch2_gc_reflink_done_initial_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct reflink_gc *r;
const __le64 *refcount = bkey_refcount_c(k);
char buf[200];
if (metadata_only)
return 0;
+ bch2_trans_init(&trans, c, 0, 0);
+
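+ /* both the initial and runtime paths run inside this transaction */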
if (initial) {
c->reflink_gc_idx = 0;
- ret = bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
bch2_gc_reflink_done_initial_fn);
goto out;
}
- bch2_trans_init(&trans, c, 0, 0);
-
for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
const __le64 *refcount = bkey_refcount_c(k);
}
fsck_err:
bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
out:
- genradix_free(&c->reflink_gc_table);
c->reflink_gc_nr = 0;
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
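+/* check on-disk stripe block sector counts against what GC recomputed */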
+static int bch2_gc_stripes_done_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct gc_stripe *m;
+ const struct bch_stripe *s;
+ char buf[200];
+ unsigned i;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_stripe)
+ return 0;
+
+ s = bkey_s_c_to_stripe(k).v;
+
+ m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
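+ /* a missing entry means GC counted zero sectors for every block */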
+
+ for (i = 0; i < s->nr_blocks; i++)
+ if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
+ goto inconsistent;
+ return 0;
+inconsistent:
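+ /* counts disagree: rewrite the stripe key with GC's block counts */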
+ if (fsck_err_on(true, c,
+ "stripe has wrong block sector count %u:\n"
+ " %s\n"
+ " should be %u", i,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ m ? m->block_sectors[i] : 0)) {
+ struct bkey_i_stripe *new;
+
+ new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto fsck_err;
+ }
+
+ bkey_reassemble(&new->k_i, k);
+
+ for (i = 0; i < new->v.nr_blocks; i++)
+ stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
+
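+ /* on success the journal key list owns @new; free it only on error */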
+ ret = bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i);
+ if (ret)
+ kfree(new);
+ }
+fsck_err:
+ return ret;
+}
+
+static int bch2_gc_stripes_done(struct bch_fs *c, bool initial,
+ bool metadata_only)
+{
+ struct btree_trans trans;
+ int ret = 0;
+
+ if (metadata_only)
+ return 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ if (initial) {
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_stripes,
+ bch2_gc_stripes_done_initial_fn);
+ } else {
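+ /* runtime (non-initial) GC doesn't repair stripe counts yet */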
+ BUG();
+ }
+
+ bch2_trans_exit(&trans);
return ret;
}
-static int bch2_gc_reflink_start_initial_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_gc_reflink_start_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct reflink_gc *r;
const __le64 *refcount = bkey_refcount_c(k);
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
- int ret;
+ int ret = 0;
if (metadata_only)
return 0;
- genradix_free(&c->reflink_gc_table);
+ bch2_trans_init(&trans, c, 0, 0);
c->reflink_gc_nr = 0;
- if (initial)
- return bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
- bch2_gc_reflink_start_initial_fn);
-
- bch2_trans_init(&trans, c, 0, 0);
+ if (initial) {
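+ /* initial GC sees keys in both the btree and the journal */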
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
+ bch2_gc_reflink_start_initial_fn);
+ goto out;
+ }
for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
r->refcount = 0;
}
bch2_trans_iter_exit(&trans, &iter);
-
+out:
bch2_trans_exit(&trans);
- return 0;
+ return ret;
}
bch2_mark_superblocks(c);
- if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags) &&
+ if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
c->opts.fix_errors != FSCK_OPT_NO) {
bch_info(c, "starting topology repair pass");
percpu_down_write(&c->mark_lock);
ret = bch2_gc_reflink_done(c, initial, metadata_only) ?:
+ bch2_gc_stripes_done(c, initial, metadata_only) ?:
bch2_gc_done(c, initial, metadata_only);
bch2_journal_unblock(&c->journal);
bch2_bkey_buf_reassemble(&sk, c, k);
bch2_extent_normalize(c, bkey_i_to_s(sk.k));
-
commit_err =
bch2_trans_update(&trans, &iter, sk.k, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,