#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
+#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "btree_io.h"
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
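+ /* seqcount write sections must not be preempted: */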
+ preempt_disable();
write_seqcount_begin(&c->gc_pos_lock);
c->gc_pos = new_pos;
write_seqcount_end(&c->gc_pos_lock);
+ preempt_enable();
}
static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
__gc_pos_set(c, new_pos);
}
+/*
+ * Missing: if an interior btree node is empty, we need to do something -
+ * perhaps just kill it
+ */
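+/*
+ * Topology check: each child's min_key must be the successor of the previous
+ * child's max_key, and the last child's max_key must match the parent's.
+ */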
static int bch2_gc_check_topology(struct bch_fs *c,
- struct bkey_s_c k,
- struct bpos *expected_start,
- struct bpos expected_end,
+ struct btree *b,
+ struct bkey_buf *prev,
+ struct bkey_buf cur,
bool is_last)
{
+ struct bpos node_start = b->data->min_key;
+ struct bpos node_end = b->data->max_key;
+ struct bpos expected_start = bkey_deleted(&prev->k->k)
+ ? node_start
+ : bpos_successor(prev->k->k.p);
+ char buf1[200], buf2[200];
+ bool update_min = false;
+ bool update_max = false;
int ret = 0;
- if (k.k->type == KEY_TYPE_btree_ptr_v2) {
- struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
+ if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
- if (fsck_err_on(bkey_cmp(*expected_start, bp.v->min_key), c,
- "btree node with incorrect min_key: got %llu:%llu, should be %llu:%llu",
- bp.v->min_key.inode,
- bp.v->min_key.offset,
- expected_start->inode,
- expected_start->offset)) {
- BUG();
+ if (bkey_deleted(&prev->k->k)) {
+ struct printbuf out = PBUF(buf1);
+ pr_buf(&out, "start of node: ");
+ bch2_bpos_to_text(&out, node_start);
+ } else {
+ bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
}
- }
- *expected_start = bkey_cmp(k.k->p, POS_MAX)
- ? bkey_successor(k.k->p)
- : k.k->p;
+ if (fsck_err_on(bpos_cmp(expected_start, bp->v.min_key), c,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " cur %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1,
+ (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)))
+ update_min = true;
+ }
if (fsck_err_on(is_last &&
- bkey_cmp(k.k->p, expected_end), c,
- "btree node with incorrect max_key: got %llu:%llu, should be %llu:%llu",
- k.k->p.inode,
- k.k->p.offset,
- expected_end.inode,
- expected_end.offset)) {
- BUG();
+ bpos_cmp(cur.k->k.p, node_end), c,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " %s\n"
+ " expected %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
+ (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)))
+ update_max = true;
+
+ bch2_bkey_buf_copy(prev, c, cur.k);
+
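+ /*
+ * If min_key/max_key were repaired above, rewrite this node's key
+ * with the corrected bounds, via the in-memory journal keys:
+ */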
+ if (update_min || update_max) {
+ struct bkey_i *new;
+ struct bkey_i_btree_ptr_v2 *bp = NULL;
+ struct btree *n;
+
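+ /*
+ * A btree node key's position is its max_key: if that's
+ * changing, delete any journal key at the old position first:
+ */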
+ if (update_max) {
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur.k->k.p);
+ if (ret)
+ return ret;
+ }
+
+ new = kmalloc(bkey_bytes(&cur.k->k), GFP_KERNEL);
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
+ return -ENOMEM;
+ }
+
+ bkey_copy(new, cur.k);
+
+ if (new->k.type == KEY_TYPE_btree_ptr_v2)
+ bp = bkey_i_to_btree_ptr_v2(new);
+
+ if (update_min)
+ bp->v.min_key = expected_start;
+ if (update_max)
+ new->k.p = node_end;
+ if (bp)
+ SET_BTREE_PTR_RANGE_UPDATED(&bp->v, true);
+
+ ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level, new);
+ if (ret) {
+ kfree(new);
+ return ret;
+ }
+
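+ /*
+ * If the node is already in the btree node cache, update its
+ * bounds in place and rehash it under the new key:
+ */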
+ n = bch2_btree_node_get_noiter(c, cur.k, b->c.btree_id,
+ b->c.level - 1, true);
+ if (n) {
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, n);
+
+ bkey_copy(&n->key, new);
+ if (update_min)
+ n->data->min_key = expected_start;
+ if (update_max)
+ n->data->max_key = node_end;
+
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, n);
+ BUG_ON(ret);
+ mutex_unlock(&c->btree_cache.lock);
+ six_unlock_read(&n->c.lock);
+ }
+ }
+fsck_err:
+ return ret;
+}
+
+static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c *k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ int ret = 0;
+
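+ /*
+ * Check each pointer against the bucket gens in the alloc info:
+ * cached pointers can be fixed up in place, bad dirty pointers
+ * mean the key itself has to be rewritten:
+ */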
+ bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
+ struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
+
+ if (fsck_err_on(!g->gen_valid, c,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen)) {
+ if (p.ptr.cached) {
+ g2->_mark.gen = g->_mark.gen = p.ptr.gen;
+ g2->gen_valid = g->gen_valid = true;
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->mark.gen)) {
+ if (p.ptr.cached) {
+ g2->_mark.gen = g->_mark.gen = p.ptr.gen;
+ g2->gen_valid = g->gen_valid = true;
+ g2->_mark.data_type = 0;
+ g2->_mark.dirty_sectors = 0;
+ g2->_mark.cached_sectors = 0;
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (fsck_err_on(!p.ptr.cached &&
+ gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->mark.gen))
+ do_update = true;
+
+ if (p.has_ec) {
+ struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);
+
+ if (fsck_err_on(!m || !m->alive, c,
+ "pointer to nonexistent stripe %llu",
+ (u64) p.ec.idx))
+ do_update = true;
+
+ if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
+ "pointer does not match stripe %llu",
+ (u64) p.ec.idx))
+ do_update = true;
+ }
+ }
+
+ if (do_update) {
+ struct bkey_ptrs ptrs;
+ union bch_extent_entry *entry;
+ struct bch_extent_ptr *ptr;
+ struct bkey_i *new;
+
+ if (is_root) {
+ bch_err(c, "cannot update btree roots yet");
+ return -EINVAL;
+ }
+
+ new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
+ return -ENOMEM;
+ }
+
+ bkey_reassemble(new, *k);
+
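+ /* Drop the pointers that the gen checks above flagged as bad: */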
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
+
+ (ptr->cached &&
+ (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
+ (!ptr->cached &&
+ gen_cmp(ptr->gen, g->mark.gen) < 0);
+ }));
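+
+ /*
+ * Also drop stripe pointer entries that no longer match a live
+ * stripe; restart the scan after each drop, since dropping an
+ * entry shifts the ones after it:
+ */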
+again:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
+ struct stripe *m = genradix_ptr(&c->stripes[true],
+ entry->stripe_ptr.idx);
+ union bch_extent_entry *next_ptr;
+
+ bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
+ if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
+ goto found;
+ next_ptr = NULL;
+found:
+ if (!next_ptr) {
+ bch_err(c, "aieee, found stripe ptr with no data ptr");
+ continue;
+ }
+
+ if (!m || !m->alive ||
+ !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
+ &next_ptr->ptr,
+ m->sectors)) {
+ bch2_bkey_extent_entry_drop(new, entry);
+ goto again;
+ }
+ }
+ }
+
+ ret = bch2_journal_key_insert(c, btree_id, level, new);
+ if (ret)
+ kfree(new);
+ else
+ *k = bkey_i_to_s_c(new);
+ }
+fsck_err:
+ return ret;
+}
/* marking of btree keys/nodes: */
-static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
+static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c k,
u8 *max_stale, bool initial)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
int ret = 0;
if (initial) {
- BUG_ON(journal_seq_verify(c) &&
+ BUG_ON(bch2_journal_seq_verify &&
k.k->version.lo > journal_cur_seq(&c->journal));
- /* XXX change to fsck check */
if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
"key version number higher than recorded: %llu > %llu",
k.k->version.lo,
atomic64_read(&c->key_version)))
atomic64_set(&c->key_version, k.k->version.lo);
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
+ fsck_err_on(!bch2_bkey_replicas_marked(c, k), c,
"superblock not marked as containing replicas (type %u)",
k.k->type)) {
ret = bch2_mark_bkey_replicas(c, k);
- if (ret)
- return ret;
- }
-
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, true);
- struct bucket *g2 = PTR_BUCKET(ca, ptr, false);
-
- if (mustfix_fsck_err_on(!g->gen_valid, c,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
- ptr->dev, PTR_BUCKET_NR(ca, ptr),
- bch2_data_types[ptr_data_type(k.k, ptr)],
- ptr->gen)) {
- g2->_mark.gen = g->_mark.gen = ptr->gen;
- g2->gen_valid = g->gen_valid = true;
- }
-
- if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u",
- ptr->dev, PTR_BUCKET_NR(ca, ptr),
- bch2_data_types[ptr_data_type(k.k, ptr)],
- ptr->gen, g->mark.gen)) {
- g2->_mark.gen = g->_mark.gen = ptr->gen;
- g2->gen_valid = g->gen_valid = true;
- g2->_mark.data_type = 0;
- g2->_mark.dirty_sectors = 0;
- g2->_mark.cached_sectors = 0;
- set_bit(BCH_FS_FIXED_GENS, &c->flags);
+ if (ret) {
+ bch_err(c, "error marking bkey replicas: %i", ret);
+ goto err;
}
}
+
+ ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, &k);
}
bkey_for_each_ptr(ptrs, ptr) {
bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags);
fsck_err:
+err:
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
bool initial)
{
- struct bpos next_node_start = b->data->min_key;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
+ struct bkey_buf prev, cur;
int ret = 0;
*max_stale = 0;
if (!btree_node_type_needs_gc(btree_node_type(b)))
return 0;
bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
- bch2_bkey_debugcheck(c, b, k);
-
- ret = bch2_gc_mark_key(c, k, max_stale, initial);
+ ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ k, max_stale, initial);
if (ret)
break;
bch2_btree_node_iter_advance(&iter, b);
- if (b->level) {
- ret = bch2_gc_check_topology(c, k,
- &next_node_start,
- b->data->max_key,
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+
+ ret = bch2_gc_check_topology(c, b, &prev, cur,
bch2_btree_node_iter_end(&iter));
if (ret)
break;
}
}
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
return ret;
}
static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
- bool initial, bool metadata_only)
+ bool initial)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree *b;
- unsigned depth = metadata_only ? 1
- : expensive_debug_checks(c) ? 0
+ unsigned depth = bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
if (max_stale > 64)
bch2_btree_node_rewrite(c, iter,
b->data->keys.seq,
- BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
- else if (!btree_gc_rewrite_disabled(c) &&
- (btree_gc_always_rewrite(c) || max_stale > 16))
+ else if (!bch2_btree_gc_rewrite_disabled &&
+ (bch2_btree_gc_always_rewrite || max_stale > 16))
bch2_btree_node_rewrite(c, iter,
b->data->keys.seq,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
bch2_trans_cond_resched(&trans);
}
+ bch2_trans_iter_put(&trans, iter);
+
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
mutex_lock(&c->btree_root_lock);
b = c->btree_roots[btree_id].b;
if (!btree_node_fake(b))
- ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
+ ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ bkey_i_to_s_c(&b->key),
&max_stale, initial);
- gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+ gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
mutex_unlock(&c->btree_root_lock);
return ret;
}
static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
- struct journal_keys *journal_keys,
unsigned target_depth)
{
struct btree_and_journal_iter iter;
struct bkey_s_c k;
- struct bpos next_node_start = b->data->min_key;
+ struct bkey_buf cur, prev;
u8 max_stale = 0;
int ret = 0;
- bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- bch2_bkey_debugcheck(c, b, k);
-
- BUG_ON(bkey_cmp(k.k->p, b->data->min_key) < 0);
- BUG_ON(bkey_cmp(k.k->p, b->data->max_key) > 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
- ret = bch2_gc_mark_key(c, k, &max_stale, true);
- if (ret)
+ ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ k, &max_stale, true);
+ if (ret) {
+ bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
break;
+ }
- if (b->level) {
- struct btree *child;
- BKEY_PADDED(k) tmp;
-
- bkey_reassemble(&tmp.k, k);
- k = bkey_i_to_s_c(&tmp.k);
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ k = bkey_i_to_s_c(cur.k);
bch2_btree_and_journal_iter_advance(&iter);
- ret = bch2_gc_check_topology(c, k,
- &next_node_start,
- b->data->max_key,
+ ret = bch2_gc_check_topology(c, b,
+ &prev, cur,
!bch2_btree_and_journal_iter_peek(&iter).k);
if (ret)
break;
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
- if (b->level > target_depth) {
- child = bch2_btree_node_get_noiter(c, &tmp.k,
- b->btree_id, b->level - 1);
- ret = PTR_ERR_OR_ZERO(child);
- if (ret)
- break;
+ if (b->c.level > target_depth) {
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
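+ /* Second pass: walk the node's keys again, recursing into each child: */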
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ struct btree *child;
- ret = bch2_gc_btree_init_recurse(c, child,
- journal_keys, target_depth);
- six_unlock_read(&child->lock);
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+ child = bch2_btree_node_get_noiter(c, cur.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(child);
+
+ if (fsck_err_on(ret == -EIO, c,
+ "unreadable btree node")) {
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur.k->k.p);
if (ret)
- break;
+ return ret;
+
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ continue;
}
- } else {
- bch2_btree_and_journal_iter_advance(&iter);
+
+ if (ret) {
+ bch_err(c, "%s: error %i getting btree node",
+ __func__, ret);
+ break;
+ }
+
+ ret = bch2_gc_btree_init_recurse(c, child,
+ target_depth);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
}
}
-
+fsck_err:
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ bch2_btree_and_journal_iter_exit(&iter);
return ret;
}
static int bch2_gc_btree_init(struct bch_fs *c,
- struct journal_keys *journal_keys,
- enum btree_id btree_id,
- bool metadata_only)
+ enum btree_id btree_id)
{
struct btree *b;
- unsigned target_depth = metadata_only ? 1
- : expensive_debug_checks(c) ? 0
- : !btree_node_type_needs_gc(btree_id) ? 1
+ unsigned target_depth = bch2_expensive_debug_checks ? 0
+ : !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
+ char buf[100];
int ret = 0;
b = c->btree_roots[btree_id].b;
if (btree_node_fake(b))
return 0;
- six_lock_read(&b->lock);
- if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c,
- "btree root with incorrect min_key: %llu:%llu",
- b->data->min_key.inode,
- b->data->min_key.offset)) {
+ six_lock_read(&b->c.lock, NULL, NULL);
+ if (fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+ "btree root with incorrect min_key: %s",
+ (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
BUG();
}
- if (fsck_err_on(bkey_cmp(b->data->max_key, POS_MAX), c,
- "btree root with incorrect min_key: %llu:%llu",
- b->data->max_key.inode,
- b->data->max_key.offset)) {
+ if (fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c,
+ "btree root with incorrect max_key: %s",
+ (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
BUG();
}
- if (b->level >= target_depth)
- ret = bch2_gc_btree_init_recurse(c, b,
- journal_keys, target_depth);
+ if (b->c.level >= target_depth)
+ ret = bch2_gc_btree_init_recurse(c, b, target_depth);
if (!ret)
- ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
+ ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ bkey_i_to_s_c(&b->key),
&max_stale, true);
fsck_err:
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
(int) btree_id_to_gc_phase(r);
}
-static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
- bool initial, bool metadata_only)
+static int bch2_gc_btrees(struct bch_fs *c, bool initial)
{
enum btree_id ids[BTREE_ID_NR];
unsigned i;
for (i = 0; i < BTREE_ID_NR; i++) {
enum btree_id id = ids[i];
int ret = initial
- ? bch2_gc_btree_init(c, journal_keys,
- id, metadata_only)
- : bch2_gc_btree(c, id, initial, metadata_only);
- if (ret)
+ ? bch2_gc_btree_init(c, id)
+ : bch2_gc_btree(c, id, initial);
+ if (ret) {
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
+ }
}
return 0;
if (offset == BCH_SB_SECTOR)
mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
- BCH_DATA_SB, flags);
+ BCH_DATA_sb, flags);
mark_metadata_sectors(c, ca, offset,
offset + (1 << layout->sb_max_size_bits),
- BCH_DATA_SB, flags);
+ BCH_DATA_sb, flags);
}
for (i = 0; i < ca->journal.nr; i++) {
b = ca->journal.buckets[i];
- bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+ bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB), flags);
}
mutex_unlock(&c->sb_lock);
}
+#if 0
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
mutex_unlock(&c->btree_interior_update_lock);
}
+#endif
static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
ca->mi.nbuckets * sizeof(struct bucket));
ca->buckets[1] = NULL;
- free_percpu(ca->usage[1]);
- ca->usage[1] = NULL;
+ free_percpu(ca->usage_gc);
+ ca->usage_gc = NULL;
}
free_percpu(c->usage_gc);
}
static int bch2_gc_done(struct bch_fs *c,
- bool initial, bool metadata_only)
+ bool initial)
{
struct bch_dev *ca;
- bool verify = !metadata_only &&
- (!initial ||
- (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
- unsigned i;
+ bool verify = (!initial ||
+ (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
+ unsigned i, dev;
int ret = 0;
#define copy_field(_f, _msg, ...) \
if (dst->_f != src->_f) { \
if (verify) \
fsck_err(c, _msg ": got %llu, should be %llu" \
, ##__VA_ARGS__, dst->_f, src->_f); \
dst->_f = src->_f; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
#define copy_stripe_field(_f, _msg, ...) \
if (dst->_f != src->_f) { \
if (verify) \
fsck_err(c, "stripe %zu has wrong "_msg \
": got %u, should be %u", \
- dst_iter.pos, ##__VA_ARGS__, \
+ iter.pos, ##__VA_ARGS__, \
dst->_f, src->_f); \
dst->_f = src->_f; \
- dst->dirty = true; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
#define copy_bucket_field(_f) \
if (dst->b[b].mark._f != src->b[b].mark._f) { \
if (verify) \
- fsck_err(c, "dev %u bucket %zu has wrong " #_f \
+ fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \
": got %u, should be %u", i, b, \
+ dst->b[b].mark.gen, \
+ bch2_data_types[dst->b[b].mark.data_type],\
dst->b[b].mark._f, src->b[b].mark._f); \
dst->b[b]._mark._f = src->b[b].mark._f; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
#define copy_dev_field(_f, _msg, ...) \
copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
#define copy_fs_field(_f, _msg, ...) \
copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
- if (!metadata_only) {
- struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
- struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
+ {
+ struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
- unsigned i;
-
- c->ec_stripes_heap.used = 0;
- while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
- (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
- BUG_ON(src_iter.pos != dst_iter.pos);
-
- copy_stripe_field(alive, "alive");
- copy_stripe_field(sectors, "sectors");
- copy_stripe_field(algorithm, "algorithm");
- copy_stripe_field(nr_blocks, "nr_blocks");
- copy_stripe_field(nr_redundant, "nr_redundant");
- copy_stripe_field(blocks_nonempty,
- "blocks_nonempty");
+ while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
+ dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);
+
+ if (dst->alive != src->alive ||
+ dst->sectors != src->sectors ||
+ dst->algorithm != src->algorithm ||
+ dst->nr_blocks != src->nr_blocks ||
+ dst->nr_redundant != src->nr_redundant) {
+ bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
+ ret = -EINVAL;
+ goto fsck_err;
+ }
for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
copy_stripe_field(block_sectors[i],
"block_sectors[%u]", i);
- if (dst->alive)
- bch2_stripes_heap_insert(c, dst, dst_iter.pos);
+ dst->blocks_nonempty = 0;
+ for (i = 0; i < dst->nr_blocks; i++)
+ dst->blocks_nonempty += dst->block_sectors[i] != 0;
- genradix_iter_advance(&dst_iter, &c->stripes[0]);
- genradix_iter_advance(&src_iter, &c->stripes[1]);
+ genradix_iter_advance(&iter, &c->stripes[1]);
}
}
- for_each_member_device(ca, c, i) {
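+ /* Fold the percpu usage accumulators into the base counters: */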
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
+
+ for_each_member_device(ca, c, dev) {
struct bucket_array *dst = __bucket_array(ca, 0);
struct bucket_array *src = __bucket_array(ca, 1);
size_t b;
dst->b[b].oldest_gen = src->b[b].oldest_gen;
}
- };
- bch2_fs_usage_acc_to_base(c, 0);
- bch2_fs_usage_acc_to_base(c, 1);
+ {
+ struct bch_dev_usage *dst = ca->usage_base;
+ struct bch_dev_usage *src = (void *)
+ bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ dev_usage_u64s());
- bch2_dev_usage_from_buckets(c);
+ copy_dev_field(buckets_ec, "buckets_ec");
+ copy_dev_field(buckets_unavailable, "buckets_unavailable");
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
+ copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]);
+ copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ }
+ }
+ };
{
unsigned nr = fs_usage_u64s(c);
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
+ copy_fs_field(data, "data");
+ copy_fs_field(cached, "cached");
+ copy_fs_field(reserved, "reserved");
+ copy_fs_field(nr_inodes,"nr_inodes");
- if (!metadata_only) {
- copy_fs_field(data, "data");
- copy_fs_field(cached, "cached");
- copy_fs_field(reserved, "reserved");
- copy_fs_field(nr_inodes,"nr_inodes");
-
- for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(persistent_reserved[i],
- "persistent_reserved[%i]", i);
- }
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(persistent_reserved[i],
+ "persistent_reserved[%i]", i);
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
char buf[80];
- if (metadata_only &&
- (e->data_type == BCH_DATA_USER ||
- e->data_type == BCH_DATA_CACHED))
- continue;
-
bch2_replicas_entry_to_text(&PBUF(buf), e);
copy_fs_field(replicas[i], "%s", buf);
#undef copy_stripe_field
#undef copy_field
fsck_err:
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
-static int bch2_gc_start(struct bch_fs *c,
- bool metadata_only)
+static int bch2_gc_start(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
for_each_member_device(ca, c, i) {
BUG_ON(ca->buckets[1]);
- BUG_ON(ca->usage[1]);
+ BUG_ON(ca->usage_gc);
ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO);
if (!ca->buckets[1]) {
bch_err(c, "error allocating ca->buckets[gc]");
percpu_ref_put(&ca->ref);
return -ENOMEM;
}
- ca->usage[1] = alloc_percpu(struct bch_dev_usage);
- if (!ca->usage[1]) {
- bch_err(c, "error allocating ca->usage[gc]");
+ ca->usage_gc = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage_gc) {
+ bch_err(c, "error allocating ca->usage_gc");
percpu_ref_put(&ca->ref);
return -ENOMEM;
}
d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
d->gen_valid = s->gen_valid;
-
- if (metadata_only &&
- (s->mark.data_type == BCH_DATA_USER ||
- s->mark.data_type == BCH_DATA_CACHED)) {
- d->_mark = s->mark;
- d->_mark.owned_by_allocator = 0;
- }
}
};
* move around - if references move backwards in the ordering GC
* uses, GC could skip past them
*/
-int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
- bool initial, bool metadata_only)
+int bch2_gc(struct bch_fs *c, bool initial)
{
struct bch_dev *ca;
u64 start_time = local_clock();
unsigned i, iter = 0;
int ret;
+ lockdep_assert_held(&c->state_lock);
trace_gc_start(c);
down_write(&c->gc_lock);
+
+ /* flush interior btree updates: */
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
again:
- ret = bch2_gc_start(c, metadata_only);
+ ret = bch2_gc_start(c);
if (ret)
goto out;
bch2_mark_superblocks(c);
- ret = bch2_gc_btrees(c, journal_keys, initial, metadata_only);
+ ret = bch2_gc_btrees(c, initial);
if (ret)
goto out;
+#if 0
bch2_mark_pending_btree_node_frees(c);
+#endif
bch2_mark_allocator_buckets(c);
c->gc_count++;
-out:
- if (!ret &&
- (test_bit(BCH_FS_FIXED_GENS, &c->flags) ||
- (!iter && test_restart_gc(c)))) {
+
+ if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+ (!iter && bch2_test_restart_gc)) {
/*
* XXX: make sure gens we fixed got saved
*/
if (iter++ <= 2) {
- bch_info(c, "Fixed gens, restarting mark and sweep:");
- clear_bit(BCH_FS_FIXED_GENS, &c->flags);
+ bch_info(c, "Second GC pass needed, restarting:");
+ clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
percpu_down_write(&c->mark_lock);
bch_info(c, "Unable to fix bucket gens, looping");
ret = -EINVAL;
}
-
+out:
if (!ret) {
bch2_journal_block(&c->journal);
percpu_down_write(&c->mark_lock);
- ret = bch2_gc_done(c, initial, metadata_only);
+ ret = bch2_gc_done(c, initial);
bch2_journal_unblock(&c->journal);
} else {
return ret;
}
+static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ percpu_down_read(&c->mark_lock);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
+
+ if (gen_after(g->mark.gen, ptr->gen) > 16) {
+ percpu_up_read(&c->mark_lock);
+ return true;
+ }
+ }
+
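+ /*
+ * No badly stale pointers: note each pointer's gen, so
+ * bch2_gc_gens() can recalculate the bucket's oldest_gen:
+ */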
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
+
+ if (gen_after(g->gc_gen, ptr->gen))
+ g->gc_gen = ptr->gen;
+ }
+ percpu_up_read(&c->mark_lock);
+
+ return false;
+}
+
+/*
+ * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
+ * node pointers currently never have cached pointers that can become stale:
+ */
+static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ struct bkey_buf sk;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&sk);
+ bch2_trans_init(&trans, c, 0, 0);
+
+ iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS);
+
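+ /*
+ * Rewrite keys that have pointers with badly stale gens:
+ * bch2_extent_normalize() drops the stale cached pointers:
+ */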
+ while ((k = bch2_btree_iter_peek(iter)).k &&
+ !(ret = bkey_err(k))) {
+ if (gc_btree_gens_key(c, k)) {
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ bch2_extent_normalize(c, bkey_i_to_s(sk.k));
+
+ bch2_trans_update(&trans, iter, sk.k, 0);
+
+ ret = bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+ if (ret == -EINTR)
+ continue;
+ if (ret)
+ break;
+ }
+
+ bch2_btree_iter_advance(iter);
+ }
+ bch2_trans_iter_put(&trans, iter);
+
+ bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret;
+}
+
+int bch2_gc_gens(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ struct bucket_array *buckets;
+ struct bucket *g;
+ unsigned i;
+ int ret = 0;
+
+ /*
+ * Ideally we would be using state_lock and not gc_lock here, but that
+ * introduces a deadlock in the RO path - we currently take the state
+ * lock at the start of going RO, thus the gc thread may get stuck:
+ */
+ down_read(&c->gc_lock);
+
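+ /* Seed each bucket's gc_gen with its current gen: */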
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->gc_gen = g->mark.gen;
+ up_read(&ca->bucket_lock);
+ }
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if ((1 << i) & BTREE_ID_HAS_PTRS) {
+ ret = bch2_gc_btree_gens(c, i);
+ if (ret) {
+ bch_err(c, "error recalculating oldest_gen: %i", ret);
+ goto err;
+ }
+ }
+
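+ /* gc_gen now holds each bucket's oldest live pointer gen: */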
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->oldest_gen = g->gc_gen;
+ up_read(&ca->bucket_lock);
+ }
+
+ c->gc_count++;
+err:
+ up_read(&c->gc_lock);
+ return ret;
+}
+
/* Btree coalescing */
static void recalc_packed_keys(struct btree *b)
/* Find a format that all keys in @old_nodes can pack into */
bch2_bkey_format_init(&format_state);
+ /*
+ * XXX: this won't correctly take it account the new min/max keys:
+ */
for (i = 0; i < nr_old_nodes; i++)
__bch2_btree_calc_format(&format_state, old_nodes[i]);
}
if (bch2_keylist_realloc(&keylist, NULL, 0,
- (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+ BKEY_BTREE_PTR_U64s_MAX * nr_old_nodes)) {
trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
return;
}
- as = bch2_btree_update_start(iter->trans, iter->btree_id,
+ as = bch2_btree_update_start(iter, old_nodes[0]->c.level,
btree_update_reserve_required(c, parent) + nr_old_nodes,
BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE,
- NULL);
+ BTREE_INSERT_USE_RESERVE);
if (IS_ERR(as)) {
trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_RESERVE_GET);
k < vstruct_last(s2) &&
vstruct_blocks_plus(n1->data, c->block_bits,
u64s + k->u64s) <= blocks;
- k = bkey_next_skip_noops(k, vstruct_last(s2))) {
+ k = bkey_next(k)) {
last = k;
u64s += k->u64s;
}
set_btree_bset_end(n1, n1->set);
- six_unlock_write(&n2->lock);
+ six_unlock_write(&n2->c.lock);
bch2_btree_node_free_never_inserted(c, n2);
- six_unlock_intent(&n2->lock);
+ six_unlock_intent(&n2->c.lock);
memmove(new_nodes + i - 1,
new_nodes + i,
n1->key.k.p = n1->data->max_key =
bkey_unpack_pos(n1, last);
- n2->data->min_key = bkey_successor(n1->data->max_key);
+ n2->data->min_key = bpos_successor(n1->data->max_key);
memcpy_u64s(vstruct_last(s1),
s2->start, u64s);
btree_node_reset_sib_u64s(n);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+
+ bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
bch2_btree_node_write(c, n, SIX_LOCK_intent);
}
unsigned j;
for (j = 0; j < nr_new_nodes; j++)
- if (!bkey_cmp(old_nodes[i]->key.k.p,
+ if (!bpos_cmp(old_nodes[i]->key.k.p,
new_nodes[j]->key.k.p))
goto next;
BUG_ON(!bch2_keylist_empty(&keylist));
- BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);
bch2_btree_iter_node_replace(iter, new_nodes[0]);
for (i = 0; i < nr_new_nodes; i++)
- bch2_open_buckets_put(c, &new_nodes[i]->ob);
+ bch2_btree_update_get_open_buckets(as, new_nodes[i]);
/* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) {
}
for (i = 0; i < nr_new_nodes; i++)
- six_unlock_intent(&new_nodes[i]->lock);
+ six_unlock_intent(&new_nodes[i]->c.lock);
bch2_btree_update_done(as);
bch2_keylist_free(&keylist, NULL);
struct btree *b;
bool kthread = (current->flags & PF_KTHREAD) != 0;
unsigned i;
+ int ret = 0;
/* Sliding window of adjacent btree nodes */
struct btree *merge[GC_MERGE_NODES];
for (i = 1; i < GC_MERGE_NODES; i++) {
if (!merge[i] ||
- !six_relock_intent(&merge[i]->lock, lock_seq[i]))
+ !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
break;
- if (merge[i]->level != merge[0]->level) {
- six_unlock_intent(&merge[i]->lock);
+ if (merge[i]->c.level != merge[0]->c.level) {
+ six_unlock_intent(&merge[i]->c.lock);
break;
}
}
bch2_coalesce_nodes(c, iter, merge);
for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
- lock_seq[i] = merge[i]->lock.state.seq;
- six_unlock_intent(&merge[i]->lock);
+ lock_seq[i] = merge[i]->c.lock.state.seq;
+ six_unlock_intent(&merge[i]->c.lock);
}
- lock_seq[0] = merge[0]->lock.state.seq;
+ lock_seq[0] = merge[0]->c.lock.state.seq;
if (kthread && kthread_should_stop()) {
- bch2_trans_exit(&trans);
- return -ESHUTDOWN;
+ ret = -ESHUTDOWN;
+ break;
}
bch2_trans_cond_resched(&trans);
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
}
- return bch2_trans_exit(&trans);
+ bch2_trans_iter_put(&trans, iter);
+
+ return bch2_trans_exit(&trans) ?: ret;
}
/**
{
struct bch_fs *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
- unsigned long last = atomic_long_read(&clock->now);
+ unsigned long last = atomic64_read(&clock->now);
unsigned last_kick = atomic_read(&c->kick_gc);
int ret;
if (c->btree_gc_periodic) {
unsigned long next = last + c->capacity / 16;
- if (atomic_long_read(&clock->now) >= next)
+ if (atomic64_read(&clock->now) >= next)
break;
bch2_io_clock_schedule_timeout(clock, next);
}
__set_current_state(TASK_RUNNING);
- last = atomic_long_read(&clock->now);
+ last = atomic64_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- ret = bch2_gc(c, NULL, false, false);
- if (ret)
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
+ ret = bch2_gc(c, false, false);
+#else
+ ret = bch2_gc_gens(c);
+#endif
+ if (ret < 0)
bch_err(c, "btree gc failed: %i", ret);
debug_check_no_locks_held();
{
struct task_struct *p;
- BUG_ON(c->gc_thread);
+ if (c->gc_thread)
+ return 0;
- p = kthread_create(bch2_gc_thread, c, "bch_gc");
- if (IS_ERR(p))
+ p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
+ if (IS_ERR(p)) {
+ bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
return PTR_ERR(p);
+ }
get_task_struct(p);
c->gc_thread = p;