+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright (C) 2014 Datera Inc.
*/
#include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
#include "bkey_methods.h"
+#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
+#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
+#include "recovery.h"
+#include "reflink.h"
+#include "replicas.h"
#include "super-io.h"
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
#include <linux/rcupdate.h>
+#include <linux/sched/task.h>
#include <trace/events/bcachefs.h>
-struct range_checks {
- struct range_level {
- struct bpos min;
- struct bpos max;
- } l[BTREE_MAX_DEPTH];
- unsigned depth;
-};
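+/*
+ * Sentinel return values used by the topology repair code below to tell the
+ * caller which btree node should be evicted:
+ */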
+#define DROP_THIS_NODE 10
+#define DROP_PREV_NODE 11
-static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
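+/*
+ * c->gc_pos is read under the gc_pos_lock seqcount by the marking code to
+ * decide whether GC has already seen a given reference; update it with
+ * preemption disabled so seqcount readers aren't left spinning on a preempted
+ * writer:
+ */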
+static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
- unsigned i;
-
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
- r->l[i].min = r->l[i].max = POS_MIN;
- r->depth = depth;
+ preempt_disable();
+ write_seqcount_begin(&c->gc_pos_lock);
+ c->gc_pos = new_pos;
+ write_seqcount_end(&c->gc_pos_lock);
+ preempt_enable();
}
-static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
- struct range_checks *r)
+static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
- struct range_level *l = &r->l[b->level];
-
- struct bpos expected_min = bkey_cmp(l->min, l->max)
- ? btree_type_successor(b->btree_id, l->max)
- : l->max;
-
- bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
- "btree node has incorrect min key: %llu:%llu != %llu:%llu",
- b->data->min_key.inode,
- b->data->min_key.offset,
- expected_min.inode,
- expected_min.offset);
-
- l->max = b->data->max_key;
-
- if (b->level > r->depth) {
- l = &r->l[b->level - 1];
-
- bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
- "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
- b->data->min_key.inode,
- b->data->min_key.offset,
- l->min.inode,
- l->min.offset);
-
- bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
- "btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
- b->data->max_key.inode,
- b->data->max_key.offset,
- l->max.inode,
- l->max.offset);
-
- if (bkey_cmp(b->data->max_key, POS_MAX))
- l->min = l->max =
- btree_type_successor(b->btree_id,
- b->data->max_key);
- }
+ BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
+ __gc_pos_set(c, new_pos);
}
-u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
+/*
+ * Missing: if an interior btree node is empty, we need to do something -
+ * perhaps just kill it
+ */
+static int bch2_gc_check_topology(struct bch_fs *c,
+ struct btree *b,
+ struct bkey_buf *prev,
+ struct bkey_buf cur,
+ bool is_last)
{
- const struct bch_extent_ptr *ptr;
- u8 max_stale = 0;
+ struct bpos node_start = b->data->min_key;
+ struct bpos node_end = b->data->max_key;
+ struct bpos expected_start = bkey_deleted(&prev->k->k)
+ ? node_start
+ : bpos_successor(prev->k->k.p);
+ char buf1[200], buf2[200];
+ int ret = 0;
- if (bkey_extent_is_data(k.k)) {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
- size_t b = PTR_BUCKET_NR(ca, ptr);
+ if (bkey_deleted(&prev->k->k)) {
+ struct printbuf out = PBUF(buf1);
+ pr_buf(&out, "start of node: ");
+ bch2_bpos_to_text(&out, node_start);
+ } else {
+ bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
+ }
- if (gen_after(ca->oldest_gens[b], ptr->gen))
- ca->oldest_gens[b] = ptr->gen;
+ if (bpos_cmp(expected_start, bp->v.min_key)) {
+ bch2_topology_error(c);
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " cur %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1,
+ (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) &&
+ !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ return FSCK_ERR_START_TOPOLOGY_REPAIR;
+ } else {
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ }
+ }
+ }
- max_stale = max(max_stale, ptr_stale(ca, ptr));
+ if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
+ bch2_topology_error(c);
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " %s\n"
+ " expected %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
+ (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) &&
+ !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ return FSCK_ERR_START_TOPOLOGY_REPAIR;
+ } else {
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
}
}
- return max_stale;
+ bch2_bkey_buf_copy(prev, c, cur.k);
+fsck_err:
+ return ret;
}
-/*
- * For runtime mark and sweep:
- */
-static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
- struct bkey_s_c k, unsigned flags)
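+/*
+ * Topology repair rewrites node keys as btree_ptr_v2, which (unlike
+ * btree_ptr) can record the node's min_key:
+ */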
+static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
- switch (type) {
- case BKEY_TYPE_BTREE:
- bch2_gc_mark_key(c, k, c->sb.btree_node_size, true, flags);
- return 0;
- case BKEY_TYPE_EXTENTS:
- bch2_gc_mark_key(c, k, k.k->size, false, flags);
- return bch2_btree_key_recalc_oldest_gen(c, k);
+ switch (b->key.k.type) {
+ case KEY_TYPE_btree_ptr: {
+ struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);
+
+ dst->k.p = src->k.p;
+ dst->v.mem_ptr = 0;
+ dst->v.seq = b->data->keys.seq;
+ dst->v.sectors_written = 0;
+ dst->v.flags = 0;
+ dst->v.min_key = b->data->min_key;
+ set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
+ memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
+ break;
+ }
+ case KEY_TYPE_btree_ptr_v2:
+ bkey_copy(&dst->k_i, &b->key);
+ break;
default:
BUG();
}
}
-int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
- struct bkey_s_c k)
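+/*
+ * set_node_min()/set_node_max() rewrite a node's key via the journal keys
+ * mechanism with a corrected range, then drop any keys in the node that now
+ * fall outside it:
+ */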
+static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
+{
+ struct bkey_i_btree_ptr_v2 *new;
+ int ret;
+
+ new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ btree_ptr_to_v2(b, new);
+ b->data->min_key = new_min;
+ new->v.min_key = new_min;
+ SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
+
+ ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ if (ret) {
+ kfree(new);
+ return ret;
+ }
+
+ bch2_btree_node_drop_keys_outside_node(b);
+
+ return 0;
+}
+
+static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
- enum bch_data_types data_type = type == BKEY_TYPE_BTREE
- ? BCH_DATA_BTREE : BCH_DATA_USER;
+ struct bkey_i_btree_ptr_v2 *new;
+ int ret;
+
+ ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
+ if (ret)
+ return ret;
+
+ new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ btree_ptr_to_v2(b, new);
+ b->data->max_key = new_max;
+ new->k.p = new_max;
+ SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
+
+ ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ if (ret) {
+ kfree(new);
+ return ret;
+ }
+
+ bch2_btree_node_drop_keys_outside_node(b);
+
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, &new->k_i);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+ mutex_unlock(&c->btree_cache.lock);
+ return 0;
+}
+
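+/*
+ * Check that @cur starts where @prev ended: if not, either fix up min/max
+ * keys, or - when one node has been wholly overwritten by the other, judged
+ * by node sequence numbers - tell the caller to drop it:
+ */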
+static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
+ struct btree *prev, struct btree *cur)
+{
+ struct bpos expected_start = !prev
+ ? b->data->min_key
+ : bpos_successor(prev->key.k.p);
+ char buf1[200], buf2[200];
+ int ret = 0;
+
+ if (!prev) {
+ struct printbuf out = PBUF(buf1);
+ pr_buf(&out, "start of node: ");
+ bch2_bpos_to_text(&out, b->data->min_key);
+ } else {
+ bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
+ }
+
+ bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key));
+
+ if (prev &&
+ bpos_cmp(expected_start, cur->data->min_key) > 0 &&
+ BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
+ /* cur overwrites prev: */
+
+ if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
+ cur->data->min_key) >= 0, c,
+ "btree node overwritten by next node at btree %s level %u:\n"
+ " node %s\n"
+ " next %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1, buf2))
+ return DROP_PREV_NODE;
+
+ if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+ bpos_predecessor(cur->data->min_key)), c,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " node %s\n"
+ " next %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1, buf2))
+ ret = set_node_max(c, prev,
+ bpos_predecessor(cur->data->min_key));
+ } else {
+ /* prev overwrites cur: */
+
+ if (mustfix_fsck_err_on(bpos_cmp(expected_start,
+ cur->data->max_key) >= 0, c,
+ "btree node overwritten by prev node at btree %s level %u:\n"
+ " prev %s\n"
+ " node %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1, buf2))
+ return DROP_THIS_NODE;
+
+ if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " node %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1, buf2))
+ ret = set_node_min(c, cur, expected_start);
+ }
+fsck_err:
+ return ret;
+}
+
+static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
+ struct btree *child)
+{
+ char buf1[200], buf2[200];
+ int ret = 0;
+
+ if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " %s\n"
+ " expected %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1),
+ (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) {
+ ret = set_node_max(c, child, b->key.k.p);
+ if (ret)
+ return ret;
+ }
+fsck_err:
+ return ret;
+}
+
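+/*
+ * Walk an interior node's children: evict unreadable nodes, repair boundary
+ * mismatches between siblings, then recurse into each child; returns
+ * DROP_THIS_NODE if no live children remain:
+ */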
+static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
+{
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf prev_k, cur_k;
+ struct btree *prev = NULL, *cur = NULL;
+ bool have_child, dropped_children = false;
+ char buf[200];
int ret = 0;
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const struct bch_extent_ptr *ptr;
-
- if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- (!c->opts.nofsck &&
- fsck_err_on(!bch2_sb_has_replicas(c, e, data_type), c,
- "superblock not marked as containing replicas"))) {
- ret = bch2_check_mark_super(c, e, data_type);
+ if (!b->c.level)
+ return 0;
+again:
+ prev = NULL;
+ have_child = dropped_children = false;
+ bch2_bkey_buf_init(&prev_k);
+ bch2_bkey_buf_init(&cur_k);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+ bch2_bkey_buf_reassemble(&cur_k, c, k);
+
+ cur = bch2_btree_node_get_noiter(c, cur_k.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(cur);
+
+ if (mustfix_fsck_err_on(ret == -EIO, c,
+ "Unreadable btree node at btree %s level %u:\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level - 1,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) {
+ bch2_btree_node_evict(c, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ if (ret)
+ break;
+ continue;
+ }
+
+ if (ret) {
+ bch_err(c, "%s: error %i getting btree node",
+ __func__, ret);
+ break;
+ }
+
+ ret = btree_repair_node_boundaries(c, b, prev, cur);
+
+ if (ret == DROP_THIS_NODE) {
+ six_unlock_read(&cur->c.lock);
+ bch2_btree_node_evict(c, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ if (ret)
+ break;
+ continue;
+ }
+
+ if (prev)
+ six_unlock_read(&prev->c.lock);
+ prev = NULL;
+
+ if (ret == DROP_PREV_NODE) {
+ bch2_btree_node_evict(c, prev_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, prev_k.k->k.p);
if (ret)
- return ret;
+ break;
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
+ goto again;
+ } else if (ret)
+ break;
+
+ prev = cur;
+ cur = NULL;
+ bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
+ }
+
+ if (!ret && !IS_ERR_OR_NULL(prev)) {
+ BUG_ON(cur);
+ ret = btree_repair_node_end(c, b, prev);
+ }
+
+ if (!IS_ERR_OR_NULL(prev))
+ six_unlock_read(&prev->c.lock);
+ prev = NULL;
+ if (!IS_ERR_OR_NULL(cur))
+ six_unlock_read(&cur->c.lock);
+ cur = NULL;
+
+ if (ret)
+ goto err;
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ bch2_bkey_buf_reassemble(&cur_k, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ cur = bch2_btree_node_get_noiter(c, cur_k.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(cur);
+
+ if (ret) {
+ bch_err(c, "%s: error %i getting btree node",
+ __func__, ret);
+ goto err;
}
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
- struct bucket *g = PTR_BUCKET(ca, ptr);
+ ret = bch2_btree_repair_topology_recurse(c, cur);
+ six_unlock_read(&cur->c.lock);
+ cur = NULL;
+
+ if (ret == DROP_THIS_NODE) {
+ bch2_btree_node_evict(c, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ dropped_children = true;
+ }
- if (!g->mark.gen_valid) {
- g->_mark.gen = ptr->gen;
- g->_mark.gen_valid = 1;
- ca->need_alloc_write = true;
+ if (ret)
+ goto err;
+
+ have_child = true;
+ }
+
+ if (mustfix_fsck_err_on(!have_child, c,
+ "empty interior btree node at btree %s level %u\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf)))
+ ret = DROP_THIS_NODE;
+err:
+fsck_err:
+ if (!IS_ERR_OR_NULL(prev))
+ six_unlock_read(&prev->c.lock);
+ if (!IS_ERR_OR_NULL(cur))
+ six_unlock_read(&cur->c.lock);
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
+
+ if (!ret && dropped_children)
+ goto again;
+
+ return ret;
+}
+
+static int bch2_repair_topology(struct bch_fs *c)
+{
+ struct btree *b;
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < BTREE_ID_NR && !ret; i++) {
+ b = c->btree_roots[i].b;
+ if (btree_node_fake(b))
+ continue;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ ret = bch2_btree_repair_topology_recurse(c, b);
+ six_unlock_read(&b->c.lock);
+
+ if (ret == DROP_THIS_NODE) {
+ bch_err(c, "empty btree root - repair unimplemented");
+ ret = FSCK_ERR_EXIT;
+ }
+ }
+
+ return ret;
+}
+
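+/*
+ * Cross check a key's pointers against the bucket gens/data types seen by GC:
+ * fix up bucket state where possible, otherwise rewrite the key (via the
+ * journal keys mechanism), dropping stale pointers and bad stripe references:
+ */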
+static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c *k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ char buf[200];
+ int ret = 0;
+
+ /*
+ * XXX
+ * use check_bucket_ref here
+ */
+ bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
+ struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
+
+ if (fsck_err_on(!g->gen_valid, c,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ if (!p.ptr.cached) {
+ g2->_mark.gen = g->_mark.gen = p.ptr.gen;
+ g2->gen_valid = g->gen_valid = true;
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (fsck_err_on(data_type == BCH_DATA_btree &&
+ g->mark.gen != p.ptr.gen, c,
+ "bucket %u:%zu data type %s has metadata but wrong gen: %u != %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->mark.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ g2->_mark.data_type = g->_mark.data_type = data_type;
+ g2->gen_valid = g->gen_valid = true;
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ }
+
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->mark.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ if (!p.ptr.cached) {
+ g2->_mark.gen = g->_mark.gen = p.ptr.gen;
+ g2->gen_valid = g->gen_valid = true;
+ g2->_mark.data_type = 0;
+ g2->_mark.dirty_sectors = 0;
+ g2->_mark.cached_sectors = 0;
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ } else {
+ do_update = true;
}
+ }
- if (fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
- "%s ptr gen in the future: %u > %u",
- type == BKEY_TYPE_BTREE
- ? "btree" : "data",
- ptr->gen, g->mark.gen)) {
- g->_mark.gen = ptr->gen;
- g->_mark.gen_valid = 1;
- ca->need_alloc_write = true;
- set_bit(BCH_FS_FIXED_GENS, &c->flags);
+ if (fsck_err_on(gen_cmp(g->mark.gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ do_update = true;
+
+ if (fsck_err_on(!p.ptr.cached &&
+ gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->mark.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ do_update = true;
+
+ if (p.ptr.gen != g->mark.gen)
+ continue;
+
+ if (fsck_err_on(g->mark.data_type &&
+ g->mark.data_type != data_type, c,
+ "bucket %u:%zu different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[g->mark.data_type],
+ bch2_data_types[data_type],
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
+ if (data_type == BCH_DATA_btree) {
+ g2->_mark.data_type = g->_mark.data_type = data_type;
+ g2->gen_valid = g->gen_valid = true;
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
+ } else {
+ do_update = true;
}
+ }
+ if (p.has_ec) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
+
+ if (fsck_err_on(!m || !m->alive, c,
+ "pointer to nonexistent stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ do_update = true;
+
+ if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
+ "pointer does not match stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
+ do_update = true;
}
- break;
}
+
+ if (do_update) {
+ struct bkey_ptrs ptrs;
+ union bch_extent_entry *entry;
+ struct bch_extent_ptr *ptr;
+ struct bkey_i *new;
+
+ if (is_root) {
+ bch_err(c, "cannot update btree roots yet");
+ return -EINVAL;
+ }
+
+ new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
+ return -ENOMEM;
+ }
+
+ bkey_reassemble(new, *k);
+
+ if (level) {
+ /*
+ * We don't want to drop btree node pointers - if the
+ * btree node isn't there anymore, the read path will
+ * sort it out:
+ */
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
+
+ ptr->gen = g->mark.gen;
+ }
+ } else {
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
+
+ (ptr->cached &&
+ (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
+ (!ptr->cached &&
+ gen_cmp(ptr->gen, g->mark.gen) < 0) ||
+ gen_cmp(g->mark.gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
+ (g->mark.data_type &&
+ g->mark.data_type != data_type);
+ }));
+again:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
+ entry->stripe_ptr.idx);
+ union bch_extent_entry *next_ptr;
+
+ bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
+ if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
+ goto found;
+ next_ptr = NULL;
+found:
+ if (!next_ptr) {
+ bch_err(c, "aieee, found stripe ptr with no data ptr");
+ continue;
+ }
+
+ if (!m || !m->alive ||
+ !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
+ &next_ptr->ptr,
+ m->sectors)) {
+ bch2_bkey_extent_entry_drop(new, entry);
+ goto again;
+ }
+ }
+ }
+ }
+
+ ret = bch2_journal_key_insert(c, btree_id, level, new);
+ if (ret)
+ kfree(new);
+ else
+ *k = bkey_i_to_s_c(new);
+ }
+fsck_err:
+ return ret;
+}
+
+/* marking of btree keys/nodes: */
+
+static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c *k,
+ u8 *max_stale, bool initial)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs;
+ const struct bch_extent_ptr *ptr;
+ struct bkey deleted = KEY(0, 0, 0);
+ struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
+ unsigned flags =
+ BTREE_TRIGGER_GC|
+ (initial ? BTREE_TRIGGER_NOATOMIC : 0);
+ int ret = 0;
+
+ deleted.p = k->k->p;
+
+ if (initial) {
+ BUG_ON(bch2_journal_seq_verify &&
+ k->k->version.lo > journal_cur_seq(&c->journal));
+
+ ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k);
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+ "key version number higher than recorded: %llu > %llu",
+ k->k->version.lo,
+ atomic64_read(&c->key_version)))
+ atomic64_set(&c->key_version, k->k->version.lo);
}
+ ptrs = bch2_bkey_ptrs_c(*k);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
- atomic64_set(&c->key_version,
- max_t(u64, k.k->version.lo,
- atomic64_read(&c->key_version)));
+ if (gen_after(g->oldest_gen, ptr->gen))
+ g->oldest_gen = ptr->gen;
- bch2_btree_mark_key(c, type, k, BCH_BUCKET_MARK_NOATOMIC);
+ *max_stale = max(*max_stale, ptr_stale(ca, ptr));
+ }
+
+ ret = bch2_mark_key(trans, old, *k, flags);
fsck_err:
+err:
+ if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
-static unsigned btree_gc_mark_node(struct bch_fs *c, struct btree *b)
+static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, u8 *max_stale,
+ bool initial)
{
- enum bkey_type type = btree_node_type(b);
+ struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
- u8 stale = 0;
-
- if (btree_node_has_ptrs(b))
- for_each_btree_node_key_unpack(b, k, &iter,
- btree_node_is_extents(b),
- &unpacked) {
- bch2_bkey_debugcheck(c, b, k);
- stale = max(stale, bch2_btree_mark_key(c, type, k, 0));
+ struct bkey_buf prev, cur;
+ int ret = 0;
+
+ *max_stale = 0;
+
+ if (!btree_node_type_needs_gc(btree_node_type(b)))
+ return 0;
+
+ bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
+
+ while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
+ &k, max_stale, initial);
+ if (ret)
+ break;
+
+ bch2_btree_node_iter_advance(&iter, b);
+
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+
+ ret = bch2_gc_check_topology(c, b, &prev, cur,
+ bch2_btree_node_iter_end(&iter));
+ if (ret)
+ break;
}
+ }
- return stale;
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ return ret;
}
-static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
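+/*
+ * Walk one btree at runtime, marking every key and the root, and
+ * opportunistically rewriting nodes whose pointers have grown too stale:
+ */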
+static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
+ bool initial, bool metadata_only)
{
- write_seqcount_begin(&c->gc_pos_lock);
- c->gc_pos = new_pos;
- write_seqcount_end(&c->gc_pos_lock);
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct btree *b;
+ unsigned depth = metadata_only ? 1
+ : bch2_expensive_debug_checks ? 0
+ : !btree_node_type_needs_gc(btree_id) ? 1
+ : 0;
+ u8 max_stale = 0;
+ int ret = 0;
+
+ gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
+
+ __for_each_btree_node(trans, iter, btree_id, POS_MIN,
+ 0, depth, BTREE_ITER_PREFETCH, b, ret) {
+ bch2_verify_btree_nr_keys(b);
+
+ gc_pos_set(c, gc_pos_btree_node(b));
+
+ ret = btree_gc_mark_node(trans, b, &max_stale, initial);
+ if (ret)
+ break;
+
+ if (!initial) {
+ if (max_stale > 64)
+ bch2_btree_node_rewrite(trans, &iter, b,
+ BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_GC_LOCK_HELD);
+ else if (!bch2_btree_gc_rewrite_disabled &&
+ (bch2_btree_gc_always_rewrite || max_stale > 16))
+ bch2_btree_node_rewrite(trans, &iter,
+ b, BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_GC_LOCK_HELD);
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&c->btree_root_lock);
+ b = c->btree_roots[btree_id].b;
+ if (!btree_node_fake(b)) {
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
+ &k, &max_stale, initial);
+ }
+ gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
+ mutex_unlock(&c->btree_root_lock);
+
+ return ret;
}
-static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
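+/*
+ * Initial GC runs before journal replay, so btree nodes are walked with the
+ * btree + journal keys iterator instead of a normal btree iterator:
+ */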
+static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
+ unsigned target_depth)
{
- BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
- __gc_pos_set(c, new_pos);
+ struct bch_fs *c = trans->c;
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf cur, prev;
+ u8 max_stale = 0;
+ char buf[200];
+ int ret = 0;
+
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
+ BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
+ &k, &max_stale, true);
+ if (ret) {
+ bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
+ goto fsck_err;
+ }
+
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ k = bkey_i_to_s_c(cur.k);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ ret = bch2_gc_check_topology(c, b,
+ &prev, cur,
+ !bch2_btree_and_journal_iter_peek(&iter).k);
+ if (ret)
+ goto fsck_err;
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
+
+ if (b->c.level > target_depth) {
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ struct btree *child;
+
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ child = bch2_btree_node_get_noiter(c, cur.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(child);
+
+ if (ret == -EIO) {
+ bch2_topology_error(c);
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "Unreadable btree node at btree %s level %u:\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level - 1,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) &&
+ !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ goto fsck_err;
+ } else {
+ /*
+ * Continue marking when opted to not fix the error:
+ */
+ ret = 0;
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ continue;
+ }
+ } else if (ret) {
+ bch_err(c, "%s: error %i getting btree node",
+ __func__, ret);
+ break;
+ }
+
+ ret = bch2_gc_btree_init_recurse(trans, child,
+ target_depth);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ bch2_btree_and_journal_iter_exit(&iter);
+ return ret;
}
-static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_gc_btree_init(struct btree_trans *trans,
+ enum btree_id btree_id,
+ bool metadata_only)
{
- struct btree_iter iter;
+ struct bch_fs *c = trans->c;
struct btree *b;
- struct range_checks r;
- unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;
- unsigned max_stale;
+ unsigned target_depth = metadata_only ? 1
+ : bch2_expensive_debug_checks ? 0
+ : !btree_node_type_needs_gc(btree_id) ? 1
+ : 0;
+ u8 max_stale = 0;
+ char buf[100];
int ret = 0;
+ b = c->btree_roots[btree_id].b;
+
+ if (btree_node_fake(b))
+ return 0;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
+ "btree root with incorrect min_key: %s",
+ (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
+ bch_err(c, "repair unimplemented");
+ ret = FSCK_ERR_EXIT;
+ goto fsck_err;
+ }
+
+ if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
+ "btree root with incorrect max_key: %s",
+ (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
+ bch_err(c, "repair unimplemented");
+ ret = FSCK_ERR_EXIT;
+ goto fsck_err;
+ }
+
+ if (b->c.level >= target_depth)
+ ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
+
+ if (!ret) {
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
+ &k, &max_stale, true);
+ }
+fsck_err:
+ six_unlock_read(&b->c.lock);
+
+ if (ret < 0)
+ bch_err(c, "%s: ret %i", __func__, ret);
+ return ret;
+}
+
+static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
+{
+ return (int) btree_id_to_gc_phase(l) -
+ (int) btree_id_to_gc_phase(r);
+}
+
+static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+{
+ struct btree_trans trans;
+ enum btree_id ids[BTREE_ID_NR];
+ unsigned i;
+ int ret = 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ ids[i] = i;
+ bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
+
+ for (i = 0; i < BTREE_ID_NR && !ret; i++)
+ ret = initial
+ ? bch2_gc_btree_init(&trans, ids[i], metadata_only)
+ : bch2_gc_btree(&trans, ids[i], initial, metadata_only);
+
+ if (ret < 0)
+ bch_err(c, "%s: ret %i", __func__, ret);
+
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+ u64 start, u64 end,
+ enum bch_data_type type,
+ unsigned flags)
+{
+ u64 b = sector_to_bucket(ca, start);
+
+ do {
+ unsigned sectors =
+ min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+ bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+ gc_phase(GC_PHASE_SB), flags);
+ b++;
+ start += sectors;
+ } while (start < end);
+}
+
+void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+ unsigned flags)
+{
+ struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+ unsigned i;
+ u64 b;
+
/*
- * if expensive_debug_checks is on, run range_checks on all leaf nodes:
+ * This conditional is kind of gross, but we may be called from the
+ * device add path, before the new device has actually been added to the
+ * running filesystem:
*/
- if (expensive_debug_checks(c))
- depth = 0;
+ if (c) {
+ lockdep_assert_held(&c->sb_lock);
+ percpu_down_read(&c->mark_lock);
+ }
- btree_node_range_checks_init(&r, depth);
+ for (i = 0; i < layout->nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
- __for_each_btree_node(&iter, c, btree_id, POS_MIN,
- 0, depth, BTREE_ITER_PREFETCH, b) {
- btree_node_range_checks(c, b, &r);
+ if (offset == BCH_SB_SECTOR)
+ mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+ BCH_DATA_sb, flags);
- bch2_verify_btree_nr_keys(b);
+ mark_metadata_sectors(c, ca, offset,
+ offset + (1 << layout->sb_max_size_bits),
+ BCH_DATA_sb, flags);
+ }
- max_stale = btree_gc_mark_node(c, b);
+ for (i = 0; i < ca->journal.nr; i++) {
+ b = ca->journal.buckets[i];
+ bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
+ ca->mi.bucket_size,
+ gc_phase(GC_PHASE_SB), flags);
+ }
- gc_pos_set(c, gc_pos_btree_node(b));
+ if (c)
+ percpu_up_read(&c->mark_lock);
+}
+
+static void bch2_mark_superblocks(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ mutex_lock(&c->sb_lock);
+ gc_pos_set(c, gc_phase(GC_PHASE_SB));
+
+ for_each_online_member(ca, c, i)
+ bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+ mutex_unlock(&c->sb_lock);
+}
+
+#if 0
+/* Also see bch2_pending_btree_node_free_insert_done() */
+static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
+{
+ struct btree_update *as;
+ struct pending_btree_node_free *d;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
+
+ for_each_pending_btree_node_free(c, as, d)
+ if (d->index_update_done)
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
- if (max_stale > 32)
- bch2_btree_node_rewrite(c, &iter,
- b->data->keys.seq,
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_GC_LOCK_HELD);
- else if (!btree_gc_rewrite_disabled(c) &&
- (btree_gc_always_rewrite(c) || max_stale > 16))
- bch2_btree_node_rewrite(c, &iter,
- b->data->keys.seq,
- BTREE_INSERT_NOWAIT|
- BTREE_INSERT_GC_LOCK_HELD);
-
- bch2_btree_iter_cond_resched(&iter);
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+#endif
+
+static void bch2_gc_free(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ genradix_free(&c->reflink_gc_table);
+ genradix_free(&c->gc_stripes);
+
+ for_each_member_device(ca, c, i) {
+ kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+ ca->buckets[1] = NULL;
+
+ free_percpu(ca->usage_gc);
+ ca->usage_gc = NULL;
+ }
+
+ free_percpu(c->usage_gc);
+ c->usage_gc = NULL;
+}
+
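+/*
+ * Compare the bucket state and usage counters recomputed by GC against the
+ * live copies, reporting mismatches via fsck_err() and then copying the GC
+ * values over:
+ */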
+static int bch2_gc_done(struct bch_fs *c,
+ bool initial, bool metadata_only)
+{
+ struct bch_dev *ca = NULL;
+ bool verify = !metadata_only && (!initial ||
+ (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
+ unsigned i, dev;
+ int ret = 0;
+
+#define copy_field(_f, _msg, ...) \
+ if (dst->_f != src->_f) { \
+ if (verify) \
+ fsck_err(c, _msg ": got %llu, should be %llu" \
+ , ##__VA_ARGS__, dst->_f, src->_f); \
+ dst->_f = src->_f; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
+ }
+#define copy_stripe_field(_f, _msg, ...) \
+ if (dst->_f != src->_f) { \
+ if (verify) \
+ fsck_err(c, "stripe %zu has wrong "_msg \
+ ": got %u, should be %u", \
+ iter.pos, ##__VA_ARGS__, \
+ dst->_f, src->_f); \
+ dst->_f = src->_f; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
+ }
+#define copy_bucket_field(_f) \
+ if (dst->b[b]._f != src->b[b]._f) { \
+ if (verify) \
+ fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \
+ ": got %u, should be %u", dev, b, \
+ dst->b[b].mark.gen, \
+ bch2_data_types[dst->b[b].mark.data_type],\
+ dst->b[b]._f, src->b[b]._f); \
+ dst->b[b]._f = src->b[b]._f; \
+ set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags); \
}
- ret = bch2_btree_iter_unlock(&iter);
+#define copy_dev_field(_f, _msg, ...) \
+ copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+#define copy_fs_field(_f, _msg, ...) \
+ copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
+
+ for_each_member_device(ca, c, dev) {
+ struct bucket_array *dst = __bucket_array(ca, 0);
+ struct bucket_array *src = __bucket_array(ca, 1);
+ size_t b;
+
+ for (b = 0; b < src->nbuckets; b++) {
+ copy_bucket_field(_mark.gen);
+ copy_bucket_field(_mark.data_type);
+ copy_bucket_field(_mark.stripe);
+ copy_bucket_field(_mark.dirty_sectors);
+ copy_bucket_field(_mark.cached_sectors);
+ copy_bucket_field(stripe_redundancy);
+ copy_bucket_field(stripe);
+
+ dst->b[b].oldest_gen = src->b[b].oldest_gen;
+ }
+
+ {
+ struct bch_dev_usage *dst = ca->usage_base;
+ struct bch_dev_usage *src = (void *)
+ bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ dev_usage_u64s());
+
+ copy_dev_field(buckets_ec, "buckets_ec");
+ copy_dev_field(buckets_unavailable, "buckets_unavailable");
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
+ copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]);
+ copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ }
+ }
+ };
+
+ {
+ unsigned nr = fs_usage_u64s(c);
+ struct bch_fs_usage *dst = c->usage_base;
+ struct bch_fs_usage *src = (void *)
+ bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
+
+ copy_fs_field(hidden, "hidden");
+ copy_fs_field(btree, "btree");
+
+ if (!metadata_only) {
+ copy_fs_field(data, "data");
+ copy_fs_field(cached, "cached");
+ copy_fs_field(reserved, "reserved");
+ copy_fs_field(nr_inodes,"nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(persistent_reserved[i],
+ "persistent_reserved[%i]", i);
+ }
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+ char buf[80];
+
+ if (metadata_only &&
+ (e->data_type == BCH_DATA_user ||
+ e->data_type == BCH_DATA_cached))
+ continue;
+
+ bch2_replicas_entry_to_text(&PBUF(buf), e);
+
+ copy_fs_field(replicas[i], "%s", buf);
+ }
+ }
+
+#undef copy_fs_field
+#undef copy_dev_field
+#undef copy_bucket_field
+#undef copy_stripe_field
+#undef copy_field
+fsck_err:
+ if (ca)
+ percpu_ref_put(&ca->ref);
if (ret)
+ bch_err(c, "%s: ret %i", __func__, ret);
+ return ret;
+}
+
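+/*
+ * Allocate the shadow ("gc") copies of the per device bucket arrays and usage
+ * counters that GC accumulates into, seeded from the current bucket gens:
+ */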
+static int bch2_gc_start(struct bch_fs *c,
+ bool metadata_only)
+{
+ struct bch_dev *ca = NULL;
+ unsigned i;
+ int ret;
+
+ BUG_ON(c->usage_gc);
+
+ c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+ sizeof(u64), GFP_KERNEL);
+ if (!c->usage_gc) {
+ bch_err(c, "error allocating c->usage_gc");
+ return -ENOMEM;
+ }
+
+ for_each_member_device(ca, c, i) {
+ BUG_ON(ca->buckets[1]);
+ BUG_ON(ca->usage_gc);
+
+ ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!ca->buckets[1]) {
+ percpu_ref_put(&ca->ref);
+ bch_err(c, "error allocating ca->buckets[gc]");
+ return -ENOMEM;
+ }
+
+ ca->usage_gc = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage_gc) {
+ bch_err(c, "error allocating ca->usage_gc");
+ percpu_ref_put(&ca->ref);
+ return -ENOMEM;
+ }
+ }
+
+ ret = bch2_ec_mem_alloc(c, true);
+ if (ret) {
+ bch_err(c, "error allocating ec gc mem");
return ret;
+ }
- mutex_lock(&c->btree_root_lock);
+ percpu_down_write(&c->mark_lock);
- b = c->btree_roots[btree_id].b;
- bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key), 0);
- gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+ /*
+ * indicate to stripe code that we need to allocate for the gc stripes
+ * radix tree, too
+ */
+ gc_pos_set(c, gc_phase(GC_PHASE_START));
+
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *dst = __bucket_array(ca, 1);
+ struct bucket_array *src = __bucket_array(ca, 0);
+ size_t b;
+
+ dst->first_bucket = src->first_bucket;
+ dst->nbuckets = src->nbuckets;
+
+ for (b = 0; b < src->nbuckets; b++) {
+ struct bucket *d = &dst->b[b];
+ struct bucket *s = &src->b[b];
+
+ d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
+ d->gen_valid = s->gen_valid;
+
+ if (metadata_only &&
+ (s->mark.data_type == BCH_DATA_user ||
+ s->mark.data_type == BCH_DATA_cached))
+ d->_mark = s->mark;
+ }
+ };
+
+ percpu_up_write(&c->mark_lock);
- mutex_unlock(&c->btree_root_lock);
return 0;
}
-static void bch2_mark_allocator_buckets(struct bch_fs *c)
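+/*
+ * After GC has recomputed reflink refcounts, check them against the refcounts
+ * stored in the reflink btree and rewrite any keys that disagree:
+ */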
+static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
- struct bch_dev *ca;
- struct open_bucket *ob;
- size_t i, j, iter;
- unsigned ci;
+ struct bch_fs *c = trans->c;
+ struct reflink_gc *r;
+ const __le64 *refcount = bkey_refcount_c(k);
+ char buf[200];
+ int ret = 0;
+
+ if (!refcount)
+ return 0;
+
+ r = genradix_ptr(&c->reflink_gc_table, c->reflink_gc_idx++);
+ if (!r ||
+ r->offset != k.k->p.offset ||
+ r->size != k.k->size) {
+ bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
+ return -EINVAL;
+ }
+
+ if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ "reflink key has wrong refcount:\n"
+ " %s\n"
+ " should be %u",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ r->refcount)) {
+ struct bkey_i *new;
+
+ new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto fsck_err;
+ }
+
+ bkey_reassemble(new, k);
+
+ if (!r->refcount) {
+ new->k.type = KEY_TYPE_deleted;
+ new->k.size = 0;
+ } else {
+ *bkey_refcount(new) = cpu_to_le64(r->refcount);
+ }
+
+ ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new);
+ if (ret)
+ kfree(new);
+ }
+fsck_err:
+ return ret;
+}
+
+static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
+ bool metadata_only)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct reflink_gc *r;
+ size_t idx = 0;
+ char buf[200];
+ int ret = 0;
+
+ if (metadata_only)
+ return 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ if (initial) {
+ c->reflink_gc_idx = 0;
+
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
+ bch2_gc_reflink_done_initial_fn);
+ goto out;
+ }
+
+ for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ const __le64 *refcount = bkey_refcount_c(k);
+
+ if (!refcount)
+ continue;
+
+ r = genradix_ptr(&c->reflink_gc_table, idx);
+ if (!r ||
+ r->offset != k.k->p.offset ||
+ r->size != k.k->size) {
+ bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ "reflink key has wrong refcount:\n"
+ " %s\n"
+ " should be %u",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ r->refcount)) {
+ struct bkey_i *new;
+
+ new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bkey_reassemble(new, k);
- for_each_member_device(ca, c, ci) {
- spin_lock(&ca->freelist_lock);
+ if (!r->refcount)
+ new->k.type = KEY_TYPE_deleted;
+ else
+ *bkey_refcount(new) = cpu_to_le64(r->refcount);
- fifo_for_each_entry(i, &ca->free_inc, iter)
- bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
+ ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+ __bch2_btree_insert(&trans, BTREE_ID_reflink, new));
+ kfree(new);
- for (j = 0; j < RESERVE_NR; j++)
- fifo_for_each_entry(i, &ca->free[j], iter)
- bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ bch2_trans_iter_exit(&trans, &iter);
+out:
+ c->reflink_gc_nr = 0;
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
+static int bch2_gc_stripes_done_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct gc_stripe *m;
+ const struct bch_stripe *s;
+ char buf[200];
+ unsigned i;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_stripe)
+ return 0;
- spin_unlock(&ca->freelist_lock);
- }
+ s = bkey_s_c_to_stripe(k).v;
- for (ob = c->open_buckets;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
- ob++) {
- const struct bch_extent_ptr *ptr;
+ m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
- mutex_lock(&ob->lock);
- open_bucket_for_each_ptr(ob, ptr) {
- ca = c->devs[ptr->dev];
- bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
+ for (i = 0; i < s->nr_blocks; i++)
+ if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
+ goto inconsistent;
+ return 0;
+inconsistent:
+ if (fsck_err_on(true, c,
+ "stripe has wrong block sector count %u:\n"
+ " %s\n"
+ " should be %u", i,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
+ m ? m->block_sectors[i] : 0)) {
+ struct bkey_i_stripe *new;
+
+ new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto fsck_err;
}
- mutex_unlock(&ob->lock);
- }
-}
-static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
- enum bucket_data_type type)
-{
- u64 b = start >> ca->bucket_bits;
-
- do {
- bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
- b++;
- } while (b < end >> ca->bucket_bits);
-}
+ bkey_reassemble(&new->k_i, k);
-static void bch2_dev_mark_superblocks(struct bch_dev *ca)
-{
- struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
- unsigned i;
+ for (i = 0; i < new->v.nr_blocks; i++)
+ stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
- for (i = 0; i < layout->nr_superblocks; i++) {
- if (layout->sb_offset[i] == BCH_SB_SECTOR)
- mark_metadata_sectors(ca, 0, BCH_SB_SECTOR,
- BUCKET_SB);
-
- mark_metadata_sectors(ca,
- layout->sb_offset[i],
- layout->sb_offset[i] +
- (1 << layout->sb_max_size_bits),
- BUCKET_SB);
+ ret = bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i);
+ if (ret)
+ kfree(new);
}
+fsck_err:
+ return ret;
}
-/*
- * Mark non btree metadata - prios, journal
- */
-void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
+static int bch2_gc_stripes_done(struct bch_fs *c, bool initial,
+ bool metadata_only)
{
- unsigned i;
- u64 b;
-
- lockdep_assert_held(&c->sb_lock);
+ struct btree_trans trans;
+ int ret = 0;
- bch2_dev_mark_superblocks(ca);
+ if (metadata_only)
+ return 0;
- spin_lock(&c->journal.lock);
+ bch2_trans_init(&trans, c, 0, 0);
- for (i = 0; i < ca->journal.nr; i++) {
- b = ca->journal.buckets[i];
- bch2_mark_metadata_bucket(ca, ca->buckets + b,
- BUCKET_JOURNAL, true);
+ if (initial) {
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_stripes,
+ bch2_gc_stripes_done_initial_fn);
+ } else {
+ BUG();
}
- spin_unlock(&c->journal.lock);
+ bch2_trans_exit(&trans);
+ return ret;
}
-static void bch2_mark_metadata(struct bch_fs *c)
+static int bch2_gc_reflink_start_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
- struct bch_dev *ca;
- unsigned i;
-
- mutex_lock(&c->sb_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
-
- for_each_online_member(ca, c, i)
- bch2_mark_dev_metadata(c, ca);
- mutex_unlock(&c->sb_lock);
-}
-/* Also see bch2_pending_btree_node_free_insert_done() */
-static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
-{
- struct bch_fs_usage stats = { 0 };
- struct btree_update *as;
- struct pending_btree_node_free *d;
+ struct bch_fs *c = trans->c;
+ struct reflink_gc *r;
+ const __le64 *refcount = bkey_refcount_c(k);
- mutex_lock(&c->btree_interior_update_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
+ if (!refcount)
+ return 0;
- for_each_pending_btree_node_free(c, as, d)
- if (d->index_update_done)
- __bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- c->sb.btree_node_size, true,
- &stats, 0,
- BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE);
- /*
- * Don't apply stats - pending deletes aren't tracked in
- * bch_alloc_stats:
- */
+ r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
+ GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
- mutex_unlock(&c->btree_interior_update_lock);
+ r->offset = k.k->p.offset;
+ r->size = k.k->size;
+ r->refcount = 0;
+ return 0;
}
-void bch2_gc_start(struct bch_fs *c)
+static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
+ bool metadata_only)
{
- struct bch_dev *ca;
- struct bucket *g;
- struct bucket_mark new;
- unsigned i;
- int cpu;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct reflink_gc *r;
+ int ret = 0;
- lg_global_lock(&c->usage_lock);
+ if (metadata_only)
+ return 0;
- /*
- * Indicates to buckets code that gc is now in progress - done under
- * usage_lock to avoid racing with bch2_mark_key():
- */
- __gc_pos_set(c, GC_POS_MIN);
+ bch2_trans_init(&trans, c, 0, 0);
+ c->reflink_gc_nr = 0;
- /* Save a copy of the existing bucket stats while we recompute them: */
- for_each_member_device(ca, c, i) {
- ca->usage_cached = __bch2_dev_usage_read(ca);
- for_each_possible_cpu(cpu) {
- struct bch_dev_usage *p =
- per_cpu_ptr(ca->usage_percpu, cpu);
- memset(p, 0, sizeof(*p));
- }
+ if (initial) {
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
+ bch2_gc_reflink_start_initial_fn);
+ goto out;
}
- c->usage_cached = __bch2_fs_usage_read(c);
- for_each_possible_cpu(cpu) {
- struct bch_fs_usage *p =
- per_cpu_ptr(c->usage_percpu, cpu);
-
- memset(p->s, 0, sizeof(p->s));
- }
+ for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ const __le64 *refcount = bkey_refcount_c(k);
- lg_global_unlock(&c->usage_lock);
+ if (!refcount)
+ continue;
- /* Clear bucket marks: */
- for_each_member_device(ca, c, i)
- for_each_bucket(g, ca) {
- bucket_cmpxchg(g, new, ({
- new.owned_by_allocator = 0;
- new.data_type = 0;
- new.cached_sectors = 0;
- new.dirty_sectors = 0;
- }));
- ca->oldest_gens[g - ca->buckets] = new.gen;
+ r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
+ GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ break;
}
+
+ r->offset = k.k->p.offset;
+ r->size = k.k->size;
+ r->refcount = 0;
+ }
+ bch2_trans_iter_exit(&trans, &iter);
+out:
+ bch2_trans_exit(&trans);
+ return ret;
}
/**
- * bch_gc - recompute bucket marks and oldest_gen, rewrite btree nodes
+ * bch2_gc - walk _all_ references to buckets, and recompute them:
+ *
+ * Order matters here:
+ * - Concurrent GC relies on the fact that we have a total ordering for
+ * everything that GC walks - see gc_will_visit_node(),
+ * gc_will_visit_root()
+ *
+ * - also, references move around in the course of index updates and
+ * various other crap: everything needs to agree on the ordering
+ * references are allowed to move around in - e.g., we're allowed to
+ * start with a reference owned by an open_bucket (the allocator) and
+ * move it to the btree, but not the reverse.
+ *
+ * This is necessary to ensure that gc doesn't miss references that
+ * move around - if references move backwards in the ordering GC
+ * uses, GC could skip past them
*/
-void bch2_gc(struct bch_fs *c)
+int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
struct bch_dev *ca;
u64 start_time = local_clock();
- unsigned i;
+ unsigned i, iter = 0;
+ int ret;
- /*
- * Walk _all_ references to buckets, and recompute them:
- *
- * Order matters here:
- * - Concurrent GC relies on the fact that we have a total ordering for
- * everything that GC walks - see gc_will_visit_node(),
- * gc_will_visit_root()
- *
- * - also, references move around in the course of index updates and
- * various other crap: everything needs to agree on the ordering
- * references are allowed to move around in - e.g., we're allowed to
- * start with a reference owned by an open_bucket (the allocator) and
- * move it to the btree, but not the reverse.
- *
- * This is necessary to ensure that gc doesn't miss references that
- * move around - if references move backwards in the ordering GC
- * uses, GC could skip past them
- */
+ lockdep_assert_held(&c->state_lock);
trace_gc_start(c);
- /*
- * Do this before taking gc_lock - bch2_disk_reservation_get() blocks on
- * gc_lock if sectors_available goes to 0:
- */
- bch2_recalc_sectors_available(c);
-
down_write(&c->gc_lock);
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
+
+ /* flush interior btree updates: */
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+again:
+ ret = bch2_gc_start(c, metadata_only) ?:
+ bch2_gc_reflink_start(c, initial, metadata_only);
+ if (ret)
goto out;
- bch2_gc_start(c);
+ bch2_mark_superblocks(c);
- /* Walk allocator's references: */
- bch2_mark_allocator_buckets(c);
+ if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
+ !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
+ c->opts.fix_errors != FSCK_OPT_NO) {
+ bch_info(c, "starting topology repair pass");
+ ret = bch2_repair_topology(c);
+ if (ret)
+ goto out;
+ bch_info(c, "topology repair pass done");
- /* Walk btree: */
- while (c->gc_pos.phase < (int) BTREE_ID_NR) {
- int ret = c->btree_roots[c->gc_pos.phase].b
- ? bch2_gc_btree(c, (int) c->gc_pos.phase)
- : 0;
+ set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
+ }
- if (ret) {
- bch_err(c, "btree gc failed: %d", ret);
- set_bit(BCH_FS_GC_FAILURE, &c->flags);
- goto out;
- }
+ ret = bch2_gc_btrees(c, initial, metadata_only);
- gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
+ if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR &&
+ !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
+ !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ ret = 0;
}
- bch2_mark_metadata(c);
- bch2_mark_pending_btree_node_frees(c);
+ if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR)
+ ret = FSCK_ERR_EXIT;
- for_each_member_device(ca, c, i)
- atomic_long_set(&ca->saturated_count, 0);
+ if (ret)
+ goto out;
- /* Indicates that gc is no longer in progress: */
- gc_pos_set(c, gc_phase(GC_PHASE_DONE));
+#if 0
+ bch2_mark_pending_btree_node_frees(c);
+#endif
c->gc_count++;
+
+ if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+ (!iter && bch2_test_restart_gc)) {
+ /*
+ * XXX: make sure gens we fixed got saved
+ */
+ if (iter++ <= 2) {
+ bch_info(c, "Second GC pass needed, restarting:");
+ clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+
+ percpu_down_write(&c->mark_lock);
+ bch2_gc_free(c);
+ percpu_up_write(&c->mark_lock);
+ /* flush fsck errors, reset counters */
+ bch2_flush_fsck_errs(c);
+
+ goto again;
+ }
+
+ bch_info(c, "Unable to fix bucket gens, looping");
+ ret = -EINVAL;
+ }
out:
+ if (!ret) {
+ bch2_journal_block(&c->journal);
+
+ percpu_down_write(&c->mark_lock);
+ ret = bch2_gc_reflink_done(c, initial, metadata_only) ?:
+ bch2_gc_stripes_done(c, initial, metadata_only) ?:
+ bch2_gc_done(c, initial, metadata_only);
+
+ bch2_journal_unblock(&c->journal);
+ } else {
+ percpu_down_write(&c->mark_lock);
+ }
+
+ /* Indicates that gc is no longer in progress: */
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+
+ bch2_gc_free(c);
+ percpu_up_write(&c->mark_lock);
+
up_write(&c->gc_lock);
+
trace_gc_end(c);
- bch2_time_stats_update(&c->btree_gc_time, start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
/*
* Wake up allocator in case it was waiting for buckets: at startup,
* allocations can happen directly instead of via the allocator
* thread - issue wakeup in case they blocked on gc_lock:
*/
closure_wake_up(&c->freelist_wait);
+ return ret;
}
-/* Btree coalescing */
-
-static void recalc_packed_keys(struct btree *b)
-{
- struct bkey_packed *k;
-
- memset(&b->nr, 0, sizeof(b->nr));
-
- BUG_ON(b->nsets != 1);
-
- for (k = btree_bkey_first(b, b->set);
- k != btree_bkey_last(b, b->set);
- k = bkey_next(k))
- btree_keys_account_key_add(&b->nr, 0, k);
-}
-
-static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
- struct btree *old_nodes[GC_MERGE_NODES])
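+/*
+ * Returns true if any pointer in @k is more than 16 generations behind its
+ * bucket (i.e. the key should be updated); otherwise records the oldest
+ * pointer gen seen in each bucket's gc_gen:
+ */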
+static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
{
- struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
- unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
- unsigned blocks = btree_blocks(c) * 2 / 3;
- struct btree *new_nodes[GC_MERGE_NODES];
- struct btree_update *as;
- struct keylist keylist;
- struct bkey_format_state format_state;
- struct bkey_format new_format;
-
- memset(new_nodes, 0, sizeof(new_nodes));
- bch2_keylist_init(&keylist, NULL, 0);
-
- /* Count keys that are not deleted */
- for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
- u64s += old_nodes[i]->nr.live_u64s;
-
- nr_old_nodes = nr_new_nodes = i;
-
- /* Check if all keys in @old_nodes could fit in one fewer node */
- if (nr_old_nodes <= 1 ||
- __vstruct_blocks(struct btree_node, c->block_bits,
- DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
- return;
-
- /* Find a format that all keys in @old_nodes can pack into */
- bch2_bkey_format_init(&format_state);
-
- for (i = 0; i < nr_old_nodes; i++)
- __bch2_btree_calc_format(&format_state, old_nodes[i]);
-
- new_format = bch2_bkey_format_done(&format_state);
-
- /* Check if repacking would make any nodes too big to fit */
- for (i = 0; i < nr_old_nodes; i++)
- if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
- trace_btree_gc_coalesce_fail(c,
- BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
- return;
- }
-
- if (bch2_keylist_realloc(&keylist, NULL, 0,
- (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
- trace_btree_gc_coalesce_fail(c,
- BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
- return;
- }
-
- as = bch2_btree_update_start(c, iter->btree_id,
- btree_update_reserve_required(c, parent) + nr_old_nodes,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE,
- NULL);
- if (IS_ERR(as)) {
- trace_btree_gc_coalesce_fail(c,
- BTREE_GC_COALESCE_FAIL_RESERVE_GET);
- bch2_keylist_free(&keylist, NULL);
- return;
- }
-
- trace_btree_gc_coalesce(c, parent, nr_old_nodes);
-
- for (i = 0; i < nr_old_nodes; i++)
- bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
-
- /* Repack everything with @new_format and sort down to one bset */
- for (i = 0; i < nr_old_nodes; i++)
- new_nodes[i] =
- __bch2_btree_node_alloc_replacement(as, old_nodes[i],
- new_format);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- /*
- * Conceptually we concatenate the nodes together and slice them
- * up at different boundaries.
- */
- for (i = nr_new_nodes - 1; i > 0; --i) {
- struct btree *n1 = new_nodes[i];
- struct btree *n2 = new_nodes[i - 1];
-
- struct bset *s1 = btree_bset_first(n1);
- struct bset *s2 = btree_bset_first(n2);
- struct bkey_packed *k, *last = NULL;
-
- /* Calculate how many keys from @n2 we could fit inside @n1 */
- u64s = 0;
-
- for (k = s2->start;
- k < vstruct_last(s2) &&
- vstruct_blocks_plus(n1->data, c->block_bits,
- u64s + k->u64s) <= blocks;
- k = bkey_next(k)) {
- last = k;
- u64s += k->u64s;
- }
+ percpu_down_read(&c->mark_lock);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
- if (u64s == le16_to_cpu(s2->u64s)) {
- /* n2 fits entirely in n1 */
- n1->key.k.p = n1->data->max_key = n2->data->max_key;
-
- memcpy_u64s(vstruct_last(s1),
- s2->start,
- le16_to_cpu(s2->u64s));
- le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));
-
- set_btree_bset_end(n1, n1->set);
-
- six_unlock_write(&n2->lock);
- bch2_btree_node_free_never_inserted(c, n2);
- six_unlock_intent(&n2->lock);
-
- memmove(new_nodes + i - 1,
- new_nodes + i,
- sizeof(new_nodes[0]) * (nr_new_nodes - i));
- new_nodes[--nr_new_nodes] = NULL;
- } else if (u64s) {
- /* move part of n2 into n1 */
- n1->key.k.p = n1->data->max_key =
- bkey_unpack_pos(n1, last);
-
- n2->data->min_key =
- btree_type_successor(iter->btree_id,
- n1->data->max_key);
-
- memcpy_u64s(vstruct_last(s1),
- s2->start, u64s);
- le16_add_cpu(&s1->u64s, u64s);
-
- memmove(s2->start,
- vstruct_idx(s2, u64s),
- (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
- s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);
-
- set_btree_bset_end(n1, n1->set);
- set_btree_bset_end(n2, n2->set);
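+ /*
+ * A pointer more than 16 generations behind its bucket is holding
+ * oldest_gen back: tell the caller to rewrite this key so stale
+ * cached pointers get dropped:
+ */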
+ if (gen_after(g->mark.gen, ptr->gen) > 16) {
+ percpu_up_read(&c->mark_lock);
+ return true;
}
}
- for (i = 0; i < nr_new_nodes; i++) {
- struct btree *n = new_nodes[i];
-
- recalc_packed_keys(n);
- btree_node_reset_sib_u64s(n);
-
- bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
- bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+ if (gen_after(g->gc_gen, ptr->gen))
+ g->gc_gen = ptr->gen;
}
+ percpu_up_read(&c->mark_lock);
- /*
- * The keys for the old nodes get deleted. We don't want to insert keys
- * that compare equal to the keys for the new nodes we'll also be
- * inserting - we can't because keys on a keylist must be strictly
- * greater than the previous keys, and we also don't need to since the
- * key for the new node will serve the same purpose (overwriting the key
- * for the old node).
- */
- for (i = 0; i < nr_old_nodes; i++) {
- struct bkey_i delete;
- unsigned j;
-
- for (j = 0; j < nr_new_nodes; j++)
- if (!bkey_cmp(old_nodes[i]->key.k.p,
- new_nodes[j]->key.k.p))
- goto next;
-
- bkey_init(&delete.k);
- delete.k.p = old_nodes[i]->key.k.p;
- bch2_keylist_add_in_order(&keylist, &delete);
-next:
- i = i;
- }
-
- /*
- * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
- * does the lookup once and thus expects the keys to be in sorted order
- * so we have to make sure the new keys are correctly ordered with
- * respect to the deleted keys added in the previous loop
- */
- for (i = 0; i < nr_new_nodes; i++)
- bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
-
- /* Insert the newly coalesced nodes */
- bch2_btree_insert_node(as, parent, iter, &keylist);
-
- BUG_ON(!bch2_keylist_empty(&keylist));
+ return false;
+}
- BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+/*
+ * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
+ * node pointers currently never have cached pointers that can become stale:
+ */
+static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf sk;
+ int ret = 0, commit_err = 0;
- BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
+ bch2_bkey_buf_init(&sk);
+ bch2_trans_init(&trans, c, 0, 0);
- for (i = 0; i < nr_new_nodes; i++)
- bch2_btree_open_bucket_put(c, new_nodes[i]);
+ bch2_trans_iter_init(&trans, &iter, btree_id, POS_MIN,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS);
- /* Free the old nodes and update our sliding window */
- for (i = 0; i < nr_old_nodes; i++) {
- bch2_btree_node_free_inmem(c, old_nodes[i], iter);
- six_unlock_intent(&old_nodes[i]->lock);
+ while ((bch2_trans_begin(&trans),
+ k = bch2_btree_iter_peek(&iter)).k) {
+ ret = bkey_err(k);
- /*
- * the index update might have triggered a split, in which case
- * the nodes we coalesced - the new nodes we just created -
- * might not be sibling nodes anymore - don't add them to the
- * sliding window (except the first):
- */
- if (!i) {
- old_nodes[i] = new_nodes[i];
- } else {
- old_nodes[i] = NULL;
- if (new_nodes[i])
- six_unlock_intent(&new_nodes[i]->lock);
+ if (ret == -EINTR)
+ continue;
+ if (ret)
+ break;
+
+ c->gc_gens_pos = iter.pos;
+
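+ /*
+ * Rewriting the key is how stale cached pointers get dropped, via
+ * bch2_extent_normalize(); a commit error other than a transaction
+ * restart stops further rewrites but not the walk itself:
+ */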
+ if (gc_btree_gens_key(c, k) && !commit_err) {
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ bch2_extent_normalize(c, bkey_i_to_s(sk.k));
+
+ commit_err =
+ bch2_trans_update(&trans, &iter, sk.k, 0) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_NOFAIL);
+ if (commit_err == -EINTR) {
+ commit_err = 0;
+ continue;
+ }
}
+
+ bch2_btree_iter_advance(&iter);
}
+ bch2_trans_iter_exit(&trans, &iter);
- bch2_btree_update_done(as);
- bch2_keylist_free(&keylist, NULL);
+ bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret;
}
-static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
+int bch2_gc_gens(struct bch_fs *c)
{
- struct btree_iter iter;
- struct btree *b;
+ struct bch_dev *ca;
+ struct bucket_array *buckets;
+ struct bucket *g;
unsigned i;
-
- /* Sliding window of adjacent btree nodes */
- struct btree *merge[GC_MERGE_NODES];
- u32 lock_seq[GC_MERGE_NODES];
+ int ret;
/*
- * XXX: We don't have a good way of positively matching on sibling nodes
- * that have the same parent - this code works by handling the cases
- * where they might not have the same parent, and is thus fragile. Ugh.
- *
- * Perhaps redo this to use multiple linked iterators?
+ * Ideally we would be using state_lock and not gc_lock here, but that
+ * introduces a deadlock in the RO path - we currently take the state
+ * lock at the start of going RO, thus the gc thread may get stuck:
*/
- memset(merge, 0, sizeof(merge));
-
- __for_each_btree_node(&iter, c, btree_id, POS_MIN,
- BTREE_MAX_DEPTH, 0,
- BTREE_ITER_PREFETCH, b) {
- memmove(merge + 1, merge,
- sizeof(merge) - sizeof(merge[0]));
- memmove(lock_seq + 1, lock_seq,
- sizeof(lock_seq) - sizeof(lock_seq[0]));
+ down_read(&c->gc_lock);
- merge[0] = b;
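+ /*
+ * Seed each bucket's gc_gen from its current gen; the btree walks
+ * below ratchet gc_gen down to the oldest pointer gen still
+ * referencing the bucket:
+ */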
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
- for (i = 1; i < GC_MERGE_NODES; i++) {
- if (!merge[i] ||
- !six_relock_intent(&merge[i]->lock, lock_seq[i]))
- break;
+ for_each_bucket(g, buckets)
+ g->gc_gen = g->mark.gen;
+ up_read(&ca->bucket_lock);
+ }
- if (merge[i]->level != merge[0]->level) {
- six_unlock_intent(&merge[i]->lock);
- break;
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if ((1 << i) & BTREE_ID_HAS_PTRS) {
+ c->gc_gens_btree = i;
+ c->gc_gens_pos = POS_MIN;
+ ret = bch2_gc_btree_gens(c, i);
+ if (ret) {
+ bch_err(c, "error recalculating oldest_gen: %i", ret);
+ goto err;
}
}
- memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
-
- bch2_coalesce_nodes(c, &iter, merge);
-
- for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
- lock_seq[i] = merge[i]->lock.state.seq;
- six_unlock_intent(&merge[i]->lock);
- }
-
- lock_seq[0] = merge[0]->lock.state.seq;
-
- if (test_bit(BCH_FS_GC_STOPPING, &c->flags)) {
- bch2_btree_iter_unlock(&iter);
- return -ESHUTDOWN;
- }
- bch2_btree_iter_cond_resched(&iter);
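+ /*
+ * Every btree with pointers has been walked: gc_gen now holds each
+ * bucket's oldest referencing gen, so publish it as oldest_gen:
+ */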
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
- /*
- * If the parent node wasn't relocked, it might have been split
- * and the nodes in our sliding window might not have the same
- * parent anymore - blow away the sliding window:
- */
- if (iter.nodes[iter.level + 1] &&
- !btree_node_intent_locked(&iter, iter.level + 1))
- memset(merge + 1, 0,
- (GC_MERGE_NODES - 1) * sizeof(merge[0]));
+ for_each_bucket(g, buckets)
+ g->oldest_gen = g->gc_gen;
+ up_read(&ca->bucket_lock);
}
- return bch2_btree_iter_unlock(&iter);
-}
-
-/**
- * bch_coalesce - coalesce adjacent nodes with low occupancy
- */
-void bch2_coalesce(struct bch_fs *c)
-{
- enum btree_id id;
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
- return;
+ c->gc_gens_btree = 0;
+ c->gc_gens_pos = POS_MIN;
- down_read(&c->gc_lock);
- trace_gc_coalesce_start(c);
-
- for (id = 0; id < BTREE_ID_NR; id++) {
- int ret = c->btree_roots[id].b
- ? bch2_coalesce_btree(c, id)
- : 0;
-
- if (ret) {
- if (ret != -ESHUTDOWN)
- bch_err(c, "btree coalescing failed: %d", ret);
- set_bit(BCH_FS_GC_FAILURE, &c->flags);
- return;
- }
- }
-
- trace_gc_coalesce_end(c);
+ c->gc_count++;
+err:
up_read(&c->gc_lock);
+ return ret;
}
static int bch2_gc_thread(void *arg)
{
struct bch_fs *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
- unsigned long last = atomic_long_read(&clock->now);
+ unsigned long last = atomic64_read(&clock->now);
unsigned last_kick = atomic_read(&c->kick_gc);
+ int ret;
set_freezable();
if (c->btree_gc_periodic) {
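+ /*
+ * With periodic gc, schedule the next pass after IO equal to
+ * roughly 1/16th of the filesystem's capacity (in io clock time):
+ */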
unsigned long next = last + c->capacity / 16;
- if (atomic_long_read(&clock->now) >= next)
+ if (atomic64_read(&clock->now) >= next)
break;
bch2_io_clock_schedule_timeout(clock, next);
}
__set_current_state(TASK_RUNNING);
- last = atomic_long_read(&clock->now);
+ last = atomic64_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- bch2_gc(c);
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
+ ret = bch2_gc(c, false, false);
+#else
+ ret = bch2_gc_gens(c);
+#endif
+ if (ret < 0)
+ bch_err(c, "btree gc failed: %i", ret);
debug_check_no_locks_held();
}
void bch2_gc_thread_stop(struct bch_fs *c)
{
- set_bit(BCH_FS_GC_STOPPING, &c->flags);
-
- if (c->gc_thread)
- kthread_stop(c->gc_thread);
+ struct task_struct *p;
+ p = c->gc_thread;
c->gc_thread = NULL;
- clear_bit(BCH_FS_GC_STOPPING, &c->flags);
+
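+ /*
+ * The ref from get_task_struct() in bch2_gc_thread_start() keeps p
+ * valid here even if the thread has already exited:
+ */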
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
+ }
}
int bch2_gc_thread_start(struct bch_fs *c)
{
struct task_struct *p;
- BUG_ON(c->gc_thread);
-
- p = kthread_create(bch2_gc_thread, c, "bcache_gc");
- if (IS_ERR(p))
- return PTR_ERR(p);
-
- c->gc_thread = p;
- wake_up_process(c->gc_thread);
- return 0;
-}
-
-/* Initial GC computes bucket marks during startup */
-
-static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
-{
- struct btree_iter iter;
- struct btree *b;
- struct range_checks r;
- int ret = 0;
-
- btree_node_range_checks_init(&r, 0);
-
- if (!c->btree_roots[id].b)
+ if (c->gc_thread)
return 0;
- ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
- bkey_i_to_s_c(&c->btree_roots[id].b->key));
- if (ret)
- return ret;
-
- /*
- * We have to hit every btree node before starting journal replay, in
- * order for the journal seq blacklist machinery to work:
- */
- for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
- btree_node_range_checks(c, b, &r);
-
- if (btree_node_has_ptrs(b)) {
- struct btree_node_iter node_iter;
- struct bkey unpacked;
- struct bkey_s_c k;
-
- for_each_btree_node_key_unpack(b, k, &node_iter,
- btree_node_is_extents(b),
- &unpacked) {
- ret = bch2_btree_mark_key_initial(c,
- btree_node_type(b), k);
- if (ret)
- goto err;
- }
- }
-
- bch2_btree_iter_cond_resched(&iter);
- }
-err:
- bch2_btree_iter_unlock(&iter);
- return ret;
-}
-
-int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
-{
- unsigned iter = 0;
- enum btree_id id;
- int ret;
-
- mutex_lock(&c->sb_lock);
- if (!bch2_sb_get_replicas(c->disk_sb)) {
- if (BCH_SB_INITIALIZED(c->disk_sb))
- bch_info(c, "building replicas info");
- set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
- }
- mutex_unlock(&c->sb_lock);
-again:
- bch2_gc_start(c);
-
- for (id = 0; id < BTREE_ID_NR; id++) {
- ret = bch2_initial_gc_btree(c, id);
- if (ret)
- return ret;
- }
-
- ret = bch2_journal_mark(c, journal);
- if (ret)
- return ret;
-
- bch2_mark_metadata(c);
-
- if (test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
- if (iter++ > 2) {
- bch_info(c, "Unable to fix bucket gens, looping");
- return -EINVAL;
- }
-
- bch_info(c, "Fixed gens, restarting initial mark and sweep:");
- clear_bit(BCH_FS_FIXED_GENS, &c->flags);
- goto again;
+ p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
+ if (IS_ERR(p)) {
+ bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
+ return PTR_ERR(p);
}
- /*
- * Skip past versions that might have possibly been used (as nonces),
- * but hadn't had their pointers written:
- */
- if (c->sb.encryption_type)
- atomic64_add(1 << 16, &c->key_version);
-
- gc_pos_set(c, gc_phase(GC_PHASE_DONE));
- set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
-
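+ /* Pair with put_task_struct() in bch2_gc_thread_stop(): */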
+ get_task_struct(p);
+ c->gc_thread = p;
+ wake_up_process(p);
return 0;
}