*/
#include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_locking.h"
-#include "btree_update.h"
+#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
+#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
+#include "journal_io.h"
#include "keylist.h"
#include "move.h"
+#include "replicas.h"
#include "super-io.h"
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
#include <linux/rcupdate.h>
+#include <linux/sched/task.h>
#include <trace/events/bcachefs.h>
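+/*
+ * gc_pos tracks how far mark and sweep has progressed: it's written under a
+ * seqcount so that readers (see gc_pos_cmp()) can reliably tell whether gc
+ * has visited a given position yet:
+ */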
+static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
+{
+ write_seqcount_begin(&c->gc_pos_lock);
+ c->gc_pos = new_pos;
+ write_seqcount_end(&c->gc_pos_lock);
+}
+
+static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
+{
+ BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
+ __gc_pos_set(c, new_pos);
+}
+
+/* range_checks - for validating min/max pos of each btree node: */
+
struct range_checks {
struct range_level {
struct bpos min;
}
}
-u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
+/* marking of btree keys/nodes: */
+
+static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
+ u8 *max_stale, bool initial)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
- u8 max_stale = 0;
-
- if (bkey_extent_is_data(k.k)) {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ struct gc_pos pos = { 0 };
+ unsigned flags =
+ BCH_BUCKET_MARK_GC|
+ (initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
+ int ret = 0;
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
- size_t b = PTR_BUCKET_NR(ca, ptr);
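+ /*
+ * On the initial (startup) pass we also verify key versions, replicas
+ * entries and bucket gens, repairing what we can:
+ */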
+ if (initial) {
+ BUG_ON(journal_seq_verify(c) &&
+ k.k->version.lo > journal_cur_seq(&c->journal));
- if (gen_after(ca->oldest_gens[b], ptr->gen))
- ca->oldest_gens[b] = ptr->gen;
+ if (k.k->version.lo > atomic64_read(&c->key_version))
+ atomic64_set(&c->key_version, k.k->version.lo);
- max_stale = max(max_stale, ptr_stale(ca, ptr));
+ if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+ fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
+ "superblock not marked as containing replicas (type %u)",
+ k.k->type)) {
+ ret = bch2_mark_bkey_replicas(c, k);
+ if (ret)
+ return ret;
}
- }
-
- return max_stale;
-}
-/*
- * For runtime mark and sweep:
- */
-static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
- struct bkey_s_c k)
-{
- switch (type) {
- case BKEY_TYPE_BTREE:
- bch2_gc_mark_key(c, k, c->sb.btree_node_size, true);
- return 0;
- case BKEY_TYPE_EXTENTS:
- bch2_gc_mark_key(c, k, k.k->size, false);
- return bch2_btree_key_recalc_oldest_gen(c, k);
- default:
- BUG();
- }
-}
-
-int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
- struct bkey_s_c k)
-{
- int ret = 0;
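+ /*
+ * Check each pointer against the bucket gen in the gc copy of the
+ * alloc info, fixing missing and future gens:
+ */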
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
+ struct bucket *g2 = PTR_BUCKET(ca, ptr, false);
+
+ if (mustfix_fsck_err_on(!g->gen_valid, c,
+ "found ptr with missing gen in alloc btree,\n"
+ "type %u gen %u",
+ k.k->type, ptr->gen)) {
+ g2->_mark.gen = g->_mark.gen = ptr->gen;
+ g2->_mark.dirty = g->_mark.dirty = true;
+ g2->gen_valid = g->gen_valid = true;
+ }
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const struct bch_extent_ptr *ptr;
-
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
- struct bucket *g = PTR_BUCKET(ca, ptr);
- struct bucket_mark new;
-
- if (fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
- "%s ptr gen in the future: %u > %u",
- type == BKEY_TYPE_BTREE
- ? "btree" : "data",
- ptr->gen, g->mark.gen)) {
- bucket_cmpxchg(g, new, new.gen = ptr->gen);
+ if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
+ "%u ptr gen in the future: %u > %u",
+ k.k->type, ptr->gen, g->mark.gen)) {
+ g2->_mark.gen = g->_mark.gen = ptr->gen;
+ g2->_mark.dirty = g->_mark.dirty = true;
+ g2->gen_valid = g->gen_valid = true;
set_bit(BCH_FS_FIXED_GENS, &c->flags);
- ca->need_prio_write = true;
}
-
}
- break;
- }
}
- atomic64_set(&c->key_version,
- max_t(u64, k.k->version.lo,
- atomic64_read(&c->key_version)));
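+ /* Update each bucket's oldest_gen, and track the most stale pointer: */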
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, true);
+
+ if (gen_after(g->oldest_gen, ptr->gen))
+ g->oldest_gen = ptr->gen;
+
+ *max_stale = max(*max_stale, ptr_stale(ca, ptr));
+ }
- bch2_btree_mark_key(c, type, k);
+ bch2_mark_key(c, k, true, k.k->size, pos, NULL, 0, flags);
fsck_err:
return ret;
}
-static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b)
+static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
+ u8 *max_stale, bool initial)
{
- if (btree_node_has_ptrs(b)) {
- struct btree_node_iter iter;
- struct bkey unpacked;
- struct bkey_s_c k;
- u8 stale = 0;
-
- for_each_btree_node_key_unpack(b, k, &iter,
- btree_node_is_extents(b),
- &unpacked) {
- bch2_bkey_debugcheck(c, b, k);
- stale = max(stale, bch2_btree_mark_key(c,
- btree_node_type(b), k));
- }
-
- if (btree_gc_rewrite_disabled(c))
- return false;
+ struct btree_node_iter iter;
+ struct bkey unpacked;
+ struct bkey_s_c k;
+ int ret = 0;
- if (stale > 10)
- return true;
- }
+ *max_stale = 0;
- if (btree_gc_always_rewrite(c))
- return true;
+ if (!btree_node_type_needs_gc(btree_node_type(b)))
+ return 0;
- return false;
-}
+ for_each_btree_node_key_unpack(b, k, &iter,
+ &unpacked) {
+ bch2_bkey_debugcheck(c, b, k);
-static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
-{
- write_seqcount_begin(&c->gc_pos_lock);
- c->gc_pos = new_pos;
- write_seqcount_end(&c->gc_pos_lock);
-}
+ ret = bch2_gc_mark_key(c, k, max_stale, initial);
+ if (ret)
+ break;
+ }
-static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
-{
- BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
- __gc_pos_set(c, new_pos);
+ return ret;
}
-static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
+ bool initial)
{
struct btree_iter iter;
struct btree *b;
- bool should_rewrite;
struct range_checks r;
- unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;
- int ret;
+ unsigned depth = btree_node_type_needs_gc(btree_id) ? 0 : 1;
+ u8 max_stale;
+ int ret = 0;
+
+ gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
/*
* if expensive_debug_checks is on, run range_checks on all leaf nodes:
+ *
+ * and on startup, we have to read every btree node (XXX: only if it was
+ * an unclean shutdown)
*/
- if (expensive_debug_checks(c))
+ if (initial || expensive_debug_checks(c))
depth = 0;
btree_node_range_checks_init(&r, depth);
- for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
+ __for_each_btree_node(&iter, c, btree_id, POS_MIN,
+ 0, depth, BTREE_ITER_PREFETCH, b) {
btree_node_range_checks(c, b, &r);
bch2_verify_btree_nr_keys(b);
- should_rewrite = btree_gc_mark_node(c, b);
-
gc_pos_set(c, gc_pos_btree_node(b));
- if (should_rewrite)
- bch2_btree_node_rewrite(&iter, b, NULL);
+ ret = btree_gc_mark_node(c, b, &max_stale, initial);
+ if (ret)
+ break;
+
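+ /*
+ * At runtime, rewrite nodes with too many stale pointers; very
+ * stale nodes are rewritten from the reserve, without blocking:
+ */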
+ if (!initial) {
+ if (max_stale > 64)
+ bch2_btree_node_rewrite(c, &iter,
+ b->data->keys.seq,
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_GC_LOCK_HELD);
+ else if (!btree_gc_rewrite_disabled(c) &&
+ (btree_gc_always_rewrite(c) || max_stale > 16))
+ bch2_btree_node_rewrite(c, &iter,
+ b->data->keys.seq,
+ BTREE_INSERT_NOWAIT|
+ BTREE_INSERT_GC_LOCK_HELD);
+ }
bch2_btree_iter_cond_resched(&iter);
}
- ret = bch2_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter) ?: ret;
if (ret)
return ret;
mutex_lock(&c->btree_root_lock);
-
b = c->btree_roots[btree_id].b;
- bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
+ if (!btree_node_fake(b))
+ ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
+ &max_stale, initial);
gc_pos_set(c, gc_pos_btree_root(b->btree_id));
-
mutex_unlock(&c->btree_root_lock);
- return 0;
+
+ return ret;
}
-static void bch2_mark_allocator_buckets(struct bch_fs *c)
+static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
- struct bch_dev *ca;
- struct open_bucket *ob;
- size_t i, j, iter;
- unsigned ci;
-
- for_each_member_device(ca, c, ci) {
- spin_lock(&ca->freelist_lock);
+ return (int) btree_id_to_gc_phase(l) -
+ (int) btree_id_to_gc_phase(r);
+}
- fifo_for_each_entry(i, &ca->free_inc, iter)
- bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
+static int bch2_gc_btrees(struct bch_fs *c, struct list_head *journal,
+ bool initial)
+{
+ enum btree_id ids[BTREE_ID_NR];
+ u8 max_stale;
+ unsigned i;
- for (j = 0; j < RESERVE_NR; j++)
- fifo_for_each_entry(i, &ca->free[j], iter)
- bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
+ for (i = 0; i < BTREE_ID_NR; i++)
+ ids[i] = i;
+ bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
- spin_unlock(&ca->freelist_lock);
- }
+ for (i = 0; i < BTREE_ID_NR; i++) {
+ enum btree_id id = ids[i];
+ enum btree_node_type type = __btree_node_type(0, id);
- for (ob = c->open_buckets;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
- ob++) {
- const struct bch_extent_ptr *ptr;
+ int ret = bch2_gc_btree(c, id, initial);
+ if (ret)
+ return ret;
- mutex_lock(&ob->lock);
- open_bucket_for_each_ptr(ob, ptr) {
- ca = c->devs[ptr->dev];
- bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
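+ /*
+ * Also mark matching keys still sitting in the journal, which
+ * may not have been replayed into the btree yet:
+ */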
+ if (journal && btree_node_type_needs_gc(type)) {
+ struct bkey_i *k, *n;
+ struct jset_entry *j;
+ struct journal_replay *r;
+
+ list_for_each_entry(r, journal, list)
+ for_each_jset_key(k, n, j, &r->j) {
+ if (type == __btree_node_type(j->level, j->btree_id)) {
+ ret = bch2_gc_mark_key(c,
+ bkey_i_to_s_c(k),
+ &max_stale, initial);
+ if (ret)
+ return ret;
+ }
+ }
}
- mutex_unlock(&ob->lock);
}
+
+ return 0;
}
-static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
- enum bucket_data_type type)
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+ u64 start, u64 end,
+ enum bch_data_type type,
+ unsigned flags)
{
- u64 b = start >> ca->bucket_bits;
+ u64 b = sector_to_bucket(ca, start);
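+ /* the range may begin and end partway through a bucket: */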
do {
- bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
+ unsigned sectors =
+ min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+ bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+ gc_phase(GC_PHASE_SB), flags);
b++;
- } while (b < end >> ca->bucket_bits);
+ start += sectors;
+ } while (start < end);
}
-static void bch2_dev_mark_superblocks(struct bch_dev *ca)
+void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+ unsigned flags)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
unsigned i;
+ u64 b;
- for (i = 0; i < layout->nr_superblocks; i++) {
- if (layout->sb_offset[i] == BCH_SB_SECTOR)
- mark_metadata_sectors(ca, 0, BCH_SB_SECTOR,
- BUCKET_SB);
-
- mark_metadata_sectors(ca,
- layout->sb_offset[i],
- layout->sb_offset[i] +
- (1 << layout->sb_max_size_bits),
- BUCKET_SB);
+ /*
+ * This conditional is kind of gross, but we may be called from the
+ * device add path, before the new device has actually been added to the
+ * running filesystem:
+ */
+ if (c) {
+ lockdep_assert_held(&c->sb_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ } else {
+ preempt_disable();
}
-}
-/*
- * Mark non btree metadata - prios, journal
- */
-void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
-{
- unsigned i;
- u64 b;
-
- lockdep_assert_held(&c->sb_lock);
+ for (i = 0; i < layout->nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
- bch2_dev_mark_superblocks(ca);
+ if (offset == BCH_SB_SECTOR)
+ mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+ BCH_DATA_SB, flags);
- spin_lock(&c->journal.lock);
+ mark_metadata_sectors(c, ca, offset,
+ offset + (1 << layout->sb_max_size_bits),
+ BCH_DATA_SB, flags);
+ }
for (i = 0; i < ca->journal.nr; i++) {
b = ca->journal.buckets[i];
- bch2_mark_metadata_bucket(ca, ca->buckets + b,
- BUCKET_JOURNAL, true);
+ bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+ ca->mi.bucket_size,
+ gc_phase(GC_PHASE_SB), flags);
}
- spin_unlock(&c->journal.lock);
-
- spin_lock(&ca->prio_buckets_lock);
-
- for (i = 0; i < prio_buckets(ca) * 2; i++) {
- b = ca->prio_buckets[i];
- if (b)
- bch2_mark_metadata_bucket(ca, ca->buckets + b,
- BUCKET_PRIOS, true);
+ if (c) {
+ percpu_up_read_preempt_enable(&c->mark_lock);
+ } else {
+ preempt_enable();
}
-
- spin_unlock(&ca->prio_buckets_lock);
}
-static void bch2_mark_metadata(struct bch_fs *c)
+static void bch2_mark_superblocks(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
mutex_lock(&c->sb_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
+ gc_pos_set(c, gc_phase(GC_PHASE_SB));
for_each_online_member(ca, c, i)
- bch2_mark_dev_metadata(c, ca);
+ bch2_mark_dev_superblock(c, ca, BCH_BUCKET_MARK_GC);
mutex_unlock(&c->sb_lock);
}
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
- struct bch_fs_usage stats = { 0 };
- struct btree_interior_update *as;
+ struct gc_pos pos = { 0 };
+ struct btree_update *as;
struct pending_btree_node_free *d;
mutex_lock(&c->btree_interior_update_lock);
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- __bch2_gc_mark_key(c, bkey_i_to_s_c(&d->key),
- c->sb.btree_node_size, true,
- &stats);
- /*
- * Don't apply stats - pending deletes aren't tracked in
- * bch_alloc_stats:
- */
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+ true, 0,
+ pos, NULL, 0,
+ BCH_BUCKET_MARK_GC);
mutex_unlock(&c->btree_interior_update_lock);
}
-void bch2_gc_start(struct bch_fs *c)
+static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
struct bch_dev *ca;
- struct bucket *g;
- struct bucket_mark new;
- unsigned i;
- int cpu;
+ struct open_bucket *ob;
+ size_t i, j, iter;
+ unsigned ci;
- lg_global_lock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+
+ spin_lock(&c->freelist_lock);
+ gc_pos_set(c, gc_pos_alloc(c, NULL));
+
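+ /*
+ * Buckets owned by the allocator (freelists, open buckets) aren't
+ * referenced by any keys, so mark them here:
+ */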
+ for_each_member_device(ca, c, ci) {
+ fifo_for_each_entry(i, &ca->free_inc, iter)
+ bch2_mark_alloc_bucket(c, ca, i, true,
+ gc_pos_alloc(c, NULL),
+ BCH_BUCKET_MARK_GC);
- /*
- * Indicates to buckets code that gc is now in progress - done under
- * usage_lock to avoid racing with bch2_mark_key():
- */
- __gc_pos_set(c, GC_POS_MIN);
- /* Save a copy of the existing bucket stats while we recompute them: */
+
+ for (j = 0; j < RESERVE_NR; j++)
+ fifo_for_each_entry(i, &ca->free[j], iter)
+ bch2_mark_alloc_bucket(c, ca, i, true,
+ gc_pos_alloc(c, NULL),
+ BCH_BUCKET_MARK_GC);
+ }
+
+ spin_unlock(&c->freelist_lock);
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid) {
+ gc_pos_set(c, gc_pos_alloc(c, ob));
+ ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
+ gc_pos_alloc(c, ob),
+ BCH_BUCKET_MARK_GC);
+ }
+ spin_unlock(&ob->lock);
+ }
+
+ percpu_up_read_preempt_enable(&c->mark_lock);
+}
+
+static void bch2_gc_free(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ genradix_free(&c->stripes[1]);
+
for_each_member_device(ca, c, i) {
- ca->usage_cached = __bch2_dev_usage_read(ca);
- for_each_possible_cpu(cpu) {
- struct bch_dev_usage *p =
- per_cpu_ptr(ca->usage_percpu, cpu);
- memset(p, 0, sizeof(*p));
+ kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+ ca->buckets[1] = NULL;
+
+ free_percpu(ca->usage[1]);
+ ca->usage[1] = NULL;
+ }
+
+ percpu_down_write(&c->mark_lock);
+
+ free_percpu(c->usage[1]);
+ c->usage[1] = NULL;
+
+ percpu_up_write(&c->mark_lock);
+}
+
+static void bch2_gc_done(struct bch_fs *c, bool initial)
+{
+ struct bch_dev *ca;
+ bool verify = !initial ||
+ (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO));
+ unsigned i;
+
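+/*
+ * Compare the usage the rest of the filesystem sees (the [0] copies) against
+ * what gc just recomputed (the [1] copies), complaining about and fixing any
+ * differences:
+ */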
+#define copy_field(_f, _msg, ...) \
+ if (dst->_f != src->_f) { \
+ if (verify) \
+ bch_err(c, _msg ": got %llu, should be %llu, fixing"\
+ , ##__VA_ARGS__, dst->_f, src->_f); \
+ dst->_f = src->_f; \
+ }
+#define copy_stripe_field(_f, _msg, ...) \
+ if (dst->_f != src->_f) { \
+ if (verify) \
+ bch_err_ratelimited(c, "stripe %zu has wrong "_msg\
+ ": got %u, should be %u, fixing", \
+ dst_iter.pos, ##__VA_ARGS__, \
+ dst->_f, src->_f); \
+ dst->_f = src->_f; \
+ dst->dirty = true; \
+ }
+#define copy_bucket_field(_f) \
+ if (dst->b[b].mark._f != src->b[b].mark._f) { \
+ if (verify) \
+ bch_err_ratelimited(c, "dev %u bucket %zu has wrong " #_f\
+ ": got %u, should be %u, fixing", i, b, \
+ dst->b[b].mark._f, src->b[b].mark._f); \
+ dst->b[b]._mark._f = src->b[b].mark._f; \
+ dst->b[b]._mark.dirty = true; \
+ }
+#define copy_dev_field(_f, _msg, ...) \
+ copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
+#define copy_fs_field(_f, _msg, ...) \
+ copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+
+ percpu_down_write(&c->mark_lock);
+
+ {
+ struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
+ struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
+ struct stripe *dst, *src;
+ unsigned i;
+
+ c->ec_stripes_heap.used = 0;
+
+ while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
+ (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
+ BUG_ON(src_iter.pos != dst_iter.pos);
+
+ copy_stripe_field(alive, "alive");
+ copy_stripe_field(sectors, "sectors");
+ copy_stripe_field(algorithm, "algorithm");
+ copy_stripe_field(nr_blocks, "nr_blocks");
+ copy_stripe_field(nr_redundant, "nr_redundant");
+ copy_stripe_field(blocks_nonempty,
+ "blocks_nonempty");
+
+ for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
+ copy_stripe_field(block_sectors[i],
+ "block_sectors[%u]", i);
+
+ if (dst->alive)
+ bch2_stripes_heap_insert(c, dst, dst_iter.pos);
+
+ genradix_iter_advance(&dst_iter, &c->stripes[0]);
+ genradix_iter_advance(&src_iter, &c->stripes[1]);
}
}
- c->usage_cached = __bch2_fs_usage_read(c);
- for_each_possible_cpu(cpu) {
- struct bch_fs_usage *p =
- per_cpu_ptr(c->usage_percpu, cpu);
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *dst = __bucket_array(ca, 0);
+ struct bucket_array *src = __bucket_array(ca, 1);
+ size_t b;
+
+ if (initial) {
+ memcpy(dst, src,
+ sizeof(struct bucket_array) +
+ sizeof(struct bucket) * dst->nbuckets);
+ }
- memset(p->s, 0, sizeof(p->s));
- p->persistent_reserved = 0;
+ for (b = 0; b < src->nbuckets; b++) {
+ copy_bucket_field(gen);
+ copy_bucket_field(data_type);
+ copy_bucket_field(owned_by_allocator);
+ copy_bucket_field(stripe);
+ copy_bucket_field(dirty_sectors);
+ copy_bucket_field(cached_sectors);
+
+ if (dst->b[b].oldest_gen != src->b[b].oldest_gen) {
+ dst->b[b].oldest_gen = src->b[b].oldest_gen;
+ dst->b[b]._mark.dirty = true;
+ }
+ }
+ }
+
+ for_each_member_device(ca, c, i) {
+ unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
+ struct bch_dev_usage *dst = (void *)
+ bch2_acc_percpu_u64s((void *) ca->usage[0], nr);
+ struct bch_dev_usage *src = (void *)
+ bch2_acc_percpu_u64s((void *) ca->usage[1], nr);
+ unsigned b;
+
+ for (b = 0; b < BCH_DATA_NR; b++)
+ copy_dev_field(buckets[b], "buckets[%s]",
+ bch2_data_types[b]);
+ copy_dev_field(buckets_alloc, "buckets_alloc");
+ copy_dev_field(buckets_ec, "buckets_ec");
+ copy_dev_field(buckets_unavailable, "buckets_unavailable");
+
+ for (b = 0; b < BCH_DATA_NR; b++)
+ copy_dev_field(sectors[b], "sectors[%s]",
+ bch2_data_types[b]);
+ copy_dev_field(sectors_fragmented, "sectors_fragmented");
}
- lg_global_unlock(&c->usage_lock);
+ {
+ unsigned nr = fs_usage_u64s(c);
+ struct bch_fs_usage *dst = (void *)
+ bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+ struct bch_fs_usage *src = (void *)
+ bch2_acc_percpu_u64s((void *) c->usage[1], nr);
- /* Clear bucket marks: */
- for_each_member_device(ca, c, i)
- for_each_bucket(g, ca) {
- bucket_cmpxchg(g, new, ({
- new.owned_by_allocator = 0;
- new.data_type = 0;
- new.cached_sectors = 0;
- new.dirty_sectors = 0;
- }));
- ca->oldest_gens[g - ca->buckets] = new.gen;
+ copy_fs_field(hidden, "hidden");
+ copy_fs_field(data, "data");
+ copy_fs_field(cached, "cached");
+ copy_fs_field(reserved, "reserved");
+ copy_fs_field(nr_inodes, "nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(persistent_reserved[i],
+ "persistent_reserved[%i]", i);
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+ char buf[80];
+
+ bch2_replicas_entry_to_text(&PBUF(buf), e);
+
+ copy_fs_field(replicas[i], "%s", buf);
}
+ }
+
+ percpu_up_write(&c->mark_lock);
+
+#undef copy_fs_field
+#undef copy_dev_field
+#undef copy_bucket_field
+#undef copy_stripe_field
+#undef copy_field
}
-/**
- * bch_gc - recompute bucket marks and oldest_gen, rewrite btree nodes
- */
-void bch2_gc(struct bch_fs *c)
+static int bch2_gc_start(struct bch_fs *c)
{
struct bch_dev *ca;
- u64 start_time = local_clock();
unsigned i;
+ percpu_down_write(&c->mark_lock);
+
/*
- * Walk _all_ references to buckets, and recompute them:
- *
- * Order matters here:
- * - Concurrent GC relies on the fact that we have a total ordering for
- * everything that GC walks - see gc_will_visit_node(),
- * gc_will_visit_root()
- *
- * - also, references move around in the course of index updates and
- * various other crap: everything needs to agree on the ordering
- * references are allowed to move around in - e.g., we're allowed to
- * start with a reference owned by an open_bucket (the allocator) and
- * move it to the btree, but not the reverse.
- *
- * This is necessary to ensure that gc doesn't miss references that
- * move around - if references move backwards in the ordering GC
- * uses, GC could skip past them
+ * indicate to stripe code that we need to allocate for the gc stripes
+ * radix tree, too
*/
+ gc_pos_set(c, gc_phase(GC_PHASE_START));
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
- return;
+ BUG_ON(c->usage[1]);
- trace_gc_start(c);
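+ /* allocate the gc ([1]) copies of the usage and bucket arrays: */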
+ c->usage[1] = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+ sizeof(u64), GFP_KERNEL);
+ percpu_up_write(&c->mark_lock);
- /*
- * Do this before taking gc_lock - bch2_disk_reservation_get() blocks on
- * gc_lock if sectors_available goes to 0:
- */
- bch2_recalc_sectors_available(c);
+ if (!c->usage[1])
+ return -ENOMEM;
+
+ for_each_member_device(ca, c, i) {
+ BUG_ON(ca->buckets[1]);
+ BUG_ON(ca->usage[1]);
+
+ ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!ca->buckets[1]) {
+ percpu_ref_put(&ca->ref);
+ return -ENOMEM;
+ }
+
+ ca->usage[1] = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage[1]) {
+ percpu_ref_put(&ca->ref);
+ return -ENOMEM;
+ }
+ }
+
+ percpu_down_write(&c->mark_lock);
+
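+ /*
+ * Seed the gc copy with the current bucket gens; everything else
+ * gets recomputed from scratch:
+ */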
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *dst = __bucket_array(ca, 1);
+ struct bucket_array *src = __bucket_array(ca, 0);
+ size_t b;
+
+ dst->first_bucket = src->first_bucket;
+ dst->nbuckets = src->nbuckets;
+
+ for (b = 0; b < src->nbuckets; b++) {
+ dst->b[b]._mark.gen =
+ dst->b[b].oldest_gen =
+ src->b[b].mark.gen;
+ dst->b[b].gen_valid = src->b[b].gen_valid;
+ }
+ }
+
+ percpu_up_write(&c->mark_lock);
+
+ return bch2_ec_mem_alloc(c, true);
+}
+
+/**
+ * bch2_gc - walk _all_ references to buckets, and recompute them:
+ *
+ * Order matters here:
+ * - Concurrent GC relies on the fact that we have a total ordering for
+ * everything that GC walks - see gc_will_visit_node(),
+ * gc_will_visit_root()
+ *
+ * - also, references move around in the course of index updates and
+ * various other crap: everything needs to agree on the ordering
+ * references are allowed to move around in - e.g., we're allowed to
+ * start with a reference owned by an open_bucket (the allocator) and
+ * move it to the btree, but not the reverse.
+ *
+ * This is necessary to ensure that gc doesn't miss references that
+ * move around - if references move backwards in the ordering GC
+ * uses, GC could skip past them
+ */
+int bch2_gc(struct bch_fs *c, struct list_head *journal, bool initial)
+{
+ struct bch_dev *ca;
+ u64 start_time = local_clock();
+ unsigned i, iter = 0;
+ int ret;
+
+ trace_gc_start(c);
down_write(&c->gc_lock);
+again:
+ ret = bch2_gc_start(c);
+ if (ret)
+ goto out;
- bch2_gc_start(c);
+ bch2_mark_superblocks(c);
- /* Walk allocator's references: */
- bch2_mark_allocator_buckets(c);
+ ret = bch2_gc_btrees(c, journal, initial);
+ if (ret)
+ goto out;
- /* Walk btree: */
- while (c->gc_pos.phase < (int) BTREE_ID_NR) {
- int ret = c->btree_roots[c->gc_pos.phase].b
- ? bch2_gc_btree(c, (int) c->gc_pos.phase)
- : 0;
+ bch2_mark_pending_btree_node_frees(c);
+ bch2_mark_allocator_buckets(c);
- if (ret) {
- bch_err(c, "btree gc failed: %d", ret);
- set_bit(BCH_FS_GC_FAILURE, &c->flags);
- up_write(&c->gc_lock);
- return;
+ c->gc_count++;
+out:
+ if (!ret &&
+ (test_bit(BCH_FS_FIXED_GENS, &c->flags) ||
+ (!iter && test_restart_gc(c)))) {
+ /*
+ * XXX: make sure gens we fixed got saved
+ */
+ if (iter++ <= 2) {
+ bch_info(c, "Fixed gens, restarting mark and sweep:");
+ clear_bit(BCH_FS_FIXED_GENS, &c->flags);
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+ bch2_gc_free(c);
+ goto again;
}
- gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
+ bch_info(c, "Unable to fix bucket gens, looping");
+ ret = -EINVAL;
}
- bch2_mark_metadata(c);
- bch2_mark_pending_btree_node_frees(c);
-
- for_each_member_device(ca, c, i)
- atomic_long_set(&ca->saturated_count, 0);
+ if (!ret)
+ bch2_gc_done(c, initial);
/* Indicates that gc is no longer in progress: */
- gc_pos_set(c, gc_phase(GC_PHASE_DONE));
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+ bch2_gc_free(c);
up_write(&c->gc_lock);
+
trace_gc_end(c);
- bch2_time_stats_update(&c->btree_gc_time, start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
/*
* Wake up allocator in case it was waiting for buckets
*/
for_each_member_device(ca, c, i)
bch2_wake_allocator(ca);
+
+ /*
+ * At startup, allocations can happen directly instead of via the
+ * allocator thread - issue wakeup in case they blocked on gc_lock:
+ */
+ closure_wake_up(&c->freelist_wait);
+ return ret;
}
/* Btree coalescing */
static void recalc_packed_keys(struct btree *b)
{
+ struct bset *i = btree_bset_first(b);
struct bkey_packed *k;
memset(&b->nr, 0, sizeof(b->nr));
BUG_ON(b->nsets != 1);
- for (k = btree_bkey_first(b, b->set);
- k != btree_bkey_last(b, b->set);
- k = bkey_next(k))
+ vstruct_for_each(i, k)
btree_keys_account_key_add(&b->nr, 0, k);
}
-static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
- struct btree_iter *iter)
+static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
+ struct btree *old_nodes[GC_MERGE_NODES])
{
- struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
- struct bch_fs *c = iter->c;
+ struct btree *parent = btree_node_parent(iter, old_nodes[0]);
unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
unsigned blocks = btree_blocks(c) * 2 / 3;
struct btree *new_nodes[GC_MERGE_NODES];
- struct btree_interior_update *as;
- struct btree_reserve *res;
+ struct btree_update *as;
struct keylist keylist;
struct bkey_format_state format_state;
struct bkey_format new_format;
memset(new_nodes, 0, sizeof(new_nodes));
- bch2_keylist_init(&keylist, NULL, 0);
+ bch2_keylist_init(&keylist, NULL);
/* Count keys that are not deleted */
for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
return;
- res = bch2_btree_reserve_get(c, parent, nr_old_nodes,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE,
- NULL);
- if (IS_ERR(res)) {
- trace_btree_gc_coalesce_fail(c,
- BTREE_GC_COALESCE_FAIL_RESERVE_GET);
- return;
- }
-
- if (bch2_keylist_realloc(&keylist, NULL, 0,
- (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
- trace_btree_gc_coalesce_fail(c,
- BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
- goto out;
- }
-
/* Find a format that all keys in @old_nodes can pack into */
bch2_bkey_format_init(&format_state);
if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
- goto out;
+ return;
}
- trace_btree_gc_coalesce(c, parent, nr_old_nodes);
+ if (bch2_keylist_realloc(&keylist, NULL, 0,
+ (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+ trace_btree_gc_coalesce_fail(c,
+ BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
+ return;
+ }
+
+ as = bch2_btree_update_start(c, iter->btree_id,
+ btree_update_reserve_required(c, parent) + nr_old_nodes,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE,
+ NULL);
+ if (IS_ERR(as)) {
+ trace_btree_gc_coalesce_fail(c,
+ BTREE_GC_COALESCE_FAIL_RESERVE_GET);
+ bch2_keylist_free(&keylist, NULL);
+ return;
+ }
- as = bch2_btree_interior_update_alloc(c);
+ trace_btree_gc_coalesce(c, old_nodes[0]);
for (i = 0; i < nr_old_nodes; i++)
- bch2_btree_interior_update_will_free_node(c, as, old_nodes[i]);
+ bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
/* Repack everything with @new_format and sort down to one bset */
- for (i = 0; i < nr_old_nodes; i++) {
+ for (i = 0; i < nr_old_nodes; i++)
new_nodes[i] =
- __bch2_btree_node_alloc_replacement(c, old_nodes[i],
- new_format, res);
- list_add(&new_nodes[i]->reachable, &as->reachable_list);
- }
+ __bch2_btree_node_alloc_replacement(as, old_nodes[i],
+ new_format);
/*
* Conceptually we concatenate the nodes together and slice them
set_btree_bset_end(n1, n1->set);
- list_del_init(&n2->reachable);
six_unlock_write(&n2->lock);
bch2_btree_node_free_never_inserted(c, n2);
six_unlock_intent(&n2->lock);
bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock);
- bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent);
}
/*
bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
/* Insert the newly coalesced nodes */
- bch2_btree_insert_node(parent, iter, &keylist, res, as);
+ bch2_btree_insert_node(as, parent, iter, &keylist, 0);
BUG_ON(!bch2_keylist_empty(&keylist));
- BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
- BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
+ bch2_btree_iter_node_replace(iter, new_nodes[0]);
for (i = 0; i < nr_new_nodes; i++)
- bch2_btree_open_bucket_put(c, new_nodes[i]);
+ bch2_open_buckets_put(c, &new_nodes[i]->ob);
/* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) {
- bch2_btree_node_free_inmem(iter, old_nodes[i]);
- six_unlock_intent(&old_nodes[i]->lock);
+ bch2_btree_node_free_inmem(c, old_nodes[i], iter);
/*
* the index update might have triggered a split, in which case
six_unlock_intent(&new_nodes[i]->lock);
}
}
-out:
+
+ bch2_btree_update_done(as);
bch2_keylist_free(&keylist, NULL);
- bch2_btree_reserve_put(c, res);
}
static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
{
struct btree_iter iter;
struct btree *b;
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
unsigned i;
/* Sliding window of adjacent btree nodes */
*/
memset(merge, 0, sizeof(merge));
- __for_each_btree_node(&iter, c, btree_id, POS_MIN, 0, b, U8_MAX) {
+ __for_each_btree_node(&iter, c, btree_id, POS_MIN,
+ BTREE_MAX_DEPTH, 0,
+ BTREE_ITER_PREFETCH, b) {
memmove(merge + 1, merge,
sizeof(merge) - sizeof(merge[0]));
memmove(lock_seq + 1, lock_seq,
}
memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
- bch2_coalesce_nodes(merge, &iter);
+ bch2_coalesce_nodes(c, &iter, merge);
for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
lock_seq[i] = merge[i]->lock.state.seq;
lock_seq[0] = merge[0]->lock.state.seq;
- if (test_bit(BCH_FS_GC_STOPPING, &c->flags)) {
+ if (kthread && kthread_should_stop()) {
bch2_btree_iter_unlock(&iter);
return -ESHUTDOWN;
}
* and the nodes in our sliding window might not have the same
* parent anymore - blow away the sliding window:
*/
- if (iter.nodes[iter.level + 1] &&
+ if (btree_iter_node(&iter, iter.level + 1) &&
!btree_node_intent_locked(&iter, iter.level + 1))
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
*/
void bch2_coalesce(struct bch_fs *c)
{
- u64 start_time;
enum btree_id id;
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
- return;
-
down_read(&c->gc_lock);
trace_gc_coalesce_start(c);
- start_time = local_clock();
for (id = 0; id < BTREE_ID_NR; id++) {
int ret = c->btree_roots[id].b
if (ret) {
if (ret != -ESHUTDOWN)
bch_err(c, "btree coalescing failed: %d", ret);
- set_bit(BCH_FS_GC_FAILURE, &c->flags);
return;
}
}
- bch2_time_stats_update(&c->btree_coalesce_time, start_time);
trace_gc_coalesce_end(c);
up_read(&c->gc_lock);
}
struct io_clock *clock = &c->io_clock[WRITE];
unsigned long last = atomic_long_read(&clock->now);
unsigned last_kick = atomic_read(&c->kick_gc);
+ int ret;
set_freezable();
while (1) {
- unsigned long next = last + c->capacity / 16;
-
- while (atomic_long_read(&clock->now) < next) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
return 0;
}
- if (atomic_read(&c->kick_gc) != last_kick) {
- __set_current_state(TASK_RUNNING);
+ if (atomic_read(&c->kick_gc) != last_kick)
break;
+
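+ /*
+ * with periodic gc enabled, also run after every 1/16th of
+ * capacity has been written, per the io clock:
+ */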
+ if (c->btree_gc_periodic) {
+ unsigned long next = last + c->capacity / 16;
+
+ if (atomic_long_read(&clock->now) >= next)
+ break;
+
+ bch2_io_clock_schedule_timeout(clock, next);
+ } else {
+ schedule();
}
- bch2_io_clock_schedule_timeout(clock, next);
try_to_freeze();
}
+ __set_current_state(TASK_RUNNING);
last = atomic_long_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- bch2_gc(c);
- if (!btree_gc_coalesce_disabled(c))
- bch2_coalesce(c);
+ ret = bch2_gc(c, NULL, false);
+ if (ret)
+ bch_err(c, "btree gc failed: %i", ret);
debug_check_no_locks_held();
}
void bch2_gc_thread_stop(struct bch_fs *c)
{
- set_bit(BCH_FS_GC_STOPPING, &c->flags);
-
- if (c->gc_thread)
- kthread_stop(c->gc_thread);
+ struct task_struct *p;
+ p = c->gc_thread;
c->gc_thread = NULL;
- clear_bit(BCH_FS_GC_STOPPING, &c->flags);
+
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
+ }
}
int bch2_gc_thread_start(struct bch_fs *c)
BUG_ON(c->gc_thread);
- p = kthread_create(bch2_gc_thread, c, "bcache_gc");
+ p = kthread_create(bch2_gc_thread, c, "bch_gc");
if (IS_ERR(p))
return PTR_ERR(p);
+ get_task_struct(p);
c->gc_thread = p;
- wake_up_process(c->gc_thread);
- return 0;
-}
-
-/* Initial GC computes bucket marks during startup */
-
-static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
-{
- struct btree_iter iter;
- struct btree *b;
- struct range_checks r;
- int ret = 0;
-
- btree_node_range_checks_init(&r, 0);
-
- if (!c->btree_roots[id].b)
- return 0;
-
- ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
- bkey_i_to_s_c(&c->btree_roots[id].b->key));
- if (ret)
- return ret;
-
- /*
- * We have to hit every btree node before starting journal replay, in
- * order for the journal seq blacklist machinery to work:
- */
- for_each_btree_node(&iter, c, id, POS_MIN, 0, b) {
- btree_node_range_checks(c, b, &r);
-
- if (btree_node_has_ptrs(b)) {
- struct btree_node_iter node_iter;
- struct bkey unpacked;
- struct bkey_s_c k;
-
- for_each_btree_node_key_unpack(b, k, &node_iter,
- btree_node_is_extents(b),
- &unpacked) {
- ret = bch2_btree_mark_key_initial(c,
- btree_node_type(b), k);
- if (ret)
- goto err;
- }
- }
-
- bch2_btree_iter_cond_resched(&iter);
- }
-err:
- bch2_btree_iter_unlock(&iter);
- return ret;
-}
-
-int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
-{
- unsigned iter = 0;
- enum btree_id id;
- int ret;
-again:
- bch2_gc_start(c);
-
- for (id = 0; id < BTREE_ID_NR; id++) {
- ret = bch2_initial_gc_btree(c, id);
- if (ret)
- return ret;
- }
-
- if (journal) {
- ret = bch2_journal_mark(c, journal);
- if (ret)
- return ret;
- }
-
- bch2_mark_metadata(c);
-
- if (test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
- if (iter++ > 2) {
- bch_info(c, "Unable to fix bucket gens, looping");
- return -EINVAL;
- }
-
- bch_info(c, "Fixed gens, restarting initial mark and sweep:");
- clear_bit(BCH_FS_FIXED_GENS, &c->flags);
- goto again;
- }
-
- /*
- * Skip past versions that might have possibly been used (as nonces),
- * but hadn't had their pointers written:
- */
- if (c->sb.encryption_type)
- atomic64_add(1 << 16, &c->key_version);
-
- gc_pos_set(c, gc_phase(GC_PHASE_DONE));
- set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
-
+ wake_up_process(p);
return 0;
}