Update bcachefs sources to e57b5958cf bcachefs: fix for building in userspace

diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index 78132e40330e2b865ff2d286434619f2cbc07384..1198fe39c10038b380170f9054cc12feb013c228 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -7,7 +7,7 @@
 #include "alloc.h"
 #include "bkey_methods.h"
 #include "btree_locking.h"
-#include "btree_update.h"
+#include "btree_update_interior.h"
 #include "btree_io.h"
 #include "btree_gc.h"
 #include "buckets.h"
@@ -24,6 +24,7 @@
 #include <linux/bitops.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
@@ -111,24 +112,42 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
 /*
  * For runtime mark and sweep:
  */
-static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
-                            struct bkey_s_c k)
+static u8 bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
+                          struct bkey_s_c k, unsigned flags)
 {
+       struct gc_pos pos = { 0 };
+       struct bch_fs_usage *stats;
+       u8 ret = 0;
+
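+       /* this_cpu_ptr() is only stable while preemption is disabled: */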
+       preempt_disable();
+       stats = this_cpu_ptr(c->usage_percpu);
        switch (type) {
        case BKEY_TYPE_BTREE:
-               bch2_gc_mark_key(c, k, c->sb.btree_node_size, true);
-               return 0;
+               bch2_mark_key(c, k, c->opts.btree_node_size, true, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               break;
        case BKEY_TYPE_EXTENTS:
-               bch2_gc_mark_key(c, k, k.k->size, false);
-               return bch2_btree_key_recalc_oldest_gen(c, k);
+               bch2_mark_key(c, k, k.k->size, false, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               ret = bch2_btree_key_recalc_oldest_gen(c, k);
+               break;
        default:
                BUG();
        }
+       preempt_enable();
+
+       return ret;
 }
 
 int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                                struct bkey_s_c k)
 {
+       enum bch_data_type data_type = type == BKEY_TYPE_BTREE
+               ? BCH_DATA_BTREE : BCH_DATA_USER;
        int ret = 0;
 
        switch (k.k->type) {
@@ -137,29 +156,36 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const struct bch_extent_ptr *ptr;
 
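+               /* Make sure the superblock's replicas section covers this extent: */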
+               if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+                   (!c->opts.nofsck &&
+                    fsck_err_on(!bch2_sb_has_replicas(c, e, data_type), c,
+                                "superblock not marked as containing replicas"))) {
+                       ret = bch2_check_mark_super(c, e, data_type);
+                       if (ret)
+                               return ret;
+               }
+
                extent_for_each_ptr(e, ptr) {
                        struct bch_dev *ca = c->devs[ptr->dev];
                        struct bucket *g = PTR_BUCKET(ca, ptr);
-                       struct bucket_mark new;
-
-                       if (!g->mark.gen_valid) {
-                               bucket_cmpxchg(g, new, ({
-                                       new.gen = ptr->gen;
-                                       new.gen_valid = 1;
-                               }));
-                               ca->need_prio_write = true;
+
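+                       /* Trust the pointer's gen; mark the bucket dirty so its alloc info gets rewritten: */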
+                       if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
+                                       "found ptr with missing gen in alloc btree,\n"
+                                       "type %s gen %u",
+                                       bch2_data_types[data_type],
+                                       ptr->gen)) {
+                               g->_mark.gen = ptr->gen;
+                               g->_mark.gen_valid = 1;
+                               set_bit(g - ca->buckets, ca->bucket_dirty);
                        }
 
-                       if (fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
+                       if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
                                        "%s ptr gen in the future: %u > %u",
-                                       type == BKEY_TYPE_BTREE
-                                       ? "btree" : "data",
+                                       bch2_data_types[data_type],
                                        ptr->gen, g->mark.gen)) {
-                               bucket_cmpxchg(g, new, ({
-                                       new.gen = ptr->gen;
-                                       new.gen_valid = 1;
-                               }));
-                               ca->need_prio_write = true;
+                               g->_mark.gen = ptr->gen;
+                               g->_mark.gen_valid = 1;
+                               set_bit(g - ca->buckets, ca->bucket_dirty);
                                set_bit(BCH_FS_FIXED_GENS, &c->flags);
                        }
 
@@ -168,17 +194,19 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
        }
        }
 
+
        atomic64_set(&c->key_version,
                     max_t(u64, k.k->version.lo,
                           atomic64_read(&c->key_version)));
 
-       bch2_btree_mark_key(c, type, k);
+       bch2_gc_mark_key(c, type, k, BCH_BUCKET_MARK_NOATOMIC);
 fsck_err:
        return ret;
 }
 
 static unsigned btree_gc_mark_node(struct bch_fs *c, struct btree *b)
 {
+       enum bkey_type type = btree_node_type(b);
        struct btree_node_iter iter;
        struct bkey unpacked;
        struct bkey_s_c k;
@@ -189,8 +217,7 @@ static unsigned btree_gc_mark_node(struct bch_fs *c, struct btree *b)
                                               btree_node_is_extents(b),
                                               &unpacked) {
                        bch2_bkey_debugcheck(c, b, k);
-                       stale = max(stale, bch2_btree_mark_key(c,
-                                                       btree_node_type(b), k));
+                       stale = max(stale, bch2_gc_mark_key(c, type, k, 0));
                }
 
        return stale;
@@ -257,128 +284,81 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
        mutex_lock(&c->btree_root_lock);
 
        b = c->btree_roots[btree_id].b;
-       bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
+       bch2_gc_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key), 0);
        gc_pos_set(c, gc_pos_btree_root(b->btree_id));
 
        mutex_unlock(&c->btree_root_lock);
        return 0;
 }
 
-static void bch2_mark_allocator_buckets(struct bch_fs *c)
-{
-       struct bch_dev *ca;
-       struct open_bucket *ob;
-       size_t i, j, iter;
-       unsigned ci;
-
-       for_each_member_device(ca, c, ci) {
-               spin_lock(&ca->freelist_lock);
-
-               fifo_for_each_entry(i, &ca->free_inc, iter)
-                       bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               for (j = 0; j < RESERVE_NR; j++)
-                       fifo_for_each_entry(i, &ca->free[j], iter)
-                               bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               spin_unlock(&ca->freelist_lock);
-       }
-
-       for (ob = c->open_buckets;
-            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
-            ob++) {
-               const struct bch_extent_ptr *ptr;
-
-               mutex_lock(&ob->lock);
-               open_bucket_for_each_ptr(ob, ptr) {
-                       ca = c->devs[ptr->dev];
-                       bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
-               }
-               mutex_unlock(&ob->lock);
-       }
-}
-
-static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
-                                 enum bucket_data_type type)
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+                                 u64 start, u64 end,
+                                 enum bucket_data_type type,
+                                 unsigned flags)
 {
-       u64 b = start >> ca->bucket_bits;
+       u64 b = sector_to_bucket(ca, start);
 
        do {
-               bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
+               bch2_mark_metadata_bucket(c, ca, ca->buckets + b, type,
+                                         gc_phase(GC_PHASE_SB), flags);
                b++;
-       } while (b < end >> ca->bucket_bits);
+       } while (b < sector_to_bucket(ca, end));
 }
 
-static void bch2_dev_mark_superblocks(struct bch_dev *ca)
+void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+                             unsigned flags)
 {
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        unsigned i;
+       u64 b;
+
+       lockdep_assert_held(&c->sb_lock);
 
        for (i = 0; i < layout->nr_superblocks; i++) {
                if (layout->sb_offset[i] == BCH_SB_SECTOR)
-                       mark_metadata_sectors(ca, 0, BCH_SB_SECTOR,
-                                             BUCKET_SB);
+                       mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+                                             BUCKET_SB, flags);
 
-               mark_metadata_sectors(ca,
+               mark_metadata_sectors(c, ca,
                                      layout->sb_offset[i],
                                      layout->sb_offset[i] +
                                      (1 << layout->sb_max_size_bits),
-                                     BUCKET_SB);
+                                     BUCKET_SB, flags);
        }
-}
-
-/*
- * Mark non btree metadata - prios, journal
- */
-void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
-{
-       unsigned i;
-       u64 b;
-
-       lockdep_assert_held(&c->sb_lock);
-
-       bch2_dev_mark_superblocks(ca);
 
        spin_lock(&c->journal.lock);
 
        for (i = 0; i < ca->journal.nr; i++) {
                b = ca->journal.buckets[i];
-               bch2_mark_metadata_bucket(ca, ca->buckets + b,
-                                        BUCKET_JOURNAL, true);
+               bch2_mark_metadata_bucket(c, ca, ca->buckets + b,
+                                         BUCKET_JOURNAL,
+                                         gc_phase(GC_PHASE_SB), flags);
        }
 
        spin_unlock(&c->journal.lock);
-
-       spin_lock(&ca->prio_buckets_lock);
-
-       for (i = 0; i < prio_buckets(ca) * 2; i++) {
-               b = ca->prio_buckets[i];
-               if (b)
-                       bch2_mark_metadata_bucket(ca, ca->buckets + b,
-                                                BUCKET_PRIOS, true);
-       }
-
-       spin_unlock(&ca->prio_buckets_lock);
 }
 
-static void bch2_mark_metadata(struct bch_fs *c)
+static void bch2_mark_superblocks(struct bch_fs *c)
 {
        struct bch_dev *ca;
        unsigned i;
 
        mutex_lock(&c->sb_lock);
-       gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
+       gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
        for_each_online_member(ca, c, i)
-               bch2_mark_dev_metadata(c, ca);
+               bch2_mark_dev_superblock(c, ca,
+                                        BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                        BCH_BUCKET_MARK_GC_LOCK_HELD);
        mutex_unlock(&c->sb_lock);
 }
 
 /* Also see bch2_pending_btree_node_free_insert_done() */
 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 {
+       struct gc_pos pos = { 0 };
        struct bch_fs_usage stats = { 0 };
-       struct btree_interior_update *as;
+       struct btree_update *as;
        struct pending_btree_node_free *d;
 
        mutex_lock(&c->btree_interior_update_lock);
@@ -386,9 +366,11 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
-                       __bch2_gc_mark_key(c, bkey_i_to_s_c(&d->key),
-                                         c->sb.btree_node_size, true,
-                                         &stats);
+                       bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+                                     c->opts.btree_node_size, true, pos,
+                                     &stats, 0,
+                                     BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                     BCH_BUCKET_MARK_GC_LOCK_HELD);
        /*
         * Don't apply stats - pending deletes aren't tracked in
         * bch_alloc_stats:
@@ -397,6 +379,51 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
        mutex_unlock(&c->btree_interior_update_lock);
 }
 
+static void bch2_mark_allocator_buckets(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       struct open_bucket *ob;
+       size_t i, j, iter;
+       unsigned ci;
+
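+       /* Mark buckets currently owned by the allocator - freelists and open_buckets: */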
+       spin_lock(&c->freelist_lock);
+       gc_pos_set(c, gc_pos_alloc(c, NULL));
+
+       for_each_member_device(ca, c, ci) {
+               fifo_for_each_entry(i, &ca->free_inc, iter)
+                       bch2_mark_alloc_bucket(c, ca, &ca->buckets[i], true,
+                                              gc_pos_alloc(c, NULL),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each_entry(i, &ca->free[j], iter)
+                               bch2_mark_alloc_bucket(c, ca, &ca->buckets[i], true,
+                                                      gc_pos_alloc(c, NULL),
+                                                      BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                                      BCH_BUCKET_MARK_GC_LOCK_HELD);
+       }
+
+       spin_unlock(&c->freelist_lock);
+
+       for (ob = c->open_buckets;
+            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+            ob++) {
+               spin_lock(&ob->lock);
+               if (ob->valid) {
+                       gc_pos_set(c, gc_pos_alloc(c, ob));
+                       ca = c->devs[ob->ptr.dev];
+                       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET(ca, &ob->ptr), true,
+                                              gc_pos_alloc(c, ob),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+               }
+               spin_unlock(&ob->lock);
+       }
+}
+
 void bch2_gc_start(struct bch_fs *c)
 {
        struct bch_dev *ca;
@@ -429,7 +456,6 @@ void bch2_gc_start(struct bch_fs *c)
                        per_cpu_ptr(c->usage_percpu, cpu);
 
                memset(p->s, 0, sizeof(p->s));
-               p->persistent_reserved = 0;
        }
 
        lg_global_unlock(&c->usage_lock);
@@ -474,10 +500,6 @@ void bch2_gc(struct bch_fs *c)
         *    move around - if references move backwards in the ordering GC
         *    uses, GC could skip past them
         */
-
-       if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
-               return;
-
        trace_gc_start(c);
 
        /*
@@ -487,12 +509,11 @@ void bch2_gc(struct bch_fs *c)
        bch2_recalc_sectors_available(c);
 
        down_write(&c->gc_lock);
+       if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
+               goto out;
 
        bch2_gc_start(c);
 
-       /* Walk allocator's references: */
-       bch2_mark_allocator_buckets(c);
-
        /* Walk btree: */
        while (c->gc_pos.phase < (int) BTREE_ID_NR) {
                int ret = c->btree_roots[c->gc_pos.phase].b
@@ -502,15 +523,15 @@ void bch2_gc(struct bch_fs *c)
                if (ret) {
                        bch_err(c, "btree gc failed: %d", ret);
                        set_bit(BCH_FS_GC_FAILURE, &c->flags);
-                       up_write(&c->gc_lock);
-                       return;
+                       goto out;
                }
 
                gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
        }
 
-       bch2_mark_metadata(c);
+       bch2_mark_superblocks(c);
        bch2_mark_pending_btree_node_frees(c);
+       bch2_mark_allocator_buckets(c);
 
        for_each_member_device(ca, c, i)
                atomic_long_set(&ca->saturated_count, 0);
@@ -518,7 +539,7 @@ void bch2_gc(struct bch_fs *c)
        /* Indicates that gc is no longer in progress: */
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        c->gc_count++;
-
+out:
        up_write(&c->gc_lock);
        trace_gc_end(c);
        bch2_time_stats_update(&c->btree_gc_time, start_time);
@@ -529,6 +550,12 @@ void bch2_gc(struct bch_fs *c)
         */
        for_each_member_device(ca, c, i)
                bch2_wake_allocator(ca);
+
+       /*
+        * At startup, allocations can happen directly instead of via the
+        * allocator thread - issue wakeup in case they blocked on gc_lock:
+        */
+       closure_wake_up(&c->freelist_wait);
 }
 
 /* Btree coalescing */
@@ -547,22 +574,20 @@ static void recalc_packed_keys(struct btree *b)
                btree_keys_account_key_add(&b->nr, 0, k);
 }
 
-static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
-                               struct btree_iter *iter)
+static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
+                               struct btree *old_nodes[GC_MERGE_NODES])
 {
        struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
-       struct bch_fs *c = iter->c;
        unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
        unsigned blocks = btree_blocks(c) * 2 / 3;
        struct btree *new_nodes[GC_MERGE_NODES];
-       struct btree_interior_update *as;
-       struct btree_reserve *res;
+       struct btree_update *as;
        struct keylist keylist;
        struct bkey_format_state format_state;
        struct bkey_format new_format;
 
        memset(new_nodes, 0, sizeof(new_nodes));
-       bch2_keylist_init(&keylist, NULL, 0);
+       bch2_keylist_init(&keylist, NULL);
 
        /* Count keys that are not deleted */
        for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
@@ -576,23 +601,6 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                             DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
                return;
 
-       res = bch2_btree_reserve_get(c, parent, nr_old_nodes,
-                                   BTREE_INSERT_NOFAIL|
-                                   BTREE_INSERT_USE_RESERVE,
-                                   NULL);
-       if (IS_ERR(res)) {
-               trace_btree_gc_coalesce_fail(c,
-                               BTREE_GC_COALESCE_FAIL_RESERVE_GET);
-               return;
-       }
-
-       if (bch2_keylist_realloc(&keylist, NULL, 0,
-                       (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
-               trace_btree_gc_coalesce_fail(c,
-                               BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
-               goto out;
-       }
-
        /* Find a format that all keys in @old_nodes can pack into */
        bch2_bkey_format_init(&format_state);
 
@@ -606,23 +614,38 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
                        trace_btree_gc_coalesce_fail(c,
                                        BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
-                       goto out;
+                       return;
                }
 
-       trace_btree_gc_coalesce(c, parent, nr_old_nodes);
+       if (bch2_keylist_realloc(&keylist, NULL, 0,
+                       (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+               trace_btree_gc_coalesce_fail(c,
+                               BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
+               return;
+       }
+
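+       /* Reserve new nodes for the coalesced replacements, plus whatever updating the parent requires: */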
+       as = bch2_btree_update_start(c, iter->btree_id,
+                       btree_update_reserve_required(c, parent) + nr_old_nodes,
+                       BTREE_INSERT_NOFAIL|
+                       BTREE_INSERT_USE_RESERVE,
+                       NULL);
+       if (IS_ERR(as)) {
+               trace_btree_gc_coalesce_fail(c,
+                               BTREE_GC_COALESCE_FAIL_RESERVE_GET);
+               bch2_keylist_free(&keylist, NULL);
+               return;
+       }
 
-       as = bch2_btree_interior_update_alloc(c);
+       trace_btree_gc_coalesce(c, old_nodes[0]);
 
        for (i = 0; i < nr_old_nodes; i++)
-               bch2_btree_interior_update_will_free_node(c, as, old_nodes[i]);
+               bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
 
        /* Repack everything with @new_format and sort down to one bset */
-       for (i = 0; i < nr_old_nodes; i++) {
+       for (i = 0; i < nr_old_nodes; i++)
                new_nodes[i] =
-                       __bch2_btree_node_alloc_replacement(c, old_nodes[i],
-                                                           new_format, res);
-               list_add(&new_nodes[i]->reachable, &as->reachable_list);
-       }
+                       __bch2_btree_node_alloc_replacement(as, old_nodes[i],
+                                                           new_format);
 
        /*
         * Conceptually we concatenate the nodes together and slice them
@@ -659,7 +682,6 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
 
                        set_btree_bset_end(n1, n1->set);
 
-                       list_del_init(&n2->reachable);
                        six_unlock_write(&n2->lock);
                        bch2_btree_node_free_never_inserted(c, n2);
                        six_unlock_intent(&n2->lock);
@@ -737,7 +759,7 @@ next:
                bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
 
        /* Insert the newly coalesced nodes */
-       bch2_btree_insert_node(parent, iter, &keylist, res, as);
+       bch2_btree_insert_node(as, parent, iter, &keylist);
 
        BUG_ON(!bch2_keylist_empty(&keylist));
 
@@ -750,7 +772,7 @@ next:
 
        /* Free the old nodes and update our sliding window */
        for (i = 0; i < nr_old_nodes; i++) {
-               bch2_btree_node_free_inmem(iter, old_nodes[i]);
+               bch2_btree_node_free_inmem(c, old_nodes[i], iter);
                six_unlock_intent(&old_nodes[i]->lock);
 
                /*
@@ -767,9 +789,9 @@ next:
                                six_unlock_intent(&new_nodes[i]->lock);
                }
        }
-out:
+
+       bch2_btree_update_done(as);
        bch2_keylist_free(&keylist, NULL);
-       bch2_btree_reserve_put(c, res);
 }
 
 static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
@@ -792,7 +814,8 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
        memset(merge, 0, sizeof(merge));
 
        __for_each_btree_node(&iter, c, btree_id, POS_MIN,
-                             U8_MAX, 0, BTREE_ITER_PREFETCH, b) {
+                             BTREE_MAX_DEPTH, 0,
+                             BTREE_ITER_PREFETCH, b) {
                memmove(merge + 1, merge,
                        sizeof(merge) - sizeof(merge[0]));
                memmove(lock_seq + 1, lock_seq,
@@ -812,7 +835,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
                }
                memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
 
-               bch2_coalesce_nodes(merge, &iter);
+               bch2_coalesce_nodes(c, &iter, merge);
 
                for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
                        lock_seq[i] = merge[i]->lock.state.seq;
@@ -988,8 +1011,7 @@ static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
                bch2_btree_iter_cond_resched(&iter);
        }
 err:
-       bch2_btree_iter_unlock(&iter);
-       return ret;
+       return bch2_btree_iter_unlock(&iter) ?: ret;
 }
 
 int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
@@ -997,6 +1019,14 @@ int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
        unsigned iter = 0;
        enum btree_id id;
        int ret;
+
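+       /* If the superblock has no replicas section yet, have gc rebuild it: */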
+       mutex_lock(&c->sb_lock);
+       if (!bch2_sb_get_replicas(c->disk_sb)) {
+               if (BCH_SB_INITIALIZED(c->disk_sb))
+                       bch_info(c, "building replicas info");
+               set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
+       }
+       mutex_unlock(&c->sb_lock);
 again:
        bch2_gc_start(c);
 
@@ -1006,13 +1036,9 @@ again:
                        return ret;
        }
 
-       if (journal) {
-               ret = bch2_journal_mark(c, journal);
-               if (ret)
-                       return ret;
-       }
-
-       bch2_mark_metadata(c);
+       ret = bch2_journal_mark(c, journal);
+       if (ret)
+               return ret;
 
        if (test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
                if (iter++ > 2) {
@@ -1032,6 +1058,8 @@ again:
        if (c->sb.encryption_type)
                atomic64_add(1 << 16, &c->key_version);
 
+       bch2_mark_superblocks(c);
+
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);