Update bcachefs sources to edf5f38218 bcachefs: Refactor superblock code
diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index e07a3f97c1131998348f09474d6c1509783fd4a1..ad51f29c9a38450f0aac4d917e63978e39b8aaee 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -7,19 +7,21 @@
 #include "alloc.h"
 #include "bkey_methods.h"
 #include "btree_locking.h"
-#include "btree_update.h"
+#include "btree_update_interior.h"
 #include "btree_io.h"
 #include "btree_gc.h"
 #include "buckets.h"
 #include "journal.h"
 #include "keylist.h"
 #include "move.h"
+#include "replicas.h"
 #include "super-io.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
@@ -95,7 +97,7 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 
                extent_for_each_ptr(e, ptr) {
-                       struct bch_dev *ca = c->devs[ptr->dev];
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                        size_t b = PTR_BUCKET_NR(ca, ptr);
 
                        if (gen_after(ca->oldest_gens[b], ptr->gen))
@@ -111,26 +113,53 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
 /*
  * For runtime mark and sweep:
  */
-static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
-                            struct bkey_s_c k)
+static u8 bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
+                          struct bkey_s_c k, unsigned flags)
 {
+       struct gc_pos pos = { 0 };
+       struct bch_fs_usage *stats;
+       u8 ret = 0;
+
+       preempt_disable();
+       stats = this_cpu_ptr(c->usage_percpu);
        switch (type) {
        case BKEY_TYPE_BTREE:
-               bch2_gc_mark_key(c, k, c->sb.btree_node_size, true);
-               return 0;
+               bch2_mark_key(c, k, c->opts.btree_node_size, true, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               break;
        case BKEY_TYPE_EXTENTS:
-               bch2_gc_mark_key(c, k, k.k->size, false);
-               return bch2_btree_key_recalc_oldest_gen(c, k);
+               bch2_mark_key(c, k, k.k->size, false, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               ret = bch2_btree_key_recalc_oldest_gen(c, k);
+               break;
        default:
                BUG();
        }
+       preempt_enable();
+
+       return ret;
 }
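
The hunk above folds the mark into per-cpu usage counters under preempt_disable(), so this_cpu_ptr() stays pinned to one CPU for the update. A minimal standalone sketch of that pattern; struct gc_usage and gc_usage_account() are invented for illustration, and only preempt_disable()/preempt_enable() and this_cpu_ptr() are the real kernel API:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Hypothetical per-cpu counters standing in for c->usage_percpu. */
struct gc_usage {
	u64	keys_seen;
	u64	sectors_seen;
};

static DEFINE_PER_CPU(struct gc_usage, gc_usage_pcpu);

static void gc_usage_account(unsigned sectors)
{
	struct gc_usage *u;

	preempt_disable();			/* pin this CPU */
	u = this_cpu_ptr(&gc_usage_pcpu);	/* CPU-local slot */
	u->keys_seen++;
	u->sectors_seen += sectors;
	preempt_enable();
}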
 
 int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                                struct bkey_s_c k)
 {
+       enum bch_data_type data_type = type == BKEY_TYPE_BTREE
+               ? BCH_DATA_BTREE : BCH_DATA_USER;
        int ret = 0;
 
+       if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+           fsck_err_on(!bch2_bkey_replicas_marked(c, data_type, k), c,
+                       "superblock not marked as containing replicas (type %u)",
+                       data_type)) {
+               ret = bch2_mark_bkey_replicas(c, data_type, k);
+               if (ret)
+                       return ret;
+       }
+
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED: {
@@ -138,18 +167,28 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                const struct bch_extent_ptr *ptr;
 
                extent_for_each_ptr(e, ptr) {
-                       struct bch_dev *ca = c->devs[ptr->dev];
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+                       size_t b = PTR_BUCKET_NR(ca, ptr);
                        struct bucket *g = PTR_BUCKET(ca, ptr);
-                       struct bucket_mark new;
 
-                       if (fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
+                       if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
+                                       "found ptr with missing gen in alloc btree,\n"
+                                       "type %s gen %u",
+                                       bch2_data_types[data_type],
+                                       ptr->gen)) {
+                               g->_mark.gen = ptr->gen;
+                               g->_mark.gen_valid = 1;
+                               set_bit(b, ca->buckets_dirty);
+                       }
+
+                       if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
                                        "%s ptr gen in the future: %u > %u",
-                                       type == BKEY_TYPE_BTREE
-                                       ? "btree" : "data",
+                                       bch2_data_types[data_type],
                                        ptr->gen, g->mark.gen)) {
-                               bucket_cmpxchg(g, new, new.gen = ptr->gen);
+                               g->_mark.gen = ptr->gen;
+                               g->_mark.gen_valid = 1;
+                               set_bit(b, ca->buckets_dirty);
                                set_bit(BCH_FS_FIXED_GENS, &c->flags);
-                               ca->need_prio_write = true;
                        }
 
                }
@@ -161,38 +200,28 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                     max_t(u64, k.k->version.lo,
                           atomic64_read(&c->key_version)));
 
-       bch2_btree_mark_key(c, type, k);
+       bch2_gc_mark_key(c, type, k, BCH_BUCKET_MARK_NOATOMIC);
 fsck_err:
        return ret;
 }
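
The gen checks above compare 8-bit bucket generations that wrap around, which is why they go through gen_cmp()/gen_after() rather than a plain comparison. A self-contained user-space sketch of the idea; this gen_cmp() is an assumed stand-in with the same semantics, not necessarily the tree's exact definition:

#include <stdint.h>
#include <stdio.h>

/*
 * Bucket generations are 8-bit counters that wrap, so "a is newer than b"
 * is decided by the sign of the 8-bit difference, not by a plain '>'.
 */
static inline int8_t gen_cmp(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b);
}

int main(void)
{
	printf("%d\n", gen_cmp(2, 250) > 0);	/* 1: 2 is "after" 250 across the wrap */
	printf("%d\n", gen_cmp(250, 2) > 0);	/* 0 */
	return 0;
}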
 
-static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b)
+static unsigned btree_gc_mark_node(struct bch_fs *c, struct btree *b)
 {
-       if (btree_node_has_ptrs(b)) {
-               struct btree_node_iter iter;
-               struct bkey unpacked;
-               struct bkey_s_c k;
-               u8 stale = 0;
+       enum bkey_type type = btree_node_type(b);
+       struct btree_node_iter iter;
+       struct bkey unpacked;
+       struct bkey_s_c k;
+       u8 stale = 0;
 
+       if (btree_node_has_ptrs(b))
                for_each_btree_node_key_unpack(b, k, &iter,
                                               btree_node_is_extents(b),
                                               &unpacked) {
                        bch2_bkey_debugcheck(c, b, k);
-                       stale = max(stale, bch2_btree_mark_key(c,
-                                                       btree_node_type(b), k));
+                       stale = max(stale, bch2_gc_mark_key(c, type, k, 0));
                }
 
-               if (btree_gc_rewrite_disabled(c))
-                       return false;
-
-               if (stale > 10)
-                       return true;
-       }
-
-       if (btree_gc_always_rewrite(c))
-               return true;
-
-       return false;
+       return stale;
 }
 
 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
@@ -212,10 +241,10 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
 {
        struct btree_iter iter;
        struct btree *b;
-       bool should_rewrite;
        struct range_checks r;
        unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;
-       int ret;
+       unsigned max_stale;
+       int ret = 0;
 
        /*
         * if expensive_debug_checks is on, run range_checks on all leaf nodes:
@@ -225,17 +254,27 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
 
        btree_node_range_checks_init(&r, depth);
 
-       for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
+       __for_each_btree_node(&iter, c, btree_id, POS_MIN,
+                             0, depth, BTREE_ITER_PREFETCH, b) {
                btree_node_range_checks(c, b, &r);
 
                bch2_verify_btree_nr_keys(b);
 
-               should_rewrite = btree_gc_mark_node(c, b);
+               max_stale = btree_gc_mark_node(c, b);
 
                gc_pos_set(c, gc_pos_btree_node(b));
 
-               if (should_rewrite)
-                       bch2_btree_node_rewrite(&iter, b, NULL);
+               if (max_stale > 32)
+                       bch2_btree_node_rewrite(c, &iter,
+                                       b->data->keys.seq,
+                                       BTREE_INSERT_USE_RESERVE|
+                                       BTREE_INSERT_GC_LOCK_HELD);
+               else if (!btree_gc_rewrite_disabled(c) &&
+                        (btree_gc_always_rewrite(c) || max_stale > 16))
+                       bch2_btree_node_rewrite(c, &iter,
+                                       b->data->keys.seq,
+                                       BTREE_INSERT_NOWAIT|
+                                       BTREE_INSERT_GC_LOCK_HELD);
 
                bch2_btree_iter_cond_resched(&iter);
        }
@@ -246,128 +285,89 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
        mutex_lock(&c->btree_root_lock);
 
        b = c->btree_roots[btree_id].b;
-       bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
+       if (!btree_node_fake(b))
+               bch2_gc_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key), 0);
        gc_pos_set(c, gc_pos_btree_root(b->btree_id));
 
        mutex_unlock(&c->btree_root_lock);
        return 0;
 }
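
The rewrite decision added above has two tiers. Restated as a standalone helper (the enum and function are hypothetical; the thresholds and the flags named in the comments are the ones from the hunk):

#include <stdbool.h>

/*
 * Very stale nodes are rewritten even if that dips into the btree reserve;
 * moderately stale nodes are only rewritten opportunistically.
 */
enum gc_rewrite {
	GC_REWRITE_NONE,
	GC_REWRITE_OPPORTUNISTIC,	/* BTREE_INSERT_NOWAIT */
	GC_REWRITE_FORCED,		/* BTREE_INSERT_USE_RESERVE */
};

static enum gc_rewrite gc_rewrite_policy(unsigned max_stale,
					 bool rewrite_disabled,
					 bool always_rewrite)
{
	if (max_stale > 32)
		return GC_REWRITE_FORCED;
	if (!rewrite_disabled && (always_rewrite || max_stale > 16))
		return GC_REWRITE_OPPORTUNISTIC;
	return GC_REWRITE_NONE;
}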
 
-static void bch2_mark_allocator_buckets(struct bch_fs *c)
-{
-       struct bch_dev *ca;
-       struct open_bucket *ob;
-       size_t i, j, iter;
-       unsigned ci;
-
-       for_each_member_device(ca, c, ci) {
-               spin_lock(&ca->freelist_lock);
-
-               fifo_for_each_entry(i, &ca->free_inc, iter)
-                       bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               for (j = 0; j < RESERVE_NR; j++)
-                       fifo_for_each_entry(i, &ca->free[j], iter)
-                               bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               spin_unlock(&ca->freelist_lock);
-       }
-
-       for (ob = c->open_buckets;
-            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
-            ob++) {
-               const struct bch_extent_ptr *ptr;
-
-               mutex_lock(&ob->lock);
-               open_bucket_for_each_ptr(ob, ptr) {
-                       ca = c->devs[ptr->dev];
-                       bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
-               }
-               mutex_unlock(&ob->lock);
-       }
-}
-
-static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
-                                 enum bucket_data_type type)
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+                                 u64 start, u64 end,
+                                 enum bch_data_type type,
+                                 unsigned flags)
 {
-       u64 b = start >> ca->bucket_bits;
+       u64 b = sector_to_bucket(ca, start);
 
        do {
-               bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
+               unsigned sectors =
+                       min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+               bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+                                         gc_phase(GC_PHASE_SB), flags);
                b++;
-       } while (b < end >> ca->bucket_bits);
+               start += sectors;
+       } while (start < end);
 }
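
The loop above splits the sector range [start, end) into per-bucket chunks so partial first and last buckets get marked with the right sector counts. A runnable user-space model of just that arithmetic, with a fixed 128-sector bucket standing in for ca->mi.bucket_size and simplified sector_to_bucket()/bucket_to_sector() helpers:

#include <stdint.h>
#include <stdio.h>

#define BUCKET_SECTORS 128ULL	/* stand-in for the device's bucket size */

static uint64_t sector_to_bucket(uint64_t s) { return s / BUCKET_SECTORS; }
static uint64_t bucket_to_sector(uint64_t b) { return b * BUCKET_SECTORS; }

static void mark_sectors(uint64_t start, uint64_t end)
{
	uint64_t b = sector_to_bucket(start);

	do {
		uint64_t next = bucket_to_sector(b + 1);
		unsigned sectors = (unsigned)((next < end ? next : end) - start);

		printf("bucket %llu: %u sectors\n",
		       (unsigned long long) b, sectors);
		b++;
		start += sectors;
	} while (start < end);
}

int main(void)
{
	mark_sectors(3, 300);	/* partial first and last buckets */
	return 0;
}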
 
-static void bch2_dev_mark_superblocks(struct bch_dev *ca)
+void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+                             unsigned flags)
 {
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        unsigned i;
-
-       for (i = 0; i < layout->nr_superblocks; i++) {
-               if (layout->sb_offset[i] == BCH_SB_SECTOR)
-                       mark_metadata_sectors(ca, 0, BCH_SB_SECTOR,
-                                             BUCKET_SB);
-
-               mark_metadata_sectors(ca,
-                                     layout->sb_offset[i],
-                                     layout->sb_offset[i] +
-                                     (1 << layout->sb_max_size_bits),
-                                     BUCKET_SB);
-       }
-}
-
-/*
- * Mark non btree metadata - prios, journal
- */
-void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
-{
-       unsigned i;
        u64 b;
 
-       lockdep_assert_held(&c->sb_lock);
+       if (c)
+               lockdep_assert_held(&c->sb_lock);
 
-       bch2_dev_mark_superblocks(ca);
+       for (i = 0; i < layout->nr_superblocks; i++) {
+               u64 offset = le64_to_cpu(layout->sb_offset[i]);
 
-       spin_lock(&c->journal.lock);
+               if (offset == BCH_SB_SECTOR)
+                       mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+                                             BCH_DATA_SB, flags);
 
-       for (i = 0; i < ca->journal.nr; i++) {
-               b = ca->journal.buckets[i];
-               bch2_mark_metadata_bucket(ca, ca->buckets + b,
-                                        BUCKET_JOURNAL, true);
+               mark_metadata_sectors(c, ca, offset,
+                                     offset + (1 << layout->sb_max_size_bits),
+                                     BCH_DATA_SB, flags);
        }
 
-       spin_unlock(&c->journal.lock);
-
-       spin_lock(&ca->prio_buckets_lock);
+       if (c)
+               spin_lock(&c->journal.lock);
 
-       for (i = 0; i < prio_buckets(ca) * 2; i++) {
-               b = ca->prio_buckets[i];
-               if (b)
-                       bch2_mark_metadata_bucket(ca, ca->buckets + b,
-                                                BUCKET_PRIOS, true);
+       for (i = 0; i < ca->journal.nr; i++) {
+               b = ca->journal.buckets[i];
+               bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+                                         ca->mi.bucket_size,
+                                         gc_phase(GC_PHASE_SB), flags);
        }
 
-       spin_unlock(&ca->prio_buckets_lock);
+       if (c)
+               spin_unlock(&c->journal.lock);
 }
 
-static void bch2_mark_metadata(struct bch_fs *c)
+static void bch2_mark_superblocks(struct bch_fs *c)
 {
        struct bch_dev *ca;
        unsigned i;
 
        mutex_lock(&c->sb_lock);
-       gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
+       gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
        for_each_online_member(ca, c, i)
-               bch2_mark_dev_metadata(c, ca);
+               bch2_mark_dev_superblock(c, ca,
+                                        BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                        BCH_BUCKET_MARK_GC_LOCK_HELD);
        mutex_unlock(&c->sb_lock);
 }
 
 /* Also see bch2_pending_btree_node_free_insert_done() */
 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 {
+       struct gc_pos pos = { 0 };
        struct bch_fs_usage stats = { 0 };
-       struct btree_interior_update *as;
+       struct btree_update *as;
        struct pending_btree_node_free *d;
 
        mutex_lock(&c->btree_interior_update_lock);
@@ -375,9 +375,11 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
-                       __bch2_gc_mark_key(c, bkey_i_to_s_c(&d->key),
-                                         c->sb.btree_node_size, true,
-                                         &stats);
+                       bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+                                     c->opts.btree_node_size, true, pos,
+                                     &stats, 0,
+                                     BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                     BCH_BUCKET_MARK_GC_LOCK_HELD);
        /*
         * Don't apply stats - pending deletes aren't tracked in
         * bch_alloc_stats:
@@ -386,12 +388,58 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
        mutex_unlock(&c->btree_interior_update_lock);
 }
 
-void bch2_gc_start(struct bch_fs *c)
+static void bch2_mark_allocator_buckets(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       struct open_bucket *ob;
+       size_t i, j, iter;
+       unsigned ci;
+
+       spin_lock(&c->freelist_lock);
+       gc_pos_set(c, gc_pos_alloc(c, NULL));
+
+       for_each_member_device(ca, c, ci) {
+               fifo_for_each_entry(i, &ca->free_inc, iter)
+                       bch2_mark_alloc_bucket(c, ca, i, true,
+                                              gc_pos_alloc(c, NULL),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+
+
+
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each_entry(i, &ca->free[j], iter)
+                               bch2_mark_alloc_bucket(c, ca, i, true,
+                                                      gc_pos_alloc(c, NULL),
+                                                      BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                                      BCH_BUCKET_MARK_GC_LOCK_HELD);
+       }
+
+       spin_unlock(&c->freelist_lock);
+
+       for (ob = c->open_buckets;
+            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+            ob++) {
+               spin_lock(&ob->lock);
+               if (ob->valid) {
+                       gc_pos_set(c, gc_pos_alloc(c, ob));
+                       ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+                       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
+                                              gc_pos_alloc(c, ob),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+               }
+               spin_unlock(&ob->lock);
+       }
+}
+
+static void bch2_gc_start(struct bch_fs *c)
 {
        struct bch_dev *ca;
-       struct bucket *g;
+       struct bucket_array *buckets;
        struct bucket_mark new;
        unsigned i;
+       size_t b;
        int cpu;
 
        lg_global_lock(&c->usage_lock);
@@ -418,22 +466,26 @@ void bch2_gc_start(struct bch_fs *c)
                        per_cpu_ptr(c->usage_percpu, cpu);
 
                memset(p->s, 0, sizeof(p->s));
-               p->persistent_reserved = 0;
        }
 
        lg_global_unlock(&c->usage_lock);
 
        /* Clear bucket marks: */
-       for_each_member_device(ca, c, i)
-               for_each_bucket(g, ca) {
-                       bucket_cmpxchg(g, new, ({
+       for_each_member_device(ca, c, i) {
+               down_read(&ca->bucket_lock);
+               buckets = bucket_array(ca);
+
+               for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+                       bucket_cmpxchg(buckets->b + b, new, ({
                                new.owned_by_allocator  = 0;
                                new.data_type           = 0;
                                new.cached_sectors      = 0;
                                new.dirty_sectors       = 0;
                        }));
-                       ca->oldest_gens[g - ca->buckets] = new.gen;
+                       ca->oldest_gens[b] = new.gen;
                }
+               up_read(&ca->bucket_lock);
+       }
 }
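
bch2_gc_start() clears bucket marks with the bucket_cmpxchg() construct, i.e. a compare-and-swap retry loop over a mark packed into a single word, so concurrent updaters are never lost. A user-space approximation with C11 atomics; the field layout here is invented, the real struct bucket_mark differs:

#include <stdatomic.h>
#include <stdint.h>

union bucket_mark {
	uint64_t v;
	struct {
		uint8_t  gen;
		uint8_t  data_type;
		uint8_t  owned_by_allocator;
		uint8_t  pad;
		uint16_t dirty_sectors;
		uint16_t cached_sectors;
	};
};

static union bucket_mark bucket_clear_marks(_Atomic uint64_t *mark)
{
	union bucket_mark old, new;

	old.v = atomic_load(mark);
	do {
		new = old;
		new.owned_by_allocator	= 0;
		new.data_type		= 0;
		new.cached_sectors	= 0;
		new.dirty_sectors	= 0;
		/* new.gen is deliberately left alone, as in bch2_gc_start() */
	} while (!atomic_compare_exchange_weak(mark, &old.v, new.v));

	return new;
}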
 
 /**
@@ -463,10 +515,6 @@ void bch2_gc(struct bch_fs *c)
         *    move around - if references move backwards in the ordering GC
         *    uses, GC could skip past them
         */
-
-       if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
-               return;
-
        trace_gc_start(c);
 
        /*
@@ -476,12 +524,11 @@ void bch2_gc(struct bch_fs *c)
        bch2_recalc_sectors_available(c);
 
        down_write(&c->gc_lock);
+       if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
+               goto out;
 
        bch2_gc_start(c);
 
-       /* Walk allocator's references: */
-       bch2_mark_allocator_buckets(c);
-
        /* Walk btree: */
        while (c->gc_pos.phase < (int) BTREE_ID_NR) {
                int ret = c->btree_roots[c->gc_pos.phase].b
@@ -491,22 +538,23 @@ void bch2_gc(struct bch_fs *c)
                if (ret) {
                        bch_err(c, "btree gc failed: %d", ret);
                        set_bit(BCH_FS_GC_FAILURE, &c->flags);
-                       up_write(&c->gc_lock);
-                       return;
+                       goto out;
                }
 
                gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
        }
 
-       bch2_mark_metadata(c);
+       bch2_mark_superblocks(c);
        bch2_mark_pending_btree_node_frees(c);
+       bch2_mark_allocator_buckets(c);
 
        for_each_member_device(ca, c, i)
                atomic_long_set(&ca->saturated_count, 0);
 
        /* Indicates that gc is no longer in progress: */
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
-
+       c->gc_count++;
+out:
        up_write(&c->gc_lock);
        trace_gc_end(c);
        bch2_time_stats_update(&c->btree_gc_time, start_time);
@@ -517,6 +565,12 @@ void bch2_gc(struct bch_fs *c)
         */
        for_each_member_device(ca, c, i)
                bch2_wake_allocator(ca);
+
+       /*
+        * At startup, allocations can happen directly instead of via the
+        * allocator thread - issue wakeup in case they blocked on gc_lock:
+        */
+       closure_wake_up(&c->freelist_wait);
 }
 
 /* Btree coalescing */
@@ -535,22 +589,20 @@ static void recalc_packed_keys(struct btree *b)
                btree_keys_account_key_add(&b->nr, 0, k);
 }
 
-static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
-                               struct btree_iter *iter)
+static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
+                               struct btree *old_nodes[GC_MERGE_NODES])
 {
-       struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
-       struct bch_fs *c = iter->c;
+       struct btree *parent = btree_node_parent(iter, old_nodes[0]);
        unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
        unsigned blocks = btree_blocks(c) * 2 / 3;
        struct btree *new_nodes[GC_MERGE_NODES];
-       struct btree_interior_update *as;
-       struct btree_reserve *res;
+       struct btree_update *as;
        struct keylist keylist;
        struct bkey_format_state format_state;
        struct bkey_format new_format;
 
        memset(new_nodes, 0, sizeof(new_nodes));
-       bch2_keylist_init(&keylist, NULL, 0);
+       bch2_keylist_init(&keylist, NULL);
 
        /* Count keys that are not deleted */
        for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
@@ -564,23 +616,6 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                             DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
                return;
 
-       res = bch2_btree_reserve_get(c, parent, nr_old_nodes,
-                                   BTREE_INSERT_NOFAIL|
-                                   BTREE_INSERT_USE_RESERVE,
-                                   NULL);
-       if (IS_ERR(res)) {
-               trace_btree_gc_coalesce_fail(c,
-                               BTREE_GC_COALESCE_FAIL_RESERVE_GET);
-               return;
-       }
-
-       if (bch2_keylist_realloc(&keylist, NULL, 0,
-                       (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
-               trace_btree_gc_coalesce_fail(c,
-                               BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
-               goto out;
-       }
-
        /* Find a format that all keys in @old_nodes can pack into */
        bch2_bkey_format_init(&format_state);
 
@@ -594,21 +629,38 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
                        trace_btree_gc_coalesce_fail(c,
                                        BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
-                       goto out;
+                       return;
                }
 
-       trace_btree_gc_coalesce(c, parent, nr_old_nodes);
+       if (bch2_keylist_realloc(&keylist, NULL, 0,
+                       (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
+               trace_btree_gc_coalesce_fail(c,
+                               BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
+               return;
+       }
+
+       as = bch2_btree_update_start(c, iter->btree_id,
+                       btree_update_reserve_required(c, parent) + nr_old_nodes,
+                       BTREE_INSERT_NOFAIL|
+                       BTREE_INSERT_USE_RESERVE,
+                       NULL);
+       if (IS_ERR(as)) {
+               trace_btree_gc_coalesce_fail(c,
+                               BTREE_GC_COALESCE_FAIL_RESERVE_GET);
+               bch2_keylist_free(&keylist, NULL);
+               return;
+       }
 
-       as = bch2_btree_interior_update_alloc(c);
+       trace_btree_gc_coalesce(c, old_nodes[0]);
 
        for (i = 0; i < nr_old_nodes; i++)
-               bch2_btree_interior_update_will_free_node(c, as, old_nodes[i]);
+               bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
 
        /* Repack everything with @new_format and sort down to one bset */
        for (i = 0; i < nr_old_nodes; i++)
                new_nodes[i] =
-                       __bch2_btree_node_alloc_replacement(c, old_nodes[i],
-                                                           new_format, res);
+                       __bch2_btree_node_alloc_replacement(as, old_nodes[i],
+                                                           new_format);
 
        /*
         * Conceptually we concatenate the nodes together and slice them
@@ -685,7 +737,7 @@ static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                bch2_btree_build_aux_trees(n);
                six_unlock_write(&n->lock);
 
-               bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
+               bch2_btree_node_write(c, n, SIX_LOCK_intent);
        }
 
        /*
@@ -722,11 +774,11 @@ next:
                bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
 
        /* Insert the newly coalesced nodes */
-       bch2_btree_insert_node(parent, iter, &keylist, res, as);
+       bch2_btree_insert_node(as, parent, iter, &keylist);
 
        BUG_ON(!bch2_keylist_empty(&keylist));
 
-       BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+       BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
 
        BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
 
@@ -735,7 +787,7 @@ next:
 
        /* Free the old nodes and update our sliding window */
        for (i = 0; i < nr_old_nodes; i++) {
-               bch2_btree_node_free_inmem(iter, old_nodes[i]);
+               bch2_btree_node_free_inmem(c, old_nodes[i], iter);
                six_unlock_intent(&old_nodes[i]->lock);
 
                /*
@@ -752,9 +804,9 @@ next:
                                six_unlock_intent(&new_nodes[i]->lock);
                }
        }
-out:
+
+       bch2_btree_update_done(as);
        bch2_keylist_free(&keylist, NULL);
-       bch2_btree_reserve_put(c, res);
 }
 
 static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
@@ -776,7 +828,9 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
         */
        memset(merge, 0, sizeof(merge));
 
-       __for_each_btree_node(&iter, c, btree_id, POS_MIN, 0, b, U8_MAX) {
+       __for_each_btree_node(&iter, c, btree_id, POS_MIN,
+                             BTREE_MAX_DEPTH, 0,
+                             BTREE_ITER_PREFETCH, b) {
                memmove(merge + 1, merge,
                        sizeof(merge) - sizeof(merge[0]));
                memmove(lock_seq + 1, lock_seq,
@@ -796,7 +850,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
                }
                memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
 
-               bch2_coalesce_nodes(merge, &iter);
+               bch2_coalesce_nodes(c, &iter, merge);
 
                for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
                        lock_seq[i] = merge[i]->lock.state.seq;
@@ -817,7 +871,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
                 * and the nodes in our sliding window might not have the same
                 * parent anymore - blow away the sliding window:
                 */
-               if (iter.nodes[iter.level + 1] &&
+               if (btree_iter_node(&iter, iter.level + 1) &&
                    !btree_node_intent_locked(&iter, iter.level + 1))
                        memset(merge + 1, 0,
                               (GC_MERGE_NODES - 1) * sizeof(merge[0]));
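
The merge[] handling above is a fixed-size sliding window: each node visited is shifted in at slot 0, and the tail of the window is zeroed whenever the parent may have changed underneath it. A toy standalone version of the shifting step, with ints standing in for the struct btree pointers and a window size chosen just for the example:

#include <stdio.h>
#include <string.h>

#define GC_MERGE_NODES 4	/* window size for the example */

static void window_push(int merge[GC_MERGE_NODES], int new_node)
{
	/* slide everything right by one; the oldest entry falls off the end */
	memmove(merge + 1, merge, (GC_MERGE_NODES - 1) * sizeof(merge[0]));
	merge[0] = new_node;
}

int main(void)
{
	int merge[GC_MERGE_NODES] = { 0 };

	for (int node = 1; node <= 6; node++) {
		window_push(merge, node);
		printf("%d %d %d %d\n", merge[0], merge[1], merge[2], merge[3]);
	}
	return 0;
}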
@@ -830,7 +884,6 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
  */
 void bch2_coalesce(struct bch_fs *c)
 {
-       u64 start_time;
        enum btree_id id;
 
        if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
@@ -838,7 +891,6 @@ void bch2_coalesce(struct bch_fs *c)
 
        down_read(&c->gc_lock);
        trace_gc_coalesce_start(c);
-       start_time = local_clock();
 
        for (id = 0; id < BTREE_ID_NR; id++) {
                int ret = c->btree_roots[id].b
@@ -853,7 +905,6 @@ void bch2_coalesce(struct bch_fs *c)
                }
        }
 
-       bch2_time_stats_update(&c->btree_coalesce_time, start_time);
        trace_gc_coalesce_end(c);
        up_read(&c->gc_lock);
 }
@@ -868,9 +919,7 @@ static int bch2_gc_thread(void *arg)
        set_freezable();
 
        while (1) {
-               unsigned long next = last + c->capacity / 16;
-
-               while (atomic_long_read(&clock->now) < next) {
+               while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
 
                        if (kthread_should_stop()) {
@@ -878,21 +927,28 @@ static int bch2_gc_thread(void *arg)
                                return 0;
                        }
 
-                       if (atomic_read(&c->kick_gc) != last_kick) {
-                               __set_current_state(TASK_RUNNING);
+                       if (atomic_read(&c->kick_gc) != last_kick)
                                break;
+
+                       if (c->btree_gc_periodic) {
+                               unsigned long next = last + c->capacity / 16;
+
+                               if (atomic_long_read(&clock->now) >= next)
+                                       break;
+
+                               bch2_io_clock_schedule_timeout(clock, next);
+                       } else {
+                               schedule();
                        }
 
-                       bch2_io_clock_schedule_timeout(clock, next);
                        try_to_freeze();
                }
+               __set_current_state(TASK_RUNNING);
 
                last = atomic_long_read(&clock->now);
                last_kick = atomic_read(&c->kick_gc);
 
                bch2_gc(c);
-               if (!btree_gc_coalesce_disabled(c))
-                       bch2_coalesce(c);
 
                debug_check_no_locks_held();
        }
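
The reworked wait loop above sleeps until the thread is told to stop, the kick counter changes, or (when periodic GC is enabled) the io clock passes the deadline. A skeleton of the same structure with the bcachefs io-clock branch left out, using only stock kernel primitives; example_kick and example_gc_thread are made up for the sketch:

#include <linux/atomic.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static atomic_t example_kick;	/* stand-in for c->kick_gc */

static int example_gc_thread(void *arg)
{
	int last_kick = 0;

	set_freezable();

	while (1) {
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}

			if (atomic_read(&example_kick) != last_kick)
				break;

			schedule();
			try_to_freeze();
		}
		/* mark ourselves runnable exactly once before doing work */
		__set_current_state(TASK_RUNNING);

		last_kick = atomic_read(&example_kick);

		/* ... one GC pass would go here ... */
	}
}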
@@ -940,8 +996,10 @@ static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
        if (!c->btree_roots[id].b)
                return 0;
 
-       ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
-                          bkey_i_to_s_c(&c->btree_roots[id].b->key));
+       b = c->btree_roots[id].b;
+       if (!btree_node_fake(b))
+               ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
+                                                 bkey_i_to_s_c(&b->key));
        if (ret)
                return ret;
 
@@ -949,7 +1007,7 @@ static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
         * We have to hit every btree node before starting journal replay, in
         * order for the journal seq blacklist machinery to work:
         */
-       for_each_btree_node(&iter, c, id, POS_MIN, 0, b) {
+       for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
                btree_node_range_checks(c, b, &r);
 
                if (btree_node_has_ptrs(b)) {
@@ -970,15 +1028,22 @@ static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
                bch2_btree_iter_cond_resched(&iter);
        }
 err:
-       bch2_btree_iter_unlock(&iter);
-       return ret;
+       return bch2_btree_iter_unlock(&iter) ?: ret;
 }
 
-int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
+static int __bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
 {
        unsigned iter = 0;
        enum btree_id id;
        int ret;
+
+       mutex_lock(&c->sb_lock);
+       if (!bch2_sb_get_replicas(c->disk_sb.sb)) {
+               if (BCH_SB_INITIALIZED(c->disk_sb.sb))
+                       bch_info(c, "building replicas info");
+               set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
+       }
+       mutex_unlock(&c->sb_lock);
 again:
        bch2_gc_start(c);
 
@@ -988,13 +1053,9 @@ again:
                        return ret;
        }
 
-       if (journal) {
-               ret = bch2_journal_mark(c, journal);
-               if (ret)
-                       return ret;
-       }
-
-       bch2_mark_metadata(c);
+       ret = bch2_journal_mark(c, journal);
+       if (ret)
+               return ret;
 
        if (test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
                if (iter++ > 2) {
@@ -1014,8 +1075,21 @@ again:
        if (c->sb.encryption_type)
                atomic64_add(1 << 16, &c->key_version);
 
+       bch2_mark_superblocks(c);
+
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
 
        return 0;
 }
+
+int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
+{
+       int ret;
+
+       down_write(&c->gc_lock);
+       ret = __bch2_initial_gc(c, journal);
+       up_write(&c->gc_lock);
+
+       return ret;
+}