Update bcachefs sources to edf5f38218 bcachefs: Refactor superblock code

diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index b09019659ddd0c7583b7bd91c7d3fd73e1063889..ad51f29c9a38450f0aac4d917e63978e39b8aaee 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
 #include "journal.h"
 #include "keylist.h"
 #include "move.h"
+#include "replicas.h"
 #include "super-io.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/preempt.h>
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
@@ -95,7 +97,7 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 
                extent_for_each_ptr(e, ptr) {
-                       struct bch_dev *ca = c->devs[ptr->dev];
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                        size_t b = PTR_BUCKET_NR(ca, ptr);
 
                        if (gen_after(ca->oldest_gens[b], ptr->gen))
@@ -111,19 +113,35 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
 /*
  * For runtime mark and sweep:
  */
-static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
-                             struct bkey_s_c k, unsigned flags)
+static u8 bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
+                          struct bkey_s_c k, unsigned flags)
 {
+       struct gc_pos pos = { 0 };
+       struct bch_fs_usage *stats;
+       u8 ret = 0;
+
+       preempt_disable();
+       stats = this_cpu_ptr(c->usage_percpu);
        switch (type) {
        case BKEY_TYPE_BTREE:
-               bch2_gc_mark_key(c, k, c->opts.btree_node_size, true, flags);
-               return 0;
+               bch2_mark_key(c, k, c->opts.btree_node_size, true, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               break;
        case BKEY_TYPE_EXTENTS:
-               bch2_gc_mark_key(c, k, k.k->size, false, flags);
-               return bch2_btree_key_recalc_oldest_gen(c, k);
+               bch2_mark_key(c, k, k.k->size, false, pos, stats,
+                             0, flags|
+                             BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                             BCH_BUCKET_MARK_GC_LOCK_HELD);
+               ret = bch2_btree_key_recalc_oldest_gen(c, k);
+               break;
        default:
                BUG();
        }
+       preempt_enable();
+
+       return ret;
 }
 
 int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
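
The rewritten bch2_gc_mark_key() above pins the task with preempt_disable() before dereferencing this_cpu_ptr(c->usage_percpu): a per-CPU pointer is only stable while the task cannot migrate to another CPU. A minimal sketch of that pattern follows; the gc_stats type and gc_stats_inc() helper are hypothetical stand-ins, not bcachefs code.

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct gc_stats {
	u64 keys_marked;
};

static void gc_stats_inc(struct gc_stats __percpu *usage)
{
	struct gc_stats *stats;

	preempt_disable();		/* no migration from here on */
	stats = this_cpu_ptr(usage);	/* safe: pinned to the current CPU */
	stats->keys_marked++;
	preempt_enable();		/* stats must not be used past this */
}

Keeping the update window short matters, since preemption stays disabled for its whole duration.
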
@@ -133,23 +151,24 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                ? BCH_DATA_BTREE : BCH_DATA_USER;
        int ret = 0;
 
+       if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+           fsck_err_on(!bch2_bkey_replicas_marked(c, data_type, k), c,
+                       "superblock not marked as containing replicas (type %u)",
+                       data_type)) {
+               ret = bch2_mark_bkey_replicas(c, data_type, k);
+               if (ret)
+                       return ret;
+       }
+
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const struct bch_extent_ptr *ptr;
 
-               if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
-                   (!c->opts.nofsck &&
-                    fsck_err_on(!bch2_sb_has_replicas(c, e, data_type), c,
-                                "superblock not marked as containing replicas"))) {
-                       ret = bch2_check_mark_super(c, e, data_type);
-                       if (ret)
-                               return ret;
-               }
-
                extent_for_each_ptr(e, ptr) {
-                       struct bch_dev *ca = c->devs[ptr->dev];
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+                       size_t b = PTR_BUCKET_NR(ca, ptr);
                        struct bucket *g = PTR_BUCKET(ca, ptr);
 
                        if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
@@ -159,7 +178,7 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                                        ptr->gen)) {
                                g->_mark.gen = ptr->gen;
                                g->_mark.gen_valid = 1;
-                               set_bit(g - ca->buckets, ca->bucket_dirty);
+                               set_bit(b, ca->buckets_dirty);
                        }
 
                        if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
@@ -168,7 +187,7 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
                                        ptr->gen, g->mark.gen)) {
                                g->_mark.gen = ptr->gen;
                                g->_mark.gen_valid = 1;
-                               set_bit(g - ca->buckets, ca->bucket_dirty);
+                               set_bit(b, ca->buckets_dirty);
                                set_bit(BCH_FS_FIXED_GENS, &c->flags);
                        }
 
@@ -177,12 +196,11 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
        }
        }
 
-
        atomic64_set(&c->key_version,
                     max_t(u64, k.k->version.lo,
                           atomic64_read(&c->key_version)));
 
-       bch2_btree_mark_key(c, type, k, BCH_BUCKET_MARK_NOATOMIC);
+       bch2_gc_mark_key(c, type, k, BCH_BUCKET_MARK_NOATOMIC);
 fsck_err:
        return ret;
 }
@@ -200,7 +218,7 @@ static unsigned btree_gc_mark_node(struct bch_fs *c, struct btree *b)
                                               btree_node_is_extents(b),
                                               &unpacked) {
                        bch2_bkey_debugcheck(c, b, k);
-                       stale = max(stale, bch2_btree_mark_key(c, type, k, 0));
+                       stale = max(stale, bch2_gc_mark_key(c, type, k, 0));
                }
 
        return stale;
@@ -267,123 +285,87 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
        mutex_lock(&c->btree_root_lock);
 
        b = c->btree_roots[btree_id].b;
-       bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key), 0);
+       if (!btree_node_fake(b))
+               bch2_gc_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key), 0);
        gc_pos_set(c, gc_pos_btree_root(b->btree_id));
 
        mutex_unlock(&c->btree_root_lock);
        return 0;
 }
 
-static void bch2_mark_allocator_buckets(struct bch_fs *c)
-{
-       struct bch_dev *ca;
-       struct open_bucket *ob;
-       const struct open_bucket_ptr *ptr;
-       size_t i, j, iter;
-       unsigned ci;
-
-       down_write(&c->alloc_gc_lock);
-
-       for_each_member_device(ca, c, ci) {
-               spin_lock(&ca->freelist_lock);
-
-               fifo_for_each_entry(i, &ca->free_inc, iter)
-                       bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               for (j = 0; j < RESERVE_NR; j++)
-                       fifo_for_each_entry(i, &ca->free[j], iter)
-                               bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
-
-               for (ptr = ca->open_buckets_partial;
-                    ptr < ca->open_buckets_partial + ca->open_buckets_partial_nr;
-                    ptr++)
-                       bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, &ptr->ptr), true);
-
-               spin_unlock(&ca->freelist_lock);
-       }
-
-       for (ob = c->open_buckets;
-            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
-            ob++) {
-               spin_lock(&ob->lock);
-               open_bucket_for_each_ptr(ob, ptr) {
-                       ca = c->devs[ptr->ptr.dev];
-                       bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, &ptr->ptr), true);
-               }
-               spin_unlock(&ob->lock);
-       }
-
-       up_write(&c->alloc_gc_lock);
-}
-
-static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
-                                 enum bucket_data_type type)
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+                                 u64 start, u64 end,
+                                 enum bch_data_type type,
+                                 unsigned flags)
 {
        u64 b = sector_to_bucket(ca, start);
 
        do {
-               bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
+               unsigned sectors =
+                       min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+               bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+                                         gc_phase(GC_PHASE_SB), flags);
                b++;
-       } while (b < sector_to_bucket(ca, end));
+               start += sectors;
+       } while (start < end);
 }
 
-static void bch2_dev_mark_superblocks(struct bch_dev *ca)
+void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+                             unsigned flags)
 {
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        unsigned i;
+       u64 b;
 
-       for (i = 0; i < layout->nr_superblocks; i++) {
-               if (layout->sb_offset[i] == BCH_SB_SECTOR)
-                       mark_metadata_sectors(ca, 0, BCH_SB_SECTOR,
-                                             BUCKET_SB);
-
-               mark_metadata_sectors(ca,
-                                     layout->sb_offset[i],
-                                     layout->sb_offset[i] +
-                                     (1 << layout->sb_max_size_bits),
-                                     BUCKET_SB);
-       }
-}
+       if (c)
+               lockdep_assert_held(&c->sb_lock);
 
-/*
- * Mark non btree metadata - prios, journal
- */
-void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
-{
-       unsigned i;
-       u64 b;
+       for (i = 0; i < layout->nr_superblocks; i++) {
+               u64 offset = le64_to_cpu(layout->sb_offset[i]);
 
-       lockdep_assert_held(&c->sb_lock);
+               if (offset == BCH_SB_SECTOR)
+                       mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+                                             BCH_DATA_SB, flags);
 
-       bch2_dev_mark_superblocks(ca);
+               mark_metadata_sectors(c, ca, offset,
+                                     offset + (1 << layout->sb_max_size_bits),
+                                     BCH_DATA_SB, flags);
+       }
 
-       spin_lock(&c->journal.lock);
+       if (c)
+               spin_lock(&c->journal.lock);
 
        for (i = 0; i < ca->journal.nr; i++) {
                b = ca->journal.buckets[i];
-               bch2_mark_metadata_bucket(ca, ca->buckets + b,
-                                        BUCKET_JOURNAL, true);
+               bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+                                         ca->mi.bucket_size,
+                                         gc_phase(GC_PHASE_SB), flags);
        }
 
-       spin_unlock(&c->journal.lock);
+       if (c)
+               spin_unlock(&c->journal.lock);
 }
 
-static void bch2_mark_metadata(struct bch_fs *c)
+static void bch2_mark_superblocks(struct bch_fs *c)
 {
        struct bch_dev *ca;
        unsigned i;
 
        mutex_lock(&c->sb_lock);
-       gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
+       gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
        for_each_online_member(ca, c, i)
-               bch2_mark_dev_metadata(c, ca);
+               bch2_mark_dev_superblock(c, ca,
+                                        BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                        BCH_BUCKET_MARK_GC_LOCK_HELD);
        mutex_unlock(&c->sb_lock);
 }
 
 /* Also see bch2_pending_btree_node_free_insert_done() */
 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 {
+       struct gc_pos pos = { 0 };
        struct bch_fs_usage stats = { 0 };
        struct btree_update *as;
        struct pending_btree_node_free *d;
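
From the hunk above: the new mark_metadata_sectors() walks an arbitrary sector range and clamps each step to the end of the current bucket, so every bucket is marked with exactly the sectors of the range it contains. A standalone sketch of just that arithmetic, assuming a hypothetical 128-sector bucket size (plain userspace C, not bcachefs code):

#include <stdio.h>

#define BUCKET_SECTORS 128ULL	/* assumed bucket size, in sectors */

int main(void)
{
	unsigned long long start = 8, end = 520;	/* arbitrary range */
	unsigned long long b = start / BUCKET_SECTORS;	/* sector_to_bucket() */

	do {
		unsigned long long bucket_end = (b + 1) * BUCKET_SECTORS;
		unsigned long long sectors =
			(bucket_end < end ? bucket_end : end) - start;

		/* each bucket is marked with only the sectors it contains */
		printf("bucket %llu: %llu sectors\n", b, sectors);
		b++;
		start += sectors;
	} while (start < end);

	return 0;
}

For sectors 8..520 this prints 120 sectors for bucket 0, 128 for each full bucket, and 8 for the tail bucket, mirroring the min_t(u64, bucket_to_sector(ca, b + 1), end) - start step in the diff; the old loop marked whole buckets with no sector count at all.
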
@@ -393,10 +375,11 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
-                       __bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-                                       c->opts.btree_node_size, true,
-                                       &stats, 0,
-                                       BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE);
+                       bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+                                     c->opts.btree_node_size, true, pos,
+                                     &stats, 0,
+                                     BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                     BCH_BUCKET_MARK_GC_LOCK_HELD);
        /*
         * Don't apply stats - pending deletes aren't tracked in
         * bch_alloc_stats:
@@ -405,12 +388,58 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
        mutex_unlock(&c->btree_interior_update_lock);
 }
 
-void bch2_gc_start(struct bch_fs *c)
+static void bch2_mark_allocator_buckets(struct bch_fs *c)
 {
        struct bch_dev *ca;
-       struct bucket *g;
+       struct open_bucket *ob;
+       size_t i, j, iter;
+       unsigned ci;
+
+       spin_lock(&c->freelist_lock);
+       gc_pos_set(c, gc_pos_alloc(c, NULL));
+
+       for_each_member_device(ca, c, ci) {
+               fifo_for_each_entry(i, &ca->free_inc, iter)
+                       bch2_mark_alloc_bucket(c, ca, i, true,
+                                              gc_pos_alloc(c, NULL),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+
+
+
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each_entry(i, &ca->free[j], iter)
+                               bch2_mark_alloc_bucket(c, ca, i, true,
+                                                      gc_pos_alloc(c, NULL),
+                                                      BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                                      BCH_BUCKET_MARK_GC_LOCK_HELD);
+       }
+
+       spin_unlock(&c->freelist_lock);
+
+       for (ob = c->open_buckets;
+            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+            ob++) {
+               spin_lock(&ob->lock);
+               if (ob->valid) {
+                       gc_pos_set(c, gc_pos_alloc(c, ob));
+                       ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+                       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
+                                              gc_pos_alloc(c, ob),
+                                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+               }
+               spin_unlock(&ob->lock);
+       }
+}
+
+static void bch2_gc_start(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       struct bucket_array *buckets;
        struct bucket_mark new;
        unsigned i;
+       size_t b;
        int cpu;
 
        lg_global_lock(&c->usage_lock);
@@ -442,16 +471,21 @@ void bch2_gc_start(struct bch_fs *c)
        lg_global_unlock(&c->usage_lock);
 
        /* Clear bucket marks: */
-       for_each_member_device(ca, c, i)
-               for_each_bucket(g, ca) {
-                       bucket_cmpxchg(g, new, ({
+       for_each_member_device(ca, c, i) {
+               down_read(&ca->bucket_lock);
+               buckets = bucket_array(ca);
+
+               for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+                       bucket_cmpxchg(buckets->b + b, new, ({
                                new.owned_by_allocator  = 0;
                                new.data_type           = 0;
                                new.cached_sectors      = 0;
                                new.dirty_sectors       = 0;
                        }));
-                       ca->oldest_gens[g - ca->buckets] = new.gen;
+                       ca->oldest_gens[b] = new.gen;
                }
+               up_read(&ca->bucket_lock);
+       }
 }
 
 /**
@@ -495,9 +529,6 @@ void bch2_gc(struct bch_fs *c)
 
        bch2_gc_start(c);
 
-       /* Walk allocator's references: */
-       bch2_mark_allocator_buckets(c);
-
        /* Walk btree: */
        while (c->gc_pos.phase < (int) BTREE_ID_NR) {
                int ret = c->btree_roots[c->gc_pos.phase].b
@@ -513,8 +544,9 @@ void bch2_gc(struct bch_fs *c)
                gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
        }
 
-       bch2_mark_metadata(c);
+       bch2_mark_superblocks(c);
        bch2_mark_pending_btree_node_frees(c);
+       bch2_mark_allocator_buckets(c);
 
        for_each_member_device(ca, c, i)
                atomic_long_set(&ca->saturated_count, 0);
@@ -560,7 +592,7 @@ static void recalc_packed_keys(struct btree *b)
 static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
                                struct btree *old_nodes[GC_MERGE_NODES])
 {
-       struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
+       struct btree *parent = btree_node_parent(iter, old_nodes[0]);
        unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
        unsigned blocks = btree_blocks(c) * 2 / 3;
        struct btree *new_nodes[GC_MERGE_NODES];
@@ -570,7 +602,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
        struct bkey_format new_format;
 
        memset(new_nodes, 0, sizeof(new_nodes));
-       bch2_keylist_init(&keylist, NULL, 0);
+       bch2_keylist_init(&keylist, NULL);
 
        /* Count keys that are not deleted */
        for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
@@ -705,7 +737,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
                bch2_btree_build_aux_trees(n);
                six_unlock_write(&n->lock);
 
-               bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+               bch2_btree_node_write(c, n, SIX_LOCK_intent);
        }
 
        /*
@@ -746,7 +778,7 @@ next:
 
        BUG_ON(!bch2_keylist_empty(&keylist));
 
-       BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+       BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
 
        BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
 
@@ -839,7 +871,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
                 * and the nodes in our sliding window might not have the same
                 * parent anymore - blow away the sliding window:
                 */
-               if (iter.nodes[iter.level + 1] &&
+               if (btree_iter_node(&iter, iter.level + 1) &&
                    !btree_node_intent_locked(&iter, iter.level + 1))
                        memset(merge + 1, 0,
                               (GC_MERGE_NODES - 1) * sizeof(merge[0]));
@@ -964,8 +996,10 @@ static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
        if (!c->btree_roots[id].b)
                return 0;
 
-       ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
-                          bkey_i_to_s_c(&c->btree_roots[id].b->key));
+       b = c->btree_roots[id].b;
+       if (!btree_node_fake(b))
+               ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
+                                                 bkey_i_to_s_c(&b->key));
        if (ret)
                return ret;
 
@@ -997,15 +1031,15 @@ err:
        return bch2_btree_iter_unlock(&iter) ?: ret;
 }
 
-int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
+static int __bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
 {
        unsigned iter = 0;
        enum btree_id id;
        int ret;
 
        mutex_lock(&c->sb_lock);
-       if (!bch2_sb_get_replicas(c->disk_sb)) {
-               if (BCH_SB_INITIALIZED(c->disk_sb))
+       if (!bch2_sb_get_replicas(c->disk_sb.sb)) {
+               if (BCH_SB_INITIALIZED(c->disk_sb.sb))
                        bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }
@@ -1021,9 +1055,7 @@ again:
 
        ret = bch2_journal_mark(c, journal);
        if (ret)
-       return ret;
-
-       bch2_mark_metadata(c);
+               return ret;
 
        if (test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
                if (iter++ > 2) {
@@ -1043,8 +1075,21 @@ again:
        if (c->sb.encryption_type)
                atomic64_add(1 << 16, &c->key_version);
 
+       bch2_mark_superblocks(c);
+
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
 
        return 0;
 }
+
+int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
+{
+       int ret;
+
+       down_write(&c->gc_lock);
+       ret = __bch2_initial_gc(c, journal);
+       up_write(&c->gc_lock);
+
+       return ret;
+}