Update bcachefs sources to 940d6ca657 bcachefs: acl code improvements

diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index 8899e3c6e284264b3c0125c21bb57c265fd62d1e..43112445040501828e49274b2d1fde18b09f98ad 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -67,6 +67,7 @@
 #include "btree_gc.h"
 #include "buckets.h"
 #include "error.h"
+#include "movinggc.h"
 
 #include <linux/preempt.h>
 #include <trace/events/bcachefs.h>
@@ -147,12 +148,16 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
 {
        u16 last_seq_ondisk = c->journal.last_seq_ondisk;
        struct bch_dev *ca;
+       struct bucket_array *buckets;
        struct bucket *g;
        struct bucket_mark m;
        unsigned i;
 
-       for_each_member_device(ca, c, i)
-               for_each_bucket(g, ca) {
+       for_each_member_device(ca, c, i) {
+               down_read(&ca->bucket_lock);
+               buckets = bucket_array(ca);
+
+               for_each_bucket(g, buckets) {
                        bucket_cmpxchg(g, m, ({
                                if (!m.journal_seq_valid ||
                                    bucket_needs_journal_commit(m, last_seq_ondisk))
@@ -161,6 +166,8 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
                                m.journal_seq_valid = 0;
                        }));
                }
+               up_read(&ca->bucket_lock);
+       }
 }
 
 #define bch2_usage_add(_acc, _stats)                                   \
@@ -251,6 +258,11 @@ static u64 reserve_factor(u64 r)
        return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
 }
 
+static u64 avail_factor(u64 r)
+{
+       return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
+}
+
 u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
 {
        struct fs_usage_sum sum = __fs_usage_sum(stats);
@@ -263,11 +275,27 @@ u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
        return min(c->capacity, __bch2_fs_sectors_used(c, stats));
 }
 
+u64 bch2_fs_sectors_free(struct bch_fs *c, struct bch_fs_usage stats)
+{
+       return avail_factor(c->capacity - bch2_fs_sectors_used(c, stats));
+}
+
 static inline int is_unavailable_bucket(struct bucket_mark m)
 {
        return !is_available_bucket(m);
 }
 
+static inline int is_fragmented_bucket(struct bucket_mark m,
+                                      struct bch_dev *ca)
+{
+       if (!m.owned_by_allocator &&
+           m.data_type == BCH_DATA_USER &&
+           bucket_sectors_used(m))
+               return max_t(int, 0, (int) ca->mi.bucket_size -
+                            bucket_sectors_used(m));
+       return 0;
+}
+
 static inline enum bch_data_type bucket_type(struct bucket_mark m)
 {
        return m.cached_sectors && !m.dirty_sectors
@@ -281,7 +309,7 @@ static bool bucket_became_unavailable(struct bch_fs *c,
 {
        return is_available_bucket(old) &&
               !is_available_bucket(new) &&
-              c && c->gc_pos.phase == GC_PHASE_DONE;
+              (!c || c->gc_pos.phase == GC_PHASE_DONE);
 }
 
 void bch2_fs_usage_apply(struct bch_fs *c,
@@ -303,7 +331,7 @@ void bch2_fs_usage_apply(struct bch_fs *c,
                stats->online_reserved  -= added;
        }
 
-       lg_local_lock(&c->usage_lock);
+       percpu_down_read_preempt_disable(&c->usage_lock);
        /* online_reserved not subject to gc: */
        this_cpu_ptr(c->usage_percpu)->online_reserved +=
                stats->online_reserved;
@@ -313,26 +341,28 @@ void bch2_fs_usage_apply(struct bch_fs *c,
                bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats);
 
        bch2_fs_stats_verify(c);
-       lg_local_unlock(&c->usage_lock);
+       percpu_up_read_preempt_enable(&c->usage_lock);
 
        memset(stats, 0, sizeof(*stats));
 }
 
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
-                                 struct bucket *g, struct bucket_mark old,
-                                 struct bucket_mark new)
+                                 struct bucket_mark old, struct bucket_mark new)
 {
        struct bch_dev_usage *dev_usage;
 
-       BUG_ON((g - ca->buckets) < ca->mi.first_bucket ||
-              (g - ca->buckets) >= ca->mi.nbuckets);
+       if (c)
+               percpu_rwsem_assert_held(&c->usage_lock);
 
-       bch2_fs_inconsistent_on(old.data_type && new.data_type &&
-                       old.data_type != new.data_type, c,
-                       "different types of data in same bucket: %u, %u",
-                       old.data_type, new.data_type);
+       if (old.data_type && new.data_type &&
+           old.data_type != new.data_type) {
+               BUG_ON(!c);
+               bch2_fs_inconsistent(c,
+                       "different types of data in same bucket: %s, %s",
+                       bch2_data_types[old.data_type],
+                       bch2_data_types[new.data_type]);
+       }
 
-       preempt_disable();
        dev_usage = this_cpu_ptr(ca->usage_percpu);
 
        dev_usage->buckets[bucket_type(old)]--;
@@ -347,7 +377,8 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
        dev_usage->sectors[new.data_type] += new.dirty_sectors;
        dev_usage->sectors[BCH_DATA_CACHED] +=
                (int) new.cached_sectors - (int) old.cached_sectors;
-       preempt_enable();
+       dev_usage->sectors_fragmented +=
+               is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
 
        if (!is_available_bucket(old) && is_available_bucket(new))
                bch2_wake_allocator(ca);
@@ -359,76 +390,56 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 ({                                                             \
        struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
                                                                \
-       bch2_dev_usage_update(c, ca, g, _old, new);             \
+       bch2_dev_usage_update(c, ca, _old, new);                \
        _old;                                                   \
 })
 
 bool bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
-                           struct bucket *g, struct bucket_mark *old)
+                           size_t b, struct bucket_mark *old)
 {
+       struct bucket *g;
        struct bucket_mark new;
 
-       lg_local_lock(&c->usage_lock);
+       percpu_rwsem_assert_held(&c->usage_lock);
+
+       g = bucket(ca, b);
+
        *old = bucket_data_cmpxchg(c, ca, g, new, ({
                if (!is_available_bucket(new)) {
-                       lg_local_unlock(&c->usage_lock);
+                       percpu_up_read_preempt_enable(&c->usage_lock);
                        return false;
                }
 
                new.owned_by_allocator  = 1;
-               new.touched_this_mount  = 1;
                new.data_type           = 0;
                new.cached_sectors      = 0;
                new.dirty_sectors       = 0;
                new.gen++;
        }));
-       lg_local_unlock(&c->usage_lock);
 
        if (!old->owned_by_allocator && old->cached_sectors)
-               trace_invalidate(ca, bucket_to_sector(ca, g - ca->buckets),
+               trace_invalidate(ca, bucket_to_sector(ca, b),
                                 old->cached_sectors);
        return true;
 }
 
-bool bch2_mark_alloc_bucket_startup(struct bch_fs *c, struct bch_dev *ca,
-                                   struct bucket *g)
-{
-       struct bucket_mark new, old;
-
-       lg_local_lock(&c->usage_lock);
-       old = bucket_data_cmpxchg(c, ca, g, new, ({
-               if (new.touched_this_mount ||
-                   !is_available_bucket(new)) {
-                       lg_local_unlock(&c->usage_lock);
-                       return false;
-               }
-
-               new.owned_by_allocator  = 1;
-               new.touched_this_mount  = 1;
-       }));
-       lg_local_unlock(&c->usage_lock);
-
-       return true;
-}
-
 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
-                           struct bucket *g, bool owned_by_allocator,
+                           size_t b, bool owned_by_allocator,
                            struct gc_pos pos, unsigned flags)
 {
+       struct bucket *g;
        struct bucket_mark old, new;
 
-       lg_local_lock(&c->usage_lock);
+       percpu_rwsem_assert_held(&c->usage_lock);
+       g = bucket(ca, b);
+
        if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
-           gc_will_visit(c, pos)) {
-               lg_local_unlock(&c->usage_lock);
+           gc_will_visit(c, pos))
                return;
-       }
 
        old = bucket_data_cmpxchg(c, ca, g, new, ({
-               new.touched_this_mount  = 1;
                new.owned_by_allocator  = owned_by_allocator;
        }));
-       lg_local_unlock(&c->usage_lock);
 
        BUG_ON(!owned_by_allocator && !old.owned_by_allocator &&
               c->gc_pos.phase == GC_PHASE_DONE);
@@ -448,28 +459,33 @@ do {                                                              \
 } while (0)
 
 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
-                              struct bucket *g, enum bch_data_type type,
+                              size_t b, enum bch_data_type type,
                               unsigned sectors, struct gc_pos pos,
                               unsigned flags)
 {
+       struct bucket *g;
        struct bucket_mark old, new;
 
        BUG_ON(!type);
 
-       lg_local_lock(&c->usage_lock);
-       if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
-           gc_will_visit(c, pos)) {
-               lg_local_unlock(&c->usage_lock);
-               return;
+       if (likely(c)) {
+               percpu_rwsem_assert_held(&c->usage_lock);
+
+               if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
+                   gc_will_visit(c, pos))
+                       return;
        }
 
+       rcu_read_lock();
+
+       g = bucket(ca, b);
        old = bucket_data_cmpxchg(c, ca, g, new, ({
                saturated_add(ca, new.dirty_sectors, sectors,
                              GC_MAX_SECTORS_USED);
                new.data_type           = type;
-               new.touched_this_mount  = 1;
        }));
-       lg_local_unlock(&c->usage_lock);
+
+       rcu_read_unlock();
 
        BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
               bucket_became_unavailable(c, old, new));
@@ -502,7 +518,7 @@ static void bch2_mark_pointer(struct bch_fs *c,
        struct bucket_mark old, new;
        unsigned saturated;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-       struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
+       struct bucket *g = PTR_BUCKET(ca, ptr);
        enum bch_data_type data_type = type == S_META
                ? BCH_DATA_BTREE : BCH_DATA_USER;
        u64 v;
@@ -525,7 +541,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
        if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
                if (journal_seq)
                        bucket_cmpxchg(g, new, ({
-                               new.touched_this_mount  = 1;
                                new.journal_seq_valid   = 1;
                                new.journal_seq         = journal_seq;
                        }));
@@ -533,9 +548,9 @@ static void bch2_mark_pointer(struct bch_fs *c,
                return;
        }
 
-       v = READ_ONCE(g->_mark.counter);
+       v = atomic64_read(&g->_mark.v);
        do {
-               new.counter = old.counter = v;
+               new.v.counter = old.v.counter = v;
                saturated = 0;
 
                /*
@@ -574,17 +589,15 @@ static void bch2_mark_pointer(struct bch_fs *c,
                        new.data_type = data_type;
                }
 
-               new.touched_this_mount  = 1;
-
                if (flags & BCH_BUCKET_MARK_NOATOMIC) {
                        g->_mark = new;
                        break;
                }
-       } while ((v = cmpxchg(&g->_mark.counter,
-                             old.counter,
-                             new.counter)) != old.counter);
+       } while ((v = atomic64_cmpxchg(&g->_mark.v,
+                             old.v.counter,
+                             new.v.counter)) != old.v.counter);
 
-       bch2_dev_usage_update(c, ca, g, old, new);
+       bch2_dev_usage_update(c, ca, old, new);
 
        BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
               bucket_became_unavailable(c, old, new));
@@ -634,11 +647,14 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
         *    (e.g. the btree node lock, or the relevant allocator lock).
         */
 
-       lg_local_lock(&c->usage_lock);
+       percpu_down_read_preempt_disable(&c->usage_lock);
        if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
            gc_will_visit(c, pos))
                flags |= BCH_BUCKET_MARK_GC_WILL_VISIT;
 
+       if (!stats)
+               stats = this_cpu_ptr(c->usage_percpu);
+
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED: {
@@ -673,42 +689,37 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
                break;
        }
        }
-       lg_local_unlock(&c->usage_lock);
+       percpu_up_read_preempt_enable(&c->usage_lock);
 }
 
 /* Disk reservations: */
 
 static u64 __recalc_sectors_available(struct bch_fs *c)
 {
-       u64 avail;
        int cpu;
 
        for_each_possible_cpu(cpu)
                per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0;
 
-       avail = c->capacity - bch2_fs_sectors_used(c, bch2_fs_usage_read(c));
-
-       avail <<= RESERVE_FACTOR;
-       avail /= (1 << RESERVE_FACTOR) + 1;
-       return avail;
+       return bch2_fs_sectors_free(c, bch2_fs_usage_read(c));
 }
 
 /* Used by gc when it's starting: */
 void bch2_recalc_sectors_available(struct bch_fs *c)
 {
-       lg_global_lock(&c->usage_lock);
+       percpu_down_write(&c->usage_lock);
        atomic64_set(&c->sectors_available, __recalc_sectors_available(c));
-       lg_global_unlock(&c->usage_lock);
+       percpu_up_write(&c->usage_lock);
 }
 
 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
 {
-       lg_local_lock(&c->usage_lock);
+       percpu_down_read_preempt_disable(&c->usage_lock);
        this_cpu_sub(c->usage_percpu->online_reserved,
                     res->sectors);
 
        bch2_fs_stats_verify(c);
-       lg_local_unlock(&c->usage_lock);
+       percpu_up_read_preempt_enable(&c->usage_lock);
 
        res->sectors = 0;
 }
@@ -723,9 +734,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
        s64 sectors_available;
        int ret;
 
-       sectors *= res->nr_replicas;
-
-       lg_local_lock(&c->usage_lock);
+       percpu_down_read_preempt_disable(&c->usage_lock);
        stats = this_cpu_ptr(c->usage_percpu);
 
        if (sectors <= stats->available_cache)
@@ -737,7 +746,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                get = min((u64) sectors + SECTORS_CACHE, old);
 
                if (get < sectors) {
-                       lg_local_unlock(&c->usage_lock);
+                       percpu_up_read_preempt_enable(&c->usage_lock);
                        goto recalculate;
                }
        } while ((v = atomic64_cmpxchg(&c->sectors_available,
@@ -752,7 +761,7 @@ out:
 
        bch2_disk_reservations_verify(c, flags);
        bch2_fs_stats_verify(c);
-       lg_local_unlock(&c->usage_lock);
+       percpu_up_read_preempt_enable(&c->usage_lock);
        return 0;
 
 recalculate:
@@ -772,8 +781,8 @@ recalculate:
                else if (!down_read_trylock(&c->gc_lock))
                        return -EINTR;
        }
-       lg_global_lock(&c->usage_lock);
 
+       percpu_down_write(&c->usage_lock);
        sectors_available = __recalc_sectors_available(c);
 
        if (sectors <= sectors_available ||
@@ -791,22 +800,171 @@ recalculate:
        }
 
        bch2_fs_stats_verify(c);
-       lg_global_unlock(&c->usage_lock);
+       percpu_up_write(&c->usage_lock);
+
        if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
                up_read(&c->gc_lock);
 
        return ret;
 }
 
-int bch2_disk_reservation_get(struct bch_fs *c,
-                            struct disk_reservation *res,
-                            unsigned sectors, int flags)
+/* Startup/shutdown: */
+
+static void buckets_free_rcu(struct rcu_head *rcu)
 {
-       res->sectors = 0;
-       res->gen = c->capacity_gen;
-       res->nr_replicas = (flags & BCH_DISK_RESERVATION_METADATA)
-               ? c->opts.metadata_replicas
-               : c->opts.data_replicas;
+       struct bucket_array *buckets =
+               container_of(rcu, struct bucket_array, rcu);
+
+       kvpfree(buckets,
+               sizeof(struct bucket_array) +
+               buckets->nbuckets * sizeof(struct bucket));
+}
+
+int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+       struct bucket_array *buckets = NULL, *old_buckets = NULL;
+       unsigned long *buckets_dirty = NULL;
+       u8 *oldest_gens = NULL;
+       alloc_fifo      free[RESERVE_NR];
+       alloc_fifo      free_inc;
+       alloc_heap      alloc_heap;
+       copygc_heap     copygc_heap;
+
+       size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
+                            ca->mi.bucket_size / c->opts.btree_node_size);
+       /* XXX: these should be tunable */
+       size_t reserve_none     = max_t(size_t, 4, ca->mi.nbuckets >> 9);
+       size_t copygc_reserve   = max_t(size_t, 16, ca->mi.nbuckets >> 7);
+       size_t free_inc_reserve = copygc_reserve / 2;
+       bool resize = ca->buckets != NULL,
+            start_copygc = ca->copygc_thread != NULL;
+       int ret = -ENOMEM;
+       unsigned i;
+
+       memset(&free,           0, sizeof(free));
+       memset(&free_inc,       0, sizeof(free_inc));
+       memset(&alloc_heap,     0, sizeof(alloc_heap));
+       memset(&copygc_heap,    0, sizeof(copygc_heap));
+
+       if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
+                                           nbuckets * sizeof(struct bucket),
+                                           GFP_KERNEL|__GFP_ZERO)) ||
+           !(oldest_gens       = kvpmalloc(nbuckets * sizeof(u8),
+                                           GFP_KERNEL|__GFP_ZERO)) ||
+           !(buckets_dirty     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+                                           sizeof(unsigned long),
+                                           GFP_KERNEL|__GFP_ZERO)) ||
+           !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
+           !init_fifo(&free[RESERVE_MOVINGGC],
+                      copygc_reserve, GFP_KERNEL) ||
+           !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+           !init_fifo(&free_inc,       free_inc_reserve, GFP_KERNEL) ||
+           !init_heap(&alloc_heap,     free_inc_reserve, GFP_KERNEL) ||
+           !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
+               goto err;
+
+       buckets->first_bucket   = ca->mi.first_bucket;
+       buckets->nbuckets       = nbuckets;
+
+       bch2_copygc_stop(ca);
+
+       if (resize) {
+               down_write(&c->gc_lock);
+               down_write(&ca->bucket_lock);
+               percpu_down_write(&c->usage_lock);
+       }
+
+       old_buckets = bucket_array(ca);
+
+       if (resize) {
+               size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
+
+               memcpy(buckets->b,
+                      old_buckets->b,
+                      n * sizeof(struct bucket));
+               memcpy(oldest_gens,
+                      ca->oldest_gens,
+                      n * sizeof(u8));
+               memcpy(buckets_dirty,
+                      ca->buckets_dirty,
+                      BITS_TO_LONGS(n) * sizeof(unsigned long));
+       }
+
+       rcu_assign_pointer(ca->buckets, buckets);
+       buckets = old_buckets;
+
+       swap(ca->oldest_gens, oldest_gens);
+       swap(ca->buckets_dirty, buckets_dirty);
+
+       if (resize)
+               percpu_up_write(&c->usage_lock);
+
+       spin_lock(&c->freelist_lock);
+       for (i = 0; i < RESERVE_NR; i++) {
+               fifo_move(&free[i], &ca->free[i]);
+               swap(ca->free[i], free[i]);
+       }
+       fifo_move(&free_inc, &ca->free_inc);
+       swap(ca->free_inc, free_inc);
+       spin_unlock(&c->freelist_lock);
+
+       /* with gc lock held, alloc_heap can't be in use: */
+       swap(ca->alloc_heap, alloc_heap);
+
+       /* and we shut down copygc: */
+       swap(ca->copygc_heap, copygc_heap);
+
+       nbuckets = ca->mi.nbuckets;
+
+       if (resize) {
+               up_write(&ca->bucket_lock);
+               up_write(&c->gc_lock);
+       }
+
+       if (start_copygc &&
+           bch2_copygc_start(c, ca))
+               bch_err(ca, "error restarting copygc thread");
+
+       ret = 0;
+err:
+       free_heap(&copygc_heap);
+       free_heap(&alloc_heap);
+       free_fifo(&free_inc);
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&free[i]);
+       kvpfree(buckets_dirty,
+               BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+       kvpfree(oldest_gens,
+               nbuckets * sizeof(u8));
+       if (buckets)
+               call_rcu(&old_buckets->rcu, buckets_free_rcu);
+
+       return ret;
+}
+
+void bch2_dev_buckets_free(struct bch_dev *ca)
+{
+       unsigned i;
+
+       free_heap(&ca->copygc_heap);
+       free_heap(&ca->alloc_heap);
+       free_fifo(&ca->free_inc);
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&ca->free[i]);
+       kvpfree(ca->buckets_dirty,
+               BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+       kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
+       kvpfree(rcu_dereference_protected(ca->buckets, 1),
+               sizeof(struct bucket_array) +
+               ca->mi.nbuckets * sizeof(struct bucket));
+
+       free_percpu(ca->usage_percpu);
+}
+
+int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+       if (!(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)))
+               return -ENOMEM;
 
-       return bch2_disk_reservation_add(c, res, sectors, flags);
+       return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
 }