Update bcachefs sources to 841a95c29f4c bcachefs: fix userspace build errors

diff --git a/libbcachefs/movinggc.c b/libbcachefs/movinggc.c
index d414ee94cc2c3677da2468c512f4871911c25a30..fd239a261aca054b22072716e6d2ef7d49791e6a 100644
--- a/libbcachefs/movinggc.c
+++ b/libbcachefs/movinggc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Moving/copying garbage collector
  *
  */
 
 #include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
 #include "btree_iter.h"
 #include "btree_update.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
 #include "clock.h"
-#include "disk_groups.h"
-#include "extents.h"
-#include "eytzinger.h"
-#include "io.h"
-#include "keylist.h"
+#include "errcode.h"
+#include "error.h"
+#include "lru.h"
 #include "move.h"
 #include "movinggc.h"
-#include "super-io.h"
+#include "trace.h"
 
-#include <trace/events/bcachefs.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
 #include <linux/sched/task.h>
-#include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)                                    \
-       ((ca)->free[RESERVE_MOVINGGC].size / 2)
+struct buckets_in_flight {
+       struct rhashtable               table;
+       struct move_bucket_in_flight    *first;
+       struct move_bucket_in_flight    *last;
+       size_t                          nr;
+       size_t                          sectors;
+};
+
+static const struct rhashtable_params bch_move_bucket_params = {
+       .head_offset    = offsetof(struct move_bucket_in_flight, hash),
+       .key_offset     = offsetof(struct move_bucket_in_flight, bucket.k),
+       .key_len        = sizeof(struct move_bucket_key),
+};
+
+static struct move_bucket_in_flight *
+move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
+{
+       struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
+       int ret;
 
-/*
- * Max sectors to move per iteration: Have to take into account internal
- * fragmentation from the multiple write points for each generation:
- */
-#define COPYGC_SECTORS_PER_ITER(ca)                                    \
-       ((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
+       if (!new)
+               return ERR_PTR(-ENOMEM);
 
-static inline int sectors_used_cmp(copygc_heap *heap,
-                                  struct copygc_heap_entry l,
-                                  struct copygc_heap_entry r)
-{
-       return (l.sectors > r.sectors) - (l.sectors < r.sectors);
+       new->bucket = b;
+
+       ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
+                                           bch_move_bucket_params);
+       if (ret) {
+               kfree(new);
+               return ERR_PTR(ret);
+       }
+
+       if (!list->first)
+               list->first = new;
+       else
+               list->last->next = new;
+
+       list->last = new;
+       list->nr++;
+       list->sectors += b.sectors;
+       return new;
 }
 
-static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+                                 struct move_bucket *b, u64 time)
 {
-       const struct copygc_heap_entry *l = _l;
-       const struct copygc_heap_entry *r = _r;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct bch_alloc_v4 _a;
+       const struct bch_alloc_v4 *a;
+       int ret;
+
+       if (bch2_bucket_is_open(trans->c,
+                               b->k.bucket.inode,
+                               b->k.bucket.offset))
+               return 0;
+
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+                              b->k.bucket, BTREE_ITER_CACHED);
+       ret = bkey_err(k);
+       if (ret)
+               return ret;
+
+       a = bch2_alloc_to_v4(k, &_a);
+       b->k.gen        = a->gen;
+       b->sectors      = bch2_bucket_sectors_dirty(*a);
+
+       ret = data_type_movable(a->data_type) &&
+               a->fragmentation_lru &&
+               a->fragmentation_lru <= time;
 
-       return (l->offset > r->offset) - (l->offset < r->offset);
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
 }
 
-static bool __copygc_pred(struct bch_dev *ca,
-                         struct bkey_s_c_extent e)
+static void move_buckets_wait(struct moving_context *ctxt,
+                             struct buckets_in_flight *list,
+                             bool flush)
 {
-       copygc_heap *h = &ca->copygc_heap;
-       const struct bch_extent_ptr *ptr =
-               bch2_extent_has_device(e, ca->dev_idx);
+       struct move_bucket_in_flight *i;
+       int ret;
+
+       while ((i = list->first)) {
+               if (flush)
+                       move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
+
+               if (atomic_read(&i->count))
+                       break;
 
-       if (ptr) {
-               struct copygc_heap_entry search = { .offset = ptr->offset };
+               list->first = i->next;
+               if (!list->first)
+                       list->last = NULL;
 
-               ssize_t i = eytzinger0_find_le(h->data, h->used,
-                                              sizeof(h->data[0]),
-                                              bucket_offset_cmp, &search);
+               list->nr--;
+               list->sectors -= i->bucket.sectors;
 
-               return (i >= 0 &&
-                       ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
-                       ptr->gen == h->data[i].gen);
+               ret = rhashtable_remove_fast(&list->table, &i->hash,
+                                            bch_move_bucket_params);
+               BUG_ON(ret);
+               kfree(i);
        }
 
-       return false;
+       bch2_trans_unlock_long(ctxt->trans);
 }
 
-static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
-                                enum bkey_type type,
-                                struct bkey_s_c_extent e,
-                                struct bch_io_opts *io_opts,
-                                struct data_opts *data_opts)
+static bool bucket_in_flight(struct buckets_in_flight *list,
+                            struct move_bucket_key k)
 {
-       struct bch_dev *ca = arg;
-
-       if (!__copygc_pred(ca, e))
-               return DATA_SKIP;
-
-       data_opts->target               = dev_to_target(ca->dev_idx);
-       data_opts->btree_insert_flags   = BTREE_INSERT_USE_RESERVE;
-       data_opts->rewrite_dev          = ca->dev_idx;
-       return DATA_REWRITE;
+       return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
 }
 
-static bool have_copygc_reserve(struct bch_dev *ca)
+typedef DARRAY(struct move_bucket) move_buckets;
+
+static int bch2_copygc_get_buckets(struct moving_context *ctxt,
+                       struct buckets_in_flight *buckets_in_flight,
+                       move_buckets *buckets)
 {
-       bool ret;
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
+       size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
+       int ret;
 
-       spin_lock(&ca->freelist_lock);
-       ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
-               ca->allocator_blocked;
-       spin_unlock(&ca->freelist_lock);
+       move_buckets_wait(ctxt, buckets_in_flight, false);
+
+       ret = bch2_btree_write_buffer_tryflush(trans);
+       if (bch2_err_matches(ret, EROFS))
+               return ret;
+
+       if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_tryflush()",
+                                __func__, bch2_err_str(ret)))
+               return ret;
+
+       ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+                                 lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+                                 lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+                                 0, k, ({
+               struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
+               int ret2 = 0;
+
+               saw++;
+
+               ret2 = bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p));
+               if (ret2 < 0)
+                       goto err;
+
+               if (!ret2)
+                       not_movable++;
+               else if (bucket_in_flight(buckets_in_flight, b.k))
+                       in_flight++;
+               else {
+                       ret2 = darray_push(buckets, b);
+                       if (ret2)
+                               goto err;
+                       sectors += b.sectors;
+               }
 
-       return ret;
+               ret2 = buckets->nr >= nr_to_get;
+err:
+               ret2;
+       }));
+
+       pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
+                buckets_in_flight->nr, buckets_in_flight->sectors,
+                saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
+
+       return ret < 0 ? ret : 0;
 }
 
-static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
+noinline
+static int bch2_copygc(struct moving_context *ctxt,
+                      struct buckets_in_flight *buckets_in_flight,
+                      bool *did_work)
 {
-       copygc_heap *h = &ca->copygc_heap;
-       struct copygc_heap_entry e, *i;
-       struct bucket_array *buckets;
-       struct bch_move_stats move_stats;
-       u64 sectors_to_move = 0, sectors_not_moved = 0;
-       u64 buckets_to_move, buckets_not_moved = 0;
-       size_t b;
-       int ret;
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
+       struct data_update_opts data_opts = {
+               .btree_insert_flags = BCH_WATERMARK_copygc,
+       };
+       move_buckets buckets = { 0 };
+       struct move_bucket_in_flight *f;
+       struct move_bucket *i;
+       u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+       int ret = 0;
+
+       ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
+       if (ret)
+               goto err;
+
+       darray_for_each(buckets, i) {
+               if (kthread_should_stop() || freezing(current))
+                       break;
 
-       memset(&move_stats, 0, sizeof(move_stats));
-       closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
-
-       /*
-        * Find buckets with lowest sector counts, skipping completely
-        * empty buckets, by building a maxheap sorted by sector count,
-        * and repeatedly replacing the maximum element until all
-        * buckets have been visited.
-        */
-       h->used = 0;
-
-       /*
-        * We need bucket marks to be up to date - gc can't be recalculating
-        * them:
-        */
-       down_read(&c->gc_lock);
-       down_read(&ca->bucket_lock);
-       buckets = bucket_array(ca);
-
-       for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
-               struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
-               struct copygc_heap_entry e;
-
-               if (m.owned_by_allocator ||
-                   m.data_type != BCH_DATA_USER ||
-                   !bucket_sectors_used(m) ||
-                   bucket_sectors_used(m) >= ca->mi.bucket_size)
+               f = move_bucket_in_flight_add(buckets_in_flight, *i);
+               ret = PTR_ERR_OR_ZERO(f);
+               if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
+                       ret = 0;
                        continue;
+               }
+               if (ret == -ENOMEM) { /* flush IO, continue later */
+                       ret = 0;
+                       break;
+               }
+
+               ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
+                                            f->bucket.k.gen, data_opts);
+               if (ret)
+                       goto err;
 
-               e = (struct copygc_heap_entry) {
-                       .gen            = m.gen,
-                       .sectors        = bucket_sectors_used(m),
-                       .offset         = bucket_to_sector(ca, b),
-               };
-               heap_add_or_replace(h, e, -sectors_used_cmp);
+               *did_work = true;
        }
-       up_read(&ca->bucket_lock);
-       up_read(&c->gc_lock);
+err:
+       darray_exit(&buckets);
 
-       for (i = h->data; i < h->data + h->used; i++)
-               sectors_to_move += i->sectors;
+       /* no entries in LRU btree found, or got to end: */
+       if (bch2_err_matches(ret, ENOENT))
+               ret = 0;
 
-       while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
-               BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
-               sectors_to_move -= e.sectors;
-       }
+       if (ret < 0 && !bch2_err_matches(ret, EROFS))
+               bch_err_msg(c, ret, "from bch2_move_data()");
 
-       buckets_to_move = h->used;
+       moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+       trace_and_count(c, copygc, c, moved, 0, 0, 0);
+       return ret;
+}
 
-       if (!buckets_to_move)
-               return;
+/*
+ * Copygc runs when the amount of fragmented data is above some arbitrary
+ * threshold:
+ *
+ * The threshold at the limit - when the device is full - is the amount of space
+ * we reserved in bch2_recalc_capacity; we can't have more than that amount of
+ * disk space stranded due to fragmentation and store everything we have
+ * promised to store.
+ *
+ * But we don't want to be running copygc unnecessarily when the device still
+ * has plenty of free space - rather, we want copygc to smoothly run every so
+ * often and continually reduce the amount of fragmented space as the device
+ * fills up. So, we increase the threshold by half the current free space.
+ */
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       unsigned dev_idx;
+       s64 wait = S64_MAX, fragmented_allowed, fragmented;
+       unsigned i;
 
-       eytzinger0_sort(h->data, h->used,
-                       sizeof(h->data[0]),
-                       bucket_offset_cmp, NULL);
+       for_each_rw_member(ca, c, dev_idx) {
+               struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-       ret = bch2_move_data(c, &ca->copygc_pd.rate,
-                            writepoint_ptr(&ca->copygc_write_point),
-                            POS_MIN, POS_MAX,
-                            copygc_pred, ca,
-                            &move_stats);
+               fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
+                                      ca->mi.bucket_size) >> 1);
+               fragmented = 0;
 
-       down_read(&ca->bucket_lock);
-       buckets = bucket_array(ca);
-       for (i = h->data; i < h->data + h->used; i++) {
-               size_t b = sector_to_bucket(ca, i->offset);
-               struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+               for (i = 0; i < BCH_DATA_NR; i++)
+                       if (data_type_movable(i))
+                               fragmented += usage.d[i].fragmented;
 
-               if (i->gen == m.gen && bucket_sectors_used(m)) {
-                       sectors_not_moved += bucket_sectors_used(m);
-                       buckets_not_moved++;
-               }
+               wait = min(wait, max(0LL, fragmented_allowed - fragmented));
        }
-       up_read(&ca->bucket_lock);
 
-       if (sectors_not_moved && !ret)
-               bch_warn(c, "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved",
-                        sectors_not_moved, sectors_to_move,
-                        buckets_not_moved, buckets_to_move);
+       return wait;
+}
 
-       trace_copygc(ca,
-                    atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
-                    buckets_to_move, buckets_not_moved);
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       prt_printf(out, "Currently waiting for:     ");
+       prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+                                       atomic64_read(&c->io_clock[WRITE].now)) << 9);
+       prt_newline(out);
+
+       prt_printf(out, "Currently waiting since:   ");
+       prt_human_readable_u64(out, max(0LL,
+                                       atomic64_read(&c->io_clock[WRITE].now) -
+                                       c->copygc_wait_at) << 9);
+       prt_newline(out);
+
+       prt_printf(out, "Currently calculated wait: ");
+       prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+       prt_newline(out);
 }
 
 static int bch2_copygc_thread(void *arg)
 {
-       struct bch_dev *ca = arg;
-       struct bch_fs *c = ca->fs;
+       struct bch_fs *c = arg;
+       struct moving_context ctxt;
+       struct bch_move_stats move_stats;
        struct io_clock *clock = &c->io_clock[WRITE];
-       struct bch_dev_usage usage;
-       unsigned long last;
-       u64 available, fragmented, reserve, next;
+       struct buckets_in_flight *buckets;
+       u64 last, wait;
+       int ret = 0;
+
+       buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
+       if (!buckets)
+               return -ENOMEM;
+       ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
+       if (ret) {
+               kfree(buckets);
+               bch_err_msg(c, ret, "allocating copygc buckets in flight");
+               return ret;
+       }
 
        set_freezable();
 
-       while (!kthread_should_stop()) {
-               if (kthread_wait_freezable(c->copy_gc_enabled))
-                       break;
+       bch2_move_stats_init(&move_stats, "copygc");
+       bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+                             writepoint_ptr(&c->copygc_write_point),
+                             false);
 
-               last = atomic_long_read(&clock->now);
+       while (!ret && !kthread_should_stop()) {
+               bool did_work = false;
 
-               reserve = ca->copygc_threshold;
+               bch2_trans_unlock_long(ctxt.trans);
+               cond_resched();
 
-               usage = bch2_dev_usage_read(c, ca);
+               if (!c->copy_gc_enabled) {
+                       move_buckets_wait(&ctxt, buckets, true);
+                       kthread_wait_freezable(c->copy_gc_enabled ||
+                                              kthread_should_stop());
+               }
 
-               available = __dev_buckets_available(ca, usage) *
-                       ca->mi.bucket_size;
-               if (available > reserve) {
-                       next = last + available - reserve;
-                       bch2_kthread_io_clock_wait(clock, next,
-                                       MAX_SCHEDULE_TIMEOUT);
+               if (unlikely(freezing(current))) {
+                       move_buckets_wait(&ctxt, buckets, true);
+                       __refrigerator(false);
                        continue;
                }
 
-               /*
-                * don't start copygc until there's more than half the copygc
-                * reserve of fragmented space:
-                */
-               fragmented = usage.sectors_fragmented;
-               if (fragmented < reserve) {
-                       next = last + reserve - fragmented;
-                       bch2_kthread_io_clock_wait(clock, next,
+               last = atomic64_read(&clock->now);
+               wait = bch2_copygc_wait_amount(c);
+
+               if (wait > clock->max_slop) {
+                       c->copygc_wait_at = last;
+                       c->copygc_wait = last + wait;
+                       move_buckets_wait(&ctxt, buckets, true);
+                       trace_and_count(c, copygc_wait, c, wait, last + wait);
+                       bch2_kthread_io_clock_wait(clock, last + wait,
                                        MAX_SCHEDULE_TIMEOUT);
                        continue;
                }
 
-               bch2_copygc(c, ca);
+               c->copygc_wait = 0;
+
+               c->copygc_running = true;
+               ret = bch2_copygc(&ctxt, buckets, &did_work);
+               c->copygc_running = false;
+
+               wake_up(&c->copygc_running_wq);
+
+               if (!wait && !did_work) {
+                       u64 min_member_capacity = bch2_min_rw_member_capacity(c);
+
+                       if (min_member_capacity == U64_MAX)
+                               min_member_capacity = 128 * 2048;
+
+                       bch2_trans_unlock_long(ctxt.trans);
+                       bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
+                                       MAX_SCHEDULE_TIMEOUT);
+               }
        }
 
+       move_buckets_wait(&ctxt, buckets, true);
+
+       rhashtable_destroy(&buckets->table);
+       kfree(buckets);
+       bch2_moving_ctxt_exit(&ctxt);
+       bch2_move_stats_exit(&move_stats, c);
+
        return 0;
 }
 
-void bch2_copygc_stop(struct bch_dev *ca)
+void bch2_copygc_stop(struct bch_fs *c)
 {
-       ca->copygc_pd.rate.rate = UINT_MAX;
-       bch2_ratelimit_reset(&ca->copygc_pd.rate);
-
-       if (ca->copygc_thread) {
-               kthread_stop(ca->copygc_thread);
-               put_task_struct(ca->copygc_thread);
+       if (c->copygc_thread) {
+               kthread_stop(c->copygc_thread);
+               put_task_struct(c->copygc_thread);
        }
-       ca->copygc_thread = NULL;
+       c->copygc_thread = NULL;
 }
 
-int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
+int bch2_copygc_start(struct bch_fs *c)
 {
        struct task_struct *t;
+       int ret;
 
-       BUG_ON(ca->copygc_thread);
+       if (c->copygc_thread)
+               return 0;
 
        if (c->opts.nochanges)
                return 0;
@@ -282,21 +421,23 @@ int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
        if (bch2_fs_init_fault("copygc_start"))
                return -ENOMEM;
 
-       t = kthread_create(bch2_copygc_thread, ca,
-                          "bch_copygc[%s]", ca->name);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
+       t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
+       ret = PTR_ERR_OR_ZERO(t);
+       if (ret) {
+               bch_err_msg(c, ret, "creating copygc thread");
+               return ret;
+       }
 
        get_task_struct(t);
 
-       ca->copygc_thread = t;
-       wake_up_process(ca->copygc_thread);
+       c->copygc_thread = t;
+       wake_up_process(c->copygc_thread);
 
        return 0;
 }
 
-void bch2_dev_copygc_init(struct bch_dev *ca)
+void bch2_fs_copygc_init(struct bch_fs *c)
 {
-       bch2_pd_controller_init(&ca->copygc_pd);
-       ca->copygc_pd.d_term = 0;
+       init_waitqueue_head(&c->copygc_running_wq);
+       c->copygc_running = false;
 }