Update bcachefs sources to 3f3f969859 bcachefs: Fix some compiler warnings
diff --git a/libbcachefs/rebalance.c b/libbcachefs/rebalance.c
index dc6ca94dc556ef63d1ca39b8c03319a0a13dcd74..a573fede05b11fba7a5ada92b9bbfae322608612 100644
--- a/libbcachefs/rebalance.c
+++ b/libbcachefs/rebalance.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
 #include <linux/sched/cputime.h>
 #include <trace/events/bcachefs.h>
 
-static inline bool rebalance_ptr_pred(struct bch_fs *c,
-                                     struct extent_ptr_decoded p,
-                                     struct bch_io_opts *io_opts)
+/*
+ * Check if an extent should be moved:
+ * returns -1 if it should not be moved, or
+ * device of pointer that should be moved, if known, or INT_MAX if unknown
+ */
+static int __bch2_rebalance_pred(struct bch_fs *c,
+                                struct bkey_s_c k,
+                                struct bch_io_opts *io_opts)
 {
-       if (io_opts->background_target &&
-           !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
-           !p.ptr.cached)
-               return true;
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       const union bch_extent_entry *entry;
+       struct extent_ptr_decoded p;
 
        if (io_opts->background_compression &&
-           p.crc.compression_type !=
-           bch2_compression_opt_to_type[io_opts->background_compression])
-               return true;
-
-       return false;
+           !bch2_bkey_is_incompressible(k))
+               bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+                       if (!p.ptr.cached &&
+                           p.crc.compression_type !=
+                           bch2_compression_opt_to_type[io_opts->background_compression])
+                               return p.ptr.dev;
+
+       if (io_opts->background_target)
+               bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+                       if (!p.ptr.cached &&
+                           !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target))
+                               return p.ptr.dev;
+
+       return -1;
 }
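
(Aside, not part of the diff: the tri-state return documented in the new comment can be decoded as below. pred_to_str() is a hypothetical illustration, not upstream code.)

	/* Sketch: decode __bch2_rebalance_pred()'s tri-state return. */
	static const char *pred_to_str(int ret)
	{
		if (ret < 0)
			return "leave extent alone";
		if (ret == INT_MAX)			/* device unknown */
			return "move, account to work_unknown_dev";
		return "move, charge device ret";	/* 0 <= ret < INT_MAX */
	}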
 
 void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
 {
-       const union bch_extent_entry *entry;
-       struct extent_ptr_decoded p;
-       struct bkey_s_c_extent e;
+       atomic64_t *counter;
+       int dev;
 
-       if (!bkey_extent_is_data(k.k))
+       dev = __bch2_rebalance_pred(c, k, io_opts);
+       if (dev < 0)
                return;
 
-       if (!io_opts->background_target &&
-           !io_opts->background_compression)
-               return;
-
-       e = bkey_s_c_to_extent(k);
+       counter = dev < INT_MAX
+               ? &bch_dev_bkey_exists(c, dev)->rebalance_work
+               : &c->rebalance.work_unknown_dev;
 
-       extent_for_each_ptr_decode(e, p, entry)
-               if (rebalance_ptr_pred(c, p, io_opts)) {
-                       struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-
-                       if (atomic64_add_return(p.crc.compressed_size,
-                                               &ca->rebalance_work) ==
-                           p.crc.compressed_size)
-                               rebalance_wakeup(c);
-               }
-}
-
-void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
-{
-       if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
-           sectors)
+       if (atomic64_add_return(k.k->size, counter) == k.k->size)
                rebalance_wakeup(c);
 }
 
@@ -73,32 +71,23 @@ static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
 {
-       switch (k.k->type) {
-       case KEY_TYPE_extent: {
-               struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-               const union bch_extent_entry *entry;
-               struct extent_ptr_decoded p;
-
-               /* Make sure we have room to add a new pointer: */
-               if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
-                   BKEY_EXTENT_VAL_U64s_MAX)
-                       return DATA_SKIP;
-
-               extent_for_each_ptr_decode(e, p, entry)
-                       if (rebalance_ptr_pred(c, p, io_opts))
-                               goto found;
-
-               return DATA_SKIP;
-found:
+       if (__bch2_rebalance_pred(c, k, io_opts) >= 0) {
                data_opts->target               = io_opts->background_target;
+               data_opts->nr_replicas          = 1;
                data_opts->btree_insert_flags   = 0;
                return DATA_ADD_REPLICAS;
-       }
-       default:
+       } else {
                return DATA_SKIP;
        }
 }
 
+void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
+{
+       if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
+           sectors)
+               rebalance_wakeup(c);
+}
+
 struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
@@ -177,20 +166,24 @@ static int bch2_rebalance_thread(void *arg)
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
+       struct bch_move_stats move_stats;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
-       unsigned long io_start;
+       u64 io_start;
        long throttle;
 
        set_freezable();
 
-       io_start        = atomic_long_read(&clock->now);
+       io_start        = atomic64_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();
 
+       bch_move_stats_init(&move_stats, "rebalance");
        while (!kthread_wait_freezable(r->enabled)) {
+               cond_resched();
+
                start                   = jiffies;
                cputime                 = curr_cputime();
 
@@ -214,17 +207,21 @@ static int bch2_rebalance_thread(void *arg)
                        prev_run_time;
 
                if (w.dev_most_full_percent < 20 && throttle > 0) {
-                       r->state = REBALANCE_THROTTLED;
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);
-                       r->throttled_until_cputime = start + throttle;
 
-                       bch2_kthread_io_clock_wait(clock,
-                               r->throttled_until_iotime,
-                               throttle);
-                       continue;
+                       if (atomic64_read(&clock->now) + clock->max_slop <
+                           r->throttled_until_iotime) {
+                               r->throttled_until_cputime = start + throttle;
+                               r->state = REBALANCE_THROTTLED;
+
+                               bch2_kthread_io_clock_wait(clock,
+                                       r->throttled_until_iotime,
+                                       throttle);
+                               continue;
+                       }
                }
 
                /* minimum 1 mb/sec: */
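
(Aside: a worked example of the throttle window computed above, with illustrative numbers that are not from the diff.)

	/*
	 * dev_most_full_capacity = 2147483648 sectors (1 TiB),
	 * dev_most_full_percent  = 10:
	 *
	 *	div_u64(2147483648ULL * (20 - 10), 50) == 429496729 sectors
	 *
	 * i.e. roughly 205 GiB on the write I/O clock. With the new check,
	 * the thread only goes to sleep if clock->now + clock->max_slop
	 * still falls short of that deadline; otherwise it keeps running.
	 */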
@@ -234,66 +231,61 @@ static int bch2_rebalance_thread(void *arg)
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));
 
-               io_start        = atomic_long_read(&clock->now);
+               io_start        = atomic64_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;
 
                r->state = REBALANCE_RUNNING;
-               memset(&r->move_stats, 0, sizeof(r->move_stats));
+               memset(&move_stats, 0, sizeof(move_stats));
                rebalance_work_reset(c);
 
                bch2_move_data(c,
+                              0,               POS_MIN,
+                              BTREE_ID_NR,     POS_MAX,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
-                              POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
-                              &r->move_stats);
+                              &move_stats);
        }
 
        return 0;
 }
 
-ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
+void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
 {
-       struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];
 
-       bch2_hprint(h1, w.dev_most_full_work << 9);
-       bch2_hprint(h2, w.dev_most_full_capacity << 9);
-       pr_buf(&out, "fullest_dev (%i):\t%s/%s\n",
+       bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
+       bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
+       pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
               w.dev_most_full_idx, h1, h2);
 
-       bch2_hprint(h1, w.total_work << 9);
-       bch2_hprint(h2, c->capacity << 9);
-       pr_buf(&out, "total work:\t\t%s/%s\n", h1, h2);
+       bch2_hprint(&PBUF(h1), w.total_work << 9);
+       bch2_hprint(&PBUF(h2), c->capacity << 9);
+       pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);
 
-       pr_buf(&out, "rate:\t\t\t%u\n", r->pd.rate.rate);
+       pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate);
 
        switch (r->state) {
        case REBALANCE_WAITING:
-               pr_buf(&out, "waiting\n");
+               pr_buf(out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
-               bch2_hprint(h1,
+               bch2_hprint(&PBUF(h1),
                            (r->throttled_until_iotime -
-                            atomic_long_read(&c->io_clock[WRITE].now)) << 9);
-               pr_buf(&out, "throttled for %lu sec or %s io\n",
+                            atomic64_read(&c->io_clock[WRITE].now)) << 9);
+               pr_buf(out, "throttled for %lu sec or %s io\n",
                       (r->throttled_until_cputime - jiffies) / HZ,
                       h1);
                break;
        case REBALANCE_RUNNING:
-               pr_buf(&out, "running\n");
-               pr_buf(&out, "pos %llu:%llu\n",
-                      r->move_stats.iter.pos.inode,
-                      r->move_stats.iter.pos.offset);
+               pr_buf(out, "running\n");
                break;
        }
-
-       return out.pos - buf;
 }
 
 void bch2_rebalance_stop(struct bch_fs *c)
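
(Aside: the _PBUF()/length-return plumbing deleted above moves to the sysfs caller rather than disappearing. A sketch of such a caller, reusing only helpers visible in this diff; show_rebalance_work() itself is hypothetical:)

	static ssize_t show_rebalance_work(struct bch_fs *c, char *buf)
	{
		struct printbuf out = _PBUF(buf, PAGE_SIZE);

		bch2_rebalance_work_to_text(&out, c);
		return out.pos - buf;
	}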
@@ -319,12 +311,17 @@ int bch2_rebalance_start(struct bch_fs *c)
 {
        struct task_struct *p;
 
+       if (c->rebalance.thread)
+               return 0;
+
        if (c->opts.nochanges)
                return 0;
 
-       p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
-       if (IS_ERR(p))
+       p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
+       if (IS_ERR(p)) {
+               bch_err(c, "error creating rebalance thread: %li", PTR_ERR(p));
                return PTR_ERR(p);
+       }
 
        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
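
(Aside: bch2_rebalance_stop()'s body is outside this hunk, but the get_task_struct()/rcu_assign_pointer() pairing above implies the usual teardown shape. The sketch below is an assumption, not the upstream body.)

	static void rebalance_stop_sketch(struct bch_fs *c)
	{
		struct task_struct *p = c->rebalance.thread;

		rcu_assign_pointer(c->rebalance.thread, NULL);

		if (p) {
			kthread_stop(p);
			put_task_struct(p);	/* drop the ref taken at start */
		}
	}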