// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

/*
 * Check if an extent should be moved: sets a bitmask in
 * data_opts->rewrite_ptrs of the pointers that need rewriting, and returns
 * true if any were found.
 */
static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i;

	data_opts->rewrite_ptrs		= 0;
	data_opts->target		= io_opts->background_target;
	data_opts->extra_replicas	= 0;
	data_opts->btree_insert_flags	= 0;

	if (io_opts->background_compression &&
	    !bch2_bkey_is_incompressible(k)) {
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		i = 0;
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (!p.ptr.cached &&
			    p.crc.compression_type !=
			    bch2_compression_opt_to_type(io_opts->background_compression))
				data_opts->rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	if (io_opts->background_target) {
		const struct bch_extent_ptr *ptr;

		i = 0;
		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached &&
			    !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
			    bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
				data_opts->rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	return data_opts->rewrite_ptrs != 0;
}
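
/*
 * Called from the write path to account pending rebalance work as extents
 * are created: k.k->size sectors are added to the rebalance_work counter of
 * each device holding a pointer flagged for rewrite, and the thread is woken
 * when a counter goes from zero to nonzero.
 */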
void bch2_rebalance_add_key(struct bch_fs *c,
			    struct bkey_s_c k,
			    struct bch_io_opts *io_opts)
{
	struct data_update_opts update_opts = { 0 };
	struct bkey_ptrs_c ptrs;
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
		return;

	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr(ptrs, ptr) {
		if ((1U << i) & update_opts.rewrite_ptrs)
			if (atomic64_add_return(k.k->size,
					&bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
			    k.k->size)
				rebalance_wakeup(c);
		i++;
	}
}
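
/*
 * Accounts work whose destination device isn't known yet; rebalance_work()
 * below counts it against every device.
 */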
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
	if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
	    sectors)
		rebalance_wakeup(c);
}

struct rebalance_work {
	int		dev_most_full_idx;
	unsigned	dev_most_full_percent;
	u64		dev_most_full_work;
	u64		dev_most_full_capacity;
	u64		total_work;
};
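
/*
 * Fold one device's pending work into the summary: saturate rather than wrap
 * on overflow, and track the fullest device by percentage of capacity.
 */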
static void rebalance_work_accumulate(struct rebalance_work *w,
		u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
	unsigned percent_full;
	u64 work = dev_work + unknown_dev;

	/* avoid divide by 0 */
	if (!capacity)
		return;

	if (work < dev_work || work < unknown_dev)
		work = U64_MAX;
	work = min(work, capacity);

	percent_full = div64_u64(work * 100, capacity);

	if (percent_full >= w->dev_most_full_percent) {
		w->dev_most_full_idx		= idx;
		w->dev_most_full_percent	= percent_full;
		w->dev_most_full_work		= work;
		w->dev_most_full_capacity	= capacity;
	}

	if (w->total_work + dev_work >= w->total_work &&
	    w->total_work + dev_work >= dev_work)
		w->total_work += dev_work;
}
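
/*
 * Take a snapshot of total pending work: per device counters plus
 * work_unknown_dev, which is counted against every device since we don't
 * know where that work will land.
 */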
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct rebalance_work ret = { .dev_most_full_idx = -1 };
	u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
	unsigned i;

	for_each_online_member(ca, c, i)
		rebalance_work_accumulate(&ret,
			atomic64_read(&ca->rebalance_work),
			unknown_dev,
			bucket_to_sector(ca, ca->mi.nbuckets -
					 ca->mi.first_bucket),
			i);

	rebalance_work_accumulate(&ret,
		unknown_dev, 0, c->capacity, -1);

	return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_online_member(ca, c, i)
		atomic64_set(&ca->rebalance_work, 0);

	atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

static unsigned long curr_cputime(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return nsecs_to_jiffies(utime + stime);
}
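
/*
 * The rebalance thread: repeatedly computes pending work and either sleeps,
 * throttles, or scans the whole keyspace with bch2_move_data(), using
 * rebalance_pred to pick the extents to rewrite.
 */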
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct rebalance_work w, p;
	struct bch_move_stats move_stats;
	unsigned long start, prev_start;
	unsigned long prev_run_time, prev_run_cputime;
	unsigned long cputime, prev_cputime;
	u64 io_start;
	long throttle;

	set_freezable();

	io_start	= atomic64_read(&clock->now);
	p		= rebalance_work(c);
	prev_start	= jiffies;
	prev_cputime	= curr_cputime();

	bch2_move_stats_init(&move_stats, "rebalance");
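	/*
	 * Each pass samples wall clock vs. cpu time consumed and the amount
	 * of pending work, then decides whether to wait, throttle, or run
	 * another scan.
	 */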
	while (!kthread_wait_freezable(r->enabled)) {
		cond_resched();

		start			= jiffies;
		cputime			= curr_cputime();

		prev_run_time		= start - prev_start;
		prev_run_cputime	= cputime - prev_cputime;

		w			= rebalance_work(c);
		BUG_ON(!w.dev_most_full_capacity);

		if (!w.total_work) {
			r->state = REBALANCE_WAITING;
			kthread_wait_freezable(rebalance_work(c).total_work);
			continue;
		}

		/*
		 * If there isn't much work to do, throttle cpu usage:
		 */
		throttle = prev_run_cputime * 100 /
			max(1U, w.dev_most_full_percent) -
			prev_run_time;

		if (w.dev_most_full_percent < 20 && throttle > 0) {
			r->throttled_until_iotime = io_start +
				div_u64(w.dev_most_full_capacity *
					(20 - w.dev_most_full_percent),
					50);

			if (atomic64_read(&clock->now) + clock->max_slop <
			    r->throttled_until_iotime) {
				r->throttled_until_cputime = start + throttle;
				r->state = REBALANCE_THROTTLED;

				bch2_kthread_io_clock_wait(clock,
					r->throttled_until_iotime,
					throttle);
				continue;
			}
		}

		/* minimum 1 mb/sec: */
		r->pd.rate.rate =
			max_t(u64, 1 << 11,
			      r->pd.rate.rate *
			      max(p.dev_most_full_percent, 1U) /
			      max(w.dev_most_full_percent, 1U));
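
		/* save this pass's measurements for the next pass's deltas: */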
		io_start	= atomic64_read(&clock->now);
		p		= w;
		prev_start	= start;
		prev_cputime	= cputime;

		r->state = REBALANCE_RUNNING;
		memset(&move_stats, 0, sizeof(move_stats));
		rebalance_work_reset(c);

		bch2_move_data(c,
			       0,		POS_MIN,
			       BTREE_ID_NR,	POS_MAX,
			       /* ratelimiting disabled for now */
			       NULL, /* &r->pd.rate, */
			       &move_stats,
			       writepoint_ptr(&c->rebalance_write_point),
			       true,
			       rebalance_pred, NULL);
	}

	return 0;
}
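
/*
 * Status output (shown via sysfs): work counters are in 512 byte sectors,
 * hence the << 9 to print byte counts.
 */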
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct rebalance_work w = rebalance_work(c);

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 20);

	prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
	prt_tab(out);

	prt_human_readable_u64(out, w.dev_most_full_work << 9);
	prt_printf(out, "/");
	prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
	prt_newline(out);

	prt_printf(out, "total work:");
	prt_tab(out);

	prt_human_readable_u64(out, w.total_work << 9);
	prt_printf(out, "/");
	prt_human_readable_u64(out, c->capacity << 9);
	prt_newline(out);

	prt_printf(out, "rate:");
	prt_tab(out);

	prt_printf(out, "%u", r->pd.rate.rate);
	prt_newline(out);

	switch (r->state) {
	case REBALANCE_WAITING:
		prt_printf(out, "waiting");
		break;
	case REBALANCE_THROTTLED:
		prt_printf(out, "throttled for %lu sec or ",
			   (r->throttled_until_cputime - jiffies) / HZ);
		prt_human_readable_u64(out,
			    (r->throttled_until_iotime -
			     atomic64_read(&c->io_clock[WRITE].now)) << 9);
		prt_printf(out, " io");
		break;
	case REBALANCE_RUNNING:
		prt_printf(out, "running");
		break;
	}
	prt_newline(out);
}
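
/*
 * The thread pointer is RCU protected: rebalance_wakeup() wakes the thread
 * from the write path under rcu_read_lock(), so stopping must NULL the
 * pointer and synchronize_rcu() before the task struct can go away.
 */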
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	if (ret) {
		bch_err(c, "error creating rebalance thread: %s", bch2_err_str(ret));
		return ret;
	}

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}
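
/*
 * work_unknown_dev starts at S64_MAX rather than 0: pending work isn't
 * persisted, so assume there may be work to do until a scan proves
 * otherwise; this makes the thread do an initial scan on startup.
 */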
void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);

	atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}