// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

/*
 * Check if an extent should be moved:
 * returns -1 if it should not be moved, or
 * device of pointer that should be moved, if known, or INT_MAX if unknown
 */
static int __bch2_rebalance_pred(struct bch_fs *c,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	if (io_opts->background_compression &&
	    !bch2_bkey_is_incompressible(k))
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached &&
			    p.crc.compression_type !=
			    bch2_compression_opt_to_type[io_opts->background_compression])
				return p.ptr.dev;

	if (io_opts->background_target)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached &&
			    !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target))
				return p.ptr.dev;

	return -1;
}
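
/*
 * Track a key that was just written: if the rebalance predicate says it
 * should be moved, account its size against the pending-work counter for the
 * device in question (or the unknown-device counter), and wake the rebalance
 * thread if that counter was previously empty.
 */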
void bch2_rebalance_add_key(struct bch_fs *c,
			    struct bkey_s_c k,
			    struct bch_io_opts *io_opts)
{
	atomic64_t *counter;
	int dev;

	dev = __bch2_rebalance_pred(c, k, io_opts);
	if (dev < 0)
		return;

	counter = dev < INT_MAX
		? &bch_dev_bkey_exists(c, dev)->rebalance_work
		: &c->rebalance.work_unknown_dev;

	if (atomic64_add_return(k.k->size, counter) == k.k->size)
		rebalance_wakeup(c);
}
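
/*
 * Predicate handed to the data move path: extents flagged by
 * __bch2_rebalance_pred() are rewritten to the background target.
 */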
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
				    struct bkey_s_c k,
				    struct bch_io_opts *io_opts,
				    struct data_opts *data_opts)
{
	if (__bch2_rebalance_pred(c, k, io_opts) >= 0) {
		data_opts->target		= io_opts->background_target;
		data_opts->nr_replicas		= 1;
		data_opts->btree_insert_flags	= 0;
		return DATA_ADD_REPLICAS;
	} else {
		return DATA_SKIP;
	}
}

/* Add pending work that isn't attributed to any particular device: */
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
	if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
	    sectors)
		rebalance_wakeup(c);
}
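
/*
 * Summary of pending rebalance work, used by the rebalance thread to decide
 * how hard to work: tracks the device that is fullest in terms of pending
 * work relative to its capacity, plus the filesystem-wide total.
 */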
struct rebalance_work {
	int		dev_most_full_idx;
	unsigned	dev_most_full_percent;
	u64		dev_most_full_work;
	u64		dev_most_full_capacity;
	u64		total_work;
};
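
/*
 * Fold one device's counters into @w. Work on an unknown device could end up
 * on any device, so it counts towards every device's fullness; it's only
 * added to the grand total once, by the final call with idx == -1.
 */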
static void rebalance_work_accumulate(struct rebalance_work *w,
		u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
	unsigned percent_full;
	u64 work = dev_work + unknown_dev;

	/* saturate on overflow, then clamp to capacity: */
	if (work < dev_work || work < unknown_dev)
		work = U64_MAX;
	work = min(work, capacity);

	percent_full = div64_u64(work * 100, capacity);

	if (percent_full >= w->dev_most_full_percent) {
		w->dev_most_full_idx		= idx;
		w->dev_most_full_percent	= percent_full;
		w->dev_most_full_work		= work;
		w->dev_most_full_capacity	= capacity;
	}

	/* only accumulate if the total won't overflow: */
	if (w->total_work + dev_work >= w->total_work &&
	    w->total_work + dev_work >= dev_work)
		w->total_work += dev_work;
}
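
/*
 * Compute the current work summary: per-device counters for every online
 * member device, then the unknown-device work accounted once more against
 * the whole filesystem's capacity.
 */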
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct rebalance_work ret = { .dev_most_full_idx = -1 };
	u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
	unsigned i;

	for_each_online_member(ca, c, i)
		rebalance_work_accumulate(&ret,
			atomic64_read(&ca->rebalance_work),
			unknown_dev,
			bucket_to_sector(ca, ca->mi.nbuckets -
					 ca->mi.first_bucket),
			i);

	rebalance_work_accumulate(&ret,
		unknown_dev, 0, c->capacity, -1);

	return ret;
}
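
/* Zero the work counters after kicking off a rebalance pass: */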
static void rebalance_work_reset(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_online_member(ca, c, i)
		atomic64_set(&ca->rebalance_work, 0);

	atomic64_set(&c->rebalance.work_unknown_dev, 0);
}
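
/* Total cpu time consumed by the current task so far, in jiffies: */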
static unsigned long curr_cputime(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return nsecs_to_jiffies(utime + stime);
}
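
/*
 * Main loop of the rebalance thread: sleep while there's no pending work,
 * throttle cpu usage while the fullest device is under 20% full, and scale
 * the rate limit by how the fullest device's percentage changed since the
 * previous iteration.
 */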
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct rebalance_work w, p;
	unsigned long start, prev_start;
	unsigned long prev_run_time, prev_run_cputime;
	unsigned long cputime, prev_cputime;
	unsigned long io_start;
	long throttle;

	set_freezable();

	io_start	= atomic_long_read(&clock->now);
	p		= rebalance_work(c);
	prev_start	= jiffies;
	prev_cputime	= curr_cputime();

	while (!kthread_wait_freezable(r->enabled)) {
		cond_resched();

		start			= jiffies;
		cputime			= curr_cputime();

		prev_run_time		= start - prev_start;
		prev_run_cputime	= cputime - prev_cputime;

		w			= rebalance_work(c);
		BUG_ON(!w.dev_most_full_capacity);

		if (!w.total_work) {
			r->state = REBALANCE_WAITING;
			kthread_wait_freezable(rebalance_work(c).total_work);
			continue;
		}

		/*
		 * If there isn't much work to do, throttle cpu usage:
		 */
		throttle = prev_run_cputime * 100 /
			max(1U, w.dev_most_full_percent) -
			prev_run_time;

		if (w.dev_most_full_percent < 20 && throttle > 0) {
			r->throttled_until_iotime = io_start +
				div_u64(w.dev_most_full_capacity *
					(20 - w.dev_most_full_percent),
					50);

			if (atomic_long_read(&clock->now) + clock->max_slop <
			    r->throttled_until_iotime) {
				r->throttled_until_cputime = start + throttle;
				r->state = REBALANCE_THROTTLED;

				bch2_kthread_io_clock_wait(clock,
					r->throttled_until_iotime,
					throttle);
				continue;
			}
		}

		/* minimum 1 mb/sec: */
		r->pd.rate.rate =
			max_t(u64, 1 << 11,
			      r->pd.rate.rate *
			      max(p.dev_most_full_percent, 1U) /
			      max(w.dev_most_full_percent, 1U));

		io_start	= atomic_long_read(&clock->now);
		p		= w;
		prev_start	= start;
		prev_cputime	= cputime;

		r->state = REBALANCE_RUNNING;
		memset(&r->move_stats, 0, sizeof(r->move_stats));
		rebalance_work_reset(c);

		bch2_move_data(c,
			       /* ratelimiting disabled for now */
			       NULL, /*  &r->pd.rate, */
			       writepoint_ptr(&c->rebalance_write_point),
			       POS_MIN, POS_MAX,
			       rebalance_pred, NULL,
			       &r->move_stats);
	}

	return 0;
}
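
/* Print rebalance status for sysfs: */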
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct rebalance_work w = rebalance_work(c);
	char h1[21], h2[21];

	bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
	bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
	pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
	       w.dev_most_full_idx, h1, h2);

	bch2_hprint(&PBUF(h1), w.total_work << 9);
	bch2_hprint(&PBUF(h2), c->capacity << 9);
	pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);

	pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate);

	switch (r->state) {
	case REBALANCE_WAITING:
		pr_buf(out, "waiting\n");
		break;
	case REBALANCE_THROTTLED:
		bch2_hprint(&PBUF(h1),
			    (r->throttled_until_iotime -
			     atomic_long_read(&c->io_clock[WRITE].now)) << 9);
		pr_buf(out, "throttled for %lu sec or %s io\n",
		       (r->throttled_until_cputime - jiffies) / HZ,
		       h1);
		break;
	case REBALANCE_RUNNING:
		pr_buf(out, "running\n");
		pr_buf(out, "pos %llu:%llu\n",
		       r->move_stats.pos.inode,
		       r->move_stats.pos.offset);
		break;
	}
}
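
/*
 * Shut down the rebalance thread: crank the rate limit all the way up so a
 * throttled thread wakes promptly, then clear the thread pointer before
 * stopping it, with an RCU grace period so we can't race with
 * rebalance_wakeup().
 */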
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}
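
/* Start the rebalance thread, unless the nochanges option is set: */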
int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}
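
/*
 * Initialize the rate limiter; the unknown-device counter starts at S64_MAX,
 * presumably so the thread assumes there is pending work until the counters
 * are recomputed.
 */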
void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);

	atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}