// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "errcode.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

/*
 * Check whether an extent should be moved: returns true if so, with
 * data_opts->rewrite_ptrs set to a bitmask of the pointers that need
 * rewriting (because they need recompressing, or because they don't live on
 * the background target); returns false otherwise.
 */
static bool rebalance_pred(struct bch_fs *c, void *arg,
                           struct bkey_s_c k,
                           struct bch_io_opts *io_opts,
                           struct data_update_opts *data_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned i;

        data_opts->rewrite_ptrs         = 0;
        data_opts->target               = io_opts->background_target;
        data_opts->extra_replicas       = 0;
        data_opts->btree_insert_flags   = 0;

        if (io_opts->background_compression &&
            !bch2_bkey_is_incompressible(k)) {
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                i = 0;
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                        if (!p.ptr.cached &&
                            p.crc.compression_type !=
                            bch2_compression_opt_to_type[io_opts->background_compression])
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        if (io_opts->background_target) {
                const struct bch_extent_ptr *ptr;

                i = 0;
                bkey_for_each_ptr(ptrs, ptr) {
                        if (!ptr->cached &&
                            !bch2_dev_in_target(c, ptr->dev, io_opts->background_target))
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        return data_opts->rewrite_ptrs != 0;
}

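/*
 * Called on extent update: if rebalance_pred() says the extent will need to
 * be rewritten, account its size against the rebalance_work counter of each
 * device holding a pointer that needs rewriting, and wake the rebalance
 * thread the first time a counter goes nonzero.
 */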
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        struct data_update_opts update_opts = { 0 };
        struct bkey_ptrs_c ptrs;
        const struct bch_extent_ptr *ptr;
        unsigned i;

        if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
                return;

        i = 0;
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr(ptrs, ptr) {
                if ((1U << i) & update_opts.rewrite_ptrs)
                        if (atomic64_add_return(k.k->size,
                                        &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
                            k.k->size)
                                rebalance_wakeup(c);
                i++;
        }
}

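/*
 * Account rebalance work that can't be attributed to a particular device;
 * wakes the rebalance thread when the counter goes from zero to nonzero.
 */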
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

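/*
 * Summary of outstanding rebalance work: the device with the highest fraction
 * of its capacity awaiting rebalance, plus the total across all devices.
 */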
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

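/*
 * Fold one device's pending work into the summary: additions saturate at
 * U64_MAX instead of overflowing, and work is clamped to the device's
 * capacity before computing how full it is.
 */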
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

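/*
 * Main rebalance loop: sleep while there's no work, throttle CPU and IO usage
 * when the fullest device is only lightly loaded, and otherwise scan the
 * whole key space with bch2_move_data(), using rebalance_pred() to pick which
 * extents to rewrite.
 */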
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        struct bch_move_stats move_stats;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        u64 io_start;
        long throttle;

        set_freezable();

        io_start        = atomic64_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        bch2_move_stats_init(&move_stats, "rebalance");
        while (!kthread_wait_freezable(r->enabled)) {
                cond_resched();

                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

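                /*
                 * throttle is how much longer the last iteration would have
                 * needed to take for our CPU usage to stay proportional to
                 * how full the fullest device is; when that device is under
                 * 20% full, back off and wait on the IO clock, with the
                 * CPU-time deficit as the wakeup timeout.
                 */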
                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);

                        if (atomic64_read(&clock->now) + clock->max_slop <
                            r->throttled_until_iotime) {
                                r->throttled_until_cputime = start + throttle;
                                r->state = REBALANCE_THROTTLED;

                                bch2_kthread_io_clock_wait(clock,
                                        r->throttled_until_iotime,
                                        throttle);
                                continue;
                        }
                }

                /* minimum 1 MB/sec: */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic64_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&move_stats, 0, sizeof(move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               0,               POS_MIN,
                               BTREE_ID_NR,     POS_MAX,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               &move_stats,
                               writepoint_ptr(&c->rebalance_write_point),
                               true,
                               rebalance_pred, NULL);
        }

        return 0;
}

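/*
 * Print a human-readable summary of rebalance state: fullest device, total
 * work outstanding, current rate, and whether the thread is waiting,
 * throttled, or running.
 */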
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);

        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 20);

        prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
        prt_tab(out);

        prt_human_readable_u64(out, w.dev_most_full_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
        prt_newline(out);

        prt_printf(out, "total work:");
        prt_tab(out);

        prt_human_readable_u64(out, w.total_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, c->capacity << 9);
        prt_newline(out);

        prt_printf(out, "rate:");
        prt_tab(out);
        prt_printf(out, "%u", r->pd.rate.rate);
        prt_newline(out);

        switch (r->state) {
        case REBALANCE_WAITING:
                prt_printf(out, "waiting");
                break;
        case REBALANCE_THROTTLED:
                prt_printf(out, "throttled for %lu sec or ",
                       (r->throttled_until_cputime - jiffies) / HZ);
                prt_human_readable_u64(out,
                            (r->throttled_until_iotime -
                             atomic64_read(&c->io_clock[WRITE].now)) << 9);
                prt_printf(out, " io");
                break;
        case REBALANCE_RUNNING:
                prt_printf(out, "running");
                break;
        }
        prt_newline(out);
}

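/*
 * Stop the rebalance thread: crank the rate limiter to maximum and reset it
 * so a throttled thread isn't left waiting on it, then clear the thread
 * pointer and wait for RCU readers (see rebalance_wakeup()) before stopping
 * the kthread.
 */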
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

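/*
 * Start the rebalance thread, unless it's already running or the filesystem
 * was mounted with nochanges.
 */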
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;
        int ret;

        if (c->rebalance.thread)
                return 0;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
        ret = PTR_ERR_OR_ZERO(p);
        if (ret) {
                bch_err(c, "error creating rebalance thread: %s", bch2_err_str(ret));
                return ret;
        }

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

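/*
 * work_unknown_dev starts out at S64_MAX so that a freshly started rebalance
 * thread always sees outstanding work and does an initial scan;
 * rebalance_work_reset() zeroes it once that scan begins.
 */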
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}