// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

/*
 * Decide whether an extent should be moved: returns true and sets
 * data_opts->rewrite_ptrs to a bitmask of the pointers that need rewriting,
 * either to apply background compression or to move data onto the
 * background target.
 */
static bool rebalance_pred(struct bch_fs *c, void *arg,
                           struct bkey_s_c k,
                           struct bch_io_opts *io_opts,
                           struct data_update_opts *data_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned i;

        data_opts->rewrite_ptrs         = 0;
        data_opts->target               = io_opts->background_target;
        data_opts->extra_replicas       = 0;
        data_opts->btree_insert_flags   = 0;

        if (io_opts->background_compression &&
            !bch2_bkey_is_incompressible(k)) {
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                i = 0;
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                        if (!p.ptr.cached &&
                            p.crc.compression_type !=
                            bch2_compression_opt_to_type(io_opts->background_compression))
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        if (io_opts->background_target) {
                const struct bch_extent_ptr *ptr;

                i = 0;
                bkey_for_each_ptr(ptrs, ptr) {
                        if (!ptr->cached &&
                            !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
                            bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        return data_opts->rewrite_ptrs != 0;
}

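/*
 * Account an extent that rebalance_pred() says needs rewriting: add its size
 * to the rebalance_work counter of each device holding a pointer marked for
 * rewrite, and wake the rebalance thread when a counter goes from zero to
 * nonzero.
 */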
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        struct data_update_opts update_opts = { 0 };
        struct bkey_ptrs_c ptrs;
        const struct bch_extent_ptr *ptr;
        unsigned i;

        if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
                return;

        i = 0;
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr(ptrs, ptr) {
                if ((1U << i) & update_opts.rewrite_ptrs)
                        if (atomic64_add_return(k.k->size,
                                        &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
                            k.k->size)
                                rebalance_wakeup(c);
                i++;
        }
}

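/*
 * Account rebalance work that can't be attributed to a specific device;
 * wake the rebalance thread when the counter goes from zero to nonzero.
 */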
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

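/*
 * Snapshot of pending rebalance work: per-device work is tracked so the
 * thread can pace itself against the fullest device; work and capacity are
 * in 512-byte sectors.
 */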
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

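/*
 * Fold one device's pending work (plus the work not attributed to any
 * device) into @w, tracking whichever device is proportionally fullest.
 */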
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        /* avoid divide by 0 */
        if (!capacity)
                return;

        /* saturate instead of wrapping on overflow: */
        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

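/*
 * Compute a snapshot of pending work across all online devices, counting the
 * work of unknown location against every device and against the whole
 * filesystem.
 */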
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

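/* Clear all pending work counters; called when a rebalance pass starts. */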
static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

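/* Total user + system cpu time consumed by the current thread, in jiffies. */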
static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

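/*
 * Main rebalance loop: sleep while there's no work or while throttled, then
 * walk all extents with bch2_move_data(), rewriting those that
 * rebalance_pred() flags.
 */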
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        struct bch_move_stats move_stats;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        u64 io_start;
        long throttle;

        set_freezable();

        io_start        = atomic64_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        bch2_move_stats_init(&move_stats, "rebalance");
        while (!kthread_wait_freezable(r->enabled)) {
                cond_resched();

                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage: aim to
                 * spend no more than dev_most_full_percent of wall clock time
                 * on cpu, based on cputime used in the previous iteration.
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);

                        if (atomic64_read(&clock->now) + clock->max_slop <
                            r->throttled_until_iotime) {
                                r->throttled_until_cputime = start + throttle;
                                r->state = REBALANCE_THROTTLED;

                                bch2_kthread_io_clock_wait(clock,
                                        r->throttled_until_iotime,
                                        throttle);
                                continue;
                        }
                }

                /*
                 * Scale the rate by how the fullest device's work changed
                 * since the previous pass, with a floor of 1 MB/sec
                 * (1 << 11 sectors of 512 bytes):
                 */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic64_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&move_stats, 0, sizeof(move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               0,               POS_MIN,
                               BTREE_ID_NR,     POS_MAX,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               &move_stats,
                               writepoint_ptr(&c->rebalance_write_point),
                               true,
                               rebalance_pred, NULL);
        }

        return 0;
}

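/* Report pending rebalance work, the current rate, and the thread's state. */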
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);

        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 20);

        prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
        prt_tab(out);

        prt_human_readable_u64(out, w.dev_most_full_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
        prt_newline(out);

        prt_printf(out, "total work:");
        prt_tab(out);

        prt_human_readable_u64(out, w.total_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, c->capacity << 9);
        prt_newline(out);

        prt_printf(out, "rate:");
        prt_tab(out);
        prt_printf(out, "%u", r->pd.rate.rate);
        prt_newline(out);

        switch (r->state) {
        case REBALANCE_WAITING:
                prt_printf(out, "waiting");
                break;
        case REBALANCE_THROTTLED:
                prt_printf(out, "throttled for %lu sec or ",
                       (r->throttled_until_cputime - jiffies) / HZ);
                prt_human_readable_u64(out,
                            (r->throttled_until_iotime -
                             atomic64_read(&c->io_clock[WRITE].now)) << 9);
                prt_printf(out, " io");
                break;
        case REBALANCE_RUNNING:
                prt_printf(out, "running");
                break;
        }
        prt_newline(out);
}

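/* Stop the rebalance thread, first unthrottling it so it can exit promptly. */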
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

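/* Start the rebalance thread, unless it's already running or nochanges is set. */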
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;
        int ret;

        if (c->rebalance.thread)
                return 0;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
        ret = PTR_ERR_OR_ZERO(p);
        if (ret) {
                bch_err(c, "error creating rebalance thread: %s", bch2_err_str(ret));
                return ret;
        }

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

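/*
 * Filesystem startup: initialize the rate controller, and start the
 * pending-work-of-unknown-location counter at its maximum, since the real
 * amount of outstanding work isn't known yet.
 */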
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}