// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

/*
 * Decide whether an extent should be moved for rebalancing:
 * fills in data_opts->rewrite_ptrs with a bitmap of the pointers that need
 * rewriting (wrong compression type, or outside the background target), and
 * returns true if any were found.
 */
static bool rebalance_pred(struct bch_fs *c, void *arg,
                           struct bkey_s_c k,
                           struct bch_io_opts *io_opts,
                           struct data_update_opts *data_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned i;

        data_opts->rewrite_ptrs         = 0;
        data_opts->target               = io_opts->background_target;
        data_opts->extra_replicas       = 0;
        data_opts->btree_insert_flags   = 0;

        if (io_opts->background_compression &&
            !bch2_bkey_is_incompressible(k)) {
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                i = 0;
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                        if (!p.ptr.cached &&
                            p.crc.compression_type !=
                            bch2_compression_opt_to_type[io_opts->background_compression])
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        if (io_opts->background_target) {
                const struct bch_extent_ptr *ptr;

                i = 0;
                bkey_for_each_ptr(ptrs, ptr) {
                        if (!ptr->cached &&
                            !bch2_dev_in_target(c, ptr->dev, io_opts->background_target))
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        return data_opts->rewrite_ptrs != 0;
}

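/*
 * Account pending rebalance work for a key: for each pointer that
 * rebalance_pred() flagged, add the extent's size to that device's
 * rebalance_work counter, and wake the rebalance thread if the counter was
 * previously zero.
 */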
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        struct data_update_opts update_opts = { 0 };
        struct bkey_ptrs_c ptrs;
        const struct bch_extent_ptr *ptr;
        unsigned i;

        if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
                return;

        i = 0;
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr(ptrs, ptr) {
                if ((1U << i) & update_opts.rewrite_ptrs)
                        if (atomic64_add_return(k.k->size,
                                        &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
                            k.k->size)
                                rebalance_wakeup(c);
                i++;
        }
}

void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

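/*
 * Snapshot of outstanding rebalance work, in sectors: the filesystem-wide
 * total, plus the single device with the highest proportion of pending work
 * relative to its capacity.
 */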
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

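/*
 * Fold one device's counters into the summary: work that couldn't be
 * attributed to a specific device is charged against every device, additions
 * saturate instead of wrapping, and work is clamped to the device's capacity
 * before computing how "full" of pending work it is.
 */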
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

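/*
 * Gather a summary of pending work across all online devices, plus a
 * pseudo-device entry (idx -1) for work whose destination isn't known,
 * measured against the whole filesystem's capacity.
 */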
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

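/*
 * Main rebalance loop: wait until rebalance is enabled, sleep while there's
 * no work, throttle CPU and IO while the fullest device is less than 20%
 * full, adjust the rate controller based on how fullness changed since the
 * previous pass, then scan all extents with bch2_move_data(), using
 * rebalance_pred() to pick what gets rewritten.
 */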
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        struct bch_move_stats move_stats;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        u64 io_start;
        long throttle;

        set_freezable();

        io_start        = atomic64_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        bch_move_stats_init(&move_stats, "rebalance");
        while (!kthread_wait_freezable(r->enabled)) {
                cond_resched();

                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
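                /*
                 * Rough sketch of the intent: aim for a CPU duty cycle no
                 * higher than the fullest device's fullness. E.g. if that
                 * device is 10% full and the last pass used 50ms of CPU over
                 * 200ms of wall time, we want roughly 50 * 100 / 10 = 500ms
                 * of wall time per 50ms of CPU, so sleep for about
                 * 500 - 200 = 300ms.
                 */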
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);

                        if (atomic64_read(&clock->now) + clock->max_slop <
                            r->throttled_until_iotime) {
                                r->throttled_until_cputime = start + throttle;
                                r->state = REBALANCE_THROTTLED;

                                bch2_kthread_io_clock_wait(clock,
                                        r->throttled_until_iotime,
                                        throttle);
                                continue;
                        }
                }

                /* minimum 1 mb/sec: */
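                /*
                 * The rate is in sectors, so 1 << 11 sectors is 1MB; the
                 * previous rate is scaled by the ratio of the last pass's
                 * fullness to this pass's fullness.
                 */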
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic64_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&move_stats, 0, sizeof(move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               0,               POS_MIN,
                               BTREE_ID_NR,     POS_MAX,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               &move_stats,
                               writepoint_ptr(&c->rebalance_write_point),
                               true,
                               rebalance_pred, NULL);
        }

        return 0;
}

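/*
 * Print rebalance status for sysfs; work and capacity are tracked in
 * sectors, hence the << 9 to convert to bytes for human-readable output.
 */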
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);

        out->tabstops[0] = 20;

        prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
        prt_tab(out);

        prt_human_readable_u64(out, w.dev_most_full_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
        prt_newline(out);

        prt_printf(out, "total work:");
        prt_tab(out);

        prt_human_readable_u64(out, w.total_work << 9);
        prt_printf(out, "/");
        prt_human_readable_u64(out, c->capacity << 9);
        prt_newline(out);

        prt_printf(out, "rate:");
        prt_tab(out);
        prt_printf(out, "%u", r->pd.rate.rate);
        prt_newline(out);

        switch (r->state) {
        case REBALANCE_WAITING:
                prt_printf(out, "waiting");
                break;
        case REBALANCE_THROTTLED:
                prt_printf(out, "throttled for %lu sec or ",
                       (r->throttled_until_cputime - jiffies) / HZ);
                prt_human_readable_u64(out,
                            (r->throttled_until_iotime -
                             atomic64_read(&c->io_clock[WRITE].now)) << 9);
                prt_printf(out, " io");
                break;
        case REBALANCE_RUNNING:
                prt_printf(out, "running");
                break;
        }
        prt_newline(out);
}

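/*
 * Stop the rebalance thread: crank the rate up and reset the ratelimit so
 * the thread isn't left sleeping in a throttle, clear the thread pointer,
 * then synchronize_rcu() so a concurrent rebalance_wakeup() can't use the
 * task we're about to stop and drop our reference on.
 */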
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->rebalance.thread)
                return 0;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
        if (IS_ERR(p)) {
                bch_err(c, "error creating rebalance thread: %li", PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

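/*
 * work_unknown_dev starts at S64_MAX rather than 0, presumably so that a
 * freshly started filesystem reports pending work and the rebalance thread
 * does a full pass until the counters reflect reality.
 */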
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}