// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

/*
 * Check if an extent should be moved:
 * returns -1 if it should not be moved, the device of the pointer that
 * should be moved if known, or INT_MAX if the device is unknown.
 */
static int __bch2_rebalance_pred(struct bch_fs *c,
                                 struct bkey_s_c k,
                                 struct bch_io_opts *io_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        if (io_opts->background_compression &&
            !bch2_bkey_is_incompressible(k))
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                        if (!p.ptr.cached &&
                            p.crc.compression_type !=
                            bch2_compression_opt_to_type[io_opts->background_compression])
                                return p.ptr.dev;

        if (io_opts->background_target)
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                        if (!p.ptr.cached &&
                            !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target))
                                return p.ptr.dev;

        return -1;
}

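/*
 * Called when a key is added or updated: if __bch2_rebalance_pred() says the
 * extent needs moving, account its size (in sectors) against the owning
 * device's rebalance_work counter (or the unknown-device counter), and wake
 * the rebalance thread when that counter goes from zero to nonzero.
 */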
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        atomic64_t *counter;
        int dev;

        dev = __bch2_rebalance_pred(c, k, io_opts);
        if (dev < 0)
                return;

        counter = dev < INT_MAX
                ? &bch_dev_bkey_exists(c, dev)->rebalance_work
                : &c->rebalance.work_unknown_dev;

        if (atomic64_add_return(k.k->size, counter) == k.k->size)
                rebalance_wakeup(c);
}

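/*
 * Predicate passed to bch2_move_data(): if __bch2_rebalance_pred() flags the
 * extent, ask for it to be rewritten to the background target; otherwise
 * skip it.
 */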
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    struct bkey_s_c k,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
{
        if (__bch2_rebalance_pred(c, k, io_opts) >= 0) {
                data_opts->target               = io_opts->background_target;
                data_opts->nr_replicas          = 1;
                data_opts->btree_insert_flags   = 0;
                return DATA_ADD_REPLICAS;
        } else {
                return DATA_SKIP;
        }
}

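/*
 * Account rebalance work when the caller doesn't know which device it lives
 * on: add to the filesystem-wide unknown-device counter and wake the thread
 * on a zero to nonzero transition.
 */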
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

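/*
 * Summary of outstanding rebalance work: the device with the highest
 * percentage of pending work relative to its capacity, plus the total amount
 * of work across the whole filesystem.
 */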
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

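/*
 * Fold one device's pending work into the summary: sums saturate instead of
 * overflowing, work is clamped to the device's capacity, and the device with
 * the highest percent-full figure is remembered.
 */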
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

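/*
 * Build a summary across all online members: unknown-device work counts
 * towards each device's percent-full (it could live on any of them), and is
 * then added once to the total, measured against the whole filesystem's
 * capacity.
 */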
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

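/* Clear the per-device and unknown-device work counters: */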
static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

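/* Total user + system CPU time consumed by the current task, in jiffies: */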
static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

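/*
 * Main rebalance loop: sleep until rebalance is enabled and there is work to
 * do, then walk the whole keyspace with bch2_move_data(), using
 * rebalance_pred() to pick which extents to rewrite.
 *
 * When the fullest device is less than 20% full, the thread throttles
 * itself: it sleeps on the write IO clock for up to @throttle jiffies,
 * chosen so that its CPU usage works out to roughly dev_most_full_percent
 * percent (e.g. one second of CPU with the fullest device 10% full stretches
 * the pass out to about ten seconds of wall time).
 */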
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        u64 io_start;
        long throttle;

        set_freezable();

        io_start        = atomic64_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        while (!kthread_wait_freezable(r->enabled)) {
                cond_resched();

                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);

                        if (atomic64_read(&clock->now) + clock->max_slop <
                            r->throttled_until_iotime) {
                                r->throttled_until_cputime = start + throttle;
                                r->state = REBALANCE_THROTTLED;

                                bch2_kthread_io_clock_wait(clock,
                                        r->throttled_until_iotime,
                                        throttle);
                                continue;
                        }
                }

                /* minimum 1 MB/sec: */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic64_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&r->move_stats, 0, sizeof(r->move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
                               POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
                               &r->move_stats);
        }

        return 0;
}

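/*
 * Render the current rebalance status (fullest device, total work, rate and
 * thread state) into @out:
 */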
void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];

        bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
        bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
        pr_buf(out, "fullest_dev (%i):\t%s/%s\n",
               w.dev_most_full_idx, h1, h2);

        bch2_hprint(&PBUF(h1), w.total_work << 9);
        bch2_hprint(&PBUF(h2), c->capacity << 9);
        pr_buf(out, "total work:\t\t%s/%s\n", h1, h2);

        pr_buf(out, "rate:\t\t\t%u\n", r->pd.rate.rate);

        switch (r->state) {
        case REBALANCE_WAITING:
                pr_buf(out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
                bch2_hprint(&PBUF(h1),
                            (r->throttled_until_iotime -
                             atomic64_read(&c->io_clock[WRITE].now)) << 9);
                pr_buf(out, "throttled for %lu sec or %s io\n",
                       (r->throttled_until_cputime - jiffies) / HZ,
                       h1);
                break;
        case REBALANCE_RUNNING:
                pr_buf(out, "running\n"
                       "pos ");
                bch2_bpos_to_text(out, r->move_stats.pos);
                pr_buf(out, "\n");
                break;
        }
}

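/*
 * Stop the rebalance thread: open the rate limit up first (presumably so an
 * in-flight pass isn't left sleeping on the throttle), then clear the thread
 * pointer, wait out any concurrent rebalance_wakeup(), and stop the thread.
 */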
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

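/*
 * Create and start the rebalance thread (a no-op in nochanges mode); the
 * thread pointer is published with rcu_assign_pointer() so that
 * rebalance_wakeup() can dereference it safely.
 */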
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
        if (IS_ERR(p)) {
                bch_err(c, "error creating rebalance thread: %li", PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

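/*
 * Filesystem init: set up the rate controller, and start the unknown-device
 * work counter pegged at S64_MAX, presumably so the thread assumes there may
 * be work to do until a first pass resets the counters.
 */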
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}