// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

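/*
 * Decide whether a single extent pointer should be rebalanced: either it
 * is a non-cached pointer living outside the background_target (when one
 * is set), or its compression type doesn't match the
 * background_compression option (when one is set).
 */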
static inline bool rebalance_ptr_pred(struct bch_fs *c,
                                      struct extent_ptr_decoded p,
                                      struct bch_io_opts *io_opts)
{
        if (io_opts->background_target &&
            !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
            !p.ptr.cached)
                return true;

        if (io_opts->background_compression &&
            p.crc.compression_type !=
            bch2_compression_opt_to_type[io_opts->background_compression])
                return true;

        return false;
}

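/*
 * Account pending rebalance work for a key: for every pointer that
 * rebalance_ptr_pred() flags, add its compressed size to the owning
 * device's rebalance_work counter, waking the rebalance thread when the
 * counter transitions away from zero.
 */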
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        if (!io_opts->background_target &&
            !io_opts->background_compression)
                return;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (rebalance_ptr_pred(c, p, io_opts)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);

                        if (atomic64_add_return(p.crc.compressed_size,
                                                &ca->rebalance_work) ==
                            p.crc.compressed_size)
                                rebalance_wakeup(c);
                }
}

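/*
 * Account rebalance work that can't be attributed to a specific device
 * (tracked in work_unknown_dev); as above, wakes the rebalance thread on
 * the zero -> nonzero transition.
 */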
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

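/*
 * Predicate passed to bch2_move_data(): returns DATA_ADD_REPLICAS
 * targeting the background target if any pointer matches
 * rebalance_ptr_pred(), or if the extent has fewer non-cached replicas
 * than data_replicas asks for; otherwise DATA_SKIP.
 */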
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    struct bkey_s_c k,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned nr_replicas = 0;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                nr_replicas += !p.ptr.cached;

                if (rebalance_ptr_pred(c, p, io_opts))
                        goto found;
        }

        if (nr_replicas < io_opts->data_replicas)
                goto found;

        return DATA_SKIP;
found:
        data_opts->target               = io_opts->background_target;
        data_opts->btree_insert_flags   = 0;
        return DATA_ADD_REPLICAS;
}

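/*
 * Summary of outstanding rebalance work, presumably in units of 512-byte
 * sectors (the sysfs code below shifts these values by 9 before
 * printing): total work across the filesystem, plus the single fullest
 * device.
 */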
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

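/*
 * Fold one device's work into the summary: saturate on overflow, clamp
 * work to the device's capacity, and remember the device with the
 * highest percentage of outstanding work.
 */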
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

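/*
 * Build a rebalance_work summary from the per-device counters;
 * unattributed work is folded into each device's fullness estimate, then
 * accounted once more against the whole-filesystem capacity (idx -1).
 */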
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

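/* Current task's combined user + system CPU time, in jiffies. */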
static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

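/*
 * Main rebalance loop: periodically recompute outstanding work, sleep
 * while there is nothing to do, and throttle CPU and IO when the fullest
 * device is under 20% full so rebalance stays in the background. The PD
 * controller rate is kept at a floor of 1 << 11 sectors ("minimum 1
 * mb/sec"), though the rate limiter is currently not passed to
 * bch2_move_data(), which then rewrites whatever rebalance_pred()
 * selects.
 */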
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        unsigned long io_start;
        long throttle;

        set_freezable();

        io_start        = atomic_long_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        while (!kthread_wait_freezable(r->enabled)) {
                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->state = REBALANCE_THROTTLED;
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);
                        r->throttled_until_cputime = start + throttle;

                        bch2_kthread_io_clock_wait(clock,
                                r->throttled_until_iotime,
                                throttle);
                        continue;
                }

                /* minimum 1 mb/sec: */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic_long_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&r->move_stats, 0, sizeof(r->move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
                               POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
                               &r->move_stats);
        }

        return 0;
}

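/*
 * Format the current rebalance state for sysfs: fullest device, total
 * outstanding work, current rate, and whether the thread is waiting,
 * throttled or running.
 */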
ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
{
        struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];

        bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
        bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
        pr_buf(&out, "fullest_dev (%i):\t%s/%s\n",
               w.dev_most_full_idx, h1, h2);

        bch2_hprint(&PBUF(h1), w.total_work << 9);
        bch2_hprint(&PBUF(h2), c->capacity << 9);
        pr_buf(&out, "total work:\t\t%s/%s\n", h1, h2);

        pr_buf(&out, "rate:\t\t\t%u\n", r->pd.rate.rate);

        switch (r->state) {
        case REBALANCE_WAITING:
                pr_buf(&out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
                bch2_hprint(&PBUF(h1),
                            (r->throttled_until_iotime -
                             atomic_long_read(&c->io_clock[WRITE].now)) << 9);
                pr_buf(&out, "throttled for %lu sec or %s io\n",
                       (r->throttled_until_cputime - jiffies) / HZ,
                       h1);
                break;
        case REBALANCE_RUNNING:
                pr_buf(&out, "running\n");
                pr_buf(&out, "pos %llu:%llu\n",
                       r->move_stats.pos.inode,
                       r->move_stats.pos.offset);
                break;
        }

        return out.pos - buf;
}

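/*
 * Tear down the rebalance thread: open up the rate limit and reset it,
 * presumably so a throttled thread finishes its sleep promptly, clear
 * the thread pointer, and wait an RCU grace period so rebalance_wakeup()
 * callers can't see a stale pointer before kthread_stop().
 */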
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

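/*
 * Start the rebalance thread (a no-op when mounted with nochanges); the
 * task is published under RCU so rebalance_wakeup() can safely poke it.
 */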
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
        if (IS_ERR(p))
                return PTR_ERR(p);

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

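/*
 * Initialize the rate controller; work_unknown_dev starts at S64_MAX,
 * presumably so rebalance assumes there may be pending work until the
 * counters are actually populated.
 */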
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}