// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

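/*
 * Decide whether a single extent pointer needs to be rewritten by background
 * rebalance: either it's a non-cached pointer that isn't in the
 * background_target, or it isn't compressed with the requested
 * background_compression type.
 */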
static inline bool rebalance_ptr_pred(struct bch_fs *c,
                                      struct extent_ptr_decoded p,
                                      struct bch_io_opts *io_opts)
{
        if (io_opts->background_target &&
            !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
            !p.ptr.cached)
                return true;

        if (io_opts->background_compression &&
            p.crc.compression_type !=
            bch2_compression_opt_to_type[io_opts->background_compression])
                return true;

        return false;
}

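/*
 * Account pending rebalance work for a key: for each pointer that needs
 * rebalancing, add its compressed size to the owning device's rebalance_work
 * counter, waking the rebalance thread when a counter goes from zero to
 * nonzero.
 */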
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        if (!bkey_extent_is_data(k.k))
                return;

        if (!io_opts->background_target &&
            !io_opts->background_compression)
                return;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (rebalance_ptr_pred(c, p, io_opts)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);

                        if (atomic64_add_return(p.crc.compressed_size,
                                                &ca->rebalance_work) ==
                            p.crc.compressed_size)
                                rebalance_wakeup(c);
                }
}

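/*
 * Account rebalance work that can't be attributed to a specific device, again
 * waking the thread on the zero -> nonzero transition.
 */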
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

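/*
 * Per-key callback passed to bch2_move_data(): extents with at least one
 * pointer that needs rebalancing get a new replica written to the background
 * target; everything else is skipped.
 */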
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    struct bkey_s_c k,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
{
        switch (k.k->type) {
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                /* Make sure we have room to add a new pointer: */
                if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
                    BKEY_EXTENT_VAL_U64s_MAX)
                        return DATA_SKIP;

                extent_for_each_ptr_decode(e, p, entry)
                        if (rebalance_ptr_pred(c, p, io_opts))
                                goto found;

                return DATA_SKIP;
found:
                data_opts->target               = io_opts->background_target;
                data_opts->btree_insert_flags   = 0;
                return DATA_ADD_REPLICAS;
        }
        default:
                return DATA_SKIP;
        }
}

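/*
 * Summary of pending rebalance work: the device with the most work relative
 * to its capacity, and the total amount of work across the filesystem.
 */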
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

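/*
 * Fold one device's pending work into the summary: the unattributed work is
 * added in with an overflow check, the result is clamped to the device's
 * capacity before computing how full it is, and total_work is only bumped
 * when doing so won't overflow.
 */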
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

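/*
 * Snapshot pending work across all online devices; work that isn't attributed
 * to a device could be anywhere, so it's counted against every device and
 * then against the filesystem's total capacity.
 */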
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

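/* Total cpu time consumed by the current task, in jiffies: */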
static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

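/*
 * Main rebalance loop: sleep until there's work to do, throttle cpu usage
 * while the fullest device is less than 20% full of pending work, and
 * otherwise walk the whole keyspace with bch2_move_data(), rewriting whatever
 * rebalance_pred() flags.
 */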
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        unsigned long io_start;
        long throttle;

        set_freezable();

        io_start        = atomic_long_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        while (!kthread_wait_freezable(r->enabled)) {
                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->state = REBALANCE_THROTTLED;
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);
                        r->throttled_until_cputime = start + throttle;

                        bch2_kthread_io_clock_wait(clock,
                                r->throttled_until_iotime,
                                throttle);
                        continue;
                }

                /* minimum 1 mb/sec: */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic_long_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&r->move_stats, 0, sizeof(r->move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
                               POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
                               &r->move_stats);
        }

        return 0;
}

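/*
 * Format the current rebalance status (fullest device, total work, rate,
 * state) into buf, for sysfs:
 */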
ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
{
        struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];

        bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
        bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
        pr_buf(&out, "fullest_dev (%i):\t%s/%s\n",
               w.dev_most_full_idx, h1, h2);

        bch2_hprint(&PBUF(h1), w.total_work << 9);
        bch2_hprint(&PBUF(h2), c->capacity << 9);
        pr_buf(&out, "total work:\t\t%s/%s\n", h1, h2);

        pr_buf(&out, "rate:\t\t\t%u\n", r->pd.rate.rate);

        switch (r->state) {
        case REBALANCE_WAITING:
                pr_buf(&out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
                bch2_hprint(&PBUF(h1),
                            (r->throttled_until_iotime -
                             atomic_long_read(&c->io_clock[WRITE].now)) << 9);
                pr_buf(&out, "throttled for %lu sec or %s io\n",
                       (r->throttled_until_cputime - jiffies) / HZ,
                       h1);
                break;
        case REBALANCE_RUNNING:
                pr_buf(&out, "running\n");
                pr_buf(&out, "pos %llu:%llu\n",
                       r->move_stats.pos.inode,
                       r->move_stats.pos.offset);
                break;
        }

        return out.pos - buf;
}

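/*
 * Stop the rebalance thread, first opening up the rate limit so a throttled
 * thread wakes promptly:
 */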
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

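/* Start the rebalance thread; a no-op when mounted with nochanges: */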
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
        if (IS_ERR(p))
                return PTR_ERR(p);

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

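/*
 * work_unknown_dev starts out at S64_MAX, so the rebalance thread assumes
 * there may be pending work until its first pass resets the counters:
 */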
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}