#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

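/*
 * Decide whether a single extent pointer wants rebalancing: true if it is a
 * non-cached pointer to a device outside the target given by the
 * background_target option, or if its compression type does not match the
 * background_compression option.
 */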
static inline bool rebalance_ptr_pred(struct bch_fs *c,
                                      struct extent_ptr_decoded p,
                                      struct bch_io_opts *io_opts)
{
        if (io_opts->background_target &&
            !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
            !p.ptr.cached)
                return true;

        if (io_opts->background_compression &&
            p.crc.compression_type !=
            bch2_compression_opt_to_type[io_opts->background_compression])
                return true;

        return false;
}

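/*
 * Called for an extent key being written: for every pointer that
 * rebalance_ptr_pred() flags, account its compressed size against the owning
 * device's pending rebalance work, and wake the rebalance thread when a
 * device's counter goes from zero to nonzero.
 */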
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bkey_s_c_extent e;

        if (!bkey_extent_is_data(k.k))
                return;

        if (!io_opts->background_target &&
            !io_opts->background_compression)
                return;

        e = bkey_s_c_to_extent(k);

        extent_for_each_ptr_decode(e, p, entry)
                if (rebalance_ptr_pred(c, p, io_opts)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);

                        if (atomic64_add_return(p.crc.compressed_size,
                                                &ca->rebalance_work) ==
                            p.crc.compressed_size)
                                rebalance_wakeup(c);
                }
}

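/*
 * Account rebalance work that can't be attributed to a specific device;
 * wake the rebalance thread when this counter goes from zero to nonzero.
 */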
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

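/*
 * Per-extent callback for bch2_move_data(): skip extents that have no room
 * for another pointer, or for which no pointer matches rebalance_ptr_pred();
 * otherwise ask the move path to add a replica in the background target.
 */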
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    enum bkey_type type,
                                    struct bkey_s_c_extent e,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
{
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        /* Make sure we have room to add a new pointer: */
        if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
            BKEY_EXTENT_VAL_U64s_MAX)
                return DATA_SKIP;

        extent_for_each_ptr_decode(e, p, entry)
                if (rebalance_ptr_pred(c, p, io_opts))
                        goto found;

        return DATA_SKIP;
found:
        data_opts->target               = io_opts->background_target;
        data_opts->btree_insert_flags   = 0;
        return DATA_ADD_REPLICAS;
}

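/*
 * Snapshot of outstanding rebalance work: the fullest device (by the
 * percentage of its capacity that still needs moving) and the total across
 * the filesystem.
 */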
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

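/*
 * Fold one device's counters into @w: the unknown-device work counts towards
 * every device's fullness, the sum is saturated on overflow and clamped to
 * the device's capacity, the fullest device seen so far is tracked, and only
 * the per-device work is added to the running total (again guarding against
 * overflow).
 */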
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

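/*
 * Compute the current rebalance_work summary: accumulate every online member
 * device, then account the unknown-device work against the whole
 * filesystem's capacity (with idx -1).
 */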
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

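/*
 * Main loop of the rebalance thread: wait until rebalancing is enabled and
 * there is work to do, throttle its own cpu and io usage while the fullest
 * device is less than 20% full, then scan the whole keyspace with
 * bch2_move_data(), using rebalance_pred() to pick extents to move.
 */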
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        unsigned long io_start;
        long throttle;

        set_freezable();

        io_start        = atomic_long_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        while (!kthread_wait_freezable(r->enabled)) {
                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->state = REBALANCE_THROTTLED;
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);
                        r->throttled_until_cputime = start + throttle;

                        bch2_kthread_io_clock_wait(clock,
                                r->throttled_until_iotime,
                                throttle);
                        continue;
                }

                /* minimum 1 MB/sec: */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic_long_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&r->move_stats, 0, sizeof(r->move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
                               POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
                               &r->move_stats);
        }

        return 0;
}

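/*
 * Format the current rebalance status (fullest device, total work, rate and
 * thread state) into @buf, writing at most PAGE_SIZE bytes; returns the
 * number of bytes written.
 */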
ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
{
        char *out = buf, *end = out + PAGE_SIZE;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];

        bch2_hprint(h1, w.dev_most_full_work << 9);
        bch2_hprint(h2, w.dev_most_full_capacity << 9);
        out += scnprintf(out, end - out,
                         "fullest_dev (%i):\t%s/%s\n",
                         w.dev_most_full_idx, h1, h2);

        bch2_hprint(h1, w.total_work << 9);
        bch2_hprint(h2, c->capacity << 9);
        out += scnprintf(out, end - out,
                         "total work:\t\t%s/%s\n",
                         h1, h2);

        out += scnprintf(out, end - out,
                         "rate:\t\t\t%u\n",
                         r->pd.rate.rate);

        switch (r->state) {
        case REBALANCE_WAITING:
                out += scnprintf(out, end - out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
                bch2_hprint(h1,
                            (r->throttled_until_iotime -
                             atomic_long_read(&c->io_clock[WRITE].now)) << 9);
                out += scnprintf(out, end - out,
                                 "throttled for %lu sec or %s io\n",
                                 (r->throttled_until_cputime - jiffies) / HZ,
                                 h1);
                break;
        case REBALANCE_RUNNING:
                out += scnprintf(out, end - out, "running\n");
                out += scnprintf(out, end - out, "pos %llu:%llu\n",
                                 r->move_stats.iter.pos.inode,
                                 r->move_stats.iter.pos.offset);
                break;
        }

        return out - buf;
}

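/*
 * Stop the rebalance thread: raise the rate and reset the ratelimit so any
 * pending throttling ends promptly, clear the published thread pointer and
 * synchronize_rcu() so rebalance_wakeup() can no longer see it, then stop
 * the kthread and drop our task reference.
 */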
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

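/*
 * Create and publish the rebalance thread; a no-op when the filesystem is
 * mounted with the nochanges option.
 */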
int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
        if (IS_ERR(p))
                return PTR_ERR(p);

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

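/*
 * One-time init at filesystem startup. work_unknown_dev starts at S64_MAX,
 * presumably so the rebalance thread assumes there may be outstanding work
 * until it has done a full scan and reset the counters itself.
 */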
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}