#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "io.h"
#include "move.h"
#include "rebalance.h"
#include "super-io.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#include <trace/events/bcachefs.h>

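/*
 * Decide whether a single extent pointer needs rebalancing: either it lives
 * outside the configured background_target (and isn't a cached copy), or its
 * data isn't compressed with the requested background_compression type.
 */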
static inline bool rebalance_ptr_pred(struct bch_fs *c,
                                      struct extent_ptr_decoded p,
                                      struct bch_io_opts *io_opts)
{
        if (io_opts->background_target &&
            !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
            !p.ptr.cached)
                return true;

        if (io_opts->background_compression &&
            p.crc.compression_type !=
            bch2_compression_opt_to_type[io_opts->background_compression])
                return true;

        return false;
}

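/*
 * Called when an extent key is added or updated: account the compressed size
 * of every pointer that still needs rebalancing against its device's
 * rebalance_work counter, and wake the rebalance thread the first time a
 * counter goes from zero to nonzero.
 */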
void bch2_rebalance_add_key(struct bch_fs *c,
                            struct bkey_s_c k,
                            struct bch_io_opts *io_opts)
{
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bkey_s_c_extent e;

        if (!bkey_extent_is_data(k.k))
                return;

        if (!io_opts->background_target &&
            !io_opts->background_compression)
                return;

        e = bkey_s_c_to_extent(k);

        extent_for_each_ptr_decode(e, p, entry)
                if (rebalance_ptr_pred(c, p, io_opts)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);

                        if (atomic64_add_return(p.crc.compressed_size,
                                                &ca->rebalance_work) ==
                            p.crc.compressed_size)
                                rebalance_wakeup(c);
                }
}

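/*
 * Account rebalance work that can't be attributed to a specific device; the
 * same zero-to-nonzero transition wakes the rebalance thread.
 */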
void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
{
        if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
            sectors)
                rebalance_wakeup(c);
}

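/*
 * Predicate passed to bch2_move_data(): skip anything that isn't an extent,
 * extents with no room for an additional pointer, and extents whose pointers
 * are all already where they should be; otherwise request new replicas on the
 * background target (DATA_ADD_REPLICAS).
 */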
static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
                                    struct bkey_s_c k,
                                    struct bch_io_opts *io_opts,
                                    struct data_opts *data_opts)
{
        switch (k.k->type) {
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                /* Make sure we have room to add a new pointer: */
                if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
                    BKEY_EXTENT_VAL_U64s_MAX)
                        return DATA_SKIP;

                extent_for_each_ptr_decode(e, p, entry)
                        if (rebalance_ptr_pred(c, p, io_opts))
                                goto found;

                return DATA_SKIP;
found:
                data_opts->target               = io_opts->background_target;
                data_opts->btree_insert_flags   = 0;
                return DATA_ADD_REPLICAS;
        }
        default:
                return DATA_SKIP;
        }
}

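/*
 * Snapshot of outstanding rebalance work: the total across all devices, plus
 * the single most-full device (by percentage of its capacity), which the
 * thread uses to decide how hard to work.
 */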
struct rebalance_work {
        int             dev_most_full_idx;
        unsigned        dev_most_full_percent;
        u64             dev_most_full_work;
        u64             dev_most_full_capacity;
        u64             total_work;
};

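/*
 * Fold one device's pending work into the summary, guarding against overflow
 * when adding in the unattributed ("unknown device") work and clamping to the
 * device's capacity before computing how full it is.
 */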
static void rebalance_work_accumulate(struct rebalance_work *w,
                u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
{
        unsigned percent_full;
        u64 work = dev_work + unknown_dev;

        if (work < dev_work || work < unknown_dev)
                work = U64_MAX;
        work = min(work, capacity);

        percent_full = div64_u64(work * 100, capacity);

        if (percent_full >= w->dev_most_full_percent) {
                w->dev_most_full_idx            = idx;
                w->dev_most_full_percent        = percent_full;
                w->dev_most_full_work           = work;
                w->dev_most_full_capacity       = capacity;
        }

        if (w->total_work + dev_work >= w->total_work &&
            w->total_work + dev_work >= dev_work)
                w->total_work += dev_work;
}

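/*
 * Build the work summary by walking every online member device, then account
 * the unattributed work once more against the whole filesystem's capacity
 * (device index -1).
 */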
static struct rebalance_work rebalance_work(struct bch_fs *c)
{
        struct bch_dev *ca;
        struct rebalance_work ret = { .dev_most_full_idx = -1 };
        u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
        unsigned i;

        for_each_online_member(ca, c, i)
                rebalance_work_accumulate(&ret,
                        atomic64_read(&ca->rebalance_work),
                        unknown_dev,
                        bucket_to_sector(ca, ca->mi.nbuckets -
                                         ca->mi.first_bucket),
                        i);

        rebalance_work_accumulate(&ret,
                unknown_dev, 0, c->capacity, -1);

        return ret;
}

static void rebalance_work_reset(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        for_each_online_member(ca, c, i)
                atomic64_set(&ca->rebalance_work, 0);

        atomic64_set(&c->rebalance.work_unknown_dev, 0);
}

static unsigned long curr_cputime(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return nsecs_to_jiffies(utime + stime);
}

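/*
 * Main loop of the rebalance thread: sleep while there is nothing to do,
 * throttle CPU usage when even the fullest device has less than 20% of its
 * capacity worth of pending work, scale the ratelimiter by how the work
 * estimate changed since the last pass, then scan the whole keyspace with
 * bch2_move_data() using rebalance_pred to pick extents to rewrite.
 */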
static int bch2_rebalance_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
        struct rebalance_work w, p;
        unsigned long start, prev_start;
        unsigned long prev_run_time, prev_run_cputime;
        unsigned long cputime, prev_cputime;
        unsigned long io_start;
        long throttle;

        set_freezable();

        io_start        = atomic_long_read(&clock->now);
        p               = rebalance_work(c);
        prev_start      = jiffies;
        prev_cputime    = curr_cputime();

        while (!kthread_wait_freezable(r->enabled)) {
                start                   = jiffies;
                cputime                 = curr_cputime();

                prev_run_time           = start - prev_start;
                prev_run_cputime        = cputime - prev_cputime;

                w                       = rebalance_work(c);
                BUG_ON(!w.dev_most_full_capacity);

                if (!w.total_work) {
                        r->state = REBALANCE_WAITING;
                        kthread_wait_freezable(rebalance_work(c).total_work);
                        continue;
                }

                /*
                 * If there isn't much work to do, throttle cpu usage:
                 */
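                /*
                 * The aim is for CPU time to stay roughly proportional to how
                 * full the fullest device is: if the last pass used
                 * prev_run_cputime of CPU over prev_run_time of wall clock,
                 * sleep long enough that cputime / (runtime + throttle) is
                 * about dev_most_full_percent / 100. For example (purely
                 * illustrative numbers), at 10% fullness with 1s of CPU over
                 * 5s of wall time, throttle = 1s * 100 / 10 - 5s = 5s of
                 * extra sleep.
                 */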
                throttle = prev_run_cputime * 100 /
                        max(1U, w.dev_most_full_percent) -
                        prev_run_time;

                if (w.dev_most_full_percent < 20 && throttle > 0) {
                        r->state = REBALANCE_THROTTLED;
                        r->throttled_until_iotime = io_start +
                                div_u64(w.dev_most_full_capacity *
                                        (20 - w.dev_most_full_percent),
                                        50);
                        r->throttled_until_cputime = start + throttle;

                        bch2_kthread_io_clock_wait(clock,
                                r->throttled_until_iotime,
                                throttle);
                        continue;
                }

                /* minimum 1 MB/sec (1 << 11 sectors): */
                r->pd.rate.rate =
                        max_t(u64, 1 << 11,
                              r->pd.rate.rate *
                              max(p.dev_most_full_percent, 1U) /
                              max(w.dev_most_full_percent, 1U));

                io_start        = atomic_long_read(&clock->now);
                p               = w;
                prev_start      = start;
                prev_cputime    = cputime;

                r->state = REBALANCE_RUNNING;
                memset(&r->move_stats, 0, sizeof(r->move_stats));
                rebalance_work_reset(c);

                bch2_move_data(c,
                               /* ratelimiting disabled for now */
                               NULL, /*  &r->pd.rate, */
                               writepoint_ptr(&c->rebalance_write_point),
                               POS_MIN, POS_MAX,
                               rebalance_pred, NULL,
                               &r->move_stats);
        }

        return 0;
}

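/*
 * sysfs: print the work summary, the current rate, and the thread's state
 * (waiting, throttled, or running with the current scan position).
 */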
ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf)
{
        struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct bch_fs_rebalance *r = &c->rebalance;
        struct rebalance_work w = rebalance_work(c);
        char h1[21], h2[21];

        bch2_hprint(&PBUF(h1), w.dev_most_full_work << 9);
        bch2_hprint(&PBUF(h2), w.dev_most_full_capacity << 9);
        pr_buf(&out, "fullest_dev (%i):\t%s/%s\n",
               w.dev_most_full_idx, h1, h2);

        bch2_hprint(&PBUF(h1), w.total_work << 9);
        bch2_hprint(&PBUF(h2), c->capacity << 9);
        pr_buf(&out, "total work:\t\t%s/%s\n", h1, h2);

        pr_buf(&out, "rate:\t\t\t%u\n", r->pd.rate.rate);

        switch (r->state) {
        case REBALANCE_WAITING:
                pr_buf(&out, "waiting\n");
                break;
        case REBALANCE_THROTTLED:
                bch2_hprint(&PBUF(h1),
                            (r->throttled_until_iotime -
                             atomic_long_read(&c->io_clock[WRITE].now)) << 9);
                pr_buf(&out, "throttled for %lu sec or %s io\n",
                       (r->throttled_until_cputime - jiffies) / HZ,
                       h1);
                break;
        case REBALANCE_RUNNING:
                pr_buf(&out, "running\n");
                pr_buf(&out, "pos %llu:%llu\n",
                       r->move_stats.pos.inode,
                       r->move_stats.pos.offset);
                break;
        }

        return out.pos - buf;
}

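/*
 * Stop the rebalance thread: bump the rate so a ratelimited thread wakes up
 * immediately, clear the thread pointer, then synchronize_rcu() so that
 * rebalance_wakeup() can't be looking at the task we're about to stop and
 * drop our reference to.
 */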
void bch2_rebalance_stop(struct bch_fs *c)
{
        struct task_struct *p;

        c->rebalance.pd.rate.rate = UINT_MAX;
        bch2_ratelimit_reset(&c->rebalance.pd.rate);

        p = rcu_dereference_protected(c->rebalance.thread, 1);
        c->rebalance.thread = NULL;

        if (p) {
                /* for synchronizing with rebalance_wakeup() */
                synchronize_rcu();

                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_rebalance_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->opts.nochanges)
                return 0;

        p = kthread_create(bch2_rebalance_thread, c, "bch_rebalance");
        if (IS_ERR(p))
                return PTR_ERR(p);

        get_task_struct(p);
        rcu_assign_pointer(c->rebalance.thread, p);
        wake_up_process(p);
        return 0;
}

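/*
 * work_unknown_dev starts at S64_MAX, presumably so the thread assumes there
 * is work to do until the first rebalance_work_reset() establishes a real
 * baseline.
 */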
void bch2_fs_rebalance_init(struct bch_fs *c)
{
        bch2_pd_controller_init(&c->rebalance.pd);

        atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
}