// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "errcode.h"
#include "error.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/wait.h>

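/*
 * Tracks buckets currently being evacuated: an rhashtable for fast
 * "already in flight?" lookups, plus a singly linked FIFO (first/last)
 * so completed moves can be reaped in submission order; nr and sectors
 * count the in-flight buckets and their dirty sectors.
 */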
struct buckets_in_flight {
	struct rhashtable		table;
	struct move_bucket_in_flight	*first;
	struct move_bucket_in_flight	*last;
	size_t				nr;
	size_t				sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len	= sizeof(struct move_bucket_key),
};

static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

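/*
 * A bucket is movable if it isn't currently open for writes, holds data
 * of a movable type, and its fragmentation LRU position is at or before
 * @time; as a side effect, record the bucket's gen and dirty sector
 * count in @b for the eventual evacuate call.
 */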
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       b->k.bucket, BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= a->dirty_sectors;

	ret = data_type_movable(a->data_type) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

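/*
 * Reap completed moves from the head of the in-flight list; with @flush
 * set, wait for each entry's pending IO to drain so the whole list is
 * emptied.
 */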
static void move_buckets_wait(struct btree_trans *trans,
			      struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock(trans);
}

static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

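/*
 * Walk the fragmentation LRU to collect candidate buckets, skipping any
 * that are no longer movable or already being evacuated; the btree
 * write buffer is flushed first so the LRU is current. At most
 * max(16, nr_in_flight / 4) buckets are taken per pass.
 */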
static int bch2_copygc_get_buckets(struct btree_trans *trans,
			struct moving_context *ctxt,
			struct buckets_in_flight *buckets_in_flight,
			move_buckets *buckets)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(trans, ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_flush(trans);
	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
				 __func__, bch2_err_str(ret)))
		return ret;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				  0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret2 = 0;

		saw++;

		if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret2 = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
			if (ret2 >= 0)
				sectors += b.sectors;
		}
		ret2;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

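/*
 * One copygc pass: collect candidates, add each to the in-flight table,
 * then evacuate it at the copygc watermark. -EEXIST (duplicate
 * candidate) is skipped and -ENOMEM ends the pass early so in-flight IO
 * can drain; neither fails the pass.
 */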
noinline
static int bch2_copygc(struct btree_trans *trans,
		       struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight)
{
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	struct move_bucket *i;
	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (unlikely(freezing(current)))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
					     f->bucket.k.gen, data_opts);
		if (ret)
			goto err;
	}
err:
	darray_exit(&buckets);

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err_msg(c, ret, "from bch2_move_data()");

	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
	trace_and_count(c, copygc, c, moved, 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
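/*
 * Illustrative numbers, not taken from real hardware: a device with
 * 1024 buckets available at the stripe watermark and 512-sector buckets
 * gets fragmented_allowed = (1024 * 512) >> 1 = 262144 sectors. With
 * 100000 sectors of movable data currently fragmented, copygc would
 * wait until ~162144 more sectors are written (per the write io_clock)
 * before running; the filesystem-wide wait below is the minimum of this
 * value across all rw members.
 */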
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;
	unsigned i;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

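/*
 * Report the wait state in human-readable form (the << 9 converts
 * sectors to bytes); intended for diagnostics, e.g. when copygc appears
 * stalled.
 */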
void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "Currently waiting for:     ");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since:   ");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait: ");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

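/*
 * Main copygc loop: sleep on the write io_clock until fragmented space
 * exceeds the threshold computed above, then run passes until back
 * under it. The in-flight list is always flushed before a long sleep or
 * freeze, so the thread never parks with moves outstanding.
 */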
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct btree_trans *trans;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight buckets;
	u64 last, wait;
	int ret = 0;

	memset(&buckets, 0, sizeof(buckets));

	ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
	if (ret) {
		bch_err_msg(c, ret, "allocating copygc buckets in flight");
		return ret;
	}

	set_freezable();
	trans = bch2_trans_get(c);

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bch2_trans_unlock(trans);
		cond_resched();

		if (!c->copy_gc_enabled) {
			move_buckets_wait(trans, &ctxt, &buckets, true);
			kthread_wait_freezable(c->copy_gc_enabled);
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(trans, &ctxt, &buckets, true);
			__refrigerator(false);
			continue;
		}

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(trans, &ctxt, &buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(trans, &ctxt, &buckets);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);
	}

	move_buckets_wait(trans, &ctxt, &buckets, true);
	rhashtable_destroy(&buckets.table);
	bch2_trans_put(trans);
	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	if (ret) {
		bch_err_msg(c, ret, "creating copygc thread");
		return ret;
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}