// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "eytzinger.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"

#include <trace/events/bcachefs.h>
#include <linux/bsearch.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

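/*
 * Buckets currently undergoing evacuation: kept on a FIFO (so we can wait on
 * the oldest evacuation to finish) and indexed by an rhashtable (so the LRU
 * scan can skip buckets that already have a move in flight).
 */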
struct buckets_in_flight {
	struct rhashtable		table;
	struct move_bucket_in_flight	*first;
	struct move_bucket_in_flight	*last;
	size_t				nr;
	size_t				sectors;
};

static const struct rhashtable_params bch_move_bucket_params = {
	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
	.key_len	= sizeof(struct move_bucket_key),
};

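/*
 * Allocate an in-flight tracking entry for bucket @b and link it into both the
 * hash table and the tail of the FIFO; returns an ERR_PTR on allocation
 * failure, or -EEXIST if the bucket is already being evacuated.
 */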
static struct move_bucket_in_flight *
move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
	int ret;

	if (!new)
		return ERR_PTR(-ENOMEM);

	new->bucket = b;

	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
					    bch_move_bucket_params);
	if (ret) {
		kfree(new);
		return ERR_PTR(ret);
	}

	if (!list->first)
		list->first = new;
	else
		list->last->next = new;

	list->last = new;
	list->nr++;
	list->sectors += b.sectors;
	return new;
}

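/*
 * Decide whether a bucket is worth evacuating: it must not be open for
 * writes, must hold movable data, and its position in the fragmentation LRU
 * must be at or before @time. Also fills in @b's generation and dirty sector
 * count from the alloc key.
 */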
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct move_bucket *b, u64 time)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c,
				b->k.bucket.inode,
				b->k.bucket.offset))
		return 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     b->k.bucket, BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		return ret;

	a		= bch2_alloc_to_v4(k, &_a);
	b->k.gen	= a->gen;
	b->sectors	= a->dirty_sectors;

	ret = data_type_movable(a->data_type) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	if (!ret) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, trans->c, k);
		pr_debug("%s", buf.buf);
		printbuf_exit(&buf);
	}

	return ret;
}

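/*
 * Reap completed evacuations off the front of the FIFO; if @flush, also wait
 * for in-flight ones to complete. Finished entries are verified as evacuated,
 * then removed from the hash table and freed.
 */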
static void move_buckets_wait(struct btree_trans *trans,
			      struct moving_context *ctxt,
			      struct buckets_in_flight *list,
			      bool flush)
{
	struct move_bucket_in_flight *i;
	int ret;

	while ((i = list->first)) {
		if (flush)
			move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));

		if (atomic_read(&i->count))
			break;

		/*
		 * moving_ctxt_exit calls bch2_write as it flushes pending
		 * reads, which inits another btree_trans; this one must be
		 * unlocked:
		 */
		bch2_verify_bucket_evacuated(trans, i->bucket.k.bucket, i->bucket.k.gen);

		list->first = i->next;
		if (!list->first)
			list->last = NULL;

		list->nr--;
		list->sectors -= i->bucket.sectors;

		ret = rhashtable_remove_fast(&list->table, &i->hash,
					     bch_move_bucket_params);
		BUG_ON(ret);
		kfree(i);
	}

	bch2_trans_unlock(trans);
}

static bool bucket_in_flight(struct buckets_in_flight *list,
			     struct move_bucket_key k)
{
	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}

typedef DARRAY(struct move_bucket) move_buckets;

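/*
 * Walk the fragmentation LRU, most fragmented buckets first, and collect up to
 * @nr_to_get candidates that are movable and not already in flight.
 */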
static int bch2_copygc_get_buckets(struct btree_trans *trans,
			struct moving_context *ctxt,
			struct buckets_in_flight *buckets_in_flight,
			move_buckets *buckets)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t nr_to_get = max(16UL, buckets_in_flight->nr / 4);
	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
	int ret;

	move_buckets_wait(trans, ctxt, buckets_in_flight, false);

	ret = bch2_btree_write_buffer_flush(trans);
	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
				 __func__, bch2_err_str(ret)))
		return ret;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
				  0, k, ({
		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
		int ret = 0;

		saw++;

		if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
			not_movable++;
		else if (bucket_in_flight(buckets_in_flight, b.k))
			in_flight++;
		else {
			ret = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
			if (ret >= 0)
				sectors += b.sectors;
		}
		ret;
	}));

	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
		 buckets_in_flight->nr, buckets_in_flight->sectors,
		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);

	return ret < 0 ? ret : 0;
}

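/*
 * One copygc pass: take a batch of candidate buckets, mark each as in flight,
 * then evacuate them with BTREE_INSERT_USE_RESERVE and the copygc journal
 * watermark, so copygc can make forward progress even when normal reserves
 * are low.
 */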
static int bch2_copygc(struct btree_trans *trans,
		       struct moving_context *ctxt,
		       struct buckets_in_flight *buckets_in_flight)
{
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;
	struct move_bucket *i;
	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
	int ret = 0;

	ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
	if (ret)
		goto err;

	darray_for_each(buckets, i) {
		if (unlikely(freezing(current)))
			break;

		f = move_bucket_in_flight_add(buckets_in_flight, *i);
		ret = PTR_ERR_OR_ZERO(f);
		if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
			ret = 0;
			continue;
		}
		if (ret == -ENOMEM) { /* flush IO, continue later */
			ret = 0;
			break;
		}

		ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
					     f->bucket.k.gen, data_opts);
		if (ret)
			goto err;
	}
err:
	darray_exit(&buckets);

	/* no entries in LRU btree found, or got to end: */
	if (bch2_err_matches(ret, ENOENT))
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));

	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
	trace_and_count(c, copygc, c, moved, 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
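/*
 * Worked example with illustrative numbers (not taken from real hardware):
 * a device with 1000 buckets available at RESERVE_stripe and 512-sector
 * buckets gets fragmented_allowed = (1000 * 512) >> 1 = 256000 sectors. If
 * its movable data types currently account for 100000 fragmented sectors,
 * that device contributes a wait of 256000 - 100000 = 156000 sectors; the
 * filesystem-wide wait below is the minimum across all rw members.
 */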
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;
	unsigned i;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

		for (i = 0; i < BCH_DATA_NR; i++)
			if (data_type_movable(i))
				fragmented += usage.d[i].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "Currently waiting for: ");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently waiting since: ");
	prt_human_readable_u64(out, max(0LL,
					atomic64_read(&c->io_clock[WRITE].now) -
					c->copygc_wait_at) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait: ");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}

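/*
 * Main copygc loop: sleep on the write side of the IO clock until enough
 * fragmented space accumulates (per bch2_copygc_wait_amount()), then run
 * copygc passes until the calculated wait rises above the clock's max slop
 * again.
 */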
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct btree_trans trans;
	struct moving_context ctxt;
	struct bch_move_stats move_stats;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct buckets_in_flight move_buckets;
	u64 last, wait;
	int ret = 0;

	memset(&move_buckets, 0, sizeof(move_buckets));

	ret = rhashtable_init(&move_buckets.table, &bch_move_bucket_params);
	if (ret) {
		bch_err(c, "error allocating copygc buckets in flight: %s",
			bch2_err_str(ret));
		return ret;
	}

	set_freezable();
	bch2_trans_init(&trans, c, 0, 0);

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	while (!ret && !kthread_should_stop()) {
		bch2_trans_unlock(&trans);
		cond_resched();

		if (!c->copy_gc_enabled) {
			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
			kthread_wait_freezable(c->copy_gc_enabled);
		}

		if (unlikely(freezing(current))) {
			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
			__refrigerator(false);
			continue;
		}

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait_at = last;
			c->copygc_wait = last + wait;
			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(&trans, &ctxt, &move_buckets);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);
	}

	move_buckets_wait(&trans, &ctxt, &move_buckets, true);
	bch2_trans_exit(&trans);
	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

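/* Stop the copygc thread; pairs with the get_task_struct() in bch2_copygc_start() */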
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	if (ret) {
		bch_err(c, "error creating copygc thread: %s", bch2_err_str(ret));
		return ret;
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}