// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

9 #include "alloc_background.h"
10 #include "alloc_foreground.h"
11 #include "btree_iter.h"
12 #include "btree_update.h"
15 #include "disk_groups.h"
18 #include "eytzinger.h"
25 #include <trace/events/bcachefs.h>
26 #include <linux/freezer.h>
27 #include <linux/kthread.h>
28 #include <linux/math64.h>
29 #include <linux/sched/task.h>
30 #include <linux/sort.h>
31 #include <linux/wait.h>

static inline int fragmentation_cmp(copygc_heap *heap,
				    struct copygc_heap_entry l,
				    struct copygc_heap_entry r)
{
	return cmp_int(l.fragmentation, r.fragmentation);
}

static int find_buckets_to_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	h->used = 0;
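
	/*
	 * Worked example of the score (illustrative numbers, not from the
	 * original comments): fragmentation is dirty_sectors scaled to a
	 * 32-bit fixed-point fraction of the bucket,
	 * dirty_sectors * 2^31 / bucket_size, so 128 dirty sectors in a
	 * 512 sector bucket scores 2^29, i.e. 25% full. Emptier buckets
	 * score lower and are the cheapest to evacuate: each one frees a
	 * whole bucket while copying few live sectors.
	 */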

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
		struct copygc_heap_entry e;

		bch2_alloc_to_v4(k, &a);

		/*
		 * Skip buckets that aren't btree/user data, are already full
		 * of live data, or are currently open for writes:
		 */
		if ((a.data_type != BCH_DATA_btree &&
		     a.data_type != BCH_DATA_user) ||
		    a.dirty_sectors >= ca->mi.bucket_size ||
		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
			continue;

		e = (struct copygc_heap_entry) {
			.dev		= iter.pos.inode,
			.gen		= a.gen,
			.replicas	= 1 + a.stripe_redundancy,
			.fragmentation	= div_u64((u64) a.dirty_sectors * (1ULL << 31),
						  ca->mi.bucket_size),
			.sectors	= a.dirty_sectors,
			.bucket		= iter.pos.offset,
		};
		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret;
}
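
/*
 * One copygc pass: size the candidate heap from the rw member devices, fill
 * it from the alloc btree, then evacuate the selected buckets one at a time,
 * emptiest first.
 */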

static int bch2_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct copygc_heap_entry e;
	struct bch_move_stats move_stats;
	struct bch_dev *ca;
	unsigned dev_idx;
	size_t heap_size = 0;
	struct data_opts data_opts = {
		.nr_replicas		= 1,
		.btree_insert_flags	= BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
	};
	int ret = 0;

	bch_move_stats_init(&move_stats, "copygc");
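
	/*
	 * The heap holds roughly 1/128th (>> 7) of the buckets on each rw
	 * device, which bounds both its memory footprint and the number of
	 * buckets evacuated per pass.
	 */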
	for_each_rw_member(ca, c, dev_idx)
		heap_size += ca->mi.nbuckets >> 7;

	if (h->size < heap_size) {
		free_heap(&c->copygc_heap);
		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
			bch_err(c, "error allocating copygc heap");
			return 0;
		}
	}

	ret = find_buckets_to_copygc(c);
	if (ret) {
		bch2_fs_fatal_error(c, "error walking buckets to copygc!");
		return ret;
	}

	if (!h->used) {
		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
		return 0;
	}

	heap_resort(h, fragmentation_cmp, NULL);

	while (h->used) {
		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
		/* not correct w.r.t. device removal */

		ret = bch2_evacuate_bucket(c, POS(e.dev, e.bucket), e.gen, NULL,
					   writepoint_ptr(&c->copygc_write_point),
					   DATA_REWRITE, &data_opts,
					   &move_stats);
		if (ret < 0)
			bch_err(c, "error %i from bch2_evacuate_bucket() in copygc", ret);
		if (ret)
			break;
	}

	trace_copygc(c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and still store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
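
/*
 * Worked example with illustrative numbers (not from the original source):
 * a device with 1000 free buckets of 512 sectors gets fragmented_allowed =
 * (1000 * 512) / 2 = 256000 sectors; if 200000 sectors are currently
 * fragmented, copygc waits for another 56000 sectors' worth of writes.
 */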
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
				       ca->mi.bucket_size) >> 1);
		fragmented = usage.d[BCH_DATA_user].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}
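
/*
 * Note that the wait below is in io clock time - the WRITE io clock advances
 * as sectors are written - so a copygc pass is scheduled after a given amount
 * of write traffic rather than after a wall-clock timeout.
 */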
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		cond_resched();

		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			trace_copygc_wait(c, wait, last + wait);
			c->copygc_wait = last + wait;
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		if (bch2_copygc(c))
			break;
	}

	return 0;
}
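
/*
 * bch2_copygc_stop() pairs with bch2_copygc_start(): kthread_stop() waits
 * for the thread to exit, and put_task_struct() drops the reference taken
 * with get_task_struct() when the thread was created.
 */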
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	if (IS_ERR(t)) {
		bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
		return PTR_ERR(t);
	}

	/* Take a ref so the task struct stays valid for kthread_stop(): */
	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
}