// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)

/*
 * Max sectors to move per iteration: Have to take into account internal
 * fragmentation from the multiple write points for each generation:
 */
#define COPYGC_SECTORS_PER_ITER(ca)					\
	((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
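
/*
 * Heap comparator: orders candidate buckets by sectors used. It's passed to
 * the heap macros negated (-sectors_used_cmp) below, inverting the order so
 * the heap acts as a maxheap: the fullest candidate is the one replaced when
 * a more fragmented bucket turns up.
 */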
static inline int sectors_used_cmp(copygc_heap *heap,
				   struct copygc_heap_entry l,
				   struct copygc_heap_entry r)
{
	return cmp_int(l.sectors, r.sectors);
}
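
/* Comparator for sorting and searching heap entries by bucket offset: */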
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return cmp_int(l->offset, r->offset);
}
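
/*
 * Returns true if @k has a pointer into one of the buckets selected for
 * evacuation: we binary search the (eytzinger-ordered) heap entries by
 * bucket offset, then check the generation number to skip stale pointers.
 */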
static bool __copygc_pred(struct bch_dev *ca,
			  struct bkey_s_c k)
{
	copygc_heap *h = &ca->copygc_heap;
	const struct bch_extent_ptr *ptr =
		bch2_bkey_has_device(k, ca->dev_idx);

	if (ptr) {
		struct copygc_heap_entry search = { .offset = ptr->offset };

		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;

		for (k = 0; k < h->used; k++)
			if (h->data[k].offset <= ptr->offset &&
			    (j < 0 || h->data[k].offset > h->data[j].offset))
				j = k;

		BUG_ON(i != j);
#endif
		return (i >= 0 &&
			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
			ptr->gen == h->data[i].gen);
	}

	return false;
}
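
/*
 * Move path callback: rewrite, on the same device, any extent that
 * __copygc_pred says points into a bucket being evacuated:
 */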
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	struct bch_dev *ca = arg;

	if (!__copygc_pred(ca, k))
		return DATA_SKIP;

	data_opts->target		= dev_to_target(ca->dev_idx);
	data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE;
	data_opts->rewrite_dev		= ca->dev_idx;
	return DATA_REWRITE;
}
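
/*
 * Wait condition for starting a pass: the movinggc reserve must be full (or
 * the allocator stopped), so we don't start while short on the very buckets
 * copygc allocates from:
 */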
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->fs->freelist_lock);
	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
		ca->allocator_state != ALLOCATOR_RUNNING;
	spin_unlock(&ca->fs->freelist_lock);

	return ret;
}
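
/*
 * One copygc pass: pick the least-full used buckets, move their live data to
 * the copygc write point, then check how much data failed to move:
 */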
static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
{
	copygc_heap *h = &ca->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bucket_array *buckets;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_not_moved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	size_t b;
	int ret;

	memset(&move_stats, 0, sizeof(move_stats));
	closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	h->used = 0;

	/*
	 * We need bucket marks to be up to date - gc can't be recalculating
	 * them:
	 */
	down_read(&c->gc_lock);
	down_read(&ca->bucket_lock);
	buckets = bucket_array(ca);

	for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		struct copygc_heap_entry e;

		if (m.owned_by_allocator ||
		    m.data_type != BCH_DATA_USER ||
		    !bucket_sectors_used(m) ||
		    bucket_sectors_used(m) >= ca->mi.bucket_size)
			continue;

		e = (struct copygc_heap_entry) {
			.gen		= m.gen,
			.sectors	= bucket_sectors_used(m),
			.offset		= bucket_to_sector(ca, b),
		};
		heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
	}
	up_read(&ca->bucket_lock);
	up_read(&c->gc_lock);

	for (i = h->data; i < h->data + h->used; i++)
		sectors_to_move += i->sectors;

	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
		sectors_to_move -= e.sectors;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move)
		return;
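
	/*
	 * Re-sort the chosen buckets by offset, in eytzinger layout, so
	 * copygc_pred can binary search them by pointer offset:
	 */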
	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);
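
	/*
	 * Walk every extent in the filesystem, rewriting the ones that point
	 * into the buckets we're evacuating:
	 */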
	ret = bch2_move_data(c, &ca->copygc_pd.rate,
			     writepoint_ptr(&ca->copygc_write_point),
			     POS_MIN, POS_MAX,
			     copygc_pred, ca,
			     &move_stats);
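
	/*
	 * If a bucket's gen still matches our snapshot and it still has
	 * sectors in use, its data wasn't (fully) moved - count it:
	 */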
	down_read(&ca->bucket_lock);
	buckets = bucket_array(ca);
	for (i = h->data; i < h->data + h->used; i++) {
		size_t b = sector_to_bucket(ca, i->offset);
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);

		if (i->gen == m.gen && bucket_sectors_used(m)) {
			sectors_not_moved += bucket_sectors_used(m);
			buckets_not_moved++;
		}
	}
	up_read(&ca->bucket_lock);

	if (sectors_not_moved && !ret)
		bch_warn_ratelimited(c,
			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move,
			 atomic64_read(&move_stats.sectors_moved),
			 atomic64_read(&move_stats.keys_raced),
			 atomic64_read(&move_stats.sectors_raced));

	trace_copygc(ca,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
unsigned long bch2_copygc_wait_amount(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
	u64 fragmented_allowed = ca->copygc_threshold +
		((__dev_buckets_available(ca, usage) * ca->mi.bucket_size) >> 1);

	return max_t(s64, 0, fragmented_allowed - usage.sectors_fragmented);
}
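
/*
 * Copygc thread: sleeps on the write IO clock - so the wait below is measured
 * in sectors written, not wall time - and runs a pass once the wait amount
 * has elapsed:
 */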
static int bch2_copygc_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);
		wait = bch2_copygc_wait_amount(ca);

		if (wait > clock->max_slop) {
			bch2_kthread_io_clock_wait(clock, last + wait,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		bch2_copygc(c, ca);
	}

	return 0;
}
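
/*
 * On shutdown, open up the rate limit first so an in-flight pass can't stall
 * the thread we're about to stop:
 */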
void bch2_copygc_stop(struct bch_dev *ca)
{
	ca->copygc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->copygc_pd.rate);

	if (ca->copygc_thread) {
		kthread_stop(ca->copygc_thread);
		put_task_struct(ca->copygc_thread);
	}
	ca->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
{
	struct task_struct *t;

	if (ca->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, ca,
			   "bch_copygc[%s]", ca->name);
	if (IS_ERR(t))
		return PTR_ERR(t);

	get_task_struct(t);

	ca->copygc_thread = t;
	wake_up_process(ca->copygc_thread);

	return 0;
}
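
/*
 * Set up the PD controller that rate-limits copygc; zeroing d_term (the
 * derivative term) leaves rate control purely proportional:
 */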
void bch2_dev_copygc_init(struct bch_dev *ca)
{
	bch2_pd_controller_init(&ca->copygc_pd);
	ca->copygc_pd.d_term = 0;
}