// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

9 #include "alloc_foreground.h"
10 #include "btree_iter.h"
11 #include "btree_update.h"
14 #include "disk_groups.h"
17 #include "eytzinger.h"
#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)
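
/*
 * Comparison function for copygc heap entries: ordered by device, then by
 * bucket offset within the device. Used both to sort the heap into an
 * eytzinger array and to look buckets up from copygc_pred().
 */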
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return  cmp_int(l->dev,    r->dev) ?:
		cmp_int(l->offset, r->offset);
}
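
/*
 * Predicate passed to bch2_move_data(): for each pointer in an extent, check
 * whether it points into one of the buckets selected for evacuation (by
 * searching the sorted copygc heap), and if so ask for that pointer to be
 * rewritten.
 */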
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	copygc_heap *h = &c->copygc_heap;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		struct copygc_heap_entry search = {
			.dev	= p.ptr.dev,
			.offset	= p.ptr.offset,
		};

		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;

		for (k = 0; k < h->used; k++)
			if (h->data[k].offset <= ptr->offset &&
			    (j < 0 || h->data[k].offset > h->data[j].offset))
				j = k;
		BUG_ON(i != j);
#endif
		if (i >= 0 &&
		    p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
		    p.ptr.gen == h->data[i].gen) {
			/*
			 * We need to use the journal reserve here, because
			 *  - journal reclaim depends on btree key cache
			 *    flushing to make forward progress,
			 *  - which has to make forward progress when the
			 *    journal is pre-reservation full,
			 *  - and depends on allocation - meaning allocator and
			 *    copygc
			 */

			data_opts->target		= io_opts->background_target;
			data_opts->nr_replicas		= 1;
			data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_JOURNAL_RESERVED;
			data_opts->rewrite_dev		= p.ptr.dev;

			if (p.has_ec)
				data_opts->nr_replicas += p.ec.redundancy;

			return DATA_REWRITE;
		}
	}

	return DATA_SKIP;
}
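
/*
 * Returns true once the device's copygc reserve (RESERVE_MOVINGGC) has been
 * refilled, or once the allocator thread has stopped - used below to wait
 * before starting a copygc pass.
 */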
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->fs->freelist_lock);
	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
		ca->allocator_state != ALLOCATOR_running;
	spin_unlock(&ca->fs->freelist_lock);

	return ret;
}
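
/* Heap comparison function: order candidate buckets by how full they are. */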
static inline int fragmentation_cmp(copygc_heap *heap,
				    struct copygc_heap_entry l,
				    struct copygc_heap_entry r)
{
	return cmp_int(l.fragmentation, r.fragmentation);
}
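
/*
 * One copygc pass: pick the emptiest buckets containing user data, within the
 * budget of the copygc reserve, move their live data elsewhere, then report
 * any buckets whose data couldn't be moved.
 */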
static int bch2_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bucket_array *buckets;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_not_moved = 0;
	u64 sectors_reserved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	struct bch_dev *ca;
	unsigned dev_idx;
	size_t b, heap_size = 0;
	int ret;

	memset(&move_stats, 0, sizeof(move_stats));
	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	h->used = 0;

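	/*
	 * Heap sized to hold up to 1/128th of each device's buckets - only the
	 * best copygc candidates need to fit in it at once.
	 */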
	for_each_rw_member(ca, c, dev_idx)
		heap_size += ca->mi.nbuckets >> 7;

	if (h->size < heap_size) {
		free_heap(&c->copygc_heap);
		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
			bch_err(c, "error allocating copygc heap");
			return 0;
		}
	}

	for_each_rw_member(ca, c, dev_idx) {
		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

		spin_lock(&ca->fs->freelist_lock);
		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
		spin_unlock(&ca->fs->freelist_lock);

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
			struct bucket *g = buckets->b + b;
			struct bucket_mark m = READ_ONCE(g->mark);
			struct copygc_heap_entry e;

			if (m.owned_by_allocator ||
			    m.data_type != BCH_DATA_user ||
			    !bucket_sectors_used(m) ||
			    bucket_sectors_used(m) >= ca->mi.bucket_size)
				continue;

			WARN_ON(m.stripe && !g->stripe_redundancy);

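			/*
			 * .fragmentation is the fraction of the bucket that's
			 * in use, in 15-bit fixed point: this normalizes for
			 * bucket size, so devices with different bucket sizes
			 * compete fairly in the heap, and smaller values mean
			 * cheaper buckets to evacuate.
			 */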
			e = (struct copygc_heap_entry) {
				.dev		= dev_idx,
				.gen		= m.gen,
				.replicas	= 1 + g->stripe_redundancy,
				.fragmentation	= bucket_sectors_used(m) * (1U << 15)
					/ ca->mi.bucket_size,
				.sectors	= bucket_sectors_used(m),
				.offset		= bucket_to_sector(ca, b),
			};
			heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
		}
		up_read(&ca->bucket_lock);
	}

	if (!sectors_reserved) {
		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
		return -1;
	}

	/*
	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
	 * don't budget the entire reserve for data moves.
	 */
	sectors_reserved = (sectors_reserved * 3) / 4;

	for (i = h->data; i < h->data + h->used; i++)
		sectors_to_move += i->sectors * i->replicas;

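	/*
	 * If we selected more data than the reserve can back, drop the fullest
	 * buckets from the heap until the remainder fits:
	 */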
	while (sectors_to_move > sectors_reserved) {
		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
		sectors_to_move -= e.sectors * e.replicas;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move)
		goto out;

	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

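	/*
	 * Scan the whole keyspace; copygc_pred() decides, per extent pointer,
	 * whether it lives in one of the selected buckets and needs rewriting:
	 */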
	ret = bch2_move_data(c,
			     0,			POS_MIN,
			     BTREE_ID_NR,	POS_MAX,
			     NULL,
			     writepoint_ptr(&c->copygc_write_point),
			     copygc_pred, NULL,
			     &move_stats);

	for_each_rw_member(ca, c, dev_idx) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);
		for (i = h->data; i < h->data + h->used; i++) {
			struct bucket_mark m;

			if (i->dev != dev_idx)
				continue;

			b = sector_to_bucket(ca, i->offset);
			m = READ_ONCE(buckets->b[b].mark);

			if (i->gen == m.gen &&
			    bucket_sectors_used(m)) {
				sectors_not_moved += bucket_sectors_used(m);
				buckets_not_moved++;
			}
		}
		up_read(&ca->bucket_lock);
	}

	if (sectors_not_moved && !ret)
		bch_warn_ratelimited(c,
			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move,
			 atomic64_read(&move_stats.sectors_moved),
			 atomic64_read(&move_stats.keys_raced),
			 atomic64_read(&move_stats.sectors_raced));
out:
	trace_copygc(c,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
	return 0;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
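
/*
 * Worked example (made-up numbers): with 20 GB of reclaimable space,
 * fragmented_allowed comes to 10 GB; if only 7 GB of user data is currently
 * fragmented, the thread below sleeps until roughly another 3 GB has been
 * written (measured on the WRITE io clock) before rechecking.
 */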
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
				       ca->mi.bucket_size) >> 1);
		fragmented = usage.d[BCH_DATA_user].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}
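
/*
 * Background thread: sleep on the WRITE io clock until enough writes have gone
 * by that fragmentation may have crossed the threshold, then run a copygc
 * pass.
 */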
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			c->copygc_wait = last + wait;
			bch2_kthread_io_clock_wait(clock, last + wait,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		if (bch2_copygc(c))
			break;
	}

	return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

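/*
 * Create and wake the copygc thread; a reference is held on the task so that
 * bch2_copygc_stop() can stop it and drop the reference later.
 */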
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	if (IS_ERR(t)) {
		bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
		return PTR_ERR(t);
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
}