// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>
/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return  cmp_int(l->dev,    r->dev) ?:
		cmp_int(l->offset, r->offset);
}
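/*
 * Decide whether to rewrite an extent: rewrite it if any of its pointers
 * points into one of the buckets currently selected for copygc (looked up
 * by eytzinger search on the sorted heap entries and matched by device,
 * bucket range and generation).
 */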
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	copygc_heap *h = &c->copygc_heap;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		struct copygc_heap_entry search = {
			.dev	= p.ptr.dev,
			.offset	= p.ptr.offset,
		};

		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;

		for (k = 0; k < h->used; k++)
			if (h->data[k].offset <= ptr->offset &&
			    (j < 0 || h->data[k].offset > h->data[j].offset))
				j = k;

		BUG_ON(i != j);
#endif
		if (i >= 0 &&
		    p.ptr.dev == h->data[i].dev &&
		    p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
		    p.ptr.gen == h->data[i].gen) {
			/*
			 * We need to use the journal reserve here, because
			 *  - journal reclaim depends on btree key cache
			 *    flushing to make forward progress,
			 *  - which has to make forward progress when the
			 *    journal is pre-reservation full,
			 *  - and depends on allocation - meaning allocator and
			 *    copygc
			 */

			data_opts->target		= io_opts->background_target;
			data_opts->nr_replicas		= 1;
			data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_JOURNAL_RESERVED;
			data_opts->rewrite_dev		= p.ptr.dev;

			if (p.has_ec)
				data_opts->nr_replicas += p.ec.redundancy;

			return DATA_REWRITE;
		}
	}

	return DATA_SKIP;
}
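/* Wait condition: the movinggc reserve is full, or the allocator has stopped */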
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->fs->freelist_lock);
	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
		ca->allocator_state != ALLOCATOR_running;
	spin_unlock(&ca->fs->freelist_lock);

	return ret;
}
static inline int fragmentation_cmp(copygc_heap *heap,
				    struct copygc_heap_entry l,
				    struct copygc_heap_entry r)
{
	return cmp_int(l.fragmentation, r.fragmentation);
}
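/*
 * Scan the alloc btree and build the copygc heap: partially empty, non-open
 * buckets holding user data are added keyed by fragmentation, keeping the
 * emptiest buckets when the heap fills up.
 */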
static int walk_buckets_to_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
		struct copygc_heap_entry e;

		u = bch2_alloc_unpack(k);

		if (u.data_type != BCH_DATA_user ||
		    u.dirty_sectors >= ca->mi.bucket_size ||
		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
			continue;

		e = (struct copygc_heap_entry) {
			.dev		= iter.pos.inode,
			.gen		= u.gen,
			.replicas	= 1 + u.stripe_redundancy,
			.fragmentation	= u.dirty_sectors * (1U << 15)
				/ ca->mi.bucket_size,
			.sectors	= u.dirty_sectors,
			.offset		= bucket_to_sector(ca, iter.pos.offset),
		};
		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret;
}
static int bucket_inorder_cmp(const void *_l, const void *_r)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return cmp_int(l->dev, r->dev) ?: cmp_int(l->offset, r->offset);
}
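/*
 * After moving, re-check the alloc key for each bucket we tried to evacuate:
 * a bucket still at the same gen with dirty sectors was not (fully) moved.
 */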
static int check_copygc_was_done(struct bch_fs *c,
				 u64 *sectors_not_moved,
				 u64 *buckets_not_moved)
{
	copygc_heap *h = &c->copygc_heap;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	struct copygc_heap_entry *i;
	int ret = 0;

	sort(h->data, h->used, sizeof(h->data[0]), bucket_inorder_cmp, NULL);

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, 0);

	for (i = h->data; i < h->data + h->used; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);

		bch2_btree_iter_set_pos(&iter, POS(i->dev, sector_to_bucket(ca, i->offset)));

		ret = lockrestart_do(&trans,
				bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
		if (ret)
			break;

		u = bch2_alloc_unpack(k);

		if (u.gen == i->gen && u.dirty_sectors) {
			*sectors_not_moved += u.dirty_sectors;
			*buckets_not_moved += 1;
		}
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret;
}
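/*
 * One copygc pass: select the emptiest buckets that fit in (part of) the
 * copygc reserve, rewrite their live data via bch2_move_data(), then report
 * anything that couldn't be moved.
 */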
static int bch2_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
	u64 sectors_reserved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	struct bch_dev *ca;
	unsigned dev_idx;
	size_t heap_size = 0;
	int ret;

	bch_move_stats_init(&move_stats, "copygc");

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */

	for_each_rw_member(ca, c, dev_idx)
		heap_size += ca->mi.nbuckets >> 7;

	if (h->size < heap_size) {
		free_heap(&c->copygc_heap);
		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
			bch_err(c, "error allocating copygc heap");
			return 0;
		}
	}

	for_each_rw_member(ca, c, dev_idx) {
		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

		spin_lock(&ca->fs->freelist_lock);
		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
		spin_unlock(&ca->fs->freelist_lock);
	}

	ret = walk_buckets_to_copygc(c);
	if (ret) {
		bch2_fs_fatal_error(c, "error walking buckets to copygc!");
		return ret;
	}

	if (!h->used) {
		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
		return 0;
	}

	/*
	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
	 */
	sectors_reserved = (sectors_reserved * 3) / 4;
	if (!sectors_reserved) {
		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
		return -1;
	}

	for (i = h->data; i < h->data + h->used; i++) {
		sectors_to_move += i->sectors;
		sectors_to_write += i->sectors * i->replicas;
	}

	while (sectors_to_write > sectors_reserved) {
		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
		sectors_to_write -= e.sectors * e.replicas;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move) {
		bch_err_ratelimited(c, "copygc cannot run - sectors_reserved %llu!",
				    sectors_reserved);
		return 0;
	}

	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

	ret = bch2_move_data(c,
			     0,			POS_MIN,
			     BTREE_ID_NR,	POS_MAX,
			     NULL,
			     writepoint_ptr(&c->copygc_write_point),
			     copygc_pred, NULL,
			     &move_stats);
	if (ret) {
		bch_err(c, "error %i from bch2_move_data() in copygc", ret);
		return ret;
	}

	ret = check_copygc_was_done(c, &sectors_not_moved, &buckets_not_moved);
	if (ret) {
		bch_err(c, "error %i from check_copygc_was_done()", ret);
		return ret;
	}

	if (sectors_not_moved)
		bch_warn_ratelimited(c,
			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
			sectors_not_moved, sectors_to_move,
			buckets_not_moved, buckets_to_move,
			atomic64_read(&move_stats.sectors_moved),
			atomic64_read(&move_stats.keys_raced),
			atomic64_read(&move_stats.sectors_raced));

	trace_copygc(c,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
	return 0;
}
/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
				       ca->mi.bucket_size) >> 1);
		fragmented = usage.d[BCH_DATA_user].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}
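/*
 * Copygc thread: sleep on the write I/O clock until enough fragmented space
 * has accumulated (per bch2_copygc_wait_amount()), then run a copygc pass.
 */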
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		cond_resched();

		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			trace_copygc_wait(c, wait, last + wait);
			c->copygc_wait = last + wait;
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		if (bch2_copygc(c))
			break;
	}

	return 0;
}
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	if (IS_ERR(t)) {
		bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
		return PTR_ERR(t);
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}
void bch2_fs_copygc_init(struct bch_fs *c)
{
}