// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "eytzinger.h"
#include "move.h"
#include "movinggc.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>
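
/*
 * Sort/search comparator for copygc heap entries, ordering by device then
 * bucket offset; used by eytzinger0_sort() and eytzinger0_find_le() below.
 */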
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return  cmp_int(l->dev,    r->dev) ?:
		cmp_int(l->offset, r->offset);
}
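
/*
 * Predicate passed to bch2_move_data(): rewrite an extent if any of its
 * non-cached pointers lands in a bucket currently selected for copygc
 * (found via eytzinger search of the heap, matching device, offset and
 * generation).
 */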
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 struct bkey_s_c k,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	copygc_heap *h = &c->copygc_heap;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		struct copygc_heap_entry search = {
			.dev	= p.ptr.dev,
			.offset	= p.ptr.offset,
		};
		ssize_t i;

		if (p.ptr.cached)
			continue;

		i = eytzinger0_find_le(h->data, h->used,
				       sizeof(h->data[0]),
				       bucket_offset_cmp, &search);
#if 0
		/* eytzinger search verify code: */
		ssize_t j = -1, k;

		for (k = 0; k < h->used; k++)
			if (h->data[k].offset <= ptr->offset &&
			    (j < 0 || h->data[k].offset > h->data[j].offset))
				j = k;

		BUG_ON(i != j);
#endif
		if (i >= 0 &&
		    p.ptr.dev == h->data[i].dev &&
		    p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
		    p.ptr.gen == h->data[i].gen) {
			/*
			 * We need to use the journal reserve here, because
			 *  - journal reclaim depends on btree key cache
			 *    flushing to make forward progress,
			 *  - which has to make forward progress when the
			 *    journal is pre-reservation full,
			 *  - and depends on allocation - meaning allocator and
			 *    copygc
			 */
			data_opts->target		= io_opts->background_target;
			data_opts->nr_replicas		= 1;
			data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE|
				JOURNAL_WATERMARK_copygc;
			data_opts->rewrite_dev		= p.ptr.dev;

			if (p.has_ec)
				data_opts->nr_replicas += p.ec.redundancy;

			return DATA_REWRITE;
		}
	}

	return DATA_SKIP;
}

static inline int fragmentation_cmp(copygc_heap *heap,
				    struct copygc_heap_entry l,
				    struct copygc_heap_entry r)
{
	return cmp_int(l.fragmentation, r.fragmentation);
}
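
/*
 * Scan the alloc btree and add every partially filled, not currently open
 * user-data bucket to the copygc heap, keyed by fragmentation (dirty
 * sectors scaled by bucket size).
 */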
static int walk_buckets_to_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
		struct copygc_heap_entry e;

		bch2_alloc_to_v4(k, &a);

		if (a.data_type != BCH_DATA_user ||
		    a.dirty_sectors >= ca->mi.bucket_size ||
		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
			continue;

		e = (struct copygc_heap_entry) {
			.dev		= iter.pos.inode,
			.gen		= a.gen,
			.replicas	= 1 + a.stripe_redundancy,
			.fragmentation	= div_u64((u64) a.dirty_sectors * (1ULL << 31),
						  ca->mi.bucket_size),
			.sectors	= a.dirty_sectors,
			.offset		= bucket_to_sector(ca, iter.pos.offset),
		};
		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret;
}

static int bucket_inorder_cmp(const void *_l, const void *_r)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return cmp_int(l->dev, r->dev) ?: cmp_int(l->offset, r->offset);
}
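
/*
 * After a copygc pass, re-read the alloc keys for the buckets we tried to
 * evacuate and total up any dirty sectors still present in buckets whose
 * generation hasn't changed.
 */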
static int check_copygc_was_done(struct bch_fs *c,
				 u64 *sectors_not_moved,
				 u64 *buckets_not_moved)
{
	copygc_heap *h = &c->copygc_heap;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct copygc_heap_entry *i;
	int ret = 0;

	sort(h->data, h->used, sizeof(h->data[0]), bucket_inorder_cmp, NULL);

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, 0);

	for (i = h->data; i < h->data + h->used; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);

		bch2_btree_iter_set_pos(&iter, POS(i->dev, sector_to_bucket(ca, i->offset)));

		ret = lockrestart_do(&trans,
				bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
		if (ret)
			break;

		bch2_alloc_to_v4(k, &a);

		if (a.gen == i->gen && a.dirty_sectors) {
			*sectors_not_moved += a.dirty_sectors;
			*buckets_not_moved += 1;
		}
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret;
}
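
/*
 * One copygc pass: size and (re)allocate the heap, estimate how much space
 * is available to move data into, build the heap of fragmented buckets,
 * trim it so the rewrites fit in the reserve, then evacuate the selected
 * buckets via bch2_move_data() and report anything that wasn't moved.
 */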
static int bch2_copygc(struct bch_fs *c)
{
	copygc_heap *h = &c->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
	u64 sectors_reserved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	struct bch_dev *ca;
	unsigned dev_idx;
	size_t heap_size = 0;
	int ret;

	bch_move_stats_init(&move_stats, "copygc");

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	for_each_rw_member(ca, c, dev_idx)
		heap_size += ca->mi.nbuckets >> 7;

	if (h->size < heap_size) {
		free_heap(&c->copygc_heap);
		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
			bch_err(c, "error allocating copygc heap");
			return 0;
		}
	}

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		u64 avail = max_t(s64, 0,
				  usage.d[BCH_DATA_free].buckets +
				  usage.d[BCH_DATA_need_discard].buckets -
				  ca->nr_open_buckets -
				  bch2_dev_buckets_reserved(ca, RESERVE_movinggc));

		avail = min(avail, ca->mi.nbuckets >> 6);

		sectors_reserved += avail * ca->mi.bucket_size;
	}

	ret = walk_buckets_to_copygc(c);
	if (ret) {
		bch2_fs_fatal_error(c, "error walking buckets to copygc!");
		return ret;
	}

	if (!h->used) {
		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
		return 0;
	}

	/*
	 * Our btree node allocations also come out of RESERVE_movinggc:
	 */
	sectors_reserved = (sectors_reserved * 3) / 4;
	if (!sectors_reserved) {
		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
		return -1;
	}

	for (i = h->data; i < h->data + h->used; i++) {
		sectors_to_move += i->sectors;
		sectors_to_write += i->sectors * i->replicas;
	}

	while (sectors_to_write > sectors_reserved) {
		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
		sectors_to_write -= e.sectors * e.replicas;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move) {
		bch_err_ratelimited(c, "copygc cannot run - sectors_reserved %llu!",
				    sectors_reserved);
		return 0;
	}

	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

	ret = bch2_move_data(c,
			     0,			POS_MIN,
			     BTREE_ID_NR,	POS_MAX,
			     NULL,
			     writepoint_ptr(&c->copygc_write_point),
			     copygc_pred, NULL,
			     &move_stats);
	if (ret) {
		bch_err(c, "error %i from bch2_move_data() in copygc", ret);
		return ret;
	}

	ret = check_copygc_was_done(c, &sectors_not_moved, &buckets_not_moved);
	if (ret) {
		bch_err(c, "error %i from check_copygc_was_done()", ret);
		return ret;
	}

	if (sectors_not_moved)
		bch_warn_ratelimited(c,
			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move,
			 atomic64_read(&move_stats.sectors_moved),
			 atomic64_read(&move_stats.keys_raced),
			 atomic64_read(&move_stats.sectors_raced));

	trace_copygc(c,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
	return 0;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * promised to store.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
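/*
 * Illustrative example (numbers are hypothetical, single device): with
 * roughly 100GB still available and 30GB of fragmented user data,
 * fragmented_allowed is ~50GB, so this returns ~20GB worth of sectors -
 * the copygc thread then sleeps on the write io_clock until about that
 * much more has been written before re-evaluating.
 */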
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
				       ca->mi.bucket_size) >> 1);
		fragmented = usage.d[BCH_DATA_user].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}
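
/*
 * Copygc background thread: sleeps on the write io_clock until enough has
 * been written that fragmentation may have crossed the allowed threshold,
 * then runs a single copygc pass and re-evaluates.
 */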
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 last, wait;

	set_freezable();

	while (!kthread_should_stop()) {
		cond_resched();

		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			trace_copygc_wait(c, wait, last + wait);
			c->copygc_wait = last + wait;
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		if (bch2_copygc(c))
			break;
	}

	return 0;
}

void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	if (IS_ERR(t)) {
		bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
		return PTR_ERR(t);
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
}