// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "eytzinger.h"
#include "lru.h"
#include "move.h"
#include "movinggc.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>
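
/*
 * Decide whether a bucket can be evacuated: it must not be open for writes,
 * must hold btree or user data, and must have been entered in the
 * fragmentation LRU at or before @time. On success, the bucket's current
 * generation is returned via @gen so the caller can detect reuse.
 */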
static int bch2_bucket_is_movable(struct btree_trans *trans,
				  struct bpos bucket, u64 time, u8 *gen)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a;
	int ret;

	if (bch2_bucket_is_open(trans->c, bucket.inode, bucket.offset))
		return 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(k, &_a);
	*gen = a->gen;
	ret = (a->data_type == BCH_DATA_btree ||
	       a->data_type == BCH_DATA_user) &&
		a->fragmentation_lru &&
		a->fragmentation_lru <= time;

	if (ret) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, trans->c, k);
		pr_debug("%s", buf.buf);
		printbuf_exit(&buf);
	}

	return ret;
}
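
/*
 * Walk the fragmentation LRU btree from *pos and return the first bucket
 * that bch2_bucket_is_movable() accepts; returns -ENOENT once the scan
 * reaches the end of the LRU.
 */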
static int bch2_copygc_next_bucket(struct btree_trans *trans,
				   struct bpos *bucket, u8 *gen, struct bpos *pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
			bpos_max(*pos, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0)),
			lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
			0, k, ({
		*bucket = u64_to_bucket(k.k->p.offset);

		bch2_bucket_is_movable(trans, *bucket, lru_pos_time(k.k->p), gen);
	}));

	*pos = iter.pos;
	if (ret < 0)
		return ret;
	return ret ? 0 : -ENOENT;
}
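
/*
 * One copygc pass: flush the btree write buffer so the fragmentation LRU is
 * up to date, then evacuate up to 32 movable buckets to the copygc write
 * point.
 */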
static int bch2_copygc(struct bch_fs *c)
{
	struct bch_move_stats move_stats;
	struct btree_trans trans;
	struct moving_context ctxt;
	struct data_update_opts data_opts = {
		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
	};
	struct bpos bucket;
	struct bpos pos;
	u8 gen = 0;
	unsigned nr_evacuated;
	int ret = 0;

	bch2_move_stats_init(&move_stats, "copygc");
	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);
	bch2_trans_init(&trans, c, 0, 0);

	ret = bch2_btree_write_buffer_flush(&trans);
	BUG_ON(ret);

	for (nr_evacuated = 0, pos = POS_MIN;
	     nr_evacuated < 32 && !ret;
	     nr_evacuated++, pos = bpos_nosnap_successor(pos)) {
		ret = bch2_copygc_next_bucket(&trans, &bucket, &gen, &pos) ?:
			__bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
		if (bkey_eq(pos, POS_MAX))
			break;
	}

	bch2_trans_exit(&trans);
	bch2_moving_ctxt_exit(&ctxt);

	/* no entries in LRU btree found, or got to end: */
	if (ret == -ENOENT)
		ret = 0;

	if (ret < 0 && !bch2_err_matches(ret, EROFS))
		bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));

	trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
	return ret;
}

/*
 * Copygc runs when the amount of fragmented data is above some arbitrary
 * threshold:
 *
 * The threshold at the limit - when the device is full - is the amount of space
 * we reserved in bch2_recalc_capacity; we can't have more than that amount of
 * disk space stranded due to fragmentation and store everything we have
 * allocated space for.
 *
 * But we don't want to be running copygc unnecessarily when the device still
 * has plenty of free space - rather, we want copygc to smoothly run every so
 * often and continually reduce the amount of fragmented space as the device
 * fills up. So, we increase the threshold by half the current free space.
 */
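
/*
 * Illustrative example (numbers assumed, not from the source): with 100GB
 * free and 20GB of fragmented user data on a device, fragmented_allowed is
 * 50GB and the computed wait is 30GB - copygc sleeps until roughly that much
 * more has been written (measured on the write I/O clock) before rechecking.
 */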
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_idx;
	s64 wait = S64_MAX, fragmented_allowed, fragmented;

	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
				       ca->mi.bucket_size) >> 1);
		fragmented = usage.d[BCH_DATA_user].fragmented;

		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
	}

	return wait;
}

void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "Currently waiting for: ");
	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
					atomic64_read(&c->io_clock[WRITE].now)) << 9);
	prt_newline(out);

	prt_printf(out, "Currently calculated wait: ");
	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
	prt_newline(out);
}
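
/*
 * Copygc thread main loop: sleep on the write I/O clock until enough writes
 * have happened that fragmentation could exceed the threshold, then run
 * bch2_copygc() and recompute the wait.
 */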
static int bch2_copygc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 last, wait;
	int ret = 0;

	set_freezable();

	while (!ret && !kthread_should_stop()) {
		cond_resched();

		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic64_read(&clock->now);
		wait = bch2_copygc_wait_amount(c);

		if (wait > clock->max_slop) {
			trace_and_count(c, copygc_wait, c, wait, last + wait);
			c->copygc_wait = last + wait;
			bch2_kthread_io_clock_wait(clock, last + wait,
						   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		c->copygc_wait = 0;

		c->copygc_running = true;
		ret = bch2_copygc(c);
		c->copygc_running = false;

		wake_up(&c->copygc_running_wq);
	}

	return 0;
}
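
/* Shut down the copygc thread, if any, and drop our reference to its task. */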
void bch2_copygc_stop(struct bch_fs *c)
{
	if (c->copygc_thread) {
		kthread_stop(c->copygc_thread);
		put_task_struct(c->copygc_thread);
	}
	c->copygc_thread = NULL;
}
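
/*
 * Create and wake the copygc thread. We take a reference on the task
 * (released in bch2_copygc_stop()) so the task_struct stays valid for as
 * long as we use it.
 */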
int bch2_copygc_start(struct bch_fs *c)
{
	struct task_struct *t;
	int ret;

	if (c->copygc_thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
	ret = PTR_ERR_OR_ZERO(t);
	if (ret) {
		bch_err(c, "error creating copygc thread: %s", bch2_err_str(ret));
		return ret;
	}

	get_task_struct(t);

	c->copygc_thread = t;
	wake_up_process(c->copygc_thread);

	return 0;
}

void bch2_fs_copygc_init(struct bch_fs *c)
{
	init_waitqueue_head(&c->copygc_running_wq);
	c->copygc_running = false;
}