/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)

/*
 * Max sectors to move per iteration: Have to take into account internal
 * fragmentation from the multiple write points for each generation:
 */
#define COPYGC_SECTORS_PER_ITER(ca)					\
	((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))

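/*
 * Heap comparison function: a plain three-way compare of live sector counts.
 * It's passed negated to the heap helpers below, so the candidate bucket with
 * the most live sectors sits on top of the heap and is the first to be
 * replaced or dropped when a better (emptier) candidate turns up.
 */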
static inline int sectors_used_cmp(copygc_heap *heap,
				   struct copygc_heap_entry l,
				   struct copygc_heap_entry r)
{
	return (l.sectors > r.sectors) - (l.sectors < r.sectors);
}

static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return (l->offset > r->offset) - (l->offset < r->offset);
}

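/*
 * Returns true if the extent has a pointer on this device that falls inside
 * one of the buckets currently selected for evacuation. The heap entries have
 * been sorted by bucket offset into eytzinger order, so eytzinger0_find_le()
 * gives the entry with the greatest offset <= the pointer's offset; we then
 * check that the pointer really lies within that bucket and that the bucket
 * generation hasn't changed since the bucket was selected.
 */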
static bool __copygc_pred(struct bch_dev *ca,
			  struct bkey_s_c_extent e)
{
	copygc_heap *h = &ca->copygc_heap;
	const struct bch_extent_ptr *ptr =
		bch2_extent_has_device(e, ca->dev_idx);

	if (ptr) {
		struct copygc_heap_entry search = { .offset = ptr->offset };
		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);

		return (i >= 0 &&
			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
			ptr->gen == h->data[i].gen);
	}

	return false;
}

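/*
 * Move predicate handed to bch2_move_data(): extents picked out by
 * __copygc_pred() are rewritten on the same device (DATA_REWRITE), with
 * btree updates allowed to use the reserve so copygc can still make forward
 * progress when the filesystem is nearly full.
 */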
static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
				 enum bkey_type type,
				 struct bkey_s_c_extent e,
				 struct bch_io_opts *io_opts,
				 struct data_opts *data_opts)
{
	struct bch_dev *ca = arg;

	if (!__copygc_pred(ca, e))
		return DATA_SKIP;

	data_opts->target		= dev_to_target(ca->dev_idx);
	data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE;
	data_opts->rewrite_dev		= ca->dev_idx;
	return DATA_REWRITE;
}

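/*
 * Don't run copygc until it has buckets to allocate from: either the
 * movinggc freelist is full, or the allocator is blocked - in which case
 * waiting any longer won't help.
 */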
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->freelist_lock);
	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
		ca->allocator_blocked;
	spin_unlock(&ca->freelist_lock);

	return ret;
}

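/*
 * One copygc pass:
 *
 * 1) Scan the bucket marks and collect partially-empty user data buckets
 *    (not owned by the allocator) into a heap, keeping the emptiest ones.
 * 2) Trim the heap until the total work fits in COPYGC_SECTORS_PER_ITER().
 * 3) Sort the surviving entries by bucket offset so __copygc_pred() can
 *    binary search them, then walk all extents via bch2_move_data().
 * 4) Re-read the bucket marks and warn about data that should have been
 *    evacuated but is still live.
 */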
static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
{
	copygc_heap *h = &ca->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bucket_array *buckets;
	struct bch_move_stats move_stats;
	u64 sectors_to_move = 0, sectors_not_moved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	size_t b;
	int ret;

	memset(&move_stats, 0, sizeof(move_stats));
	closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
	h->used = 0;

	/*
	 * We need bucket marks to be up to date - gc can't be recalculating
	 * them:
	 */
	down_read(&c->gc_lock);
	down_read(&ca->bucket_lock);
	buckets = bucket_array(ca);

	for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		struct copygc_heap_entry e;

		if (m.owned_by_allocator ||
		    m.data_type != BCH_DATA_USER ||
		    !bucket_sectors_used(m) ||
		    bucket_sectors_used(m) >= ca->mi.bucket_size)
			continue;

		e = (struct copygc_heap_entry) {
			.gen		= m.gen,
			.sectors	= bucket_sectors_used(m),
			.offset		= bucket_to_sector(ca, b),
		};
		heap_add_or_replace(h, e, -sectors_used_cmp);
	}
	up_read(&ca->bucket_lock);
	up_read(&c->gc_lock);

	for (i = h->data; i < h->data + h->used; i++)
		sectors_to_move += i->sectors;

	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
		BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
		sectors_to_move -= e.sectors;
	}

	buckets_to_move = h->used;

	if (!buckets_to_move)
		return;

	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

	ret = bch2_move_data(c, &ca->copygc_pd.rate,
			     writepoint_ptr(&ca->copygc_write_point),
			     POS_MIN, POS_MAX,
			     copygc_pred, ca,
			     &move_stats);

	down_read(&ca->bucket_lock);
	buckets = bucket_array(ca);
	for (i = h->data; i < h->data + h->used; i++) {
		size_t b = sector_to_bucket(ca, i->offset);
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);

		if (i->gen == m.gen && bucket_sectors_used(m)) {
			sectors_not_moved += bucket_sectors_used(m);
			buckets_not_moved++;
		}
	}
	up_read(&ca->bucket_lock);

	if (sectors_not_moved && !ret)
		bch_warn(c, "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move);

	trace_copygc(ca,
		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
}

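/*
 * Background thread: sleeps on the write I/O clock (so waits are measured in
 * sectors written, not wall-clock time) until the device is both short on
 * free space and has enough fragmented space to be worth compacting, then
 * runs a copygc pass.
 *
 * The trigger threshold is half the configured gc reserve:
 *
 *	reserve = usable sectors * gc_reserve_percent / 200
 *
 * Illustrative numbers (not defaults taken from this file): with a 100 GB
 * device and gc_reserve_percent = 8, copygc kicks in once free space drops
 * below ~4 GB and at least ~4 GB worth of sectors are fragmented.
 */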
static int bch2_copygc_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
	struct bch_dev_usage usage;
	unsigned long last;
	u64 available, fragmented, reserve, next;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);

		reserve = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
				    ca->mi.bucket_size *
				    c->opts.gc_reserve_percent, 200);

		usage = bch2_dev_usage_read(c, ca);

		/*
		 * don't start copygc until less than half the gc reserve is
		 * available:
		 */
		available = __dev_buckets_available(ca, usage) *
			ca->mi.bucket_size;
		if (available > reserve) {
			next = last + available - reserve;
			bch2_kthread_io_clock_wait(clock, next,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		/*
		 * don't start copygc until there's more than half the copygc
		 * reserve of fragmented space:
		 */
		fragmented = usage.sectors_fragmented;
		if (fragmented < reserve) {
			next = last + reserve - fragmented;
			bch2_kthread_io_clock_wait(clock, next,
					MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		bch2_copygc(c, ca);
	}

	return 0;
}

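/*
 * Tear down the copygc thread. The rate limit is opened up (UINT_MAX) and
 * reset first, presumably so that an in-flight move isn't throttled while we
 * wait for the thread to exit.
 */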
void bch2_copygc_stop(struct bch_dev *ca)
{
	ca->copygc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->copygc_pd.rate);

	if (ca->copygc_thread) {
		kthread_stop(ca->copygc_thread);
		put_task_struct(ca->copygc_thread);
	}
	ca->copygc_thread = NULL;
}

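/*
 * Start the per-device copygc thread. The task struct is pinned with
 * get_task_struct() so bch2_copygc_stop() can kthread_stop() it and then
 * drop the reference. No thread is started in nochanges mode.
 */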
int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
{
	struct task_struct *t;

	BUG_ON(ca->copygc_thread);

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, ca,
			   "bch_copygc[%s]", ca->name);
	if (IS_ERR(t))
		return PTR_ERR(t);

	get_task_struct(t);
	ca->copygc_thread = t;
	wake_up_process(ca->copygc_thread);

	return 0;
}

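/*
 * Set up the controller that paces copygc writes; d_term is zeroed,
 * presumably to disable the derivative term and let the proportional term
 * alone drive the rate.
 */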
void bch2_dev_copygc_init(struct bch_dev *ca)
{
	bch2_pd_controller_init(&ca->copygc_pd);
	ca->copygc_pd.d_term = 0;
}