/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sort.h>
#include <linux/wait.h>

/*
 * We can't use the entire copygc reserve in one iteration of copygc: we may
 * need the buckets we're freeing up to go back into the copygc reserve to make
 * forward progress, but if the copygc reserve is full they'll be available for
 * any allocation - and it's possible that in a given iteration, we free up most
 * of the buckets we're going to free before we allocate most of the buckets
 * we're going to allocate.
 *
 * If we only use half of the reserve per iteration, then in steady state we'll
 * always have room in the reserve for the buckets we're going to need in the
 * next iteration:
 */
#define COPYGC_BUCKETS_PER_ITER(ca)					\
	((ca)->free[RESERVE_MOVINGGC].size / 2)

/*
 * Max sectors to move per iteration: Have to take into account internal
 * fragmentation from the multiple write points for each generation:
 */
#define COPYGC_SECTORS_PER_ITER(ca)					\
	((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
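
/*
 * Worked example, with made-up numbers: if the movinggc reserve holds 128
 * buckets of 1024 sectors each, one iteration uses at most 128 / 2 = 64
 * reserve buckets and moves at most 64 * 1024 = 65536 sectors (32MiB),
 * leaving the other half of the reserve free for the next iteration.
 */
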
/*
 * Heap comparison function, ordering entries by the number of live sectors in
 * the bucket; call sites negate it (-sectors_used_cmp) to build the maxheap
 * described below, so the fullest candidate bucket is evicted first:
 */
static inline int sectors_used_cmp(copygc_heap *heap,
				   struct copygc_heap_entry l,
				   struct copygc_heap_entry r)
{
	return bucket_sectors_used(l.mark) - bucket_sectors_used(r.mark);
}
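
/*
 * Three-way comparison by bucket start offset, in the form the eytzinger
 * sort/search helpers expect; used to binary search heap entries by pointer
 * offset in copygc_pred():
 */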
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
	const struct copygc_heap_entry *l = _l;
	const struct copygc_heap_entry *r = _r;

	return (l->offset > r->offset) - (l->offset < r->offset);
}
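
/*
 * Predicate passed to bch2_move_data(): returns true iff the extent has a
 * pointer to this device whose offset falls within one of the buckets we
 * decided to evacuate, and whose generation still matches - i.e. the extent
 * is live data in a bucket we're trying to empty:
 */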
static bool copygc_pred(void *arg, struct bkey_s_c_extent e)
{
	struct bch_dev *ca = arg;
	copygc_heap *h = &ca->copygc_heap;
	const struct bch_extent_ptr *ptr =
		bch2_extent_has_device(e, ca->dev_idx);

	if (ptr) {
		struct copygc_heap_entry search = { .offset = ptr->offset };

		/* eytzinger0_find_le() returns -1 if nothing is <= search: */
		ssize_t i = eytzinger0_find_le(h->data, h->used,
					       sizeof(h->data[0]),
					       bucket_offset_cmp, &search);

		return (i >= 0 &&
			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
			ptr->gen == h->data[i].mark.gen);
	}

	return false;
}

static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->freelist_lock);
	ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
		COPYGC_BUCKETS_PER_ITER(ca);
	spin_unlock(&ca->freelist_lock);

	return ret;
}
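
/*
 * One copygc pass: wait until the movinggc reserve is sufficiently full, pick
 * the buckets with the least live data (up to COPYGC_SECTORS_PER_ITER), and
 * rewrite their live extents elsewhere so the buckets can be reclaimed:
 */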
static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
{
	copygc_heap *h = &ca->copygc_heap;
	struct copygc_heap_entry e, *i;
	struct bucket *g;
	u64 keys_moved, sectors_moved;
	u64 sectors_to_move = 0, sectors_not_moved = 0;
	u64 buckets_to_move, buckets_not_moved = 0;
	int ret;

	closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */

	/*
	 * We need bucket marks to be up to date - gc can't be recalculating
	 * them:
	 */
	down_read(&c->gc_lock);
	h->used = 0;
	for_each_bucket(g, ca) {
		struct bucket_mark m = READ_ONCE(g->mark);
		struct copygc_heap_entry e;

		/* skip buckets that are empty, full, or in use by the allocator: */
		if (m.owned_by_allocator ||
		    m.data_type != BCH_DATA_USER ||
		    !bucket_sectors_used(m) ||
		    bucket_sectors_used(m) >= ca->mi.bucket_size)
			continue;

		e = (struct copygc_heap_entry) {
			.offset	= bucket_to_sector(ca, g - ca->buckets),
			.mark	= m
		};
		heap_add_or_replace(h, e, -sectors_used_cmp);
	}
	up_read(&c->gc_lock);

	for (i = h->data; i < h->data + h->used; i++)
		sectors_to_move += bucket_sectors_used(i->mark);

	/*
	 * If we selected more than one iteration's worth, pop the fullest
	 * buckets off the heap until we're under budget:
	 */
	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
		BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
		sectors_to_move -= bucket_sectors_used(e.mark);
	}

	buckets_to_move = h->used;

	if (!buckets_to_move)
		return;

	/*
	 * Sort the remaining entries by bucket offset, so copygc_pred() can
	 * binary search them with eytzinger0_find_le():
	 */
	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
			bucket_offset_cmp, NULL);

	ret = bch2_move_data(c, &ca->copygc_pd.rate,
			     SECTORS_IN_FLIGHT_PER_DEVICE,
			     &ca->self,
			     writepoint_ptr(&ca->copygc_write_point),
			     BTREE_INSERT_USE_RESERVE,
			     ca->dev_idx,
			     copygc_pred, ca,
			     &keys_moved,
			     &sectors_moved);

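	/*
	 * Check how much we actually managed to free: a bucket still holds
	 * live data if its generation is unchanged and it still has sectors
	 * in use:
	 */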
	for (i = h->data; i < h->data + h->used; i++) {
		size_t bucket = sector_to_bucket(ca, i->offset);
		struct bucket_mark m = READ_ONCE(ca->buckets[bucket].mark);

		if (i->mark.gen == m.gen && bucket_sectors_used(m)) {
			sectors_not_moved += bucket_sectors_used(m);
			buckets_not_moved++;
		}
	}

	if (sectors_not_moved && !ret)
		bch_warn(c, "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move);

	trace_copygc(ca,
		     sectors_moved, sectors_not_moved,
		     buckets_to_move, buckets_not_moved);
}
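
/*
 * Background thread driving bch2_copygc(): sleeps on the WRITE io_clock until
 * free space drops below the watermark computed from gc_reserve_percent, so
 * copygc work is triggered by - and paced against - writes:
 */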
static int bch2_copygc_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last;
	u64 available, want, next;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);

		/*
		 * don't start copygc until less than half the gc reserve is
		 * available:
		 */
		available = dev_buckets_available(c, ca);
		/* gc_reserve_percent is a percentage, so /200 gives half of it: */
		want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
				 c->opts.gc_reserve_percent, 200);
		if (available > want) {
			/* sleep until enough has been written to close the gap: */
			next = last + (available - want) *
				ca->mi.bucket_size;
			bch2_kthread_io_clock_wait(clock, next);
			continue;
		}

		bch2_copygc(c, ca);
	}

	return 0;
}
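
/*
 * Stop the copygc thread: open the rate limit all the way up first, so a
 * throttled copygc isn't left sleeping in the ratelimit code:
 */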
void bch2_copygc_stop(struct bch_dev *ca)
{
	ca->copygc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->copygc_pd.rate);

	if (ca->copygc_thread)
		kthread_stop(ca->copygc_thread);
	ca->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
{
	struct task_struct *t;

	BUG_ON(ca->copygc_thread);

	if (c->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("copygc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_copygc_thread, ca, "bch_copygc");
	if (IS_ERR(t))
		return PTR_ERR(t);

	ca->copygc_thread = t;
	wake_up_process(ca->copygc_thread);

	return 0;
}

void bch2_dev_copygc_init(struct bch_dev *ca)
{
	bch2_pd_controller_init(&ca->copygc_pd);
	/* run the rate controller without a derivative term: */
	ca->copygc_pd.d_term = 0;
}