/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "extents.h"
#include "eytzinger.h"
#include "move.h"
#include "movinggc.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sort.h>
#include <linux/wait.h>
/* Moving GC - IO loop */
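/*
 * Three-way comparison on bucket index: used both to sort the copygc heap
 * with eytzinger0_sort() and to look buckets up in it with eytzinger0_find().
 */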
static int bucket_idx_cmp(const void *_l, const void *_r, size_t size)
{
	const struct bucket_heap_entry *l = _l;
	const struct bucket_heap_entry *r = _r;

	if (l->bucket < r->bucket)
		return -1;
	if (l->bucket > r->bucket)
		return 1;
	return 0;
}
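/*
 * Returns the extent pointer for this device if @k has data in one of the
 * buckets currently selected for copygc (i.e. present in the copygc heap),
 * NULL otherwise.
 */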
static const struct bch_extent_ptr *moving_pred(struct bch_dev *ca,
						struct bkey_s_c k)
{
	bucket_heap *h = &ca->copygc_heap;
	const struct bch_extent_ptr *ptr;

	if (bkey_extent_is_data(k.k) &&
	    (ptr = bch2_extent_has_device(bkey_s_c_to_extent(k),
					  ca->dev_idx))) {
		struct bucket_heap_entry search = {
			.bucket = PTR_BUCKET_NR(ca, ptr)
		};

		size_t i = eytzinger0_find(h->data, h->used,
					   sizeof(h->data[0]),
					   bucket_idx_cmp, &search);

		if (i < h->used)
			return ptr;
	}

	return NULL;
}
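/*
 * Queue a single extent to be rewritten; a nonzero return means allocating
 * the move failed and the caller should wait for in-flight IO before
 * retrying.
 */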
static int issue_moving_gc_move(struct bch_dev *ca,
				struct moving_context *ctxt,
				struct bkey_s_c k)
{
	struct bch_fs *c = ca->fs;
	const struct bch_extent_ptr *ptr;
	int ret;

	ptr = moving_pred(ca, k);
	if (!ptr) /* We raced - bucket's been reused */
		return 0;

	ret = bch2_data_move(c, ctxt, &ca->self, k, ptr);
	if (ret)
		trace_moving_gc_alloc_fail(c, k.k->size);
	return ret;
}
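/*
 * Walk the extents btree and rewrite every extent that points into one of
 * the buckets picked by bch2_moving_gc(), then warn about any of those
 * buckets that still contain live data afterwards.
 */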
static void read_moving(struct bch_dev *ca, size_t buckets_to_move,
			u64 sectors_to_move)
{
	struct bch_fs *c = ca->fs;
	bucket_heap *h = &ca->copygc_heap;
	struct moving_context ctxt;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 sectors_not_moved = 0;
	size_t buckets_not_moved = 0;
	struct bucket_heap_entry *i;

	bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
	bch2_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate,
			    SECTORS_IN_FLIGHT_PER_DEVICE);
	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_PREFETCH);

	while (1) {
		if (kthread_should_stop())
			goto out;

		if (bch2_move_ctxt_wait(&ctxt))
			goto out;

		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;
		if (btree_iter_err(k))
			goto out;

		if (!moving_pred(ca, k))
			goto next;

		if (issue_moving_gc_move(ca, &ctxt, k)) {
			bch2_btree_iter_unlock(&iter);

			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(&ctxt);
			continue;
		}
next:
		bch2_btree_iter_advance_pos(&iter);
		//bch2_btree_iter_cond_resched(&iter);

		/* unlock before calling moving_context_wait() */
		bch2_btree_iter_unlock(&iter);
	}

	bch2_btree_iter_unlock(&iter);
	bch2_move_ctxt_exit(&ctxt);
	trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
			    buckets_to_move);

	/* don't check this if we bailed out early: */
	for (i = h->data; i < h->data + h->used; i++) {
		struct bucket_mark m = READ_ONCE(ca->buckets[i->bucket].mark);

		if (i->mark.gen == m.gen && bucket_sectors_used(m)) {
			sectors_not_moved += bucket_sectors_used(m);
			buckets_not_moved++;
		}
	}

	if (sectors_not_moved)
		bch_warn(c, "copygc finished but %llu/%llu sectors, %zu/%zu buckets not moved",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move);
	return;
out:
	bch2_btree_iter_unlock(&iter);
	bch2_move_ctxt_exit(&ctxt);
	trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
			    buckets_to_move);
}
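/*
 * copygc has its own reserve of free buckets (RESERVE_MOVINGGC) so that it
 * can always allocate space to move data into.
 */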
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->freelist_lock);
	ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
		COPYGC_BUCKETS_PER_ITER(ca);
	spin_unlock(&ca->freelist_lock);

	return ret;
}
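/* Heap comparison function: order buckets by number of live sectors. */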
static inline int sectors_used_cmp(bucket_heap *heap,
				   struct bucket_heap_entry l,
				   struct bucket_heap_entry r)
{
	return bucket_sectors_used(l.mark) - bucket_sectors_used(r.mark);
}
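/*
 * One copygc pass: pick the non-empty buckets with the least live data,
 * then evacuate them with read_moving().
 */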
static void bch2_moving_gc(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bucket *g;
	u64 sectors_to_move = 0;
	size_t buckets_to_move, buckets_unused = 0;
	struct bucket_heap_entry e, *i;
	u64 reserve_sectors;

	if (!have_copygc_reserve(ca)) {
		struct closure cl;

		closure_init_stack(&cl);
		while (1) {
			closure_wait(&c->freelist_wait, &cl);
			if (have_copygc_reserve(ca))
				break;
			closure_sync(&cl);
		}
		closure_wake_up(&c->freelist_wait);
	}

	reserve_sectors = COPYGC_SECTORS_PER_ITER(ca);

	trace_moving_gc_start(ca);

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */

	/*
	 * We need bucket marks to be up to date - gc can't be recalculating
	 * them:
	 */
	down_read(&c->gc_lock);
	ca->copygc_heap.used = 0;
	for_each_bucket(g, ca) {
		struct bucket_mark m = READ_ONCE(g->mark);
		struct bucket_heap_entry e = { g - ca->buckets, m };

		if (bucket_unused(m)) {
			buckets_unused++;
			continue;
		}

		if (m.owned_by_allocator ||
		    m.data_type != BUCKET_DATA)
			continue;

		if (bucket_sectors_used(m) >= ca->mi.bucket_size)
			continue;

		heap_add_or_replace(&ca->copygc_heap, e, -sectors_used_cmp);
	}
	up_read(&c->gc_lock);

	for (i = ca->copygc_heap.data;
	     i < ca->copygc_heap.data + ca->copygc_heap.used;
	     i++)
		sectors_to_move += bucket_sectors_used(i->mark);

	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
		BUG_ON(!heap_pop(&ca->copygc_heap, e, -sectors_used_cmp));
		sectors_to_move -= bucket_sectors_used(e.mark);
	}

	buckets_to_move = ca->copygc_heap.used;

	eytzinger0_sort(ca->copygc_heap.data,
			ca->copygc_heap.used,
			sizeof(ca->copygc_heap.data[0]),
			bucket_idx_cmp, NULL);

	read_moving(ca, buckets_to_move, sectors_to_move);
}
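/*
 * Background thread: sleep on the write IO clock until enough has been
 * written that free space may be getting low, then run a copygc pass.
 */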
static int bch2_moving_gc_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last;
	u64 available, want, next;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);
		/*
		 * don't start copygc until less than half the gc reserve is
		 * available:
		 */
		available = dev_buckets_available(ca);
		want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
				 c->opts.gc_reserve_percent, 200);
		if (available > want) {
			next = last + (available - want) *
				ca->mi.bucket_size;
			bch2_kthread_io_clock_wait(clock, next);
			continue;
		}

		bch2_moving_gc(ca);
	}

	return 0;
}
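/*
 * Stop the copygc thread for this device; the rate limit is opened up first
 * so that any moves still in flight finish quickly.
 */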
void bch2_moving_gc_stop(struct bch_dev *ca)
{
	ca->moving_gc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->moving_gc_pd.rate);

	if (ca->moving_gc_read)
		kthread_stop(ca->moving_gc_read);
	ca->moving_gc_read = NULL;
}
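/* Create and start the per-device copygc thread. */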
int bch2_moving_gc_start(struct bch_dev *ca)
{
	struct task_struct *t;

	BUG_ON(ca->moving_gc_read);

	if (ca->fs->opts.nochanges)
		return 0;
	if (bch2_fs_init_fault("moving_gc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_moving_gc_thread, ca, "bch_copygc_read");
	if (IS_ERR(t))
		return PTR_ERR(t);

	ca->moving_gc_read = t;
	wake_up_process(ca->moving_gc_read);
	return 0;
}
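/* Set up the PD controller that rate limits copygc moves. */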
void bch2_dev_moving_gc_init(struct bch_dev *ca)
{
	bch2_pd_controller_init(&ca->moving_gc_pd);
	ca->moving_gc_pd.d_term = 0;
}