/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
#include "extents.h"
#include "move.h"
#include "movinggc.h"

#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/wait.h>

/* Moving GC - IO loop */
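
/*
 * moving_pred() - decide whether @k should be moved by copygc: returns the
 * pointer to @ca's replica if @k is extent data whose bucket on @ca has been
 * marked for copygc, NULL otherwise.
 */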
static const struct bch_extent_ptr *moving_pred(struct bch_dev *ca,
						struct bkey_s_c k)
{
	const struct bch_extent_ptr *ptr;

	if (bkey_extent_is_data(k.k) &&
	    (ptr = bch2_extent_has_device(bkey_s_c_to_extent(k), ca->dev_idx)) &&
	    PTR_BUCKET(ca, ptr)->mark.copygc)
		return ptr;

	return NULL;
}
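
/*
 * issue_moving_gc_move() - queue one extent to be rewritten to the copygc
 * write point. Returns nonzero on allocation failure, in which case the
 * caller should wait for in-flight IO and retry.
 */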
static int issue_moving_gc_move(struct bch_dev *ca,
				struct moving_context *ctxt,
				struct bkey_s_c k)
{
	struct bch_fs *c = ca->fs;
	const struct bch_extent_ptr *ptr;
	int ret;

	ptr = moving_pred(ca, k);
	if (!ptr) /* We raced - bucket's been reused */
		return 0;

	ret = bch2_data_move(c, ctxt, &ca->copygc_write_point, k, ptr);
	if (ret)
		trace_moving_gc_alloc_fail(c, k.k->size);
	return ret;
}
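
/*
 * read_moving() - walk the extents btree and issue a move for every extent
 * still living in a bucket marked for copygc, then warn about any sectors
 * that could not be evacuated.
 */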
static void read_moving(struct bch_dev *ca, size_t buckets_to_move,
			u64 sectors_to_move)
{
	struct bch_fs *c = ca->fs;
	struct bucket *g;
	struct moving_context ctxt;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 sectors_not_moved = 0;
	size_t buckets_not_moved = 0;

	bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
	bch2_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate,
			    SECTORS_IN_FLIGHT_PER_DEVICE);
	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);

	while (1) {
		if (kthread_should_stop())
			goto out;

		if (bch2_move_ctxt_wait(&ctxt))
			goto out;

		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;
		if (btree_iter_err(k))
			goto out;

		if (!moving_pred(ca, k))
			goto next;

		if (issue_moving_gc_move(ca, &ctxt, k)) {
			bch2_btree_iter_unlock(&iter);

			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(&ctxt);
			continue;
		}
next:
		bch2_btree_iter_advance_pos(&iter);
		//bch2_btree_iter_cond_resched(&iter);

		/* unlock before calling moving_context_wait() */
		bch2_btree_iter_unlock(&iter);
	}

	bch2_btree_iter_unlock(&iter);
	bch2_move_ctxt_exit(&ctxt);
	trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
			    buckets_to_move);

	/* don't check this if we bailed out early: */
	for_each_bucket(g, ca)
		if (g->mark.copygc && bucket_sectors_used(g)) {
			sectors_not_moved += bucket_sectors_used(g);
			buckets_not_moved++;
		}

	if (sectors_not_moved)
		bch_warn(c, "copygc finished but %llu/%llu sectors, %zu/%zu buckets not moved",
			 sectors_not_moved, sectors_to_move,
			 buckets_not_moved, buckets_to_move);
	return;
out:
	bch2_btree_iter_unlock(&iter);
	bch2_move_ctxt_exit(&ctxt);
	trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
			    buckets_to_move);
}
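
/*
 * have_copygc_reserve() - check whether the RESERVE_MOVINGGC freelist holds
 * enough buckets for one copygc iteration.
 */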
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;

	spin_lock(&ca->freelist_lock);
	ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
		COPYGC_BUCKETS_PER_ITER(ca);
	spin_unlock(&ca->freelist_lock);

	return ret;
}
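
/*
 * bch2_moving_gc() - one copygc pass: wait for the movinggc reserve to be
 * refilled, pick the cheapest-to-evacuate buckets (lowest used-sector
 * counts), mark them with the copygc bit, and hand them to read_moving().
 */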
static void bch2_moving_gc(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bucket *g;
	struct bucket_mark new;
	u64 sectors_to_move = 0;
	size_t buckets_to_move, buckets_unused = 0;
	struct bucket_heap_entry e;
	unsigned sectors_used, i;
	int reserve_sectors;

	if (!have_copygc_reserve(ca)) {
		struct closure cl;

		closure_init_stack(&cl);
		while (1) {
			closure_wait(&c->freelist_wait, &cl);
			if (have_copygc_reserve(ca))
				break;
			closure_sync(&cl);
		}
		closure_wake_up(&c->freelist_wait);
	}

	reserve_sectors = COPYGC_SECTORS_PER_ITER(ca);

	trace_moving_gc_start(ca);

	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
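
	/*
	 * Worked example (illustrative numbers, not taken from the code):
	 * with a per-iteration budget of 60 sectors and candidate buckets
	 * using 5, 10, 20, 35 and 50 sectors, the trimming loop below pops
	 * the 50- and then the 35-sector bucket off the heap, leaving
	 * 5 + 10 + 20 = 35 sectors to move - the cheapest set that fits
	 * the budget.
	 */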

	/*
	 * We need bucket marks to be up to date, so gc can't be recalculating
	 * them, and we don't want the allocator invalidating a bucket after
	 * we've decided to evacuate it but before we set copygc:
	 */
	down_read(&c->gc_lock);
	mutex_lock(&ca->heap_lock);
	mutex_lock(&ca->fs->bucket_lock);

	ca->heap.used = 0;
	for_each_bucket(g, ca) {
		bucket_cmpxchg(g, new, new.copygc = 0);

		if (bucket_unused(g)) {
			buckets_unused++;
			continue;
		}

		if (g->mark.owned_by_allocator ||
		    g->mark.data_type != BUCKET_DATA)
			continue;

		sectors_used = bucket_sectors_used(g);

		if (sectors_used >= ca->mi.bucket_size)
			continue;

		bucket_heap_push(ca, g, sectors_used);
	}

	for (i = 0; i < ca->heap.used; i++)
		sectors_to_move += ca->heap.data[i].val;

	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
		BUG_ON(!heap_pop(&ca->heap, e, bucket_min_cmp));
		sectors_to_move -= e.val;
	}

	for (i = 0; i < ca->heap.used; i++)
		bucket_cmpxchg(ca->heap.data[i].g, new, new.copygc = 1);

	buckets_to_move = ca->heap.used;

	mutex_unlock(&ca->fs->bucket_lock);
	mutex_unlock(&ca->heap_lock);
	up_read(&c->gc_lock);

	read_moving(ca, buckets_to_move, sectors_to_move);
}
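
/*
 * bch2_moving_gc_thread() - background thread: sleep on the write IO clock
 * until free space runs low, then kick off a copygc pass.
 */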
static int bch2_moving_gc_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last;
	u64 available, want, next;

	set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
			break;

		last = atomic_long_read(&clock->now);

		/*
		 * don't start copygc until less than half the gc reserve is
		 * available:
		 */
		available = dev_buckets_available(ca);
		want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
				 c->opts.gc_reserve_percent, 200);
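
		/*
		 * Illustrative arithmetic (numbers assumed, not from the
		 * code): with 1,000,000 usable buckets and
		 * gc_reserve_percent = 10, want = 1,000,000 * 10 / 200 =
		 * 50,000 buckets, i.e. half of the 10% reserve; copygc only
		 * runs once at most that many buckets remain available.
		 */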
		if (available > want) {
			next = last + (available - want) *
				ca->mi.bucket_size;
			bch2_kthread_io_clock_wait(clock, next);
			continue;
		}

		bch2_moving_gc(ca);
	}

	return 0;
}
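
/*
 * bch2_moving_gc_stop() - unthrottle the copygc rate limiter and stop the
 * copygc thread, if one is running.
 */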
void bch2_moving_gc_stop(struct bch_dev *ca)
{
	ca->moving_gc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->moving_gc_pd.rate);

	if (ca->moving_gc_read)
		kthread_stop(ca->moving_gc_read);
	ca->moving_gc_read = NULL;
}
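
/*
 * bch2_moving_gc_start() - create and wake the per-device copygc thread;
 * a no-op when the filesystem was mounted with nochanges.
 */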
int bch2_moving_gc_start(struct bch_dev *ca)
{
	struct task_struct *t;

	BUG_ON(ca->moving_gc_read);

	if (ca->fs->opts.nochanges)
		return 0;

	if (bch2_fs_init_fault("moving_gc_start"))
		return -ENOMEM;

	t = kthread_create(bch2_moving_gc_thread, ca, "bch_copygc_read");
	if (IS_ERR(t))
		return PTR_ERR(t);

	ca->moving_gc_read = t;
	wake_up_process(ca->moving_gc_read);

	return 0;
}
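
/*
 * bch2_dev_moving_gc_init() - set up the rate controller used to throttle
 * copygc; the derivative term is disabled (d_term = 0).
 */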
void bch2_dev_moving_gc_init(struct bch_dev *ca)
{
	bch2_pd_controller_init(&ca->moving_gc_pd);
	ca->moving_gc_pd.d_term = 0;
}