/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcache.h"
#include "alloc.h"
#include "bkey_methods.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "super-io.h"
#include "writeback.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <trace/events/bcache.h>

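/*
 * Bookkeeping for sanity checking the btree topology as GC walks it: for each
 * level we track the range of keys covered by the nodes visited so far, so
 * that every node's min_key/max_key can be checked against its predecessor at
 * the same level and against its children.
 */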
struct range_checks {
        struct range_level {
                struct bpos     min;
                struct bpos     max;
        }                       l[BTREE_MAX_DEPTH];
        unsigned                depth;
};

static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
{
        unsigned i;

        for (i = 0; i < BTREE_MAX_DEPTH; i++)
                r->l[i].min = r->l[i].max = POS_MIN;
        r->depth = depth;
}

static void btree_node_range_checks(struct cache_set *c, struct btree *b,
                                    struct range_checks *r)
{
        struct range_level *l = &r->l[b->level];

        struct bpos expected_min = bkey_cmp(l->min, l->max)
                ? btree_type_successor(b->btree_id, l->max)
                : l->max;

        cache_set_inconsistent_on(bkey_cmp(b->data->min_key,
                                           expected_min), c,
                "btree node has incorrect min key: %llu:%llu != %llu:%llu",
                b->data->min_key.inode,
                b->data->min_key.offset,
                expected_min.inode,
                expected_min.offset);

        l->max = b->data->max_key;

        if (b->level > r->depth) {
                l = &r->l[b->level - 1];

                cache_set_inconsistent_on(bkey_cmp(b->data->min_key,
                                                   l->min), c,
                        "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
                        b->data->min_key.inode,
                        b->data->min_key.offset,
                        l->min.inode,
                        l->min.offset);

                cache_set_inconsistent_on(bkey_cmp(b->data->max_key,
                                                   l->max), c,
                        "btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
                        b->data->max_key.inode,
                        b->data->max_key.offset,
                        l->max.inode,
                        l->max.offset);

                if (bkey_cmp(b->data->max_key, POS_MAX))
                        l->min = l->max =
                                btree_type_successor(b->btree_id,
                                                     b->data->max_key);
        }
}

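/*
 * For each pointer in @k, pull the bucket's oldest_gens[] entry back to the
 * pointer's gen if the pointer is older, and return the staleness (gen delta)
 * of the most stale pointer found.
 */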
u8 bch_btree_key_recalc_oldest_gen(struct cache_set *c, struct bkey_s_c k)
{
        const struct bch_extent_ptr *ptr;
        struct cache *ca;
        u8 max_stale = 0;

        if (bkey_extent_is_data(k.k)) {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

                rcu_read_lock();

                extent_for_each_online_device(c, e, ptr, ca) {
                        size_t b = PTR_BUCKET_NR(ca, ptr);

                        if (__gen_after(ca->oldest_gens[b], ptr->gen))
                                ca->oldest_gens[b] = ptr->gen;

                        max_stale = max(max_stale, ptr_stale(ca, ptr));
                }

                rcu_read_unlock();
        }

        return max_stale;
}

/*
 * For runtime mark and sweep:
 */
static u8 bch_btree_mark_key(struct cache_set *c, enum bkey_type type,
                             struct bkey_s_c k)
{
        switch (type) {
        case BKEY_TYPE_BTREE:
                bch_gc_mark_key(c, k, c->sb.btree_node_size, true);
                return 0;
        case BKEY_TYPE_EXTENTS:
                bch_gc_mark_key(c, k, k.k->size, false);
                return bch_btree_key_recalc_oldest_gen(c, k);
        default:
                BUG();
        }
}

u8 bch_btree_mark_key_initial(struct cache_set *c, enum bkey_type type,
                              struct bkey_s_c k)
{
        atomic64_set(&c->key_version,
                     max_t(u64, k.k->version.lo,
                           atomic64_read(&c->key_version)));

        return bch_btree_mark_key(c, type, k);
}

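/*
 * Mark every key in @b and decide whether GC should rewrite the node: returns
 * true if more than 10 stale pointers were found (and rewrites aren't
 * disabled), or if the btree_gc_always_rewrite debug knob is set.
 */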
static bool btree_gc_mark_node(struct cache_set *c, struct btree *b)
{
        if (btree_node_has_ptrs(b)) {
                struct btree_node_iter iter;
                struct bkey unpacked;
                struct bkey_s_c k;
                u8 stale = 0;

                for_each_btree_node_key_unpack(b, k, &iter,
                                               btree_node_is_extents(b),
                                               &unpacked) {
                        bkey_debugcheck(c, b, k);
                        stale = max(stale, bch_btree_mark_key(c,
                                                        btree_node_type(b), k));
                }

                if (btree_gc_rewrite_disabled(c))
                        return false;

                if (stale > 10)
                        return true;
        }

        if (btree_gc_always_rewrite(c))
                return true;

        return false;
}

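/*
 * c->gc_pos records how far the current GC pass has gotten, published under
 * gc_pos_lock (a seqcount) so the mark path can tell whether GC has already
 * visited a given reference - see the ordering comment in bch_gc() and
 * gc_will_visit_node()/gc_will_visit_root().
 */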
static inline void __gc_pos_set(struct cache_set *c, struct gc_pos new_pos)
{
        write_seqcount_begin(&c->gc_pos_lock);
        c->gc_pos = new_pos;
        write_seqcount_end(&c->gc_pos_lock);
}

static inline void gc_pos_set(struct cache_set *c, struct gc_pos new_pos)
{
        BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
        __gc_pos_set(c, new_pos);
}

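/*
 * Walk one btree, marking every key in every node visited and advancing
 * gc_pos as we go.  For the extents btree we walk all the way down to the
 * leaves (leaf keys are extents with pointers to mark); for other btrees only
 * interior nodes are visited, unless expensive_debug_checks wants the range
 * checks run on leaves too.  Nodes flagged by btree_gc_mark_node() get
 * rewritten, and the root's own key is marked last.
 */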
static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
{
        struct btree_iter iter;
        struct btree *b;
        bool should_rewrite;
        struct range_checks r;
        unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;
        int ret;

        /*
         * if expensive_debug_checks is on, run range_checks on all leaf nodes:
         */
        if (expensive_debug_checks(c))
                depth = 0;

        btree_node_range_checks_init(&r, depth);

        for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
                btree_node_range_checks(c, b, &r);

                bch_verify_btree_nr_keys(b);

                should_rewrite = btree_gc_mark_node(c, b);

                gc_pos_set(c, gc_pos_btree_node(b));

                if (should_rewrite)
                        bch_btree_node_rewrite(&iter, b, NULL);

                bch_btree_iter_cond_resched(&iter);
        }
        ret = bch_btree_iter_unlock(&iter);
        if (ret)
                return ret;

        mutex_lock(&c->btree_root_lock);

        b = c->btree_roots[btree_id].b;
        bch_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
        gc_pos_set(c, gc_pos_btree_root(b->btree_id));

        mutex_unlock(&c->btree_root_lock);
        return 0;
}

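/*
 * Mark the buckets currently owned by the allocator rather than by the index:
 * everything sitting on the free_inc and free[] freelists, plus the buckets
 * that open_buckets are still pointing at.
 */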
static void bch_mark_allocator_buckets(struct cache_set *c)
{
        struct cache *ca;
        struct open_bucket *ob;
        size_t i, j, iter;
        unsigned ci;

        for_each_cache(ca, c, ci) {
                spin_lock(&ca->freelist_lock);

                fifo_for_each_entry(i, &ca->free_inc, iter)
                        bch_mark_alloc_bucket(ca, &ca->buckets[i], true);

                for (j = 0; j < RESERVE_NR; j++)
                        fifo_for_each_entry(i, &ca->free[j], iter)
                                bch_mark_alloc_bucket(ca, &ca->buckets[i], true);

                spin_unlock(&ca->freelist_lock);
        }

        for (ob = c->open_buckets;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
             ob++) {
                const struct bch_extent_ptr *ptr;

                mutex_lock(&ob->lock);
                rcu_read_lock();
                open_bucket_for_each_online_device(c, ob, ptr, ca)
                        bch_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
                rcu_read_unlock();
                mutex_unlock(&ob->lock);
        }
}

/*
 * Mark non btree metadata - prios, journal
 */
static void bch_mark_metadata(struct cache_set *c)
{
        struct cache *ca;
        unsigned i, j;
        u64 b;

        for_each_cache(ca, c, i) {
                for (j = 0; j < ca->journal.nr; j++) {
                        b = ca->journal.buckets[j];
                        bch_mark_metadata_bucket(ca, ca->buckets + b, true);
                }

                spin_lock(&ca->prio_buckets_lock);

                for (j = 0; j < prio_buckets(ca) * 2; j++) {
                        b = ca->prio_buckets[j];
                        bch_mark_metadata_bucket(ca, ca->buckets + b, true);
                }

                spin_unlock(&ca->prio_buckets_lock);
        }
}

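/*
 * Btree nodes that have been deleted from the index but whose interior update
 * hasn't completed yet still occupy their buckets on disk; mark them here so
 * GC doesn't see those buckets as free.
 */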
/* Also see bch_pending_btree_node_free_insert_done() */
static void bch_mark_pending_btree_node_frees(struct cache_set *c)
{
        struct bucket_stats_cache_set stats = { 0 };
        struct btree_interior_update *as;
        struct pending_btree_node_free *d;

        mutex_lock(&c->btree_interior_update_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
                        __bch_gc_mark_key(c, bkey_i_to_s_c(&d->key),
                                          c->sb.btree_node_size, true,
                                          &stats);
        /*
         * Don't apply stats - pending deletes aren't tracked in
         * cache_set_stats:
         */

        mutex_unlock(&c->btree_interior_update_lock);
}

/**
 * bch_gc - recompute bucket marks and oldest_gen, rewrite btree nodes
 */
void bch_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *g;
        struct bucket_mark new;
        u64 start_time = local_clock();
        unsigned i;
        int cpu;

        /*
         * Walk _all_ references to buckets, and recompute them:
         *
         * Order matters here:
         *  - Concurrent GC relies on the fact that we have a total ordering for
         *    everything that GC walks - see gc_will_visit_node(),
         *    gc_will_visit_root()
         *
         *  - also, references move around in the course of index updates and
         *    various other crap: everything needs to agree on the ordering
         *    references are allowed to move around in - e.g., we're allowed to
         *    start with a reference owned by an open_bucket (the allocator) and
         *    move it to the btree, but not the reverse.
         *
         *    This is necessary to ensure that gc doesn't miss references that
         *    move around - if references move backwards in the ordering GC
         *    uses, GC could skip past them
         */

        if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
                return;

        trace_bcache_gc_start(c);

        /*
         * Do this before taking gc_lock - bch_disk_reservation_get() blocks on
         * gc_lock if sectors_available goes to 0:
         */
        bch_recalc_sectors_available(c);

        down_write(&c->gc_lock);

        lg_global_lock(&c->bucket_stats_lock);

        /*
         * Indicates to buckets code that gc is now in progress - done under
         * bucket_stats_lock to avoid racing with bch_mark_key():
         */
        __gc_pos_set(c, GC_POS_MIN);

        /* Save a copy of the existing bucket stats while we recompute them: */
        for_each_cache(ca, c, i) {
                ca->bucket_stats_cached = __bch_bucket_stats_read_cache(ca);
                for_each_possible_cpu(cpu) {
                        struct bucket_stats_cache *p =
                                per_cpu_ptr(ca->bucket_stats_percpu, cpu);
                        memset(p, 0, sizeof(*p));
                }
        }

        c->bucket_stats_cached = __bch_bucket_stats_read_cache_set(c);
        for_each_possible_cpu(cpu) {
                struct bucket_stats_cache_set *p =
                        per_cpu_ptr(c->bucket_stats_percpu, cpu);

                memset(p->s, 0, sizeof(p->s));
                p->persistent_reserved = 0;
        }

        lg_global_unlock(&c->bucket_stats_lock);

        /* Clear bucket marks: */
        for_each_cache(ca, c, i)
                for_each_bucket(g, ca) {
                        bucket_cmpxchg(g, new, ({
                                new.owned_by_allocator  = 0;
                                new.is_metadata         = 0;
                                new.cached_sectors      = 0;
                                new.dirty_sectors       = 0;
                        }));
                        ca->oldest_gens[g - ca->buckets] = new.gen;
                }

        /* Walk allocator's references: */
        bch_mark_allocator_buckets(c);

        /* Walk btree: */
        while (c->gc_pos.phase < (int) BTREE_ID_NR) {
                int ret = c->btree_roots[c->gc_pos.phase].b
                        ? bch_gc_btree(c, (int) c->gc_pos.phase)
                        : 0;

                if (ret) {
                        bch_err(c, "btree gc failed: %d", ret);
                        set_bit(CACHE_SET_GC_FAILURE, &c->flags);
                        up_write(&c->gc_lock);
                        return;
                }

                gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
        }

        bch_mark_metadata(c);
        bch_mark_pending_btree_node_frees(c);
        bch_writeback_recalc_oldest_gens(c);

        for_each_cache(ca, c, i)
                atomic_long_set(&ca->saturated_count, 0);

        /* Indicates that gc is no longer in progress: */
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));

        up_write(&c->gc_lock);
        trace_bcache_gc_end(c);
        bch_time_stats_update(&c->btree_gc_time, start_time);

        /*
         * Wake up allocator in case it was waiting for buckets
         * because of not being able to inc gens
         */
        for_each_cache(ca, c, i)
                bch_wake_allocator(ca);
}

/* Btree coalescing */

static void recalc_packed_keys(struct btree *b)
{
        struct bkey_packed *k;

        memset(&b->nr, 0, sizeof(b->nr));

        BUG_ON(b->nsets != 1);

        for (k = btree_bkey_first(b, b->set);
             k != btree_bkey_last(b, b->set);
             k = bkey_next(k))
                btree_keys_account_key_add(&b->nr, 0, k);
}

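/*
 * Try to coalesce the sliding window of (up to GC_MERGE_NODES) adjacent nodes
 * in @old_nodes into one fewer node: conceptually, concatenate their live keys
 * and re-slice them at new boundaries, with each new node limited to 2/3 of a
 * node's worth of blocks.  E.g. three nodes that are each roughly 40% full can
 * be rewritten as two nodes at roughly 60%; if the keys wouldn't fit into one
 * fewer node under that limit, we bail out without touching anything.
 */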
static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
                               struct btree_iter *iter)
{
        struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
        struct cache_set *c = iter->c;
        unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
        unsigned blocks = btree_blocks(c) * 2 / 3;
        struct btree *new_nodes[GC_MERGE_NODES];
        struct btree_interior_update *as;
        struct btree_reserve *res;
        struct keylist keylist;
        struct bkey_format_state format_state;
        struct bkey_format new_format;

        memset(new_nodes, 0, sizeof(new_nodes));
        bch_keylist_init(&keylist, NULL, 0);

        /* Count keys that are not deleted */
        for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
                u64s += old_nodes[i]->nr.live_u64s;

        nr_old_nodes = nr_new_nodes = i;

        /* Check if all keys in @old_nodes could fit in one fewer node */
        if (nr_old_nodes <= 1 ||
            __vstruct_blocks(struct btree_node, c->block_bits,
                             DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
                return;

        res = bch_btree_reserve_get(c, parent, nr_old_nodes,
                                    BTREE_INSERT_NOFAIL|
                                    BTREE_INSERT_USE_RESERVE,
                                    NULL);
        if (IS_ERR(res)) {
                trace_bcache_btree_gc_coalesce_fail(c,
                                BTREE_GC_COALESCE_FAIL_RESERVE_GET);
                return;
        }

        if (bch_keylist_realloc(&keylist, NULL, 0,
                        (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
                trace_bcache_btree_gc_coalesce_fail(c,
                                BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
                goto out;
        }

        /* Find a format that all keys in @old_nodes can pack into */
        bch_bkey_format_init(&format_state);

        for (i = 0; i < nr_old_nodes; i++)
                __bch_btree_calc_format(&format_state, old_nodes[i]);

        new_format = bch_bkey_format_done(&format_state);

        /* Check if repacking would make any nodes too big to fit */
        for (i = 0; i < nr_old_nodes; i++)
                if (!bch_btree_node_format_fits(c, old_nodes[i], &new_format)) {
                        trace_bcache_btree_gc_coalesce_fail(c,
                                        BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
                        goto out;
                }

        trace_bcache_btree_gc_coalesce(c, parent, nr_old_nodes);

        as = bch_btree_interior_update_alloc(c);

        for (i = 0; i < nr_old_nodes; i++)
                bch_btree_interior_update_will_free_node(c, as, old_nodes[i]);

        /* Repack everything with @new_format and sort down to one bset */
        for (i = 0; i < nr_old_nodes; i++)
                new_nodes[i] = __btree_node_alloc_replacement(c, old_nodes[i],
                                                              new_format, res);

        /*
         * Conceptually we concatenate the nodes together and slice them
         * up at different boundaries.
         */
        for (i = nr_new_nodes - 1; i > 0; --i) {
                struct btree *n1 = new_nodes[i];
                struct btree *n2 = new_nodes[i - 1];

                struct bset *s1 = btree_bset_first(n1);
                struct bset *s2 = btree_bset_first(n2);
                struct bkey_packed *k, *last = NULL;

                /* Calculate how many keys from @n2 we could fit inside @n1 */
                u64s = 0;

                for (k = s2->start;
                     k < vstruct_last(s2) &&
                     vstruct_blocks_plus(n1->data, c->block_bits,
                                         u64s + k->u64s) <= blocks;
                     k = bkey_next(k)) {
                        last = k;
                        u64s += k->u64s;
                }

                if (u64s == le16_to_cpu(s2->u64s)) {
                        /* n2 fits entirely in n1 */
                        n1->key.k.p = n1->data->max_key = n2->data->max_key;

                        memcpy_u64s(vstruct_last(s1),
                                    s2->start,
                                    le16_to_cpu(s2->u64s));
                        le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));

                        set_btree_bset_end(n1, n1->set);

                        six_unlock_write(&n2->lock);
                        bch_btree_node_free_never_inserted(c, n2);
                        six_unlock_intent(&n2->lock);

                        memmove(new_nodes + i - 1,
                                new_nodes + i,
                                sizeof(new_nodes[0]) * (nr_new_nodes - i));
                        new_nodes[--nr_new_nodes] = NULL;
                } else if (u64s) {
                        /* move part of n2 into n1 */
                        n1->key.k.p = n1->data->max_key =
                                bkey_unpack_pos(n1, last);

                        n2->data->min_key =
                                btree_type_successor(iter->btree_id,
                                                     n1->data->max_key);

                        memcpy_u64s(vstruct_last(s1),
                                    s2->start, u64s);
                        le16_add_cpu(&s1->u64s, u64s);

                        memmove(s2->start,
                                vstruct_idx(s2, u64s),
                                (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
                        s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);

                        set_btree_bset_end(n1, n1->set);
                        set_btree_bset_end(n2, n2->set);
                }
        }

        for (i = 0; i < nr_new_nodes; i++) {
                struct btree *n = new_nodes[i];

                recalc_packed_keys(n);
                btree_node_reset_sib_u64s(n);

                bch_btree_build_aux_trees(n);
                six_unlock_write(&n->lock);

                bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
        }

        /*
         * The keys for the old nodes get deleted. We don't want to insert keys
         * that compare equal to the keys for the new nodes we'll also be
         * inserting - we can't because keys on a keylist must be strictly
         * greater than the previous keys, and we also don't need to since the
         * key for the new node will serve the same purpose (overwriting the key
         * for the old node).
         */
        for (i = 0; i < nr_old_nodes; i++) {
                struct bkey_i delete;
                unsigned j;

                for (j = 0; j < nr_new_nodes; j++)
                        if (!bkey_cmp(old_nodes[i]->key.k.p,
                                      new_nodes[j]->key.k.p))
                                goto next;

                bkey_init(&delete.k);
                delete.k.p = old_nodes[i]->key.k.p;
                bch_keylist_add_in_order(&keylist, &delete);
next:
                ;
        }

        /*
         * Keys for the new nodes get inserted: bch_btree_insert_keys() only
         * does the lookup once and thus expects the keys to be in sorted order
         * so we have to make sure the new keys are correctly ordered with
         * respect to the deleted keys added in the previous loop
         */
        for (i = 0; i < nr_new_nodes; i++)
                bch_keylist_add_in_order(&keylist, &new_nodes[i]->key);

        /* Insert the newly coalesced nodes */
        bch_btree_insert_node(parent, iter, &keylist, res, as);

        BUG_ON(!bch_keylist_empty(&keylist));

        BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);

        BUG_ON(!bch_btree_iter_node_replace(iter, new_nodes[0]));

        for (i = 0; i < nr_new_nodes; i++)
                btree_open_bucket_put(c, new_nodes[i]);

        /* Free the old nodes and update our sliding window */
        for (i = 0; i < nr_old_nodes; i++) {
                bch_btree_node_free_inmem(iter, old_nodes[i]);
                six_unlock_intent(&old_nodes[i]->lock);

                /*
                 * the index update might have triggered a split, in which case
                 * the nodes we coalesced - the new nodes we just created -
                 * might not be sibling nodes anymore - don't add them to the
                 * sliding window (except the first):
                 */
                if (!i) {
                        old_nodes[i] = new_nodes[i];
                } else {
                        old_nodes[i] = NULL;
                        if (new_nodes[i])
                                six_unlock_intent(&new_nodes[i]->lock);
                }
        }
out:
        bch_keylist_free(&keylist, NULL);
        bch_btree_reserve_put(c, res);
}

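/*
 * Walk the leaf nodes of @btree_id, maintaining a sliding window of the last
 * GC_MERGE_NODES nodes seen (relocked via their saved lock sequence numbers)
 * and handing each window to bch_coalesce_nodes().
 */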
static int bch_coalesce_btree(struct cache_set *c, enum btree_id btree_id)
{
        struct btree_iter iter;
        struct btree *b;
        unsigned i;

        /* Sliding window of adjacent btree nodes */
        struct btree *merge[GC_MERGE_NODES];
        u32 lock_seq[GC_MERGE_NODES];

        /*
         * XXX: We don't have a good way of positively matching on sibling nodes
         * that have the same parent - this code works by handling the cases
         * where they might not have the same parent, and is thus fragile. Ugh.
         *
         * Perhaps redo this to use multiple linked iterators?
         */
        memset(merge, 0, sizeof(merge));

        __for_each_btree_node(&iter, c, btree_id, POS_MIN, 0, b, U8_MAX) {
                memmove(merge + 1, merge,
                        sizeof(merge) - sizeof(merge[0]));
                memmove(lock_seq + 1, lock_seq,
                        sizeof(lock_seq) - sizeof(lock_seq[0]));

                merge[0] = b;

                for (i = 1; i < GC_MERGE_NODES; i++) {
                        if (!merge[i] ||
                            !six_relock_intent(&merge[i]->lock, lock_seq[i]))
                                break;

                        if (merge[i]->level != merge[0]->level) {
                                six_unlock_intent(&merge[i]->lock);
                                break;
                        }
                }
                memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));

                bch_coalesce_nodes(merge, &iter);

                for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
                        lock_seq[i] = merge[i]->lock.state.seq;
                        six_unlock_intent(&merge[i]->lock);
                }

                lock_seq[0] = merge[0]->lock.state.seq;

                if (test_bit(CACHE_SET_GC_STOPPING, &c->flags)) {
                        bch_btree_iter_unlock(&iter);
                        return -ESHUTDOWN;
                }

                bch_btree_iter_cond_resched(&iter);

                /*
                 * If the parent node wasn't relocked, it might have been split
                 * and the nodes in our sliding window might not have the same
                 * parent anymore - blow away the sliding window:
                 */
                if (iter.nodes[iter.level + 1] &&
                    !btree_node_intent_locked(&iter, iter.level + 1))
                        memset(merge + 1, 0,
                               (GC_MERGE_NODES - 1) * sizeof(merge[0]));
        }
        return bch_btree_iter_unlock(&iter);
}

747
748 /**
749  * bch_coalesce - coalesce adjacent nodes with low occupancy
750  */
751 void bch_coalesce(struct cache_set *c)
752 {
753         u64 start_time;
754         enum btree_id id;
755
756         if (btree_gc_coalesce_disabled(c))
757                 return;
758
759         if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
760                 return;
761
762         down_read(&c->gc_lock);
763         trace_bcache_gc_coalesce_start(c);
764         start_time = local_clock();
765
766         for (id = 0; id < BTREE_ID_NR; id++) {
767                 int ret = c->btree_roots[id].b
768                         ? bch_coalesce_btree(c, id)
769                         : 0;
770
771                 if (ret) {
772                         if (ret != -ESHUTDOWN)
773                                 bch_err(c, "btree coalescing failed: %d", ret);
774                         set_bit(CACHE_SET_GC_FAILURE, &c->flags);
775                         return;
776                 }
777         }
778
779         bch_time_stats_update(&c->btree_coalesce_time, start_time);
780         trace_bcache_gc_coalesce_end(c);
781         up_read(&c->gc_lock);
782 }
783
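/*
 * GC thread: run bch_gc() and bch_coalesce() whenever the write clock advances
 * by 1/16th of the cache set's capacity, or when something bumps c->kick_gc to
 * request a pass explicitly.
 */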
static int bch_gc_thread(void *arg)
{
        struct cache_set *c = arg;
        struct io_clock *clock = &c->io_clock[WRITE];
        unsigned long last = atomic_long_read(&clock->now);
        unsigned last_kick = atomic_read(&c->kick_gc);

        set_freezable();

        while (1) {
                unsigned long next = last + c->capacity / 16;

                while (atomic_long_read(&clock->now) < next) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop()) {
                                __set_current_state(TASK_RUNNING);
                                return 0;
                        }

                        if (atomic_read(&c->kick_gc) != last_kick) {
                                __set_current_state(TASK_RUNNING);
                                break;
                        }

                        bch_io_clock_schedule_timeout(clock, next);
                        try_to_freeze();
                }

                last = atomic_long_read(&clock->now);
                last_kick = atomic_read(&c->kick_gc);

                bch_gc(c);
                bch_coalesce(c);

                debug_check_no_locks_held();
        }

        return 0;
}

void bch_gc_thread_stop(struct cache_set *c)
{
        set_bit(CACHE_SET_GC_STOPPING, &c->flags);

        if (!IS_ERR_OR_NULL(c->gc_thread))
                kthread_stop(c->gc_thread);
}

int bch_gc_thread_start(struct cache_set *c)
{
        clear_bit(CACHE_SET_GC_STOPPING, &c->flags);

        c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
        if (IS_ERR(c->gc_thread))
                return PTR_ERR(c->gc_thread);

        wake_up_process(c->gc_thread);
        return 0;
}

/* Initial GC computes bucket marks during startup */

static void bch_initial_gc_btree(struct cache_set *c, enum btree_id id)
{
        struct btree_iter iter;
        struct btree *b;
        struct range_checks r;

        btree_node_range_checks_init(&r, 0);

        if (!c->btree_roots[id].b)
                return;

        /*
         * We have to hit every btree node before starting journal replay, in
         * order for the journal seq blacklist machinery to work:
         */
        for_each_btree_node(&iter, c, id, POS_MIN, 0, b) {
                btree_node_range_checks(c, b, &r);

                if (btree_node_has_ptrs(b)) {
                        struct btree_node_iter node_iter;
                        struct bkey unpacked;
                        struct bkey_s_c k;

                        for_each_btree_node_key_unpack(b, k, &node_iter,
                                                       btree_node_is_extents(b),
                                                       &unpacked)
                                bch_btree_mark_key_initial(c, btree_node_type(b), k);
                }

                bch_btree_iter_cond_resched(&iter);
        }

        bch_btree_iter_unlock(&iter);

        bch_btree_mark_key(c, BKEY_TYPE_BTREE,
                           bkey_i_to_s_c(&c->btree_roots[id].b->key));
}

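/*
 * Initial GC: walk every btree (and mark what the journal still references,
 * via bch_journal_mark()) to establish bucket marks before journal replay
 * runs, then mark the journal/prio buckets themselves.
 */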
int bch_initial_gc(struct cache_set *c, struct list_head *journal)
{
        enum btree_id id;

        if (journal) {
                for (id = 0; id < BTREE_ID_NR; id++)
                        bch_initial_gc_btree(c, id);

                bch_journal_mark(c, journal);
        }

        /*
         * Skip past versions that might have been used (as nonces) but hadn't
         * had their pointers written:
         */
        if (c->sb.encryption_type)
                atomic64_add(1 << 16, &c->key_version);

        bch_mark_metadata(c);

        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        set_bit(CACHE_SET_INITIAL_GC_DONE, &c->flags);

        return 0;
}