libbcachefs/btree_gc.c
1 /*
2  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3  * Copyright (C) 2014 Datera Inc.
4  */
5
6 #include "bcachefs.h"
7 #include "alloc_background.h"
8 #include "alloc_foreground.h"
9 #include "bkey_methods.h"
10 #include "btree_locking.h"
11 #include "btree_update_interior.h"
12 #include "btree_io.h"
13 #include "btree_gc.h"
14 #include "buckets.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "ec.h"
18 #include "error.h"
19 #include "extents.h"
20 #include "journal.h"
21 #include "journal_io.h"
22 #include "keylist.h"
23 #include "move.h"
24 #include "replicas.h"
25 #include "super-io.h"
26
27 #include <linux/slab.h>
28 #include <linux/bitops.h>
29 #include <linux/freezer.h>
30 #include <linux/kthread.h>
31 #include <linux/preempt.h>
32 #include <linux/rcupdate.h>
33 #include <linux/sched/task.h>
34 #include <trace/events/bcachefs.h>
35
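/*
 * c->gc_pos records how far the current gc pass has gotten; it's published
 * under the gc_pos_lock seqcount so that the bucket marking code can get a
 * consistent view of whether gc has already visited a given position.
 * gc_pos_set() additionally asserts that gc only ever moves forward.
 */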
36 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
37 {
38         write_seqcount_begin(&c->gc_pos_lock);
39         c->gc_pos = new_pos;
40         write_seqcount_end(&c->gc_pos_lock);
41 }
42
43 static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
44 {
45         BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
46         __gc_pos_set(c, new_pos);
47 }
48
49 /* range_checks - for validating min/max pos of each btree node: */
50
51 struct range_checks {
52         struct range_level {
53                 struct bpos     min;
54                 struct bpos     max;
55         }                       l[BTREE_MAX_DEPTH];
56         unsigned                depth;
57 };
58
59 static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
60 {
61         unsigned i;
62
63         for (i = 0; i < BTREE_MAX_DEPTH; i++)
64                 r->l[i].min = r->l[i].max = POS_MIN;
65         r->depth = depth;
66 }
67
68 static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
69                                     struct range_checks *r)
70 {
71         struct range_level *l = &r->l[b->level];
72
73         struct bpos expected_min = bkey_cmp(l->min, l->max)
74                 ? btree_type_successor(b->btree_id, l->max)
75                 : l->max;
76
77         bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
78                 "btree node has incorrect min key: %llu:%llu != %llu:%llu",
79                 b->data->min_key.inode,
80                 b->data->min_key.offset,
81                 expected_min.inode,
82                 expected_min.offset);
83
84         l->max = b->data->max_key;
85
86         if (b->level > r->depth) {
87                 l = &r->l[b->level - 1];
88
89                 bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
90                         "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
91                         b->data->min_key.inode,
92                         b->data->min_key.offset,
93                         l->min.inode,
94                         l->min.offset);
95
96                 bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
97                         "btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
98                         b->data->max_key.inode,
99                         b->data->max_key.offset,
100                         l->max.inode,
101                         l->max.offset);
102
103                 if (bkey_cmp(b->data->max_key, POS_MAX))
104                         l->min = l->max =
105                                 btree_type_successor(b->btree_id,
106                                                      b->data->max_key);
107         }
108 }
109
110 /* marking of btree keys/nodes: */
111
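/*
 * Mark a single key for gc: on the initial pass this also checks key versions
 * against the journal, makes sure the key's replicas entry is marked in the
 * superblock, and repairs bucket gens that are missing or in the future; in
 * all passes it updates oldest_gens, reports the most stale pointer seen via
 * @max_stale, and accounts the key with BCH_BUCKET_MARK_GC.
 */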
112 static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
113                             u8 *max_stale, bool initial)
114 {
115         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
116         const struct bch_extent_ptr *ptr;
117         struct gc_pos pos = { 0 };
118         unsigned flags =
119                 BCH_BUCKET_MARK_GC|
120                 (initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
121         int ret = 0;
122
123         if (initial) {
124                 BUG_ON(journal_seq_verify(c) &&
125                        k.k->version.lo > journal_cur_seq(&c->journal));
126
127                 if (k.k->version.lo > atomic64_read(&c->key_version))
128                         atomic64_set(&c->key_version, k.k->version.lo);
129
130                 if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
131                     fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
132                                 "superblock not marked as containing replicas (type %u)",
133                                 k.k->type)) {
134                         ret = bch2_mark_bkey_replicas(c, k);
135                         if (ret)
136                                 return ret;
137                 }
138
139                 bkey_for_each_ptr(ptrs, ptr) {
140                         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
141                         size_t b = PTR_BUCKET_NR(ca, ptr);
142                         struct bucket *g = PTR_BUCKET(ca, ptr);
143
144                         if (mustfix_fsck_err_on(!g->gen_valid, c,
145                                         "found ptr with missing gen in alloc btree,\n"
146                                         "type %u gen %u",
147                                         k.k->type, ptr->gen)) {
148                                 g->_mark.gen = ptr->gen;
149                                 g->gen_valid = 1;
150                                 bucket_set_dirty(ca, b);
151                         }
152
153                         if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
154                                         "%u ptr gen in the future: %u > %u",
155                                         k.k->type, ptr->gen, g->mark.gen)) {
156                                 g->_mark.gen = ptr->gen;
157                                 g->gen_valid = 1;
158                                 bucket_set_dirty(ca, b);
159                                 set_bit(BCH_FS_FIXED_GENS, &c->flags);
160                         }
161                 }
162         }
163
164         bkey_for_each_ptr(ptrs, ptr) {
165                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
166                 size_t b = PTR_BUCKET_NR(ca, ptr);
167
168                 if (gen_after(ca->oldest_gens[b], ptr->gen))
169                         ca->oldest_gens[b] = ptr->gen;
170
171                 *max_stale = max(*max_stale, ptr_stale(ca, ptr));
172         }
173
174         bch2_mark_key(c, k, true, k.k->size, pos, NULL, 0, flags);
175 fsck_err:
176         return ret;
177 }
178
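/*
 * Mark every key in a btree node, skipping btree types whose keys don't need
 * gc; *max_stale is set to the most stale pointer found in the node.
 */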
179 static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
180                               u8 *max_stale, bool initial)
181 {
182         struct btree_node_iter iter;
183         struct bkey unpacked;
184         struct bkey_s_c k;
185         int ret = 0;
186
187         *max_stale = 0;
188
189         if (!btree_node_type_needs_gc(btree_node_type(b)))
190                 return 0;
191
192         for_each_btree_node_key_unpack(b, k, &iter,
193                                        &unpacked) {
194                 bch2_bkey_debugcheck(c, b, k);
195
196                 ret = bch2_gc_mark_key(c, k, max_stale, initial);
197                 if (ret)
198                         break;
199         }
200
201         return ret;
202 }
203
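/*
 * Walk one btree in order, marking each node's keys (and finally the root
 * key) and advancing gc_pos as we go. On a normal (non-initial) run, nodes
 * whose pointers have gotten too stale are rewritten so their buckets can be
 * reclaimed.
 */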
204 static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
205                          bool initial)
206 {
207         struct btree_iter iter;
208         struct btree *b;
209         struct range_checks r;
210         unsigned depth = btree_node_type_needs_gc(btree_id) ? 0 : 1;
211         u8 max_stale;
212         int ret = 0;
213
214         gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
215
216         /*
217          * If expensive_debug_checks is on, run range_checks on all leaf nodes.
218          *
219          * On startup, we also have to read every btree node (XXX: only if it was
220          * an unclean shutdown).
221          */
222         if (initial || expensive_debug_checks(c))
223                 depth = 0;
224
225         btree_node_range_checks_init(&r, depth);
226
227         __for_each_btree_node(&iter, c, btree_id, POS_MIN,
228                               0, depth, BTREE_ITER_PREFETCH, b) {
229                 btree_node_range_checks(c, b, &r);
230
231                 bch2_verify_btree_nr_keys(b);
232
233                 ret = btree_gc_mark_node(c, b, &max_stale, initial);
234                 if (ret)
235                         break;
236
237                 gc_pos_set(c, gc_pos_btree_node(b));
238
239                 if (!initial) {
240                         if (max_stale > 64)
241                                 bch2_btree_node_rewrite(c, &iter,
242                                                 b->data->keys.seq,
243                                                 BTREE_INSERT_USE_RESERVE|
244                                                 BTREE_INSERT_NOWAIT|
245                                                 BTREE_INSERT_GC_LOCK_HELD);
246                         else if (!btree_gc_rewrite_disabled(c) &&
247                                  (btree_gc_always_rewrite(c) || max_stale > 16))
248                                 bch2_btree_node_rewrite(c, &iter,
249                                                 b->data->keys.seq,
250                                                 BTREE_INSERT_NOWAIT|
251                                                 BTREE_INSERT_GC_LOCK_HELD);
252                 }
253
254                 bch2_btree_iter_cond_resched(&iter);
255         }
256         ret = bch2_btree_iter_unlock(&iter) ?: ret;
257         if (ret)
258                 return ret;
259
260         mutex_lock(&c->btree_root_lock);
261
262         b = c->btree_roots[btree_id].b;
263         if (!btree_node_fake(b))
264                 bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
265                                  &max_stale, initial);
266         gc_pos_set(c, gc_pos_btree_root(b->btree_id));
267
268         mutex_unlock(&c->btree_root_lock);
269         return 0;
270 }
271
272 static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
273 {
274         return  (int) btree_id_to_gc_phase(l) -
275                 (int) btree_id_to_gc_phase(r);
276 }
277
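/*
 * Mark all btrees, in gc phase order. If a journal replay list was passed in
 * (initial gc at startup), keys still sitting in the journal are marked too,
 * since they haven't been replayed into the btrees yet.
 */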
278 static int bch2_gc_btrees(struct bch_fs *c, struct list_head *journal,
279                           bool initial)
280 {
281         enum btree_id ids[BTREE_ID_NR];
282         u8 max_stale;
283         unsigned i;
284
285         for (i = 0; i < BTREE_ID_NR; i++)
286                 ids[i] = i;
287         bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
288
289         for (i = 0; i < BTREE_ID_NR; i++) {
290                 enum btree_id id = ids[i];
291                 enum btree_node_type type = __btree_node_type(0, id);
292
293                 int ret = bch2_gc_btree(c, id, initial);
294                 if (ret)
295                         return ret;
296
297                 if (journal && btree_node_type_needs_gc(type)) {
298                         struct bkey_i *k, *n;
299                         struct jset_entry *j;
300                         struct journal_replay *r;
301                         int ret;
302
303                         list_for_each_entry(r, journal, list)
304                                 for_each_jset_key(k, n, j, &r->j) {
305                                         if (type == __btree_node_type(j->level, j->btree_id)) {
306                                                 ret = bch2_gc_mark_key(c,
307                                                         bkey_i_to_s_c(k),
308                                                         &max_stale, initial);
309                                                 if (ret)
310                                                         return ret;
311                                         }
312                                 }
313                 }
314         }
315
316         return 0;
317 }
318
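/* Mark the buckets covering sectors [start, end) as the given metadata type: */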
319 static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
320                                   u64 start, u64 end,
321                                   enum bch_data_type type,
322                                   unsigned flags)
323 {
324         u64 b = sector_to_bucket(ca, start);
325
326         do {
327                 unsigned sectors =
328                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
329
330                 bch2_mark_metadata_bucket(c, ca, b, type, sectors,
331                                           gc_phase(GC_PHASE_SB), flags);
332                 b++;
333                 start += sectors;
334         } while (start < end);
335 }
336
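/*
 * Mark the buckets containing a device's superblocks and journal as
 * BCH_DATA_SB/BCH_DATA_JOURNAL, so the space they occupy is accounted for:
 */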
337 void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
338                               unsigned flags)
339 {
340         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
341         unsigned i;
342         u64 b;
343
344         /*
345          * This conditional is kind of gross, but we may be called from the
346          * device add path, before the new device has actually been added to the
347          * running filesystem:
348          */
349         if (c) {
350                 lockdep_assert_held(&c->sb_lock);
351                 percpu_down_read_preempt_disable(&c->mark_lock);
352         } else {
353                 preempt_disable();
354         }
355
356         for (i = 0; i < layout->nr_superblocks; i++) {
357                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
358
359                 if (offset == BCH_SB_SECTOR)
360                         mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
361                                               BCH_DATA_SB, flags);
362
363                 mark_metadata_sectors(c, ca, offset,
364                                       offset + (1 << layout->sb_max_size_bits),
365                                       BCH_DATA_SB, flags);
366         }
367
368         for (i = 0; i < ca->journal.nr; i++) {
369                 b = ca->journal.buckets[i];
370                 bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
371                                           ca->mi.bucket_size,
372                                           gc_phase(GC_PHASE_SB), flags);
373         }
374
375         if (c) {
376                 percpu_up_read_preempt_enable(&c->mark_lock);
377         } else {
378                 preempt_enable();
379         }
380 }
381
382 static void bch2_mark_superblocks(struct bch_fs *c)
383 {
384         struct bch_dev *ca;
385         unsigned i;
386
387         mutex_lock(&c->sb_lock);
388         gc_pos_set(c, gc_phase(GC_PHASE_SB));
389
390         for_each_online_member(ca, c, i)
391                 bch2_mark_dev_superblock(c, ca, BCH_BUCKET_MARK_GC);
392         mutex_unlock(&c->sb_lock);
393 }
394
395 /* Also see bch2_pending_btree_node_free_insert_done() */
396 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
397 {
398         struct gc_pos pos = { 0 };
399         struct btree_update *as;
400         struct pending_btree_node_free *d;
401
402         mutex_lock(&c->btree_interior_update_lock);
403         gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
404
405         for_each_pending_btree_node_free(c, as, d)
406                 if (d->index_update_done)
407                         bch2_mark_key(c, bkey_i_to_s_c(&d->key),
408                                       true, 0,
409                                       pos, NULL, 0,
410                                       BCH_BUCKET_MARK_GC);
411
412         mutex_unlock(&c->btree_interior_update_lock);
413 }
414
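/*
 * Buckets on the allocator's freelists and in open_buckets aren't referenced
 * by anything in the btree; mark them as owned by the allocator so gc doesn't
 * conclude they're unused.
 */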
415 static void bch2_mark_allocator_buckets(struct bch_fs *c)
416 {
417         struct bch_dev *ca;
418         struct open_bucket *ob;
419         size_t i, j, iter;
420         unsigned ci;
421
422         percpu_down_read_preempt_disable(&c->mark_lock);
423
424         spin_lock(&c->freelist_lock);
425         gc_pos_set(c, gc_pos_alloc(c, NULL));
426
427         for_each_member_device(ca, c, ci) {
428                 fifo_for_each_entry(i, &ca->free_inc, iter)
429                         bch2_mark_alloc_bucket(c, ca, i, true,
430                                                gc_pos_alloc(c, NULL),
431                                                BCH_BUCKET_MARK_GC);
432
433
434
435                 for (j = 0; j < RESERVE_NR; j++)
436                         fifo_for_each_entry(i, &ca->free[j], iter)
437                                 bch2_mark_alloc_bucket(c, ca, i, true,
438                                                        gc_pos_alloc(c, NULL),
439                                                        BCH_BUCKET_MARK_GC);
440         }
441
442         spin_unlock(&c->freelist_lock);
443
444         for (ob = c->open_buckets;
445              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
446              ob++) {
447                 spin_lock(&ob->lock);
448                 if (ob->valid) {
449                         gc_pos_set(c, gc_pos_alloc(c, ob));
450                         ca = bch_dev_bkey_exists(c, ob->ptr.dev);
451                         bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
452                                                gc_pos_alloc(c, ob),
453                                                BCH_BUCKET_MARK_GC);
454                 }
455                 spin_unlock(&ob->lock);
456         }
457
458         percpu_up_read_preempt_enable(&c->mark_lock);
459 }
460
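/*
 * Free the gc-only ([1]) copies of the stripes radix tree, bucket arrays and
 * usage counters allocated by bch2_gc_start():
 */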
461 static void bch2_gc_free(struct bch_fs *c)
462 {
463         struct bch_dev *ca;
464         unsigned i;
465
466         genradix_free(&c->stripes[1]);
467
468         for_each_member_device(ca, c, i) {
469                 kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
470                         sizeof(struct bucket_array) +
471                         ca->mi.nbuckets * sizeof(struct bucket));
472                 ca->buckets[1] = NULL;
473
474                 free_percpu(ca->usage[1]);
475                 ca->usage[1] = NULL;
476         }
477
478         percpu_down_write(&c->mark_lock);
479
480         free_percpu(c->usage[1]);
481         c->usage[1] = NULL;
482
483         percpu_up_write(&c->mark_lock);
484 }
485
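/*
 * Copy the counts gc just computed (the [1] copies) over the live ([0])
 * copies without comparing them - used for the initial gc at startup, when
 * there's nothing meaningful to check against yet:
 */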
486 static void bch2_gc_done_nocheck(struct bch_fs *c)
487 {
488         struct bch_dev *ca;
489         unsigned i;
490
491         {
492                 struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
493                 struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
494                 struct stripe *dst, *src;
495
496                 c->ec_stripes_heap.used = 0;
497
498                 while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
499                        (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
500                         *dst = *src;
501
502                         if (dst->alive)
503                                 bch2_stripes_heap_insert(c, dst, dst_iter.pos);
504
505                         genradix_iter_advance(&dst_iter, &c->stripes[0]);
506                         genradix_iter_advance(&src_iter, &c->stripes[1]);
507                 }
508         }
509
510         for_each_member_device(ca, c, i) {
511                 struct bucket_array *src = __bucket_array(ca, 1);
512
513                 memcpy(__bucket_array(ca, 0), src,
514                        sizeof(struct bucket_array) +
515                        sizeof(struct bucket) * src->nbuckets);
516         };
517
518         for_each_member_device(ca, c, i) {
519                 unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
520                 struct bch_dev_usage *dst = (void *)
521                         bch2_acc_percpu_u64s((void *) ca->usage[0], nr);
522                 struct bch_dev_usage *src = (void *)
523                         bch2_acc_percpu_u64s((void *) ca->usage[1], nr);
524
525                 *dst = *src;
526         }
527
528         {
529                 unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
530                         c->replicas.nr;
531                 struct bch_fs_usage *dst = (void *)
532                         bch2_acc_percpu_u64s((void *) c->usage[0], nr);
533                 struct bch_fs_usage *src = (void *)
534                         bch2_acc_percpu_u64s((void *) c->usage[1], nr);
535
536                 memcpy(&dst->s.gc_start[0],
537                        &src->s.gc_start[0],
538                        nr * sizeof(u64) - offsetof(typeof(*dst), s.gc_start));
539         }
540 }
541
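/*
 * Reconcile what gc computed with the live accounting: any discrepancy in
 * stripe, bucket, per device or filesystem usage is logged and then fixed by
 * copying in the value gc computed.
 */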
542 static void bch2_gc_done(struct bch_fs *c, bool initial)
543 {
544         struct bch_dev *ca;
545         unsigned i;
546
547 #define copy_field(_f, _msg, ...)                                       \
548         if (dst->_f != src->_f) {                                       \
549                 bch_err(c, _msg ": got %llu, should be %llu, fixing"    \
550                         , ##__VA_ARGS__, dst->_f, src->_f);             \
551                 dst->_f = src->_f;                                      \
552         }
553 #define copy_stripe_field(_f, _msg, ...)                                \
554         if (dst->_f != src->_f) {                                       \
555                 bch_err_ratelimited(c, "stripe %zu has wrong "_msg      \
556                         ": got %u, should be %u, fixing",               \
557                         dst_iter.pos, ##__VA_ARGS__,                    \
558                         dst->_f, src->_f);                              \
559                 dst->_f = src->_f;                                      \
560                 dst->dirty = true;                                      \
561         }
562 #define copy_bucket_field(_f)                                           \
563         if (dst->b[b].mark._f != src->b[b].mark._f) {                   \
564                 bch_err_ratelimited(c, "dev %u bucket %zu has wrong " #_f\
565                         ": got %u, should be %u, fixing",               \
566                         i, b, dst->b[b].mark._f, src->b[b].mark._f);    \
567                 dst->b[b]._mark._f = src->b[b].mark._f;                 \
568         }
569 #define copy_dev_field(_f, _msg, ...)                                   \
570         copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
571 #define copy_fs_field(_f, _msg, ...)                                    \
572         copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
573
574         percpu_down_write(&c->mark_lock);
575
576         if (initial) {
577                 bch2_gc_done_nocheck(c);
578                 goto out;
579         }
580
581         {
582                 struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
583                 struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
584                 struct stripe *dst, *src;
585                 unsigned i;
586
587                 c->ec_stripes_heap.used = 0;
588
589                 while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
590                        (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
591                         BUG_ON(src_iter.pos != dst_iter.pos);
592
593                         copy_stripe_field(alive,        "alive");
594                         copy_stripe_field(sectors,      "sectors");
595                         copy_stripe_field(algorithm,    "algorithm");
596                         copy_stripe_field(nr_blocks,    "nr_blocks");
597                         copy_stripe_field(nr_redundant, "nr_redundant");
598                         copy_stripe_field(blocks_nonempty,
599                                           "blocks_nonempty");
600
601                         for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
602                                 copy_stripe_field(block_sectors[i],
603                                                   "block_sectors[%u]", i);
604
605                         if (dst->alive)
606                                 bch2_stripes_heap_insert(c, dst, dst_iter.pos);
607
608                         genradix_iter_advance(&dst_iter, &c->stripes[0]);
609                         genradix_iter_advance(&src_iter, &c->stripes[1]);
610                 }
611         }
612
613         for_each_member_device(ca, c, i) {
614                 struct bucket_array *dst = __bucket_array(ca, 0);
615                 struct bucket_array *src = __bucket_array(ca, 1);
616                 size_t b;
617
618                 if (initial) {
619                         memcpy(dst, src,
620                                sizeof(struct bucket_array) +
621                                sizeof(struct bucket) * dst->nbuckets);
622                 }
623
624                 for (b = 0; b < src->nbuckets; b++) {
625                         copy_bucket_field(gen);
626                         copy_bucket_field(data_type);
627                         copy_bucket_field(owned_by_allocator);
628                         copy_bucket_field(stripe);
629                         copy_bucket_field(dirty_sectors);
630                         copy_bucket_field(cached_sectors);
631                 }
632         };
633
634         for_each_member_device(ca, c, i) {
635                 unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
636                 struct bch_dev_usage *dst = (void *)
637                         bch2_acc_percpu_u64s((void *) ca->usage[0], nr);
638                 struct bch_dev_usage *src = (void *)
639                         bch2_acc_percpu_u64s((void *) ca->usage[1], nr);
640                 unsigned b;
641
642                 for (b = 0; b < BCH_DATA_NR; b++)
643                         copy_dev_field(buckets[b],
644                                        "buckets[%s]", bch2_data_types[b]);
645                 copy_dev_field(buckets_alloc, "buckets_alloc");
646                 copy_dev_field(buckets_ec, "buckets_ec");
647
648                 for (b = 0; b < BCH_DATA_NR; b++)
649                         copy_dev_field(sectors[b],
650                                        "sectors[%s]", bch2_data_types[b]);
651                 copy_dev_field(sectors_fragmented,
652                                "sectors_fragmented");
653         }
654
655         {
656                 unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
657                         c->replicas.nr;
658                 struct bch_fs_usage *dst = (void *)
659                         bch2_acc_percpu_u64s((void *) c->usage[0], nr);
660                 struct bch_fs_usage *src = (void *)
661                         bch2_acc_percpu_u64s((void *) c->usage[1], nr);
662
663                 copy_fs_field(s.hidden,         "hidden");
664                 copy_fs_field(s.data,           "data");
665                 copy_fs_field(s.cached,         "cached");
666                 copy_fs_field(s.reserved,       "reserved");
667                 copy_fs_field(s.nr_inodes,      "nr_inodes");
668
669                 for (i = 0; i < BCH_REPLICAS_MAX; i++)
670                         copy_fs_field(persistent_reserved[i],
671                                       "persistent_reserved[%i]", i);
672
673                 for (i = 0; i < c->replicas.nr; i++) {
674                         /*
675                          * XXX: print out replicas entry
676                          */
677                         copy_fs_field(data[i], "data[%i]", i);
678                 }
679         }
680 out:
681         percpu_up_write(&c->mark_lock);
682
683 #undef copy_fs_field
684 #undef copy_dev_field
685 #undef copy_bucket_field
686 #undef copy_stripe_field
687 #undef copy_field
688 }
689
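/*
 * Allocate the gc-only ([1]) copies of the usage counters, bucket arrays and
 * stripes radix tree that this gc pass will mark into; bucket gens are seeded
 * from the live copies.
 */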
690 static int bch2_gc_start(struct bch_fs *c)
691 {
692         struct bch_dev *ca;
693         unsigned i;
694
695         /*
696          * indicate to stripe code that we need to allocate for the gc stripes
697          * radix tree, too
698          */
699         gc_pos_set(c, gc_phase(GC_PHASE_START));
700
701         percpu_down_write(&c->mark_lock);
702         BUG_ON(c->usage[1]);
703
704         c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
705                                          sizeof(u64) * c->replicas.nr,
706                                          sizeof(u64),
707                                          GFP_KERNEL);
708         percpu_up_write(&c->mark_lock);
709
710         if (!c->usage[1])
711                 return -ENOMEM;
712
713         for_each_member_device(ca, c, i) {
714                 BUG_ON(ca->buckets[1]);
715                 BUG_ON(ca->usage[1]);
716
717                 ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
718                                 ca->mi.nbuckets * sizeof(struct bucket),
719                                 GFP_KERNEL|__GFP_ZERO);
720                 if (!ca->buckets[1]) {
721                         percpu_ref_put(&ca->ref);
722                         return -ENOMEM;
723                 }
724
725                 ca->usage[1] = alloc_percpu(struct bch_dev_usage);
726                 if (!ca->usage[1]) {
727                         percpu_ref_put(&ca->ref);
728                         return -ENOMEM;
729                 }
730         }
731
732         percpu_down_write(&c->mark_lock);
733
734         for_each_member_device(ca, c, i) {
735                 struct bucket_array *dst = __bucket_array(ca, 1);
736                 struct bucket_array *src = __bucket_array(ca, 0);
737                 size_t b;
738
739                 dst->first_bucket       = src->first_bucket;
740                 dst->nbuckets           = src->nbuckets;
741
742                 for (b = 0; b < src->nbuckets; b++)
743                         dst->b[b]._mark.gen = src->b[b].mark.gen;
744         };
745
746         percpu_up_write(&c->mark_lock);
747
748         return bch2_ec_mem_alloc(c, true);
749 }
750
751 /**
752  * bch2_gc - walk _all_ references to buckets, and recompute them:
753  *
754  * Order matters here:
755  *  - Concurrent GC relies on the fact that we have a total ordering for
756  *    everything that GC walks - see gc_will_visit_node(),
757  *    gc_will_visit_root()
758  *
759  *  - also, references move around in the course of index updates and
760  *    various other crap: everything needs to agree on the ordering
761  *    references are allowed to move around in - e.g., we're allowed to
762  *    start with a reference owned by an open_bucket (the allocator) and
763  *    move it to the btree, but not the reverse.
764  *
765  *    This is necessary to ensure that gc doesn't miss references that
766  *    move around - if references move backwards in the ordering GC
767  *    uses, GC could skip past them
768  */
769 int bch2_gc(struct bch_fs *c, struct list_head *journal, bool initial)
770 {
771         struct bch_dev *ca;
772         u64 start_time = local_clock();
773         unsigned i, iter = 0;
774         int ret;
775
776         trace_gc_start(c);
777
778         down_write(&c->gc_lock);
779 again:
780         ret = bch2_gc_start(c);
781         if (ret)
782                 goto out;
783
784         bch2_mark_superblocks(c);
785
786         ret = bch2_gc_btrees(c, journal, initial);
787         if (ret)
788                 goto out;
789
790         bch2_mark_pending_btree_node_frees(c);
791         bch2_mark_allocator_buckets(c);
792
793         c->gc_count++;
794 out:
795         if (!ret && test_bit(BCH_FS_FIXED_GENS, &c->flags)) {
796                 /*
797                  * XXX: make sure gens we fixed got saved
798                  */
799                 if (iter++ <= 2) {
800                         bch_info(c, "Fixed gens, restarting mark and sweep:");
801                         clear_bit(BCH_FS_FIXED_GENS, &c->flags);
802                         goto again;
803                 }
804
805                 bch_info(c, "Unable to fix bucket gens, looping");
806                 ret = -EINVAL;
807         }
808
809         if (!ret)
810                 bch2_gc_done(c, initial);
811
812         /* Indicates that gc is no longer in progress: */
813         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
814
815         bch2_gc_free(c);
816         up_write(&c->gc_lock);
817
818         if (!ret && initial)
819                 set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
820
821         trace_gc_end(c);
822         bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
823
824         /*
825          * Wake up allocator in case it was waiting for buckets
826          * because of not being able to inc gens
827          */
828         for_each_member_device(ca, c, i)
829                 bch2_wake_allocator(ca);
830
831         /*
832          * At startup, allocations can happen directly instead of via the
833          * allocator thread - issue wakeup in case they blocked on gc_lock:
834          */
835         closure_wake_up(&c->freelist_wait);
836         return ret;
837 }
838
839 /* Btree coalescing */
840
841 static void recalc_packed_keys(struct btree *b)
842 {
843         struct bset *i = btree_bset_first(b);
844         struct bkey_packed *k;
845
846         memset(&b->nr, 0, sizeof(b->nr));
847
848         BUG_ON(b->nsets != 1);
849
850         vstruct_for_each(i, k)
851                 btree_keys_account_key_add(&b->nr, 0, k);
852 }
853
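/*
 * Given a window of up to GC_MERGE_NODES adjacent sibling nodes, try to
 * rewrite their keys into one fewer node; bails out (with a tracepoint) if
 * the keys wouldn't fit, if they can't be repacked into a common format, or
 * if we can't get a btree update reservation.
 */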
854 static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
855                                 struct btree *old_nodes[GC_MERGE_NODES])
856 {
857         struct btree *parent = btree_node_parent(iter, old_nodes[0]);
858         unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
859         unsigned blocks = btree_blocks(c) * 2 / 3;
860         struct btree *new_nodes[GC_MERGE_NODES];
861         struct btree_update *as;
862         struct keylist keylist;
863         struct bkey_format_state format_state;
864         struct bkey_format new_format;
865
866         memset(new_nodes, 0, sizeof(new_nodes));
867         bch2_keylist_init(&keylist, NULL);
868
869         /* Count keys that are not deleted */
870         for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
871                 u64s += old_nodes[i]->nr.live_u64s;
872
873         nr_old_nodes = nr_new_nodes = i;
874
875         /* Check if all keys in @old_nodes could fit in one fewer node */
876         if (nr_old_nodes <= 1 ||
877             __vstruct_blocks(struct btree_node, c->block_bits,
878                              DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
879                 return;
880
881         /* Find a format that all keys in @old_nodes can pack into */
882         bch2_bkey_format_init(&format_state);
883
884         for (i = 0; i < nr_old_nodes; i++)
885                 __bch2_btree_calc_format(&format_state, old_nodes[i]);
886
887         new_format = bch2_bkey_format_done(&format_state);
888
889         /* Check if repacking would make any nodes too big to fit */
890         for (i = 0; i < nr_old_nodes; i++)
891                 if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
892                         trace_btree_gc_coalesce_fail(c,
893                                         BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
894                         return;
895                 }
896
897         if (bch2_keylist_realloc(&keylist, NULL, 0,
898                         (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
899                 trace_btree_gc_coalesce_fail(c,
900                                 BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
901                 return;
902         }
903
904         as = bch2_btree_update_start(c, iter->btree_id,
905                         btree_update_reserve_required(c, parent) + nr_old_nodes,
906                         BTREE_INSERT_NOFAIL|
907                         BTREE_INSERT_USE_RESERVE,
908                         NULL);
909         if (IS_ERR(as)) {
910                 trace_btree_gc_coalesce_fail(c,
911                                 BTREE_GC_COALESCE_FAIL_RESERVE_GET);
912                 bch2_keylist_free(&keylist, NULL);
913                 return;
914         }
915
916         trace_btree_gc_coalesce(c, old_nodes[0]);
917
918         for (i = 0; i < nr_old_nodes; i++)
919                 bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
920
921         /* Repack everything with @new_format and sort down to one bset */
922         for (i = 0; i < nr_old_nodes; i++)
923                 new_nodes[i] =
924                         __bch2_btree_node_alloc_replacement(as, old_nodes[i],
925                                                             new_format);
926
927         /*
928          * Conceptually we concatenate the nodes together and slice them
929          * up at different boundaries.
930          */
931         for (i = nr_new_nodes - 1; i > 0; --i) {
932                 struct btree *n1 = new_nodes[i];
933                 struct btree *n2 = new_nodes[i - 1];
934
935                 struct bset *s1 = btree_bset_first(n1);
936                 struct bset *s2 = btree_bset_first(n2);
937                 struct bkey_packed *k, *last = NULL;
938
939                 /* Calculate how many keys from @n2 we could fit inside @n1 */
940                 u64s = 0;
941
942                 for (k = s2->start;
943                      k < vstruct_last(s2) &&
944                      vstruct_blocks_plus(n1->data, c->block_bits,
945                                          u64s + k->u64s) <= blocks;
946                      k = bkey_next(k)) {
947                         last = k;
948                         u64s += k->u64s;
949                 }
950
951                 if (u64s == le16_to_cpu(s2->u64s)) {
952                         /* n2 fits entirely in n1 */
953                         n1->key.k.p = n1->data->max_key = n2->data->max_key;
954
955                         memcpy_u64s(vstruct_last(s1),
956                                     s2->start,
957                                     le16_to_cpu(s2->u64s));
958                         le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));
959
960                         set_btree_bset_end(n1, n1->set);
961
962                         six_unlock_write(&n2->lock);
963                         bch2_btree_node_free_never_inserted(c, n2);
964                         six_unlock_intent(&n2->lock);
965
966                         memmove(new_nodes + i - 1,
967                                 new_nodes + i,
968                                 sizeof(new_nodes[0]) * (nr_new_nodes - i));
969                         new_nodes[--nr_new_nodes] = NULL;
970                 } else if (u64s) {
971                         /* move part of n2 into n1 */
972                         n1->key.k.p = n1->data->max_key =
973                                 bkey_unpack_pos(n1, last);
974
975                         n2->data->min_key =
976                                 btree_type_successor(iter->btree_id,
977                                                      n1->data->max_key);
978
979                         memcpy_u64s(vstruct_last(s1),
980                                     s2->start, u64s);
981                         le16_add_cpu(&s1->u64s, u64s);
982
983                         memmove(s2->start,
984                                 vstruct_idx(s2, u64s),
985                                 (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
986                         s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);
987
988                         set_btree_bset_end(n1, n1->set);
989                         set_btree_bset_end(n2, n2->set);
990                 }
991         }
992
993         for (i = 0; i < nr_new_nodes; i++) {
994                 struct btree *n = new_nodes[i];
995
996                 recalc_packed_keys(n);
997                 btree_node_reset_sib_u64s(n);
998
999                 bch2_btree_build_aux_trees(n);
1000                 six_unlock_write(&n->lock);
1001
1002                 bch2_btree_node_write(c, n, SIX_LOCK_intent);
1003         }
1004
1005         /*
1006          * The keys for the old nodes get deleted. We don't want to insert keys
1007          * that compare equal to the keys for the new nodes we'll also be
1008          * inserting - we can't because keys on a keylist must be strictly
1009          * greater than the previous keys, and we also don't need to since the
1010          * key for the new node will serve the same purpose (overwriting the key
1011          * for the old node).
1012          */
1013         for (i = 0; i < nr_old_nodes; i++) {
1014                 struct bkey_i delete;
1015                 unsigned j;
1016
1017                 for (j = 0; j < nr_new_nodes; j++)
1018                         if (!bkey_cmp(old_nodes[i]->key.k.p,
1019                                       new_nodes[j]->key.k.p))
1020                                 goto next;
1021
1022                 bkey_init(&delete.k);
1023                 delete.k.p = old_nodes[i]->key.k.p;
1024                 bch2_keylist_add_in_order(&keylist, &delete);
1025 next:
1026                 i = i;  /* no-op: a label must be followed by a statement */
1027         }
1028
1029         /*
1030          * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
1031          * does the lookup once and thus expects the keys to be in sorted order
1032          * so we have to make sure the new keys are correctly ordered with
1033          * respect to the deleted keys added in the previous loop
1034          */
1035         for (i = 0; i < nr_new_nodes; i++)
1036                 bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
1037
1038         /* Insert the newly coalesced nodes */
1039         bch2_btree_insert_node(as, parent, iter, &keylist, 0);
1040
1041         BUG_ON(!bch2_keylist_empty(&keylist));
1042
1043         BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
1044
1045         bch2_btree_iter_node_replace(iter, new_nodes[0]);
1046
1047         for (i = 0; i < nr_new_nodes; i++)
1048                 bch2_open_buckets_put(c, &new_nodes[i]->ob);
1049
1050         /* Free the old nodes and update our sliding window */
1051         for (i = 0; i < nr_old_nodes; i++) {
1052                 bch2_btree_node_free_inmem(c, old_nodes[i], iter);
1053
1054                 /*
1055                  * the index update might have triggered a split, in which case
1056                  * the nodes we coalesced - the new nodes we just created -
1057                  * might not be sibling nodes anymore - don't add them to the
1058                  * sliding window (except the first):
1059                  */
1060                 if (!i) {
1061                         old_nodes[i] = new_nodes[i];
1062                 } else {
1063                         old_nodes[i] = NULL;
1064                         if (new_nodes[i])
1065                                 six_unlock_intent(&new_nodes[i]->lock);
1066                 }
1067         }
1068
1069         bch2_btree_update_done(as);
1070         bch2_keylist_free(&keylist, NULL);
1071 }
1072
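/*
 * Walk a btree's nodes, keeping a sliding window of the last GC_MERGE_NODES
 * nodes seen (relocked via their lock sequence numbers) and attempting to
 * coalesce the nodes in the window at each step.
 */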
1073 static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
1074 {
1075         struct btree_iter iter;
1076         struct btree *b;
1077         bool kthread = (current->flags & PF_KTHREAD) != 0;
1078         unsigned i;
1079
1080         /* Sliding window of adjacent btree nodes */
1081         struct btree *merge[GC_MERGE_NODES];
1082         u32 lock_seq[GC_MERGE_NODES];
1083
1084         /*
1085          * XXX: We don't have a good way of positively matching on sibling nodes
1086          * that have the same parent - this code works by handling the cases
1087          * where they might not have the same parent, and is thus fragile. Ugh.
1088          *
1089          * Perhaps redo this to use multiple linked iterators?
1090          */
1091         memset(merge, 0, sizeof(merge));
1092
1093         __for_each_btree_node(&iter, c, btree_id, POS_MIN,
1094                               BTREE_MAX_DEPTH, 0,
1095                               BTREE_ITER_PREFETCH, b) {
1096                 memmove(merge + 1, merge,
1097                         sizeof(merge) - sizeof(merge[0]));
1098                 memmove(lock_seq + 1, lock_seq,
1099                         sizeof(lock_seq) - sizeof(lock_seq[0]));
1100
1101                 merge[0] = b;
1102
1103                 for (i = 1; i < GC_MERGE_NODES; i++) {
1104                         if (!merge[i] ||
1105                             !six_relock_intent(&merge[i]->lock, lock_seq[i]))
1106                                 break;
1107
1108                         if (merge[i]->level != merge[0]->level) {
1109                                 six_unlock_intent(&merge[i]->lock);
1110                                 break;
1111                         }
1112                 }
1113                 memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
1114
1115                 bch2_coalesce_nodes(c, &iter, merge);
1116
1117                 for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
1118                         lock_seq[i] = merge[i]->lock.state.seq;
1119                         six_unlock_intent(&merge[i]->lock);
1120                 }
1121
1122                 lock_seq[0] = merge[0]->lock.state.seq;
1123
1124                 if (kthread && kthread_should_stop()) {
1125                         bch2_btree_iter_unlock(&iter);
1126                         return -ESHUTDOWN;
1127                 }
1128
1129                 bch2_btree_iter_cond_resched(&iter);
1130
1131                 /*
1132                  * If the parent node wasn't relocked, it might have been split
1133                  * and the nodes in our sliding window might not have the same
1134                  * parent anymore - blow away the sliding window:
1135                  */
1136                 if (btree_iter_node(&iter, iter.level + 1) &&
1137                     !btree_node_intent_locked(&iter, iter.level + 1))
1138                         memset(merge + 1, 0,
1139                                (GC_MERGE_NODES - 1) * sizeof(merge[0]));
1140         }
1141         return bch2_btree_iter_unlock(&iter);
1142 }
1143
1144 /**
1145  * bch2_coalesce - coalesce adjacent nodes with low occupancy
1146  */
1147 void bch2_coalesce(struct bch_fs *c)
1148 {
1149         enum btree_id id;
1150
1151         down_read(&c->gc_lock);
1152         trace_gc_coalesce_start(c);
1153
1154         for (id = 0; id < BTREE_ID_NR; id++) {
1155                 int ret = c->btree_roots[id].b
1156                         ? bch2_coalesce_btree(c, id)
1157                         : 0;
1158
1159                 if (ret) {
1160                         if (ret != -ESHUTDOWN)
1161                                 bch_err(c, "btree coalescing failed: %d", ret);
                             up_read(&c->gc_lock);
1162                         return;
1163                 }
1164         }
1165
1166         trace_gc_coalesce_end(c);
1167         up_read(&c->gc_lock);
1168 }
1169
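/*
 * Background gc thread: runs a gc pass when kicked via c->kick_gc, or - if
 * btree_gc_periodic is set - after roughly 1/16th of the filesystem's
 * capacity has been written, as measured by the write io clock.
 */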
1170 static int bch2_gc_thread(void *arg)
1171 {
1172         struct bch_fs *c = arg;
1173         struct io_clock *clock = &c->io_clock[WRITE];
1174         unsigned long last = atomic_long_read(&clock->now);
1175         unsigned last_kick = atomic_read(&c->kick_gc);
1176         int ret;
1177
1178         set_freezable();
1179
1180         while (1) {
1181                 while (1) {
1182                         set_current_state(TASK_INTERRUPTIBLE);
1183
1184                         if (kthread_should_stop()) {
1185                                 __set_current_state(TASK_RUNNING);
1186                                 return 0;
1187                         }
1188
1189                         if (atomic_read(&c->kick_gc) != last_kick)
1190                                 break;
1191
1192                         if (c->btree_gc_periodic) {
1193                                 unsigned long next = last + c->capacity / 16;
1194
1195                                 if (atomic_long_read(&clock->now) >= next)
1196                                         break;
1197
1198                                 bch2_io_clock_schedule_timeout(clock, next);
1199                         } else {
1200                                 schedule();
1201                         }
1202
1203                         try_to_freeze();
1204                 }
1205                 __set_current_state(TASK_RUNNING);
1206
1207                 last = atomic_long_read(&clock->now);
1208                 last_kick = atomic_read(&c->kick_gc);
1209
1210                 ret = bch2_gc(c, NULL, false);
1211                 if (ret)
1212                         bch_err(c, "btree gc failed: %i", ret);
1213
1214                 debug_check_no_locks_held();
1215         }
1216
1217         return 0;
1218 }
1219
1220 void bch2_gc_thread_stop(struct bch_fs *c)
1221 {
1222         struct task_struct *p;
1223
1224         p = c->gc_thread;
1225         c->gc_thread = NULL;
1226
1227         if (p) {
1228                 kthread_stop(p);
1229                 put_task_struct(p);
1230         }
1231 }
1232
1233 int bch2_gc_thread_start(struct bch_fs *c)
1234 {
1235         struct task_struct *p;
1236
1237         BUG_ON(c->gc_thread);
1238
1239         p = kthread_create(bch2_gc_thread, c, "bch_gc");
1240         if (IS_ERR(p))
1241                 return PTR_ERR(p);
1242
1243         get_task_struct(p);
1244         c->gc_thread = p;
1245         wake_up_process(p);
1246         return 0;
1247 }
1248
1249 /* Initial GC computes bucket marks during startup */
1250
1251 int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
1252 {
1253         int ret = bch2_gc(c, journal, true);
1254
1255         /*
1256          * Skip past versions that might have possibly been used (as nonces),
1257          * but hadn't had their pointers written:
1258          */
1259         if (c->sb.encryption_type)
1260                 atomic64_add(1 << 16, &c->key_version);
1261
1262         return ret;
1263 }