libbcachefs/btree_gc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  * Copyright (C) 2014 Datera Inc.
5  */
6
7 #include "bcachefs.h"
8 #include "alloc_background.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_locking.h"
12 #include "btree_update_interior.h"
13 #include "btree_io.h"
14 #include "btree_gc.h"
15 #include "buckets.h"
16 #include "clock.h"
17 #include "debug.h"
18 #include "ec.h"
19 #include "error.h"
20 #include "extents.h"
21 #include "journal.h"
22 #include "keylist.h"
23 #include "move.h"
24 #include "recovery.h"
25 #include "replicas.h"
26 #include "super-io.h"
27
28 #include <linux/slab.h>
29 #include <linux/bitops.h>
30 #include <linux/freezer.h>
31 #include <linux/kthread.h>
32 #include <linux/preempt.h>
33 #include <linux/rcupdate.h>
34 #include <linux/sched/task.h>
35 #include <trace/events/bcachefs.h>
36
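/*
 * gc_pos records how far GC has progressed through its total ordering of
 * everything it walks (btree nodes, pending deletes, allocator buckets).
 * Code that marks references while GC is running reads c->gc_pos (under the
 * gc_pos_lock seqcount) to tell whether GC has already visited the position
 * being updated - see the comment above bch2_gc() below.  gc_pos_set()
 * asserts that the position only ever moves forward.
 */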
37 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
38 {
39         write_seqcount_begin(&c->gc_pos_lock);
40         c->gc_pos = new_pos;
41         write_seqcount_end(&c->gc_pos_lock);
42 }
43
44 static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
45 {
46         BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
47         __gc_pos_set(c, new_pos);
48 }
49
50 /* range_checks - for validating min/max pos of each btree node: */
51
52 struct range_checks {
53         struct range_level {
54                 struct bpos     min;
55                 struct bpos     max;
56         }                       l[BTREE_MAX_DEPTH];
57         unsigned                depth;
58 };
59
60 static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
61 {
62         unsigned i;
63
64         for (i = 0; i < BTREE_MAX_DEPTH; i++)
65                 r->l[i].min = r->l[i].max = POS_MIN;
66         r->depth = depth;
67 }
68
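/*
 * Check that this node's min_key follows the previous node's max_key at the
 * same level, and that an interior node's range agrees with the range covered
 * by its children; any mismatch is reported via bch2_fs_inconsistent_on().
 */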
69 static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
70                                     struct range_checks *r)
71 {
72         struct range_level *l = &r->l[b->level];
73
74         struct bpos expected_min = bkey_cmp(l->min, l->max)
75                 ? btree_type_successor(b->btree_id, l->max)
76                 : l->max;
77
78         bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
79                 "btree node has incorrect min key: %llu:%llu != %llu:%llu",
80                 b->data->min_key.inode,
81                 b->data->min_key.offset,
82                 expected_min.inode,
83                 expected_min.offset);
84
85         l->max = b->data->max_key;
86
87         if (b->level > r->depth) {
88                 l = &r->l[b->level - 1];
89
90                 bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
91                         "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
92                         b->data->min_key.inode,
93                         b->data->min_key.offset,
94                         l->min.inode,
95                         l->min.offset);
96
97                 bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
98                         "btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
99                         b->data->max_key.inode,
100                         b->data->max_key.offset,
101                         l->max.inode,
102                         l->max.offset);
103
104                 if (bkey_cmp(b->data->max_key, POS_MAX))
105                         l->min = l->max =
106                                 btree_type_successor(b->btree_id,
107                                                      b->data->max_key);
108         }
109 }
110
111 /* marking of btree keys/nodes: */
112
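/*
 * Mark a single key: update oldest_gen and stale counts for each pointer and
 * pass the key to bch2_mark_key() with BTREE_TRIGGER_GC.  During the initial
 * (recovery/fsck) pass we also repair missing replicas entries and bucket
 * gens that are missing from, or ahead of, the alloc info.
 */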
113 static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
114                             u8 *max_stale, bool initial)
115 {
116         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
117         const struct bch_extent_ptr *ptr;
118         unsigned flags =
119                 BTREE_TRIGGER_GC|
120                 (initial ? BTREE_TRIGGER_NOATOMIC : 0);
121         int ret = 0;
122
123         if (initial) {
124                 BUG_ON(journal_seq_verify(c) &&
125                        k.k->version.lo > journal_cur_seq(&c->journal));
126
127                 /* XXX change to fsck check */
128                 if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
129                                 "key version number higher than recorded: %llu > %llu",
130                                 k.k->version.lo,
131                                 atomic64_read(&c->key_version)))
132                         atomic64_set(&c->key_version, k.k->version.lo);
133
134                 if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
135                     fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
136                                 "superblock not marked as containing replicas (type %u)",
137                                 k.k->type)) {
138                         ret = bch2_mark_bkey_replicas(c, k);
139                         if (ret)
140                                 return ret;
141                 }
142
143                 bkey_for_each_ptr(ptrs, ptr) {
144                         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
145                         struct bucket *g = PTR_BUCKET(ca, ptr, true);
146                         struct bucket *g2 = PTR_BUCKET(ca, ptr, false);
147
148                         if (mustfix_fsck_err_on(!g->gen_valid, c,
149                                         "bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
150                                         ptr->dev, PTR_BUCKET_NR(ca, ptr),
151                                         bch2_data_types[ptr_data_type(k.k, ptr)],
152                                         ptr->gen)) {
153                                 g2->_mark.gen   = g->_mark.gen          = ptr->gen;
154                                 g2->gen_valid   = g->gen_valid          = true;
155                         }
156
157                         if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
158                                         "bucket %u:%zu data type %s ptr gen in the future: %u > %u",
159                                         ptr->dev, PTR_BUCKET_NR(ca, ptr),
160                                         bch2_data_types[ptr_data_type(k.k, ptr)],
161                                         ptr->gen, g->mark.gen)) {
162                                 g2->_mark.gen   = g->_mark.gen          = ptr->gen;
163                                 g2->gen_valid   = g->gen_valid          = true;
164                                 g2->_mark.data_type             = 0;
165                                 g2->_mark.dirty_sectors         = 0;
166                                 g2->_mark.cached_sectors        = 0;
167                                 set_bit(BCH_FS_FIXED_GENS, &c->flags);
168                         }
169                 }
170         }
171
172         bkey_for_each_ptr(ptrs, ptr) {
173                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
174                 struct bucket *g = PTR_BUCKET(ca, ptr, true);
175
176                 if (gen_after(g->oldest_gen, ptr->gen))
177                         g->oldest_gen = ptr->gen;
178
179                 *max_stale = max(*max_stale, ptr_stale(ca, ptr));
180         }
181
182         bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags);
183 fsck_err:
184         return ret;
185 }
186
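/*
 * Mark every key in a btree node (if its node type needs GC), reporting the
 * worst pointer staleness seen via *max_stale so the caller can decide
 * whether the node should be rewritten.
 */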
187 static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
188                               u8 *max_stale, bool initial)
189 {
190         struct btree_node_iter iter;
191         struct bkey unpacked;
192         struct bkey_s_c k;
193         int ret = 0;
194
195         *max_stale = 0;
196
197         if (!btree_node_type_needs_gc(btree_node_type(b)))
198                 return 0;
199
200         for_each_btree_node_key_unpack(b, k, &iter,
201                                        &unpacked) {
202                 bch2_bkey_debugcheck(c, b, k);
203
204                 ret = bch2_gc_mark_key(c, k, max_stale, initial);
205                 if (ret)
206                         break;
207         }
208
209         return ret;
210 }
211
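/*
 * Walk one btree depth first, marking every node and key.  Outside of the
 * initial pass, nodes with too many stale pointers are rewritten as we go.
 * The btree root itself is marked last, under btree_root_lock.
 */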
212 static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
213                          bool initial, bool metadata_only)
214 {
215         struct btree_trans trans;
216         struct btree_iter *iter;
217         struct btree *b;
218         struct range_checks r;
219         unsigned depth = metadata_only                  ? 1
220                 : expensive_debug_checks(c)             ? 0
221                 : !btree_node_type_needs_gc(btree_id)   ? 1
222                 : 0;
223         u8 max_stale = 0;
224         int ret = 0;
225
226         bch2_trans_init(&trans, c, 0, 0);
227
228         gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
229
230         btree_node_range_checks_init(&r, depth);
231
232         __for_each_btree_node(&trans, iter, btree_id, POS_MIN,
233                               0, depth, BTREE_ITER_PREFETCH, b) {
234                 btree_node_range_checks(c, b, &r);
235
236                 bch2_verify_btree_nr_keys(b);
237
238                 gc_pos_set(c, gc_pos_btree_node(b));
239
240                 ret = btree_gc_mark_node(c, b, &max_stale, initial);
241                 if (ret)
242                         break;
243
244                 if (!initial) {
245                         if (max_stale > 64)
246                                 bch2_btree_node_rewrite(c, iter,
247                                                 b->data->keys.seq,
248                                                 BTREE_INSERT_USE_RESERVE|
249                                                 BTREE_INSERT_NOWAIT|
250                                                 BTREE_INSERT_GC_LOCK_HELD);
251                         else if (!btree_gc_rewrite_disabled(c) &&
252                                  (btree_gc_always_rewrite(c) || max_stale > 16))
253                                 bch2_btree_node_rewrite(c, iter,
254                                                 b->data->keys.seq,
255                                                 BTREE_INSERT_NOWAIT|
256                                                 BTREE_INSERT_GC_LOCK_HELD);
257                 }
258
259                 bch2_trans_cond_resched(&trans);
260         }
261         ret = bch2_trans_exit(&trans) ?: ret;
262         if (ret)
263                 return ret;
264
265         mutex_lock(&c->btree_root_lock);
266         b = c->btree_roots[btree_id].b;
267         if (!btree_node_fake(b))
268                 ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
269                                        &max_stale, initial);
270         gc_pos_set(c, gc_pos_btree_root(b->btree_id));
271         mutex_unlock(&c->btree_root_lock);
272
273         return ret;
274 }
275
276 static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
277 {
278         return  (int) btree_id_to_gc_phase(l) -
279                 (int) btree_id_to_gc_phase(r);
280 }
281
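/*
 * During initial GC, keys that so far exist only in the journal haven't been
 * inserted into the btree yet: mark the journal key itself, then walk the
 * existing btree keys in its range and account for what it will overwrite.
 */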
282 static int mark_journal_key(struct bch_fs *c, enum btree_id id,
283                             struct bkey_i *insert)
284 {
285         struct btree_trans trans;
286         struct btree_iter *iter;
287         struct bkey_s_c k;
288         u8 max_stale;
289         int ret = 0;
290
291         ret = bch2_gc_mark_key(c, bkey_i_to_s_c(insert), &max_stale, true);
292         if (ret)
293                 return ret;
294
295         bch2_trans_init(&trans, c, 0, 0);
296
297         for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
298                            BTREE_ITER_SLOTS, k, ret) {
299                 percpu_down_read(&c->mark_lock);
300                 ret = bch2_mark_overwrite(&trans, iter, k, insert, NULL,
301                                          BTREE_TRIGGER_GC|
302                                          BTREE_TRIGGER_NOATOMIC);
303                 percpu_up_read(&c->mark_lock);
304
305                 if (!ret)
306                         break;
307         }
308
309         return bch2_trans_exit(&trans) ?: ret;
310 }
311
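/*
 * Mark all btrees, in the order given by btree_id_to_gc_phase() so that
 * gc_pos only ever advances, marking any journal keys that belong to a btree
 * right after that btree itself.
 */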
312 static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
313                           bool initial, bool metadata_only)
314 {
315         enum btree_id ids[BTREE_ID_NR];
316         unsigned i;
317
318         for (i = 0; i < BTREE_ID_NR; i++)
319                 ids[i] = i;
320         bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
321
322         for (i = 0; i < BTREE_ID_NR; i++) {
323                 enum btree_id id = ids[i];
324                 enum btree_node_type type = __btree_node_type(0, id);
325
326                 int ret = bch2_gc_btree(c, id, initial, metadata_only);
327                 if (ret)
328                         return ret;
329
330                 if (journal_keys && !metadata_only &&
331                     btree_node_type_needs_gc(type)) {
332                         struct journal_key *j;
334
335                         for_each_journal_key(*journal_keys, j)
336                                 if (j->btree_id == id) {
337                                         ret = mark_journal_key(c, id, j->k);
338                                         if (ret)
339                                                 return ret;
340                                 }
341                 }
342         }
343
344         return 0;
345 }
346
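/*
 * Mark a range of sectors [start, end) as the given metadata type, splitting
 * the range on bucket boundaries.  For example, with (hypothetical) 8-sector
 * buckets, start=6 end=20 becomes three calls: 2 sectors in bucket 0, 8 in
 * bucket 1 and 4 in bucket 2.
 */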
347 static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
348                                   u64 start, u64 end,
349                                   enum bch_data_type type,
350                                   unsigned flags)
351 {
352         u64 b = sector_to_bucket(ca, start);
353
354         do {
355                 unsigned sectors =
356                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
357
358                 bch2_mark_metadata_bucket(c, ca, b, type, sectors,
359                                           gc_phase(GC_PHASE_SB), flags);
360                 b++;
361                 start += sectors;
362         } while (start < end);
363 }
364
365 void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
366                               unsigned flags)
367 {
368         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
369         unsigned i;
370         u64 b;
371
372         /*
373          * This conditional is kind of gross, but we may be called from the
374          * device add path, before the new device has actually been added to the
375          * running filesystem:
376          */
377         if (c) {
378                 lockdep_assert_held(&c->sb_lock);
379                 percpu_down_read(&c->mark_lock);
380         }
381
382         for (i = 0; i < layout->nr_superblocks; i++) {
383                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
384
385                 if (offset == BCH_SB_SECTOR)
386                         mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
387                                               BCH_DATA_SB, flags);
388
389                 mark_metadata_sectors(c, ca, offset,
390                                       offset + (1 << layout->sb_max_size_bits),
391                                       BCH_DATA_SB, flags);
392         }
393
394         for (i = 0; i < ca->journal.nr; i++) {
395                 b = ca->journal.buckets[i];
396                 bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
397                                           ca->mi.bucket_size,
398                                           gc_phase(GC_PHASE_SB), flags);
399         }
400
401         if (c)
402                 percpu_up_read(&c->mark_lock);
403 }
404
405 static void bch2_mark_superblocks(struct bch_fs *c)
406 {
407         struct bch_dev *ca;
408         unsigned i;
409
410         mutex_lock(&c->sb_lock);
411         gc_pos_set(c, gc_phase(GC_PHASE_SB));
412
413         for_each_online_member(ca, c, i)
414                 bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
415         mutex_unlock(&c->sb_lock);
416 }
417
418 /* Also see bch2_pending_btree_node_free_insert_done() */
419 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
420 {
421         struct btree_update *as;
422         struct pending_btree_node_free *d;
423
424         mutex_lock(&c->btree_interior_update_lock);
425         gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
426
427         for_each_pending_btree_node_free(c, as, d)
428                 if (d->index_update_done)
429                         bch2_mark_key(c, bkey_i_to_s_c(&d->key),
430                                       0, 0, NULL, 0,
431                                       BTREE_TRIGGER_GC);
432
433         mutex_unlock(&c->btree_interior_update_lock);
434 }
435
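/*
 * Buckets owned by the allocator (free_inc, the freelists and open_buckets)
 * aren't referenced by anything in the btree, so GC walks the allocator's own
 * data structures to mark them as allocated.
 */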
436 static void bch2_mark_allocator_buckets(struct bch_fs *c)
437 {
438         struct bch_dev *ca;
439         struct open_bucket *ob;
440         size_t i, j, iter;
441         unsigned ci;
442
443         percpu_down_read(&c->mark_lock);
444
445         spin_lock(&c->freelist_lock);
446         gc_pos_set(c, gc_pos_alloc(c, NULL));
447
448         for_each_member_device(ca, c, ci) {
449                 fifo_for_each_entry(i, &ca->free_inc, iter)
450                         bch2_mark_alloc_bucket(c, ca, i, true,
451                                                gc_pos_alloc(c, NULL),
452                                                BTREE_TRIGGER_GC);
453
456                 for (j = 0; j < RESERVE_NR; j++)
457                         fifo_for_each_entry(i, &ca->free[j], iter)
458                                 bch2_mark_alloc_bucket(c, ca, i, true,
459                                                        gc_pos_alloc(c, NULL),
460                                                        BTREE_TRIGGER_GC);
461         }
462
463         spin_unlock(&c->freelist_lock);
464
465         for (ob = c->open_buckets;
466              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
467              ob++) {
468                 spin_lock(&ob->lock);
469                 if (ob->valid) {
470                         gc_pos_set(c, gc_pos_alloc(c, ob));
471                         ca = bch_dev_bkey_exists(c, ob->ptr.dev);
472                         bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), true,
473                                                gc_pos_alloc(c, ob),
474                                                BTREE_TRIGGER_GC);
475                 }
476                 spin_unlock(&ob->lock);
477         }
478
479         percpu_up_read(&c->mark_lock);
480 }
481
482 static void bch2_gc_free(struct bch_fs *c)
483 {
484         struct bch_dev *ca;
485         unsigned i;
486
487         genradix_free(&c->stripes[1]);
488
489         for_each_member_device(ca, c, i) {
490                 kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
491                         sizeof(struct bucket_array) +
492                         ca->mi.nbuckets * sizeof(struct bucket));
493                 ca->buckets[1] = NULL;
494
495                 free_percpu(ca->usage[1]);
496                 ca->usage[1] = NULL;
497         }
498
499         free_percpu(c->usage_gc);
500         c->usage_gc = NULL;
501 }
502
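/*
 * GC has finished: compare what GC just recomputed (bucket marks, stripes,
 * filesystem usage) against the live copies, report differences via
 * fsck_err() when we expect them to match, and copy the recomputed values
 * over.
 */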
503 static int bch2_gc_done(struct bch_fs *c,
504                         bool initial, bool metadata_only)
505 {
506         struct bch_dev *ca;
507         bool verify = !metadata_only &&
508                 (!initial ||
509                  (c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)));
510         unsigned i;
511         int ret = 0;
512
513 #define copy_field(_f, _msg, ...)                                       \
514         if (dst->_f != src->_f) {                                       \
515                 if (verify)                                             \
516                         fsck_err(c, _msg ": got %llu, should be %llu"   \
517                                 , ##__VA_ARGS__, dst->_f, src->_f);     \
518                 dst->_f = src->_f;                                      \
519         }
520 #define copy_stripe_field(_f, _msg, ...)                                \
521         if (dst->_f != src->_f) {                                       \
522                 if (verify)                                             \
523                         fsck_err(c, "stripe %zu has wrong "_msg         \
524                                 ": got %u, should be %u",               \
525                                 dst_iter.pos, ##__VA_ARGS__,            \
526                                 dst->_f, src->_f);                      \
527                 dst->_f = src->_f;                                      \
528                 dst->dirty = true;                                      \
529         }
530 #define copy_bucket_field(_f)                                           \
531         if (dst->b[b].mark._f != src->b[b].mark._f) {                   \
532                 if (verify)                                             \
533                         fsck_err(c, "dev %u bucket %zu has wrong " #_f  \
534                                 ": got %u, should be %u", i, b,         \
535                                 dst->b[b].mark._f, src->b[b].mark._f);  \
536                 dst->b[b]._mark._f = src->b[b].mark._f;                 \
537         }
538 #define copy_dev_field(_f, _msg, ...)                                   \
539         copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
540 #define copy_fs_field(_f, _msg, ...)                                    \
541         copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
542
543         if (!metadata_only) {
544                 struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
545                 struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
546                 struct stripe *dst, *src;
547                 unsigned i;
548
549                 c->ec_stripes_heap.used = 0;
550
551                 while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
552                        (src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
553                         BUG_ON(src_iter.pos != dst_iter.pos);
554
555                         copy_stripe_field(alive,        "alive");
556                         copy_stripe_field(sectors,      "sectors");
557                         copy_stripe_field(algorithm,    "algorithm");
558                         copy_stripe_field(nr_blocks,    "nr_blocks");
559                         copy_stripe_field(nr_redundant, "nr_redundant");
560                         copy_stripe_field(blocks_nonempty,
561                                           "blocks_nonempty");
562
563                         for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
564                                 copy_stripe_field(block_sectors[i],
565                                                   "block_sectors[%u]", i);
566
567                         if (dst->alive)
568                                 bch2_stripes_heap_insert(c, dst, dst_iter.pos);
569
570                         genradix_iter_advance(&dst_iter, &c->stripes[0]);
571                         genradix_iter_advance(&src_iter, &c->stripes[1]);
572                 }
573         }
574
575         for_each_member_device(ca, c, i) {
576                 struct bucket_array *dst = __bucket_array(ca, 0);
577                 struct bucket_array *src = __bucket_array(ca, 1);
578                 size_t b;
579
580                 for (b = 0; b < src->nbuckets; b++) {
581                         copy_bucket_field(gen);
582                         copy_bucket_field(data_type);
583                         copy_bucket_field(owned_by_allocator);
584                         copy_bucket_field(stripe);
585                         copy_bucket_field(dirty_sectors);
586                         copy_bucket_field(cached_sectors);
587
588                         dst->b[b].oldest_gen = src->b[b].oldest_gen;
589                 }
590         }
591
592         bch2_fs_usage_acc_to_base(c, 0);
593         bch2_fs_usage_acc_to_base(c, 1);
594
595         bch2_dev_usage_from_buckets(c);
596
597         {
598                 unsigned nr = fs_usage_u64s(c);
599                 struct bch_fs_usage *dst = c->usage_base;
600                 struct bch_fs_usage *src = (void *)
601                         bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
602
603                 copy_fs_field(hidden,           "hidden");
604                 copy_fs_field(btree,            "btree");
605
606                 if (!metadata_only) {
607                         copy_fs_field(data,     "data");
608                         copy_fs_field(cached,   "cached");
609                         copy_fs_field(reserved, "reserved");
610                         copy_fs_field(nr_inodes,"nr_inodes");
611
612                         for (i = 0; i < BCH_REPLICAS_MAX; i++)
613                                 copy_fs_field(persistent_reserved[i],
614                                               "persistent_reserved[%i]", i);
615                 }
616
617                 for (i = 0; i < c->replicas.nr; i++) {
618                         struct bch_replicas_entry *e =
619                                 cpu_replicas_entry(&c->replicas, i);
620                         char buf[80];
621
622                         if (metadata_only &&
623                             (e->data_type == BCH_DATA_USER ||
624                              e->data_type == BCH_DATA_CACHED))
625                                 continue;
626
627                         bch2_replicas_entry_to_text(&PBUF(buf), e);
628
629                         copy_fs_field(replicas[i], "%s", buf);
630                 }
631         }
632
633 #undef copy_fs_field
634 #undef copy_dev_field
635 #undef copy_bucket_field
636 #undef copy_stripe_field
637 #undef copy_field
638 fsck_err:
639         return ret;
640 }
641
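/*
 * Allocate the GC copies of the usage counters, bucket arrays and stripes
 * radix tree that GC will mark into, and seed them with the current bucket
 * gens so that gen checks keep working while GC runs.
 */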
642 static int bch2_gc_start(struct bch_fs *c,
643                          bool metadata_only)
644 {
645         struct bch_dev *ca;
646         unsigned i;
647         int ret;
648
649         BUG_ON(c->usage_gc);
650
651         c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
652                                          sizeof(u64), GFP_KERNEL);
653         if (!c->usage_gc)
654                 return -ENOMEM;
655
656         for_each_member_device(ca, c, i) {
657                 BUG_ON(ca->buckets[1]);
658                 BUG_ON(ca->usage[1]);
659
660                 ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
661                                 ca->mi.nbuckets * sizeof(struct bucket),
662                                 GFP_KERNEL|__GFP_ZERO);
663                 if (!ca->buckets[1]) {
664                         percpu_ref_put(&ca->ref);
665                         return -ENOMEM;
666                 }
667
668                 ca->usage[1] = alloc_percpu(struct bch_dev_usage);
669                 if (!ca->usage[1]) {
670                         percpu_ref_put(&ca->ref);
671                         return -ENOMEM;
672                 }
673         }
674
675         ret = bch2_ec_mem_alloc(c, true);
676         if (ret)
677                 return ret;
678
679         percpu_down_write(&c->mark_lock);
680
681         /*
682          * indicate to stripe code that we need to allocate for the gc stripes
683          * radix tree, too
684          */
685         gc_pos_set(c, gc_phase(GC_PHASE_START));
686
687         for_each_member_device(ca, c, i) {
688                 struct bucket_array *dst = __bucket_array(ca, 1);
689                 struct bucket_array *src = __bucket_array(ca, 0);
690                 size_t b;
691
692                 dst->first_bucket       = src->first_bucket;
693                 dst->nbuckets           = src->nbuckets;
694
695                 for (b = 0; b < src->nbuckets; b++) {
696                         struct bucket *d = &dst->b[b];
697                         struct bucket *s = &src->b[b];
698
699                         d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
700                         d->gen_valid = s->gen_valid;
701
702                         if (metadata_only &&
703                             (s->mark.data_type == BCH_DATA_USER ||
704                              s->mark.data_type == BCH_DATA_CACHED)) {
705                                 d->_mark = s->mark;
706                                 d->_mark.owned_by_allocator = 0;
707                         }
708                 }
709         }
710
711         percpu_up_write(&c->mark_lock);
712
713         return 0;
714 }
715
716 /**
717  * bch2_gc - walk _all_ references to buckets, and recompute them:
718  *
719  * Order matters here:
720  *  - Concurrent GC relies on the fact that we have a total ordering for
721  *    everything that GC walks - see  gc_will_visit_node(),
722  *    gc_will_visit_root()
723  *
724  *  - also, references move around in the course of index updates and
725  *    various other crap: everything needs to agree on the ordering
726  *    references are allowed to move around in - e.g., we're allowed to
727  *    start with a reference owned by an open_bucket (the allocator) and
728  *    move it to the btree, but not the reverse.
729  *
730  *    This is necessary to ensure that gc doesn't miss references that
731  *    move around - if references move backwards in the ordering GC
732  *    uses, GC could skip past them
733  */
734 int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
735             bool initial, bool metadata_only)
736 {
737         struct bch_dev *ca;
738         u64 start_time = local_clock();
739         unsigned i, iter = 0;
740         int ret;
741
742         trace_gc_start(c);
743
744         down_write(&c->gc_lock);
745 again:
746         ret = bch2_gc_start(c, metadata_only);
747         if (ret)
748                 goto out;
749
750         bch2_mark_superblocks(c);
751
752         ret = bch2_gc_btrees(c, journal_keys, initial, metadata_only);
753         if (ret)
754                 goto out;
755
756         bch2_mark_pending_btree_node_frees(c);
757         bch2_mark_allocator_buckets(c);
758
759         c->gc_count++;
760 out:
761         if (!ret &&
762             (test_bit(BCH_FS_FIXED_GENS, &c->flags) ||
763              (!iter && test_restart_gc(c)))) {
764                 /*
765                  * XXX: make sure gens we fixed got saved
766                  */
767                 if (iter++ <= 2) {
768                         bch_info(c, "Fixed gens, restarting mark and sweep:");
769                         clear_bit(BCH_FS_FIXED_GENS, &c->flags);
770                         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
771
772                         percpu_down_write(&c->mark_lock);
773                         bch2_gc_free(c);
774                         percpu_up_write(&c->mark_lock);
775                         /* flush fsck errors, reset counters */
776                         bch2_flush_fsck_errs(c);
777
778                         goto again;
779                 }
780
781                 bch_info(c, "Unable to fix bucket gens, looping");
782                 ret = -EINVAL;
783         }
784
785         if (!ret) {
786                 bch2_journal_block(&c->journal);
787
788                 percpu_down_write(&c->mark_lock);
789                 ret = bch2_gc_done(c, initial, metadata_only);
790
791                 bch2_journal_unblock(&c->journal);
792         } else {
793                 percpu_down_write(&c->mark_lock);
794         }
795
796         /* Indicates that gc is no longer in progress: */
797         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
798
799         bch2_gc_free(c);
800         percpu_up_write(&c->mark_lock);
801
802         up_write(&c->gc_lock);
803
804         trace_gc_end(c);
805         bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
806
807         /*
808          * Wake up allocator in case it was waiting for buckets
809          * because of not being able to inc gens
810          */
811         for_each_member_device(ca, c, i)
812                 bch2_wake_allocator(ca);
813
814         /*
815          * At startup, allocations can happen directly instead of via the
816          * allocator thread - issue wakeup in case they blocked on gc_lock:
817          */
818         closure_wake_up(&c->freelist_wait);
819         return ret;
820 }
821
822 /* Btree coalescing */
823
824 static void recalc_packed_keys(struct btree *b)
825 {
826         struct bset *i = btree_bset_first(b);
827         struct bkey_packed *k;
828
829         memset(&b->nr, 0, sizeof(b->nr));
830
831         BUG_ON(b->nsets != 1);
832
833         vstruct_for_each(i, k)
834                 btree_keys_account_key_add(&b->nr, 0, k);
835 }
836
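/*
 * Try to coalesce a sliding window of up to GC_MERGE_NODES adjacent sibling
 * nodes into fewer nodes: repack everything into replacement nodes with a
 * common format, then replace the old nodes' keys in the parent in a single
 * interior update.
 */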
837 static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
838                                 struct btree *old_nodes[GC_MERGE_NODES])
839 {
840         struct btree *parent = btree_node_parent(iter, old_nodes[0]);
841         unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
842         unsigned blocks = btree_blocks(c) * 2 / 3;
843         struct btree *new_nodes[GC_MERGE_NODES];
844         struct btree_update *as;
845         struct keylist keylist;
846         struct bkey_format_state format_state;
847         struct bkey_format new_format;
848
849         memset(new_nodes, 0, sizeof(new_nodes));
850         bch2_keylist_init(&keylist, NULL);
851
852         /* Count keys that are not deleted */
853         for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
854                 u64s += old_nodes[i]->nr.live_u64s;
855
856         nr_old_nodes = nr_new_nodes = i;
857
858         /* Check if all keys in @old_nodes could fit in one fewer node */
859         if (nr_old_nodes <= 1 ||
860             __vstruct_blocks(struct btree_node, c->block_bits,
861                              DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
862                 return;
863
864         /* Find a format that all keys in @old_nodes can pack into */
865         bch2_bkey_format_init(&format_state);
866
867         for (i = 0; i < nr_old_nodes; i++)
868                 __bch2_btree_calc_format(&format_state, old_nodes[i]);
869
870         new_format = bch2_bkey_format_done(&format_state);
871
872         /* Check if repacking would make any nodes too big to fit */
873         for (i = 0; i < nr_old_nodes; i++)
874                 if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
875                         trace_btree_gc_coalesce_fail(c,
876                                         BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
877                         return;
878                 }
879
880         if (bch2_keylist_realloc(&keylist, NULL, 0,
881                         (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
882                 trace_btree_gc_coalesce_fail(c,
883                                 BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
884                 return;
885         }
886
887         as = bch2_btree_update_start(c, iter->btree_id,
888                         btree_update_reserve_required(c, parent) + nr_old_nodes,
889                         BTREE_INSERT_NOFAIL|
890                         BTREE_INSERT_USE_RESERVE,
891                         NULL);
892         if (IS_ERR(as)) {
893                 trace_btree_gc_coalesce_fail(c,
894                                 BTREE_GC_COALESCE_FAIL_RESERVE_GET);
895                 bch2_keylist_free(&keylist, NULL);
896                 return;
897         }
898
899         trace_btree_gc_coalesce(c, old_nodes[0]);
900
901         for (i = 0; i < nr_old_nodes; i++)
902                 bch2_btree_interior_update_will_free_node(as, old_nodes[i]);
903
904         /* Repack everything with @new_format and sort down to one bset */
905         for (i = 0; i < nr_old_nodes; i++)
906                 new_nodes[i] =
907                         __bch2_btree_node_alloc_replacement(as, old_nodes[i],
908                                                             new_format);
909
910         /*
911          * Conceptually we concatenate the nodes together and slice them
912          * up at different boundaries.
913          */
914         for (i = nr_new_nodes - 1; i > 0; --i) {
915                 struct btree *n1 = new_nodes[i];
916                 struct btree *n2 = new_nodes[i - 1];
917
918                 struct bset *s1 = btree_bset_first(n1);
919                 struct bset *s2 = btree_bset_first(n2);
920                 struct bkey_packed *k, *last = NULL;
921
922                 /* Calculate how many keys from @n2 we could fit inside @n1 */
923                 u64s = 0;
924
925                 for (k = s2->start;
926                      k < vstruct_last(s2) &&
927                      vstruct_blocks_plus(n1->data, c->block_bits,
928                                          u64s + k->u64s) <= blocks;
929                      k = bkey_next_skip_noops(k, vstruct_last(s2))) {
930                         last = k;
931                         u64s += k->u64s;
932                 }
933
934                 if (u64s == le16_to_cpu(s2->u64s)) {
935                         /* n2 fits entirely in n1 */
936                         n1->key.k.p = n1->data->max_key = n2->data->max_key;
937
938                         memcpy_u64s(vstruct_last(s1),
939                                     s2->start,
940                                     le16_to_cpu(s2->u64s));
941                         le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s));
942
943                         set_btree_bset_end(n1, n1->set);
944
945                         six_unlock_write(&n2->lock);
946                         bch2_btree_node_free_never_inserted(c, n2);
947                         six_unlock_intent(&n2->lock);
948
949                         memmove(new_nodes + i - 1,
950                                 new_nodes + i,
951                                 sizeof(new_nodes[0]) * (nr_new_nodes - i));
952                         new_nodes[--nr_new_nodes] = NULL;
953                 } else if (u64s) {
954                         /* move part of n2 into n1 */
955                         n1->key.k.p = n1->data->max_key =
956                                 bkey_unpack_pos(n1, last);
957
958                         n2->data->min_key =
959                                 btree_type_successor(iter->btree_id,
960                                                      n1->data->max_key);
961
962                         memcpy_u64s(vstruct_last(s1),
963                                     s2->start, u64s);
964                         le16_add_cpu(&s1->u64s, u64s);
965
966                         memmove(s2->start,
967                                 vstruct_idx(s2, u64s),
968                                 (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64));
969                         s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s);
970
971                         set_btree_bset_end(n1, n1->set);
972                         set_btree_bset_end(n2, n2->set);
973                 }
974         }
975
976         for (i = 0; i < nr_new_nodes; i++) {
977                 struct btree *n = new_nodes[i];
978
979                 recalc_packed_keys(n);
980                 btree_node_reset_sib_u64s(n);
981
982                 bch2_btree_build_aux_trees(n);
983                 six_unlock_write(&n->lock);
984
985                 bch2_btree_node_write(c, n, SIX_LOCK_intent);
986         }
987
988         /*
989          * The keys for the old nodes get deleted. We don't want to insert keys
990          * that compare equal to the keys for the new nodes we'll also be
991          * inserting - we can't because keys on a keylist must be strictly
992          * greater than the previous keys, and we also don't need to since the
993          * key for the new node will serve the same purpose (overwriting the key
994          * for the old node).
995          */
996         for (i = 0; i < nr_old_nodes; i++) {
997                 struct bkey_i delete;
998                 unsigned j;
999
1000                 for (j = 0; j < nr_new_nodes; j++)
1001                         if (!bkey_cmp(old_nodes[i]->key.k.p,
1002                                       new_nodes[j]->key.k.p))
1003                                 goto next;
1004
1005                 bkey_init(&delete.k);
1006                 delete.k.p = old_nodes[i]->key.k.p;
1007                 bch2_keylist_add_in_order(&keylist, &delete);
1008 next:
1009                 ;
1010         }
1011
1012         /*
1013          * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
1014          * does the lookup once and thus expects the keys to be in sorted order
1015          * so we have to make sure the new keys are correctly ordered with
1016          * respect to the deleted keys added in the previous loop
1017          */
1018         for (i = 0; i < nr_new_nodes; i++)
1019                 bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
1020
1021         /* Insert the newly coalesced nodes */
1022         bch2_btree_insert_node(as, parent, iter, &keylist, 0);
1023
1024         BUG_ON(!bch2_keylist_empty(&keylist));
1025
1026         BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
1027
1028         bch2_btree_iter_node_replace(iter, new_nodes[0]);
1029
1030         for (i = 0; i < nr_new_nodes; i++)
1031                 bch2_open_buckets_put(c, &new_nodes[i]->ob);
1032
1033         /* Free the old nodes and update our sliding window */
1034         for (i = 0; i < nr_old_nodes; i++) {
1035                 bch2_btree_node_free_inmem(c, old_nodes[i], iter);
1036
1037                 /*
1038                  * the index update might have triggered a split, in which case
1039                  * the nodes we coalesced - the new nodes we just created -
1040                  * might not be sibling nodes anymore - don't add them to the
1041                  * sliding window (except the first):
1042                  */
1043                 if (!i) {
1044                         old_nodes[i] = new_nodes[i];
1045                 } else {
1046                         old_nodes[i] = NULL;
1047                 }
1048         }
1049
1050         for (i = 0; i < nr_new_nodes; i++)
1051                 six_unlock_intent(&new_nodes[i]->lock);
1052
1053         bch2_btree_update_done(as);
1054         bch2_keylist_free(&keylist, NULL);
1055 }
1056
1057 static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
1058 {
1059         struct btree_trans trans;
1060         struct btree_iter *iter;
1061         struct btree *b;
1062         bool kthread = (current->flags & PF_KTHREAD) != 0;
1063         unsigned i;
1064
1065         /* Sliding window of adjacent btree nodes */
1066         struct btree *merge[GC_MERGE_NODES];
1067         u32 lock_seq[GC_MERGE_NODES];
1068
1069         bch2_trans_init(&trans, c, 0, 0);
1070
1071         /*
1072          * XXX: We don't have a good way of positively matching on sibling nodes
1073          * that have the same parent - this code works by handling the cases
1074          * where they might not have the same parent, and is thus fragile. Ugh.
1075          *
1076          * Perhaps redo this to use multiple linked iterators?
1077          */
1078         memset(merge, 0, sizeof(merge));
1079
1080         __for_each_btree_node(&trans, iter, btree_id, POS_MIN,
1081                               BTREE_MAX_DEPTH, 0,
1082                               BTREE_ITER_PREFETCH, b) {
1083                 memmove(merge + 1, merge,
1084                         sizeof(merge) - sizeof(merge[0]));
1085                 memmove(lock_seq + 1, lock_seq,
1086                         sizeof(lock_seq) - sizeof(lock_seq[0]));
1087
1088                 merge[0] = b;
1089
1090                 for (i = 1; i < GC_MERGE_NODES; i++) {
1091                         if (!merge[i] ||
1092                             !six_relock_intent(&merge[i]->lock, lock_seq[i]))
1093                                 break;
1094
1095                         if (merge[i]->level != merge[0]->level) {
1096                                 six_unlock_intent(&merge[i]->lock);
1097                                 break;
1098                         }
1099                 }
1100                 memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
1101
1102                 bch2_coalesce_nodes(c, iter, merge);
1103
1104                 for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
1105                         lock_seq[i] = merge[i]->lock.state.seq;
1106                         six_unlock_intent(&merge[i]->lock);
1107                 }
1108
1109                 lock_seq[0] = merge[0]->lock.state.seq;
1110
1111                 if (kthread && kthread_should_stop()) {
1112                         bch2_trans_exit(&trans);
1113                         return -ESHUTDOWN;
1114                 }
1115
1116                 bch2_trans_cond_resched(&trans);
1117
1118                 /*
1119                  * If the parent node wasn't relocked, it might have been split
1120                  * and the nodes in our sliding window might not have the same
1121                  * parent anymore - blow away the sliding window:
1122                  */
1123                 if (btree_iter_node(iter, iter->level + 1) &&
1124                     !btree_node_intent_locked(iter, iter->level + 1))
1125                         memset(merge + 1, 0,
1126                                (GC_MERGE_NODES - 1) * sizeof(merge[0]));
1127         }
1128         return bch2_trans_exit(&trans);
1129 }
1130
1131 /**
1132  * bch2_coalesce - coalesce adjacent nodes with low occupancy
1133  */
1134 void bch2_coalesce(struct bch_fs *c)
1135 {
1136         enum btree_id id;
1137
1138         down_read(&c->gc_lock);
1139         trace_gc_coalesce_start(c);
1140
1141         for (id = 0; id < BTREE_ID_NR; id++) {
1142                 int ret = c->btree_roots[id].b
1143                         ? bch2_coalesce_btree(c, id)
1144                         : 0;
1145
1146                 if (ret) {
1147                         if (ret != -ESHUTDOWN)
1148                                 bch_err(c, "btree coalescing failed: %d", ret);
1149                         up_read(&c->gc_lock);
                             return;
1150                 }
1151         }
1152
1153         trace_gc_coalesce_end(c);
1154         up_read(&c->gc_lock);
1155 }
1156
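/*
 * Background GC thread: reruns mark and sweep either when kicked via
 * c->kick_gc or, if btree_gc_periodic is set, once the write IO clock has
 * advanced by roughly capacity/16 since the last run.
 */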
1157 static int bch2_gc_thread(void *arg)
1158 {
1159         struct bch_fs *c = arg;
1160         struct io_clock *clock = &c->io_clock[WRITE];
1161         unsigned long last = atomic_long_read(&clock->now);
1162         unsigned last_kick = atomic_read(&c->kick_gc);
1163         int ret;
1164
1165         set_freezable();
1166
1167         while (1) {
1168                 while (1) {
1169                         set_current_state(TASK_INTERRUPTIBLE);
1170
1171                         if (kthread_should_stop()) {
1172                                 __set_current_state(TASK_RUNNING);
1173                                 return 0;
1174                         }
1175
1176                         if (atomic_read(&c->kick_gc) != last_kick)
1177                                 break;
1178
1179                         if (c->btree_gc_periodic) {
1180                                 unsigned long next = last + c->capacity / 16;
1181
1182                                 if (atomic_long_read(&clock->now) >= next)
1183                                         break;
1184
1185                                 bch2_io_clock_schedule_timeout(clock, next);
1186                         } else {
1187                                 schedule();
1188                         }
1189
1190                         try_to_freeze();
1191                 }
1192                 __set_current_state(TASK_RUNNING);
1193
1194                 last = atomic_long_read(&clock->now);
1195                 last_kick = atomic_read(&c->kick_gc);
1196
1197                 ret = bch2_gc(c, NULL, false, false);
1198                 if (ret)
1199                         bch_err(c, "btree gc failed: %i", ret);
1200
1201                 debug_check_no_locks_held();
1202         }
1203
1204         return 0;
1205 }
1206
1207 void bch2_gc_thread_stop(struct bch_fs *c)
1208 {
1209         struct task_struct *p;
1210
1211         p = c->gc_thread;
1212         c->gc_thread = NULL;
1213
1214         if (p) {
1215                 kthread_stop(p);
1216                 put_task_struct(p);
1217         }
1218 }
1219
1220 int bch2_gc_thread_start(struct bch_fs *c)
1221 {
1222         struct task_struct *p;
1223
1224         BUG_ON(c->gc_thread);
1225
1226         p = kthread_create(bch2_gc_thread, c, "bch_gc");
1227         if (IS_ERR(p))
1228                 return PTR_ERR(p);
1229
1230         get_task_struct(p);
1231         c->gc_thread = p;
1232         wake_up_process(p);
1233         return 0;
1234 }