// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <trace/events/bcachefs.h>

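/*
 * c->gc_pos tracks how far mark and sweep has progressed through the total
 * ordering of everything GC walks; it's updated under the gc_pos_lock
 * seqcount so concurrent code can take a consistent snapshot when deciding
 * whether GC has already visited a given position.
 */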
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        preempt_disable();
        write_seqcount_begin(&c->gc_pos_lock);
        c->gc_pos = new_pos;
        write_seqcount_end(&c->gc_pos_lock);
        preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
        __gc_pos_set(c, new_pos);
}

/*
 * Missing: if an interior btree node is empty, we need to do something -
 * perhaps just kill it
 */
static int bch2_gc_check_topology(struct bch_fs *c,
                                  struct btree *b,
                                  struct bkey_buf *prev,
                                  struct bkey_buf cur,
                                  bool is_last)
{
        struct bpos node_start  = b->data->min_key;
        struct bpos node_end    = b->data->max_key;
        struct bpos expected_start = bkey_deleted(&prev->k->k)
                ? node_start
                : bpos_successor(prev->k->k.p);
        char buf1[200], buf2[200];
        int ret = 0;

        if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
                struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);

                if (bkey_deleted(&prev->k->k)) {
                        struct printbuf out = PBUF(buf1);
                        pr_buf(&out, "start of node: ");
                        bch2_bpos_to_text(&out, node_start);
                } else {
                        bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
                }

                if (bpos_cmp(expected_start, bp->v.min_key)) {
                        bch2_topology_error(c);

                        if (fsck_err(c, "btree node with incorrect min_key at btree %s level %u:\n"
                                     "  prev %s\n"
                                     "  cur %s",
                                     bch2_btree_ids[b->c.btree_id], b->c.level,
                                     buf1,
                                     (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2))) {
                                bch_info(c, "Halting mark and sweep to start topology repair pass");
                                return FSCK_ERR_START_TOPOLOGY_REPAIR;
                        } else {
                                set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
                        }
                }
        }

        if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
                bch2_topology_error(c);

                if (fsck_err(c, "btree node with incorrect max_key at btree %s level %u:\n"
                             "  %s\n"
                             "  expected %s",
                             bch2_btree_ids[b->c.btree_id], b->c.level,
                             (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
                             (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2))) {
                        bch_info(c, "Halting mark and sweep to start topology repair pass");
                        return FSCK_ERR_START_TOPOLOGY_REPAIR;
                } else {
                        set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
                }
        }

        bch2_bkey_buf_copy(prev, c, cur.k);
fsck_err:
        return ret;
}

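/*
 * Topology repair rewrites node keys as btree_ptr_v2, which has room for
 * min_key; upgrade from an older style btree_ptr if necessary:
 */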
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
        switch (b->key.k.type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);

                dst->k.p                = src->k.p;
                dst->v.mem_ptr          = 0;
                dst->v.seq              = b->data->keys.seq;
                dst->v.sectors_written  = 0;
                dst->v.flags            = 0;
                dst->v.min_key          = b->data->min_key;
                set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
                memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
                break;
        }
        case KEY_TYPE_btree_ptr_v2:
                bkey_copy(&dst->k_i, &b->key);
                break;
        default:
                BUG();
        }
}

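/*
 * set_node_min() and set_node_max() rewrite a node's start/end position both
 * in the in-memory node and in its key, inserting the updated key into the
 * journal keys so the repair persists; keys now outside the node are dropped.
 */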
static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        btree_ptr_to_v2(b, new);
        b->data->min_key        = new_min;
        new->v.min_key          = new_min;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);

        return 0;
}

static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
        if (ret)
                return ret;

        new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        btree_ptr_to_v2(b, new);
        b->data->max_key        = new_max;
        new->k.p                = new_max;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);

        mutex_lock(&c->btree_cache.lock);
        bch2_btree_node_hash_remove(&c->btree_cache, b);

        bkey_copy(&b->key, &new->k_i);
        ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
        BUG_ON(ret);
        mutex_unlock(&c->btree_cache.lock);
        return 0;
}

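/*
 * Check that @cur's min_key immediately follows @prev's max_key (or the
 * parent's min_key, for the first child); on a mismatch, shrink @prev if it
 * overlaps @cur and @cur has the newer sequence number, otherwise adjust
 * @cur's min_key to the expected start.
 */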
static int btree_repair_node_start(struct bch_fs *c, struct btree *b,
                                   struct btree *prev, struct btree *cur)
{
        struct bpos expected_start = !prev
                ? b->data->min_key
                : bpos_successor(prev->key.k.p);
        char buf1[200], buf2[200];
        int ret = 0;

        if (!prev) {
                struct printbuf out = PBUF(buf1);
                pr_buf(&out, "start of node: ");
                bch2_bpos_to_text(&out, b->data->min_key);
        } else {
                bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
        }

        if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
                        "btree node with incorrect min_key at btree %s level %u:\n"
                        "  prev %s\n"
                        "  cur %s",
                        bch2_btree_ids[b->c.btree_id], b->c.level,
                        buf1,
                        (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key)), buf2))) {
                if (prev &&
                    bpos_cmp(expected_start, cur->data->min_key) > 0 &&
                    BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data))
                        ret = set_node_max(c, prev,
                                bpos_predecessor(cur->data->min_key));
                else
                        ret = set_node_min(c, cur, expected_start);
                if (ret)
                        return ret;
        }
fsck_err:
        return ret;
}

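/* The last child's max_key must match the parent's; extend it if it doesn't: */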
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
                                 struct btree *child)
{
        char buf1[200], buf2[200];
        int ret = 0;

        if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
                        "btree node with incorrect max_key at btree %s level %u:\n"
                        "  %s\n"
                        "  expected %s",
                        bch2_btree_ids[b->c.btree_id], b->c.level,
                        (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1),
                        (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) {
                ret = set_node_max(c, child, b->key.k.p);
                if (ret)
                        return ret;
        }
fsck_err:
        return ret;
}

#define DROP_THIS_NODE          10

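/*
 * Repair the topology under @b: the first pass evicts unreadable children and
 * fixes gaps/overlaps between siblings' min/max keys; the second pass recurses
 * into each child, dropping any that come back empty. Returns DROP_THIS_NODE
 * if @b itself ends up with no live children.
 */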
static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
{
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        struct btree *prev = NULL, *cur = NULL;
        bool have_child, dropped_children = false;
        char buf[200];
        int ret = 0;

        if (!b->c.level)
                return 0;
again:
        have_child = dropped_children = false;
        bch2_bkey_buf_init(&tmp);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_btree_and_journal_iter_advance(&iter);
                bch2_bkey_buf_reassemble(&tmp, c, k);

                cur = bch2_btree_node_get_noiter(c, tmp.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                if (mustfix_fsck_err_on(ret == -EIO, c,
                                "Unreadable btree node at btree %s level %u:\n"
                                "  %s",
                                bch2_btree_ids[b->c.btree_id],
                                b->c.level - 1,
                                (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(tmp.k)), buf))) {
                        bch2_btree_node_evict(c, tmp.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, tmp.k->k.p);
                        if (ret)
                                goto err;
                        continue;
                }

                if (ret) {
                        bch_err(c, "%s: error %i getting btree node",
                                __func__, ret);
                        break;
                }

                ret = btree_repair_node_start(c, b, prev, cur);
                if (prev)
                        six_unlock_read(&prev->c.lock);
                prev = cur;
                cur = NULL;

                if (ret)
                        break;
        }

        if (!ret && !IS_ERR_OR_NULL(prev)) {
                BUG_ON(cur);
                ret = btree_repair_node_end(c, b, prev);
        }

        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        prev = NULL;
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);
        cur = NULL;

        if (ret)
                goto err;

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&tmp, c, k);
                bch2_btree_and_journal_iter_advance(&iter);

                cur = bch2_btree_node_get_noiter(c, tmp.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                if (ret) {
                        bch_err(c, "%s: error %i getting btree node",
                                __func__, ret);
                        goto err;
                }

                ret = bch2_btree_repair_topology_recurse(c, cur);
                six_unlock_read(&cur->c.lock);
                cur = NULL;

                if (ret == DROP_THIS_NODE) {
                        bch2_btree_node_evict(c, tmp.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, tmp.k->k.p);
                        dropped_children = true;
                }

                if (ret)
                        goto err;

                have_child = true;
        }

        if (mustfix_fsck_err_on(!have_child, c,
                        "empty interior btree node at btree %s level %u\n"
                        "  %s",
                        bch2_btree_ids[b->c.btree_id],
                        b->c.level,
                        (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf)))
                ret = DROP_THIS_NODE;
err:
fsck_err:
        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_bkey_buf_exit(&tmp, c);

        if (!ret && dropped_children)
                goto again;

        return ret;
}

static int bch2_repair_topology(struct bch_fs *c)
{
        struct btree *b;
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR && !ret; i++) {
                b = c->btree_roots[i].b;
                if (btree_node_fake(b))
                        continue;

                six_lock_read(&b->c.lock, NULL, NULL);
                ret = bch2_btree_repair_topology_recurse(c, b);
                six_unlock_read(&b->c.lock);

                if (ret == DROP_THIS_NODE) {
                        bch_err(c, "empty btree root - repair unimplemented");
                        ret = FSCK_ERR_EXIT;
                }
        }

        return ret;
}

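/*
 * Check a key's pointers against the (GC copy of the) bucket array: missing or
 * future bucket gens for cached pointers are repaired in place; otherwise the
 * key is rewritten - dropping stale/invalid pointers and stripe entries that
 * no longer match - and the fixed key is inserted into the journal keys.
 * Btree node pointers are never dropped; their gens are fixed in place so the
 * read path can sort out missing nodes.
 */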
static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
                               unsigned level, bool is_root,
                               struct bkey_s_c *k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p = { 0 };
        bool do_update = false;
        int ret = 0;

        bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
                struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
                struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);

                if (fsck_err_on(!g->gen_valid, c,
                                "bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen)) {
                        if (p.ptr.cached) {
                                g2->_mark.gen   = g->_mark.gen          = p.ptr.gen;
                                g2->gen_valid   = g->gen_valid          = true;
                                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
                        } else {
                                do_update = true;
                        }
                }

                if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
                                "bucket %u:%zu data type %s ptr gen in the future: %u > %u",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen, g->mark.gen)) {
                        if (p.ptr.cached) {
                                g2->_mark.gen   = g->_mark.gen  = p.ptr.gen;
                                g2->gen_valid   = g->gen_valid  = true;
                                g2->_mark.data_type             = 0;
                                g2->_mark.dirty_sectors         = 0;
                                g2->_mark.cached_sectors        = 0;
                                set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
                                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
                        } else {
                                do_update = true;
                        }
                }

                if (fsck_err_on(!p.ptr.cached &&
                                gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
                                "bucket %u:%zu data type %s stale dirty ptr: %u < %u",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen, g->mark.gen))
                        do_update = true;

                if (p.has_ec) {
                        struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);

                        if (fsck_err_on(!m || !m->alive, c,
                                        "pointer to nonexistent stripe %llu",
                                        (u64) p.ec.idx))
                                do_update = true;

                        if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
                                        "pointer does not match stripe %llu",
                                        (u64) p.ec.idx))
                                do_update = true;
                }
        }

        if (do_update) {
                struct bkey_ptrs ptrs;
                union bch_extent_entry *entry;
                struct bch_extent_ptr *ptr;
                struct bkey_i *new;

                if (is_root) {
                        bch_err(c, "cannot update btree roots yet");
                        return -EINVAL;
                }

                new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
                if (!new) {
                        bch_err(c, "%s: error allocating new key", __func__);
                        return -ENOMEM;
                }

                bkey_reassemble(new, *k);

                if (level) {
                        /*
                         * We don't want to drop btree node pointers - if the
                         * btree node isn't there anymore, the read path will
                         * sort it out:
                         */
                        ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
                        bkey_for_each_ptr(ptrs, ptr) {
                                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                                struct bucket *g = PTR_BUCKET(ca, ptr, true);

                                ptr->gen = g->mark.gen;
                        }
                } else {
                        bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
                                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                                struct bucket *g = PTR_BUCKET(ca, ptr, true);

                                (ptr->cached &&
                                 (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
                                (!ptr->cached &&
                                 gen_cmp(ptr->gen, g->mark.gen) < 0);
                        }));
again:
                        ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
                        bkey_extent_entry_for_each(ptrs, entry) {
                                if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
                                        struct stripe *m = genradix_ptr(&c->stripes[true],
                                                                        entry->stripe_ptr.idx);
                                        union bch_extent_entry *next_ptr;

                                        bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
                                                if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
                                                        goto found;
                                        next_ptr = NULL;
found:
                                        if (!next_ptr) {
                                                bch_err(c, "aieee, found stripe ptr with no data ptr");
                                                continue;
                                        }

                                        if (!m || !m->alive ||
                                            !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
                                                                       &next_ptr->ptr,
                                                                       m->sectors)) {
                                                bch2_bkey_extent_entry_drop(new, entry);
                                                goto again;
                                        }
                                }
                        }
                }

                ret = bch2_journal_key_insert(c, btree_id, level, new);
                if (ret)
                        kfree(new);
                else
                        *k = bkey_i_to_s_c(new);
        }
fsck_err:
        return ret;
}

/* marking of btree keys/nodes: */

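/*
 * Mark a single key: in the initial pass, first repair pointers/bucket gens
 * via bch2_check_fix_ptrs(), bump the recorded maximum key version if needed,
 * and make sure the key's replicas entry is marked in the superblock; then
 * update per-bucket oldest_gen and the caller's max_stale before calling
 * bch2_mark_key() with the GC trigger flag.
 */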
static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id,
                            unsigned level, bool is_root,
                            struct bkey_s_c *k,
                            u8 *max_stale, bool initial)
{
        struct bkey_ptrs_c ptrs;
        const struct bch_extent_ptr *ptr;
        unsigned flags =
                BTREE_TRIGGER_GC|
                (initial ? BTREE_TRIGGER_NOATOMIC : 0);
        int ret = 0;

        if (initial) {
                BUG_ON(bch2_journal_seq_verify &&
                       k->k->version.lo > journal_cur_seq(&c->journal));

                ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k);
                if (ret)
                        goto err;

                if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
                                "key version number higher than recorded: %llu > %llu",
                                k->k->version.lo,
                                atomic64_read(&c->key_version)))
                        atomic64_set(&c->key_version, k->k->version.lo);

                if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
                    fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c,
                                "superblock not marked as containing replicas (type %u)",
                                k->k->type)) {
                        ret = bch2_mark_bkey_replicas(c, *k);
                        if (ret) {
                                bch_err(c, "error marking bkey replicas: %i", ret);
                                goto err;
                        }
                }
        }

        ptrs = bch2_bkey_ptrs_c(*k);
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                struct bucket *g = PTR_BUCKET(ca, ptr, true);

                if (gen_after(g->oldest_gen, ptr->gen))
                        g->oldest_gen = ptr->gen;

                *max_stale = max(*max_stale, ptr_stale(ca, ptr));
        }

        bch2_mark_key(c, *k, 0, k->k->size, NULL, 0, flags);
fsck_err:
err:
        if (ret)
                bch_err(c, "%s: ret %i", __func__, ret);
        return ret;
}

static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
                              bool initial)
{
        struct btree_node_iter iter;
        struct bkey unpacked;
        struct bkey_s_c k;
        struct bkey_buf prev, cur;
        int ret = 0;

        *max_stale = 0;

        if (!btree_node_type_needs_gc(btree_node_type(b)))
                return 0;

        bch2_btree_node_iter_init_from_start(&iter, b);
        bch2_bkey_buf_init(&prev);
        bch2_bkey_buf_init(&cur);
        bkey_init(&prev.k->k);

        while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
                ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
                                       &k, max_stale, initial);
                if (ret)
                        break;

                bch2_btree_node_iter_advance(&iter, b);

                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&cur, c, k);

                        ret = bch2_gc_check_topology(c, b, &prev, cur,
                                        bch2_btree_node_iter_end(&iter));
                        if (ret)
                                break;
                }
        }

        bch2_bkey_buf_exit(&cur, c);
        bch2_bkey_buf_exit(&prev, c);
        return ret;
}

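/*
 * Walk one btree for a non-initial gc pass: mark each node's keys, advancing
 * gc_pos as we go, and rewrite nodes whose keys reference too many stale
 * pointers.
 */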
static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
                         bool initial, bool metadata_only)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct btree *b;
        unsigned depth = metadata_only                  ? 1
                : bch2_expensive_debug_checks           ? 0
                : !btree_node_type_needs_gc(btree_id)   ? 1
                : 0;
        u8 max_stale = 0;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

        __for_each_btree_node(&trans, iter, btree_id, POS_MIN,
                              0, depth, BTREE_ITER_PREFETCH, b) {
                bch2_verify_btree_nr_keys(b);

                gc_pos_set(c, gc_pos_btree_node(b));

                ret = btree_gc_mark_node(c, b, &max_stale, initial);
                if (ret)
                        break;

                if (!initial) {
                        if (max_stale > 64)
                                bch2_btree_node_rewrite(c, iter,
                                                b->data->keys.seq,
                                                BTREE_INSERT_NOWAIT|
                                                BTREE_INSERT_GC_LOCK_HELD);
                        else if (!bch2_btree_gc_rewrite_disabled &&
                                 (bch2_btree_gc_always_rewrite || max_stale > 16))
                                bch2_btree_node_rewrite(c, iter,
                                                b->data->keys.seq,
                                                BTREE_INSERT_NOWAIT|
                                                BTREE_INSERT_GC_LOCK_HELD);
                }

                bch2_trans_cond_resched(&trans);
        }
        bch2_trans_iter_put(&trans, iter);

        ret = bch2_trans_exit(&trans) ?: ret;
        if (ret)
                return ret;

        mutex_lock(&c->btree_root_lock);
        b = c->btree_roots[btree_id].b;
        if (!btree_node_fake(b)) {
                struct bkey_s_c k = bkey_i_to_s_c(&b->key);

                ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
                                       &k, &max_stale, initial);
        }
        gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
        mutex_unlock(&c->btree_root_lock);

        return ret;
}

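/*
 * Initial GC walks nodes via a btree_and_journal_iter so that keys which only
 * exist in the journal are marked as well; unreadable child nodes are treated
 * as topology errors that may kick off the topology repair pass.
 */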
static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
                                      unsigned target_depth)
{
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf cur, prev;
        u8 max_stale = 0;
        char buf[200];
        int ret = 0;

        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
        bch2_bkey_buf_init(&prev);
        bch2_bkey_buf_init(&cur);
        bkey_init(&prev.k->k);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
                BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);

                ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
                                       &k, &max_stale, true);
                if (ret) {
                        bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
                        goto fsck_err;
                }

                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&cur, c, k);
                        k = bkey_i_to_s_c(cur.k);

                        bch2_btree_and_journal_iter_advance(&iter);

                        ret = bch2_gc_check_topology(c, b,
                                        &prev, cur,
                                        !bch2_btree_and_journal_iter_peek(&iter).k);
                        if (ret)
                                goto fsck_err;
                } else {
                        bch2_btree_and_journal_iter_advance(&iter);
                }
        }

        if (b->c.level > target_depth) {
                bch2_btree_and_journal_iter_exit(&iter);
                bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

                while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                        struct btree *child;

                        bch2_bkey_buf_reassemble(&cur, c, k);
                        bch2_btree_and_journal_iter_advance(&iter);

                        child = bch2_btree_node_get_noiter(c, cur.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);
                        ret = PTR_ERR_OR_ZERO(child);

                        if (ret == -EIO) {
                                bch2_topology_error(c);

                                if (fsck_err(c, "Unreadable btree node at btree %s level %u:\n"
                                        "  %s",
                                        bch2_btree_ids[b->c.btree_id],
                                        b->c.level - 1,
                                        (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf))) {
                                        ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
                                        bch_info(c, "Halting mark and sweep to start topology repair pass");
                                        goto fsck_err;
                                } else {
                                        /*
                                         * Continue marking when opted to not
                                         * fix the error:
                                         */
                                        ret = 0;
                                        set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
                                        continue;
                                }
                        } else if (ret) {
                                bch_err(c, "%s: error %i getting btree node",
                                        __func__, ret);
                                break;
                        }

                        ret = bch2_gc_btree_init_recurse(c, child,
                                                         target_depth);
                        six_unlock_read(&child->c.lock);

                        if (ret)
                                break;
                }
        }
fsck_err:
        bch2_bkey_buf_exit(&cur, c);
        bch2_bkey_buf_exit(&prev, c);
        bch2_btree_and_journal_iter_exit(&iter);
        return ret;
}

static int bch2_gc_btree_init(struct bch_fs *c,
                              enum btree_id btree_id,
                              bool metadata_only)
{
        struct btree *b;
        unsigned target_depth = metadata_only           ? 1
                : bch2_expensive_debug_checks           ? 0
                : !btree_node_type_needs_gc(btree_id)   ? 1
                : 0;
        u8 max_stale = 0;
        char buf[100];
        int ret = 0;

        b = c->btree_roots[btree_id].b;

        if (btree_node_fake(b))
                return 0;

        six_lock_read(&b->c.lock, NULL, NULL);
        if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
                        "btree root with incorrect min_key: %s",
                        (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
                bch_err(c, "repair unimplemented");
                ret = FSCK_ERR_EXIT;
                goto fsck_err;
        }

        if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c,
                        "btree root with incorrect max_key: %s",
                        (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
                bch_err(c, "repair unimplemented");
                ret = FSCK_ERR_EXIT;
                goto fsck_err;
        }

        if (b->c.level >= target_depth)
                ret = bch2_gc_btree_init_recurse(c, b, target_depth);

        if (!ret) {
                struct bkey_s_c k = bkey_i_to_s_c(&b->key);

                ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
                                       &k, &max_stale, true);
        }
fsck_err:
        six_unlock_read(&b->c.lock);

        if (ret < 0)
                bch_err(c, "%s: ret %i", __func__, ret);
        return ret;
}

static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
        return  (int) btree_id_to_gc_phase(l) -
                (int) btree_id_to_gc_phase(r);
}

static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
        enum btree_id ids[BTREE_ID_NR];
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++)
                ids[i] = i;
        bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

        for (i = 0; i < BTREE_ID_NR && !ret; i++)
                ret = initial
                        ? bch2_gc_btree_init(c, ids[i], metadata_only)
                        : bch2_gc_btree(c, ids[i], initial, metadata_only);

        if (ret < 0)
                bch_err(c, "%s: ret %i", __func__, ret);
        return ret;
}

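/*
 * Mark the buckets covering the metadata sector range [start, end) as the
 * given data type, one bucket at a time:
 */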
static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
                                  u64 start, u64 end,
                                  enum bch_data_type type,
                                  unsigned flags)
{
        u64 b = sector_to_bucket(ca, start);

        do {
                unsigned sectors =
                        min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

                bch2_mark_metadata_bucket(c, ca, b, type, sectors,
                                          gc_phase(GC_PHASE_SB), flags);
                b++;
                start += sectors;
        } while (start < end);
}

void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
                              unsigned flags)
{
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        unsigned i;
        u64 b;

        /*
         * This conditional is kind of gross, but we may be called from the
         * device add path, before the new device has actually been added to the
         * running filesystem:
         */
        if (c) {
                lockdep_assert_held(&c->sb_lock);
                percpu_down_read(&c->mark_lock);
        }

        for (i = 0; i < layout->nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout->sb_offset[i]);

                if (offset == BCH_SB_SECTOR)
                        mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
                                              BCH_DATA_sb, flags);

                mark_metadata_sectors(c, ca, offset,
                                      offset + (1 << layout->sb_max_size_bits),
                                      BCH_DATA_sb, flags);
        }

        for (i = 0; i < ca->journal.nr; i++) {
                b = ca->journal.buckets[i];
                bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB), flags);
        }

        if (c)
                percpu_up_read(&c->mark_lock);
}

static void bch2_mark_superblocks(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        mutex_lock(&c->sb_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_SB));

        for_each_online_member(ca, c, i)
                bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
        mutex_unlock(&c->sb_lock);
}

#if 0
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
        struct btree_update *as;
        struct pending_btree_node_free *d;

        mutex_lock(&c->btree_interior_update_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
                        bch2_mark_key(c, bkey_i_to_s_c(&d->key),
                                      0, 0, NULL, 0,
                                      BTREE_TRIGGER_GC);

        mutex_unlock(&c->btree_interior_update_lock);
}
#endif

static void bch2_gc_free(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        genradix_free(&c->stripes[1]);

        for_each_member_device(ca, c, i) {
                kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
                        sizeof(struct bucket_array) +
                        ca->mi.nbuckets * sizeof(struct bucket));
                ca->buckets[1] = NULL;

                free_percpu(ca->usage_gc);
                ca->usage_gc = NULL;
        }

        free_percpu(c->usage_gc);
        c->usage_gc = NULL;
}

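/*
 * Compare the usage information GC just recomputed (the [1]/_gc copies of
 * stripes, buckets and device/fs usage) against the live copies; mismatches
 * are reported via fsck_err() when the on-disk info was expected to be
 * accurate, and the GC values are copied over in either case.
 */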
static int bch2_gc_done(struct bch_fs *c,
                        bool initial, bool metadata_only)
{
        struct bch_dev *ca;
        bool verify = !metadata_only && (!initial ||
                       (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
        unsigned i, dev;
        int ret = 0;

#define copy_field(_f, _msg, ...)                                       \
        if (dst->_f != src->_f) {                                       \
                if (verify)                                             \
                        fsck_err(c, _msg ": got %llu, should be %llu"   \
                                , ##__VA_ARGS__, dst->_f, src->_f);     \
                dst->_f = src->_f;                                      \
                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
        }
#define copy_stripe_field(_f, _msg, ...)                                \
        if (dst->_f != src->_f) {                                       \
                if (verify)                                             \
                        fsck_err(c, "stripe %zu has wrong "_msg         \
                                ": got %u, should be %u",               \
                                iter.pos, ##__VA_ARGS__,                \
                                dst->_f, src->_f);                      \
                dst->_f = src->_f;                                      \
                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
        }
#define copy_bucket_field(_f)                                           \
        if (dst->b[b].mark._f != src->b[b].mark._f) {                   \
                if (verify)                                             \
                        fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f  \
                                ": got %u, should be %u", dev, b,       \
                                dst->b[b].mark.gen,                     \
                                bch2_data_types[dst->b[b].mark.data_type],\
                                dst->b[b].mark._f, src->b[b].mark._f);  \
                dst->b[b]._mark._f = src->b[b].mark._f;                 \
                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
        }
#define copy_dev_field(_f, _msg, ...)                                   \
        copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
#define copy_fs_field(_f, _msg, ...)                                    \
        copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)

        if (!metadata_only) {
                struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
                struct stripe *dst, *src;

                while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
                        dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);

                        if (dst->alive          != src->alive ||
                            dst->sectors        != src->sectors ||
                            dst->algorithm      != src->algorithm ||
                            dst->nr_blocks      != src->nr_blocks ||
                            dst->nr_redundant   != src->nr_redundant) {
                                bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
                                ret = -EINVAL;
                                goto fsck_err;
                        }

                        for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
                                copy_stripe_field(block_sectors[i],
                                                  "block_sectors[%u]", i);

                        dst->blocks_nonempty = 0;
                        for (i = 0; i < dst->nr_blocks; i++)
                                dst->blocks_nonempty += dst->block_sectors[i] != 0;

                        genradix_iter_advance(&iter, &c->stripes[1]);
                }
        }

        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);

        for_each_member_device(ca, c, dev) {
                struct bucket_array *dst = __bucket_array(ca, 0);
                struct bucket_array *src = __bucket_array(ca, 1);
                size_t b;

                for (b = 0; b < src->nbuckets; b++) {
                        copy_bucket_field(gen);
                        copy_bucket_field(data_type);
                        copy_bucket_field(stripe);
                        copy_bucket_field(dirty_sectors);
                        copy_bucket_field(cached_sectors);

                        dst->b[b].oldest_gen = src->b[b].oldest_gen;
                }

                {
                        struct bch_dev_usage *dst = ca->usage_base;
                        struct bch_dev_usage *src = (void *)
                                bch2_acc_percpu_u64s((void *) ca->usage_gc,
                                                     dev_usage_u64s());

                        copy_dev_field(buckets_ec,              "buckets_ec");
                        copy_dev_field(buckets_unavailable,     "buckets_unavailable");

                        for (i = 0; i < BCH_DATA_NR; i++) {
                                copy_dev_field(d[i].buckets,    "%s buckets", bch2_data_types[i]);
                                copy_dev_field(d[i].sectors,    "%s sectors", bch2_data_types[i]);
                                copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
                        }
                }
        };

        {
                unsigned nr = fs_usage_u64s(c);
                struct bch_fs_usage *dst = c->usage_base;
                struct bch_fs_usage *src = (void *)
                        bch2_acc_percpu_u64s((void *) c->usage_gc, nr);

                copy_fs_field(hidden,           "hidden");
                copy_fs_field(btree,            "btree");

                if (!metadata_only) {
                        copy_fs_field(data,     "data");
                        copy_fs_field(cached,   "cached");
                        copy_fs_field(reserved, "reserved");
                        copy_fs_field(nr_inodes,"nr_inodes");

                        for (i = 0; i < BCH_REPLICAS_MAX; i++)
                                copy_fs_field(persistent_reserved[i],
                                              "persistent_reserved[%i]", i);
                }

                for (i = 0; i < c->replicas.nr; i++) {
                        struct bch_replicas_entry *e =
                                cpu_replicas_entry(&c->replicas, i);
                        char buf[80];

                        if (metadata_only &&
                            (e->data_type == BCH_DATA_user ||
                             e->data_type == BCH_DATA_cached))
                                continue;

                        bch2_replicas_entry_to_text(&PBUF(buf), e);

                        copy_fs_field(replicas[i], "%s", buf);
                }
        }

#undef copy_fs_field
#undef copy_dev_field
#undef copy_bucket_field
#undef copy_stripe_field
#undef copy_field
fsck_err:
        if (ret)
                bch_err(c, "%s: ret %i", __func__, ret);
        return ret;
}

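/*
 * Allocate the GC copies of the usage counters and bucket arrays
 * (c->usage_gc, ca->usage_gc, ca->buckets[1] and the gc stripes radix tree),
 * then seed the GC buckets with the current gens; for metadata-only GC, user
 * and cached bucket marks are carried over unchanged since they won't be
 * recomputed.
 */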
1174 static int bch2_gc_start(struct bch_fs *c,
1175                          bool metadata_only)
1176 {
1177         struct bch_dev *ca;
1178         unsigned i;
1179         int ret;
1180
1181         BUG_ON(c->usage_gc);
1182
1183         c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
1184                                          sizeof(u64), GFP_KERNEL);
1185         if (!c->usage_gc) {
1186                 bch_err(c, "error allocating c->usage_gc");
1187                 return -ENOMEM;
1188         }
1189
1190         for_each_member_device(ca, c, i) {
1191                 BUG_ON(ca->buckets[1]);
1192                 BUG_ON(ca->usage_gc);
1193
1194                 ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
1195                                 ca->mi.nbuckets * sizeof(struct bucket),
1196                                 GFP_KERNEL|__GFP_ZERO);
1197                 if (!ca->buckets[1]) {
1198                         percpu_ref_put(&ca->ref);
1199                         bch_err(c, "error allocating ca->buckets[gc]");
1200                         return -ENOMEM;
1201                 }
1202
1203                 ca->usage_gc = alloc_percpu(struct bch_dev_usage);
1204                 if (!ca->usage_gc) {
1205                         bch_err(c, "error allocating ca->usage_gc");
1206                         percpu_ref_put(&ca->ref);
1207                         return -ENOMEM;
1208                 }
1209         }
1210
1211         ret = bch2_ec_mem_alloc(c, true);
1212         if (ret) {
1213                 bch_err(c, "error allocating ec gc mem");
1214                 return ret;
1215         }
1216
1217         percpu_down_write(&c->mark_lock);
1218
1219         /*
1220          * indicate to stripe code that we need to allocate for the gc stripes
1221          * radix tree, too
1222          */
1223         gc_pos_set(c, gc_phase(GC_PHASE_START));
1224
1225         for_each_member_device(ca, c, i) {
1226                 struct bucket_array *dst = __bucket_array(ca, 1);
1227                 struct bucket_array *src = __bucket_array(ca, 0);
1228                 size_t b;
1229
1230                 dst->first_bucket       = src->first_bucket;
1231                 dst->nbuckets           = src->nbuckets;
1232
1233                 for (b = 0; b < src->nbuckets; b++) {
1234                         struct bucket *d = &dst->b[b];
1235                         struct bucket *s = &src->b[b];
1236
1237                         d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
1238                         d->gen_valid = s->gen_valid;
1239
1240                         if (metadata_only &&
1241                             (s->mark.data_type == BCH_DATA_user ||
1242                              s->mark.data_type == BCH_DATA_cached))
1243                                 d->_mark = s->mark;
1244                 }
1245         };
1246
1247         percpu_up_write(&c->mark_lock);
1248
1249         return 0;
1250 }
1251
1252 /**
1253  * bch2_gc - walk _all_ references to buckets, and recompute them:
1254  *
1255  * Order matters here:
1256  *  - Concurrent GC relies on the fact that we have a total ordering for
1257  *    everything that GC walks - see  gc_will_visit_node(),
1258  *    gc_will_visit_root()
1259  *
1260  *  - also, references move around in the course of index updates and
1261  *    various other crap: everything needs to agree on the ordering
1262  *    references are allowed to move around in - e.g., we're allowed to
1263  *    start with a reference owned by an open_bucket (the allocator) and
1264  *    move it to the btree, but not the reverse.
1265  *
1266  *    This is necessary to ensure that gc doesn't miss references that
1267  *    move around - if references move backwards in the ordering GC
1268  *    uses, GC could skip past them
1269  */
1270 int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
1271 {
1272         struct bch_dev *ca;
1273         u64 start_time = local_clock();
1274         unsigned i, iter = 0;
1275         int ret;
1276
1277         lockdep_assert_held(&c->state_lock);
1278         trace_gc_start(c);
1279
1280         down_write(&c->gc_lock);
1281
1282         /* flush interior btree updates: */
1283         closure_wait_event(&c->btree_interior_update_wait,
1284                            !bch2_btree_interior_updates_nr_pending(c));
1285 again:
1286         ret = bch2_gc_start(c, metadata_only);
1287         if (ret)
1288                 goto out;
1289
1290         bch2_mark_superblocks(c);
1291
1292         if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags) &&
1293             !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
1294             c->opts.fix_errors != FSCK_OPT_NO) {
1295                 bch_info(c, "starting topology repair pass");
1296                 ret = bch2_repair_topology(c);
1297                 if (ret)
1298                         goto out;
1299                 bch_info(c, "topology repair pass done");
1300         }
1301
1302         ret = bch2_gc_btrees(c, initial, metadata_only);
1303
1304         if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR &&
1305             !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
1306                 set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
1307                 ret = 0;
1308         }
1309
1310         if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR)
1311                 ret = FSCK_ERR_EXIT;
1312
1313         if (ret)
1314                 goto out;
1315
1316 #if 0
1317         bch2_mark_pending_btree_node_frees(c);
1318 #endif
1319         c->gc_count++;
1320
1321         if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
1322             (!iter && bch2_test_restart_gc)) {
1323                 /*
1324                  * XXX: make sure gens we fixed got saved
1325                  */
1326                 if (iter++ <= 2) {
1327                         bch_info(c, "Second GC pass needed, restarting:");
1328                         clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
1329                         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
1330
1331                         percpu_down_write(&c->mark_lock);
1332                         bch2_gc_free(c);
1333                         percpu_up_write(&c->mark_lock);
1334                         /* flush fsck errors, reset counters */
1335                         bch2_flush_fsck_errs(c);
1336
1337                         goto again;
1338                 }
1339
1340                 bch_info(c, "Unable to fix bucket gens, looping");
1341                 ret = -EINVAL;
1342         }
1343 out:
1344         if (!ret) {
1345                 bch2_journal_block(&c->journal);
1346
1347                 percpu_down_write(&c->mark_lock);
1348                 ret = bch2_gc_done(c, initial, metadata_only);
1349
1350                 bch2_journal_unblock(&c->journal);
1351         } else {
1352                 percpu_down_write(&c->mark_lock);
1353         }
1354
1355         /* Indicates that gc is no longer in progress: */
1356         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
1357
1358         bch2_gc_free(c);
1359         percpu_up_write(&c->mark_lock);
1360
1361         up_write(&c->gc_lock);
1362
1363         trace_gc_end(c);
1364         bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
1365
1366         /*
1367          * Wake up the allocator in case it was waiting for buckets
1368          * because it wasn't able to increment bucket gens
1369          */
1370         for_each_member_device(ca, c, i)
1371                 bch2_wake_allocator(ca);
1372
1373         /*
1374          * At startup, allocations can happen directly instead of via the
1375          * allocator thread - issue wakeup in case they blocked on gc_lock:
1376          */
1377         closure_wake_up(&c->freelist_wait);
1378         return ret;
1379 }
1380
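/*
 * Returns true if any pointer in @k has fallen more than 16 gens behind the
 * current gen of the bucket it points into - i.e. the key should be
 * rewritten. Also lowers each referenced bucket's gc_gen to the oldest
 * pointer gen seen, for the oldest_gen recalculation in bch2_gc_gens().
 */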
1381 static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
1382 {
1383         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1384         const struct bch_extent_ptr *ptr;
1385
1386         percpu_down_read(&c->mark_lock);
1387         bkey_for_each_ptr(ptrs, ptr) {
1388                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1389                 struct bucket *g = PTR_BUCKET(ca, ptr, false);
1390
1391                 if (gen_after(g->mark.gen, ptr->gen) > 16) {
1392                         percpu_up_read(&c->mark_lock);
1393                         return true;
1394                 }
1395         }
1396
1397         bkey_for_each_ptr(ptrs, ptr) {
1398                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1399                 struct bucket *g = PTR_BUCKET(ca, ptr, false);
1400
1401                 if (gen_after(g->gc_gen, ptr->gen))
1402                         g->gc_gen = ptr->gen;
1403         }
1404         percpu_up_read(&c->mark_lock);
1405
1406         return false;
1407 }
1408
1409 /*
1410  * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
1411  * node pointers currently never have cached pointers that can become stale:
1412  */
1413 static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
1414 {
1415         struct btree_trans trans;
1416         struct btree_iter *iter;
1417         struct bkey_s_c k;
1418         struct bkey_buf sk;
1419         int ret = 0, commit_err = 0;
1420
1421         bch2_bkey_buf_init(&sk);
1422         bch2_trans_init(&trans, c, 0, 0);
1423
1424         iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
1425                                    BTREE_ITER_PREFETCH|
1426                                    BTREE_ITER_NOT_EXTENTS|
1427                                    BTREE_ITER_ALL_SNAPSHOTS);
1428
1429         while ((k = bch2_btree_iter_peek(iter)).k &&
1430                !(ret = bkey_err(k))) {
1431                 c->gc_gens_pos = iter->pos;
1432
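                /*
                 * If any pointer gens have fallen too far behind, rewrite the
                 * key: bch2_extent_normalize() drops stale cached pointers
                 * before the update is committed.
                 */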
1433                 if (gc_btree_gens_key(c, k) && !commit_err) {
1434                         bch2_bkey_buf_reassemble(&sk, c, k);
1435                         bch2_extent_normalize(c, bkey_i_to_s(sk.k));
1436
1437                         bch2_trans_update(&trans, iter, sk.k, 0);
1438
1439                         commit_err = bch2_trans_commit(&trans, NULL, NULL,
1440                                                        BTREE_INSERT_NOWAIT|
1441                                                        BTREE_INSERT_NOFAIL);
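                        /*
                         * -EINTR here means transaction restart: clear the
                         * error and retry this key without advancing the
                         * iterator.
                         */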
1442                         if (commit_err == -EINTR) {
1443                                 commit_err = 0;
1444                                 continue;
1445                         }
1446                 }
1447
1448                 bch2_btree_iter_advance(iter);
1449         }
1450         bch2_trans_iter_put(&trans, iter);
1451
1452         bch2_trans_exit(&trans);
1453         bch2_bkey_buf_exit(&sk, c);
1454
1455         return ret;
1456 }
1457
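/*
 * bch2_gc_gens(): recalculate each bucket's oldest_gen - the oldest pointer
 * generation still referencing the bucket. Every bucket's current gen is
 * snapshotted into gc_gen, the btrees with pointers are walked to lower
 * gc_gen to the oldest referencing pointer gen, and the result is then
 * published as oldest_gen.
 */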
1458 int bch2_gc_gens(struct bch_fs *c)
1459 {
1460         struct bch_dev *ca;
1461         struct bucket_array *buckets;
1462         struct bucket *g;
1463         unsigned i;
1464         int ret = 0;
1465
1466         /*
1467          * Ideally we would be using state_lock and not gc_lock here, but that
1468          * introduces a deadlock in the RO path - we currently take the state
1469          * lock at the start of going RO, thus the gc thread may get stuck:
1470          */
1471         down_read(&c->gc_lock);
1472
1473         for_each_member_device(ca, c, i) {
1474                 down_read(&ca->bucket_lock);
1475                 buckets = bucket_array(ca);
1476
1477                 for_each_bucket(g, buckets)
1478                         g->gc_gen = g->mark.gen;
1479                 up_read(&ca->bucket_lock);
1480         }
1481
1482         for (i = 0; i < BTREE_ID_NR; i++)
1483                 if ((1 << i) & BTREE_ID_HAS_PTRS) {
1484                         c->gc_gens_btree = i;
1485                         c->gc_gens_pos = POS_MIN;
1486                         ret = bch2_gc_btree_gens(c, i);
1487                         if (ret) {
1488                                 bch_err(c, "error recalculating oldest_gen: %i", ret);
1489                                 goto err;
1490                         }
1491                 }
1492
1493         for_each_member_device(ca, c, i) {
1494                 down_read(&ca->bucket_lock);
1495                 buckets = bucket_array(ca);
1496
1497                 for_each_bucket(g, buckets)
1498                         g->oldest_gen = g->gc_gen;
1499                 up_read(&ca->bucket_lock);
1500         }
1501
1502         c->gc_gens_btree        = 0;
1503         c->gc_gens_pos          = POS_MIN;
1504
1505         c->gc_count++;
1506 err:
1507         up_read(&c->gc_lock);
1508         return ret;
1509 }
1510
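/*
 * GC thread: sleeps until kicked via c->kick_gc or, when btree_gc_periodic
 * is set, until the write I/O clock has advanced by 1/16th of the
 * filesystem's capacity. It currently only runs the gens pass - full
 * runtime gc is disabled (see the #if 0 below).
 */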
1511 static int bch2_gc_thread(void *arg)
1512 {
1513         struct bch_fs *c = arg;
1514         struct io_clock *clock = &c->io_clock[WRITE];
1515         unsigned long last = atomic64_read(&clock->now);
1516         unsigned last_kick = atomic_read(&c->kick_gc);
1517         int ret;
1518
1519         set_freezable();
1520
1521         while (1) {
1522                 while (1) {
1523                         set_current_state(TASK_INTERRUPTIBLE);
1524
1525                         if (kthread_should_stop()) {
1526                                 __set_current_state(TASK_RUNNING);
1527                                 return 0;
1528                         }
1529
1530                         if (atomic_read(&c->kick_gc) != last_kick)
1531                                 break;
1532
1533                         if (c->btree_gc_periodic) {
1534                                 unsigned long next = last + c->capacity / 16;
1535
1536                                 if (atomic64_read(&clock->now) >= next)
1537                                         break;
1538
1539                                 bch2_io_clock_schedule_timeout(clock, next);
1540                         } else {
1541                                 schedule();
1542                         }
1543
1544                         try_to_freeze();
1545                 }
1546                 __set_current_state(TASK_RUNNING);
1547
1548                 last = atomic64_read(&clock->now);
1549                 last_kick = atomic_read(&c->kick_gc);
1550
1551                 /*
1552                  * Full gc is currently incompatible with btree key cache:
1553                  */
1554 #if 0
1555                 ret = bch2_gc(c, false, false);
1556 #else
1557                 ret = bch2_gc_gens(c);
1558 #endif
1559                 if (ret < 0)
1560                         bch_err(c, "btree gc failed: %i", ret);
1561
1562                 debug_check_no_locks_held();
1563         }
1564
1565         return 0;
1566 }
1567
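/*
 * bch2_gc_thread_start() holds a reference on the gc task (get_task_struct());
 * bch2_gc_thread_stop() clears c->gc_thread before kthread_stop() so nothing
 * else uses the thread while it exits, then drops that reference.
 */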
1568 void bch2_gc_thread_stop(struct bch_fs *c)
1569 {
1570         struct task_struct *p;
1571
1572         p = c->gc_thread;
1573         c->gc_thread = NULL;
1574
1575         if (p) {
1576                 kthread_stop(p);
1577                 put_task_struct(p);
1578         }
1579 }
1580
1581 int bch2_gc_thread_start(struct bch_fs *c)
1582 {
1583         struct task_struct *p;
1584
1585         if (c->gc_thread)
1586                 return 0;
1587
1588         p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
1589         if (IS_ERR(p)) {
1590                 bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
1591                 return PTR_ERR(p);
1592         }
1593
1594         get_task_struct(p);
1595         c->gc_thread = p;
1596         wake_up_process(p);
1597         return 0;
1598 }
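/*
 * Minimal usage sketch for the entry points above (illustrative only - the
 * surrounding context is assumed, not taken from the bcachefs sources):
 *
 *	ret = bch2_gc_thread_start(c);	// start periodic/kicked gc
 *	if (ret)
 *		return ret;
 *	...
 *	ret = bch2_gc_gens(c);		// or run a gens pass directly
 *	...
 *	bch2_gc_thread_stop(c);		// on shutdown
 */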