1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  * Copyright (C) 2014 Datera Inc.
5  */
6
7 #include "bcachefs.h"
8 #include "alloc_background.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "bkey_buf.h"
12 #include "btree_locking.h"
13 #include "btree_update_interior.h"
14 #include "btree_io.h"
15 #include "btree_gc.h"
16 #include "buckets.h"
17 #include "clock.h"
18 #include "debug.h"
19 #include "ec.h"
20 #include "error.h"
21 #include "extents.h"
22 #include "journal.h"
23 #include "keylist.h"
24 #include "move.h"
25 #include "recovery.h"
26 #include "reflink.h"
27 #include "replicas.h"
28 #include "super-io.h"
29
30 #include <linux/slab.h>
31 #include <linux/bitops.h>
32 #include <linux/freezer.h>
33 #include <linux/kthread.h>
34 #include <linux/preempt.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/task.h>
37 #include <trace/events/bcachefs.h>
38
39 #define DROP_THIS_NODE          10
40 #define DROP_PREV_NODE          11
41
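/*
 * c->gc_pos tracks how far mark and sweep has progressed; it's updated under
 * gc_pos_lock (a seqcount) so other code can cheaply and consistently check
 * whether GC has passed a given position yet.
 */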
42 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
43 {
44         preempt_disable();
45         write_seqcount_begin(&c->gc_pos_lock);
46         c->gc_pos = new_pos;
47         write_seqcount_end(&c->gc_pos_lock);
48         preempt_enable();
49 }
50
51 static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
52 {
53         BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
54         __gc_pos_set(c, new_pos);
55 }
56
57 /*
58  * Missing: if an interior btree node is empty, we need to do something -
59  * perhaps just kill it
60  */
61 static int bch2_gc_check_topology(struct bch_fs *c,
62                                   struct btree *b,
63                                   struct bkey_buf *prev,
64                                   struct bkey_buf cur,
65                                   bool is_last)
66 {
67         struct bpos node_start  = b->data->min_key;
68         struct bpos node_end    = b->data->max_key;
69         struct bpos expected_start = bkey_deleted(&prev->k->k)
70                 ? node_start
71                 : bpos_successor(prev->k->k.p);
72         char buf1[200], buf2[200];
73         int ret = 0;
74
75         if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
76                 struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
77
78                 if (bkey_deleted(&prev->k->k)) {
79                         struct printbuf out = PBUF(buf1);
80                         pr_buf(&out, "start of node: ");
81                         bch2_bpos_to_text(&out, node_start);
82                 } else {
83                         bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev->k));
84                 }
85
86                 if (bpos_cmp(expected_start, bp->v.min_key)) {
87                         bch2_topology_error(c);
88
89                         if (__fsck_err(c,
90                                   FSCK_CAN_FIX|
91                                   FSCK_CAN_IGNORE|
92                                   FSCK_NO_RATELIMIT,
93                                   "btree node with incorrect min_key at btree %s level %u:\n"
94                                   "  prev %s\n"
95                                   "  cur %s",
96                                   bch2_btree_ids[b->c.btree_id], b->c.level,
97                                   buf1,
98                                   (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(cur.k)), buf2)) &&
99                             !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
100                                 bch_info(c, "Halting mark and sweep to start topology repair pass");
101                                 return FSCK_ERR_START_TOPOLOGY_REPAIR;
102                         } else {
103                                 set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
104                         }
105                 }
106         }
107
108         if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
109                 bch2_topology_error(c);
110
111                 if (__fsck_err(c,
112                           FSCK_CAN_FIX|
113                           FSCK_CAN_IGNORE|
114                           FSCK_NO_RATELIMIT,
115                           "btree node with incorrect max_key at btree %s level %u:\n"
116                           "  %s\n"
117                           "  expected %s",
118                           bch2_btree_ids[b->c.btree_id], b->c.level,
119                           (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(cur.k)), buf1),
120                           (bch2_bpos_to_text(&PBUF(buf2), node_end), buf2)) &&
121                     !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
122                         bch_info(c, "Halting mark and sweep to start topology repair pass");
123                         return FSCK_ERR_START_TOPOLOGY_REPAIR;
124                 } else {
125                         set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
126                 }
127         }
128
129         bch2_bkey_buf_copy(prev, c, cur.k);
130 fsck_err:
131         return ret;
132 }
133
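/*
 * Construct a btree_ptr_v2 version of a node's key: the repair helpers below
 * need the v2 format so that min_key can be stored in the key itself.
 */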
134 static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
135 {
136         switch (b->key.k.type) {
137         case KEY_TYPE_btree_ptr: {
138                 struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);
139
140                 dst->k.p                = src->k.p;
141                 dst->v.mem_ptr          = 0;
142                 dst->v.seq              = b->data->keys.seq;
143                 dst->v.sectors_written  = 0;
144                 dst->v.flags            = 0;
145                 dst->v.min_key          = b->data->min_key;
146                 set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
147                 memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
148                 break;
149         }
150         case KEY_TYPE_btree_ptr_v2:
151                 bkey_copy(&dst->k_i, &b->key);
152                 break;
153         default:
154                 BUG();
155         }
156 }
157
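/*
 * Repair helper: give @b a new min_key by rewriting its key in the journal
 * keys, then drop any keys in the node that now fall outside its range.
 */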
158 static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
159 {
160         struct bkey_i_btree_ptr_v2 *new;
161         int ret;
162
163         new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
164         if (!new)
165                 return -ENOMEM;
166
167         btree_ptr_to_v2(b, new);
168         b->data->min_key        = new_min;
169         new->v.min_key          = new_min;
170         SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
171
172         ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
173         if (ret) {
174                 kfree(new);
175                 return ret;
176         }
177
178         bch2_btree_node_drop_keys_outside_node(b);
179
180         return 0;
181 }
182
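/*
 * Repair helper: give @b a new max_key. Since a btree node is indexed by its
 * max_key, the old key is deleted from the journal keys, a new one inserted,
 * and the node rehashed in the btree node cache under the new key.
 */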
183 static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
184 {
185         struct bkey_i_btree_ptr_v2 *new;
186         int ret;
187
188         ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
189         if (ret)
190                 return ret;
191
192         new = kmalloc(BKEY_BTREE_PTR_U64s_MAX * sizeof(u64), GFP_KERNEL);
193         if (!new)
194                 return -ENOMEM;
195
196         btree_ptr_to_v2(b, new);
197         b->data->max_key        = new_max;
198         new->k.p                = new_max;
199         SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
200
201         ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
202         if (ret) {
203                 kfree(new);
204                 return ret;
205         }
206
207         bch2_btree_node_drop_keys_outside_node(b);
208
209         mutex_lock(&c->btree_cache.lock);
210         bch2_btree_node_hash_remove(&c->btree_cache, b);
211
212         bkey_copy(&b->key, &new->k_i);
213         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
214         BUG_ON(ret);
215         mutex_unlock(&c->btree_cache.lock);
216         return 0;
217 }
218
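/*
 * Check that @cur starts exactly where @prev ended (or at the start of the
 * parent node @b): depending on which node is newer, either repair prev's
 * max_key or cur's min_key, or return DROP_PREV_NODE/DROP_THIS_NODE if one
 * node was entirely overwritten by the other.
 */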
219 static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
220                                         struct btree *prev, struct btree *cur)
221 {
222         struct bpos expected_start = !prev
223                 ? b->data->min_key
224                 : bpos_successor(prev->key.k.p);
225         char buf1[200], buf2[200];
226         int ret = 0;
227
228         if (!prev) {
229                 struct printbuf out = PBUF(buf1);
230                 pr_buf(&out, "start of node: ");
231                 bch2_bpos_to_text(&out, b->data->min_key);
232         } else {
233                 bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
234         }
235
236         bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key));
237
238         if (prev &&
239             bpos_cmp(expected_start, cur->data->min_key) > 0 &&
240             BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
241                 /* cur overwrites prev: */
242
243                 if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
244                                                  cur->data->min_key) >= 0, c,
245                                 "btree node overwritten by next node at btree %s level %u:\n"
246                                 "  node %s\n"
247                                 "  next %s",
248                                 bch2_btree_ids[b->c.btree_id], b->c.level,
249                                 buf1, buf2))
250                         return DROP_PREV_NODE;
251
252                 if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
253                                                  bpos_predecessor(cur->data->min_key)), c,
254                                 "btree node with incorrect max_key at btree %s level %u:\n"
255                                 "  node %s\n"
256                                 "  next %s",
257                                 bch2_btree_ids[b->c.btree_id], b->c.level,
258                                 buf1, buf2))
259                         ret = set_node_max(c, prev,
260                                            bpos_predecessor(cur->data->min_key));
261         } else {
262                 /* prev overwrites cur: */
263
264                 if (mustfix_fsck_err_on(bpos_cmp(expected_start,
265                                                  cur->data->max_key) >= 0, c,
266                                 "btree node overwritten by prev node at btree %s level %u:\n"
267                                 "  prev %s\n"
268                                 "  node %s",
269                                 bch2_btree_ids[b->c.btree_id], b->c.level,
270                                 buf1, buf2))
271                         return DROP_THIS_NODE;
272
273                 if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
274                                 "btree node with incorrect min_key at btree %s level %u:\n"
275                                 "  prev %s\n"
276                                 "  node %s",
277                                 bch2_btree_ids[b->c.btree_id], b->c.level,
278                                 buf1, buf2))
279                         ret = set_node_min(c, cur, expected_start);
280         }
281 fsck_err:
282         return ret;
283 }
284
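/* Check that the last child's max_key matches its parent's max_key: */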
285 static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
286                                  struct btree *child)
287 {
288         char buf1[200], buf2[200];
289         int ret = 0;
290
291         if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
292                         "btree node with incorrect max_key at btree %s level %u:\n"
293                         "  %s\n"
294                         "  expected %s",
295                         bch2_btree_ids[b->c.btree_id], b->c.level,
296                         (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&child->key)), buf1),
297                         (bch2_bpos_to_text(&PBUF(buf2), b->key.k.p), buf2))) {
298                 ret = set_node_max(c, child, b->key.k.p);
299                 if (ret)
300                         return ret;
301         }
302 fsck_err:
303         return ret;
304 }
305
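/*
 * Walk an interior node's children: evict unreadable nodes, repair the
 * boundaries between siblings, then recurse into each child. Returns
 * DROP_THIS_NODE if every child ended up dropped, so the caller can drop this
 * node as well.
 */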
306 static int bch2_btree_repair_topology_recurse(struct bch_fs *c, struct btree *b)
307 {
308         struct btree_and_journal_iter iter;
309         struct bkey_s_c k;
310         struct bkey_buf prev_k, cur_k;
311         struct btree *prev = NULL, *cur = NULL;
312         bool have_child, dropped_children = false;
313         char buf[200];
314         int ret = 0;
315
316         if (!b->c.level)
317                 return 0;
318 again:
319         prev = NULL;
320         have_child = dropped_children = false;
321         bch2_bkey_buf_init(&prev_k);
322         bch2_bkey_buf_init(&cur_k);
323         bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
324
325         while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
326                 BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
327                 BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
328
329                 bch2_btree_and_journal_iter_advance(&iter);
330                 bch2_bkey_buf_reassemble(&cur_k, c, k);
331
332                 cur = bch2_btree_node_get_noiter(c, cur_k.k,
333                                         b->c.btree_id, b->c.level - 1,
334                                         false);
335                 ret = PTR_ERR_OR_ZERO(cur);
336
337                 if (mustfix_fsck_err_on(ret == -EIO, c,
338                                 "Unreadable btree node at btree %s level %u:\n"
339                                 "  %s",
340                                 bch2_btree_ids[b->c.btree_id],
341                                 b->c.level - 1,
342                                 (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur_k.k)), buf))) {
343                         bch2_btree_node_evict(c, cur_k.k);
344                         ret = bch2_journal_key_delete(c, b->c.btree_id,
345                                                       b->c.level, cur_k.k->k.p);
346                         if (ret)
347                                 break;
348                         continue;
349                 }
350
351                 if (ret) {
352                         bch_err(c, "%s: error %i getting btree node",
353                                 __func__, ret);
354                         break;
355                 }
356
357                 ret = btree_repair_node_boundaries(c, b, prev, cur);
358
359                 if (ret == DROP_THIS_NODE) {
360                         six_unlock_read(&cur->c.lock);
361                         bch2_btree_node_evict(c, cur_k.k);
362                         ret = bch2_journal_key_delete(c, b->c.btree_id,
363                                                       b->c.level, cur_k.k->k.p);
364                         if (ret)
365                                 break;
366                         continue;
367                 }
368
369                 if (prev)
370                         six_unlock_read(&prev->c.lock);
371                 prev = NULL;
372
373                 if (ret == DROP_PREV_NODE) {
374                         bch2_btree_node_evict(c, prev_k.k);
375                         ret = bch2_journal_key_delete(c, b->c.btree_id,
376                                                       b->c.level, prev_k.k->k.p);
377                         if (ret)
378                                 break;
379
380                         bch2_btree_and_journal_iter_exit(&iter);
381                         bch2_bkey_buf_exit(&prev_k, c);
382                         bch2_bkey_buf_exit(&cur_k, c);
383                         goto again;
384                 } else if (ret)
385                         break;
386
387                 prev = cur;
388                 cur = NULL;
389                 bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
390         }
391
392         if (!ret && !IS_ERR_OR_NULL(prev)) {
393                 BUG_ON(cur);
394                 ret = btree_repair_node_end(c, b, prev);
395         }
396
397         if (!IS_ERR_OR_NULL(prev))
398                 six_unlock_read(&prev->c.lock);
399         prev = NULL;
400         if (!IS_ERR_OR_NULL(cur))
401                 six_unlock_read(&cur->c.lock);
402         cur = NULL;
403
404         if (ret)
405                 goto err;
406
407         bch2_btree_and_journal_iter_exit(&iter);
408         bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
409
410         while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
411                 bch2_bkey_buf_reassemble(&cur_k, c, k);
412                 bch2_btree_and_journal_iter_advance(&iter);
413
414                 cur = bch2_btree_node_get_noiter(c, cur_k.k,
415                                         b->c.btree_id, b->c.level - 1,
416                                         false);
417                 ret = PTR_ERR_OR_ZERO(cur);
418
419                 if (ret) {
420                         bch_err(c, "%s: error %i getting btree node",
421                                 __func__, ret);
422                         goto err;
423                 }
424
425                 ret = bch2_btree_repair_topology_recurse(c, cur);
426                 six_unlock_read(&cur->c.lock);
427                 cur = NULL;
428
429                 if (ret == DROP_THIS_NODE) {
430                         bch2_btree_node_evict(c, cur_k.k);
431                         ret = bch2_journal_key_delete(c, b->c.btree_id,
432                                                       b->c.level, cur_k.k->k.p);
433                         dropped_children = true;
434                 }
435
436                 if (ret)
437                         goto err;
438
439                 have_child = true;
440         }
441
442         if (mustfix_fsck_err_on(!have_child, c,
443                         "empty interior btree node at btree %s level %u\n"
444                         "  %s",
445                         bch2_btree_ids[b->c.btree_id],
446                         b->c.level,
447                         (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key)), buf)))
448                 ret = DROP_THIS_NODE;
449 err:
450 fsck_err:
451         if (!IS_ERR_OR_NULL(prev))
452                 six_unlock_read(&prev->c.lock);
453         if (!IS_ERR_OR_NULL(cur))
454                 six_unlock_read(&cur->c.lock);
455
456         bch2_btree_and_journal_iter_exit(&iter);
457         bch2_bkey_buf_exit(&prev_k, c);
458         bch2_bkey_buf_exit(&cur_k, c);
459
460         if (!ret && dropped_children)
461                 goto again;
462
463         return ret;
464 }
465
466 static int bch2_repair_topology(struct bch_fs *c)
467 {
468         struct btree *b;
469         unsigned i;
470         int ret = 0;
471
472         for (i = 0; i < BTREE_ID_NR && !ret; i++) {
473                 b = c->btree_roots[i].b;
474                 if (btree_node_fake(b))
475                         continue;
476
477                 six_lock_read(&b->c.lock, NULL, NULL);
478                 ret = bch2_btree_repair_topology_recurse(c, b);
479                 six_unlock_read(&b->c.lock);
480
481                 if (ret == DROP_THIS_NODE) {
482                         bch_err(c, "empty btree root - repair unimplemented");
483                         ret = FSCK_ERR_EXIT;
484                 }
485         }
486
487         return ret;
488 }
489
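/*
 * Cross-check a key's pointers against the in-memory bucket marks: where they
 * disagree, either repair the bucket marks directly or, if the pointer itself
 * is bad, rewrite the key via the journal keys with the bad pointers dropped
 * or their gens corrected.
 */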
490 static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
491                                unsigned level, bool is_root,
492                                struct bkey_s_c *k)
493 {
494         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
495         const union bch_extent_entry *entry;
496         struct extent_ptr_decoded p = { 0 };
497         bool do_update = false;
498         char buf[200];
499         int ret = 0;
500
501         /*
502          * XXX
503          * use check_bucket_ref here
504          */
505         bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
506                 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
507                 struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
508                 struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
509                 enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
510
511                 if (fsck_err_on(!g->gen_valid, c,
512                                 "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
513                                 "while marking %s",
514                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
515                                 bch2_data_types[ptr_data_type(k->k, &p.ptr)],
516                                 p.ptr.gen,
517                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
518                         if (!p.ptr.cached) {
519                                 g2->_mark.gen   = g->_mark.gen          = p.ptr.gen;
520                                 g2->gen_valid   = g->gen_valid          = true;
521                                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
522                         } else {
523                                 do_update = true;
524                         }
525                 }
526
527                 if (fsck_err_on(data_type == BCH_DATA_btree &&
528                                 g->mark.gen != p.ptr.gen, c,
529                                 "bucket %u:%zu data type %s has metadata but wrong gen: %u != %u\n"
530                                 "while marking %s",
531                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
532                                 bch2_data_types[ptr_data_type(k->k, &p.ptr)],
533                                 p.ptr.gen, g->mark.gen,
534                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
535                         g2->_mark.data_type     = g->_mark.data_type    = data_type;
536                         g2->gen_valid           = g->gen_valid          = true;
537                         set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
538                 }
539
540                 if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
541                                 "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
542                                 "while marking %s",
543                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
544                                 bch2_data_types[ptr_data_type(k->k, &p.ptr)],
545                                 p.ptr.gen, g->mark.gen,
546                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
547                         if (!p.ptr.cached) {
548                                 g2->_mark.gen   = g->_mark.gen  = p.ptr.gen;
549                                 g2->gen_valid   = g->gen_valid  = true;
550                                 g2->_mark.data_type             = 0;
551                                 g2->_mark.dirty_sectors         = 0;
552                                 g2->_mark.cached_sectors        = 0;
553                                 set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
554                                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
555                         } else {
556                                 do_update = true;
557                         }
558                 }
559
560                 if (fsck_err_on(gen_cmp(g->mark.gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
561                                 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
562                                 "while marking %s",
563                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->mark.gen,
564                                 bch2_data_types[ptr_data_type(k->k, &p.ptr)],
565                                 p.ptr.gen,
566                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
567                         do_update = true;
568
569                 if (fsck_err_on(!p.ptr.cached &&
570                                 gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
571                                 "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
572                                 "while marking %s",
573                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
574                                 bch2_data_types[ptr_data_type(k->k, &p.ptr)],
575                                 p.ptr.gen, g->mark.gen,
576                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
577                         do_update = true;
578
579                 if (p.ptr.gen != g->mark.gen)
580                         continue;
581
582                 if (fsck_err_on(g->mark.data_type &&
583                                 g->mark.data_type != data_type, c,
584                                 "bucket %u:%zu different types of data in same bucket: %s, %s\n"
585                                 "while marking %s",
586                                 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
587                                 bch2_data_types[g->mark.data_type],
588                                 bch2_data_types[data_type],
589                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
590                         if (data_type == BCH_DATA_btree) {
591                                 g2->_mark.data_type     = g->_mark.data_type    = data_type;
592                                 g2->gen_valid           = g->gen_valid          = true;
593                                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
594                         } else {
595                                 do_update = true;
596                         }
597                 }
598
599                 if (p.has_ec) {
600                         struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);
601
602                         if (fsck_err_on(!m || !m->alive, c,
603                                         "pointer to nonexistent stripe %llu\n"
604                                         "while marking %s",
605                                         (u64) p.ec.idx,
606                                         (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
607                                 do_update = true;
608
609                         if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
610                                         "pointer does not match stripe %llu\n"
611                                         "while marking %s",
612                                         (u64) p.ec.idx,
613                                         (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
614                                 do_update = true;
615                 }
616         }
617
618         if (do_update) {
619                 struct bkey_ptrs ptrs;
620                 union bch_extent_entry *entry;
621                 struct bch_extent_ptr *ptr;
622                 struct bkey_i *new;
623
624                 if (is_root) {
625                         bch_err(c, "cannot update btree roots yet");
626                         return -EINVAL;
627                 }
628
629                 new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
630                 if (!new) {
631                         bch_err(c, "%s: error allocating new key", __func__);
632                         return -ENOMEM;
633                 }
634
635                 bkey_reassemble(new, *k);
636
637                 if (level) {
638                         /*
639                          * We don't want to drop btree node pointers - if the
640                          * btree node isn't there anymore, the read path will
641                          * sort it out:
642                          */
643                         ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
644                         bkey_for_each_ptr(ptrs, ptr) {
645                                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
646                                 struct bucket *g = PTR_BUCKET(ca, ptr, true);
647
648                                 ptr->gen = g->mark.gen;
649                         }
650                 } else {
651                         bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
652                                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
653                                 struct bucket *g = PTR_BUCKET(ca, ptr, true);
654                                 enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
655
656                                 (ptr->cached &&
657                                  (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
658                                 (!ptr->cached &&
659                                  gen_cmp(ptr->gen, g->mark.gen) < 0) ||
660                                 gen_cmp(g->mark.gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
661                                 (g->mark.data_type &&
662                                  g->mark.data_type != data_type);
663                         }));
664 again:
665                         ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
666                         bkey_extent_entry_for_each(ptrs, entry) {
667                                 if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
668                                         struct stripe *m = genradix_ptr(&c->stripes[true],
669                                                                         entry->stripe_ptr.idx);
670                                         union bch_extent_entry *next_ptr;
671
672                                         bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
673                                                 if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
674                                                         goto found;
675                                         next_ptr = NULL;
676 found:
677                                         if (!next_ptr) {
678                                                 bch_err(c, "aieee, found stripe ptr with no data ptr");
679                                                 continue;
680                                         }
681
682                                         if (!m || !m->alive ||
683                                             !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
684                                                                        &next_ptr->ptr,
685                                                                        m->sectors)) {
686                                                 bch2_bkey_extent_entry_drop(new, entry);
687                                                 goto again;
688                                         }
689                                 }
690                         }
691                 }
692
693                 ret = bch2_journal_key_insert(c, btree_id, level, new);
694                 if (ret)
695                         kfree(new);
696                 else
697                         *k = bkey_i_to_s_c(new);
698         }
699 fsck_err:
700         return ret;
701 }
702
703 /* marking of btree keys/nodes: */
704
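/*
 * Mark a single key: in the initial pass this also repairs pointers, checks
 * the key's version number and ensures a replicas entry exists; it then
 * updates each bucket's oldest_gen, tracks the most stale pointer seen via
 * @max_stale, and calls bch2_mark_key() with BTREE_TRIGGER_GC set.
 */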
705 static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
706                             unsigned level, bool is_root,
707                             struct bkey_s_c *k,
708                             u8 *max_stale, bool initial)
709 {
710         struct bch_fs *c = trans->c;
711         struct bkey_ptrs_c ptrs;
712         const struct bch_extent_ptr *ptr;
713         unsigned flags =
714                 BTREE_TRIGGER_GC|
715                 (initial ? BTREE_TRIGGER_NOATOMIC : 0);
716         char buf[200];
717         int ret = 0;
718
719         if (initial) {
720                 BUG_ON(bch2_journal_seq_verify &&
721                        k->k->version.lo > journal_cur_seq(&c->journal));
722
723                 ret = bch2_check_fix_ptrs(c, btree_id, level, is_root, k);
724                 if (ret)
725                         goto err;
726
727                 if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
728                                 "key version number higher than recorded: %llu > %llu",
729                                 k->k->version.lo,
730                                 atomic64_read(&c->key_version)))
731                         atomic64_set(&c->key_version, k->k->version.lo);
732
733                 if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
734                     fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c,
735                                 "superblock not marked as containing replicas\n"
736                                 "  while marking %s",
737                                 (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
738                         ret = bch2_mark_bkey_replicas(c, *k);
739                         if (ret) {
740                                 bch_err(c, "error marking bkey replicas: %i", ret);
741                                 goto err;
742                         }
743                 }
744         }
745
746         ptrs = bch2_bkey_ptrs_c(*k);
747         bkey_for_each_ptr(ptrs, ptr) {
748                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
749                 struct bucket *g = PTR_BUCKET(ca, ptr, true);
750
751                 if (gen_after(g->oldest_gen, ptr->gen))
752                         g->oldest_gen = ptr->gen;
753
754                 *max_stale = max(*max_stale, ptr_stale(ca, ptr));
755         }
756
757         ret = bch2_mark_key(trans, *k, flags);
758 fsck_err:
759 err:
760         if (ret)
761                 bch_err(c, "%s: ret %i", __func__, ret);
762         return ret;
763 }
764
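/* Mark every key in a btree node, checking topology for interior nodes: */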
765 static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, u8 *max_stale,
766                               bool initial)
767 {
768         struct bch_fs *c = trans->c;
769         struct btree_node_iter iter;
770         struct bkey unpacked;
771         struct bkey_s_c k;
772         struct bkey_buf prev, cur;
773         int ret = 0;
774
775         *max_stale = 0;
776
777         if (!btree_node_type_needs_gc(btree_node_type(b)))
778                 return 0;
779
780         bch2_btree_node_iter_init_from_start(&iter, b);
781         bch2_bkey_buf_init(&prev);
782         bch2_bkey_buf_init(&cur);
783         bkey_init(&prev.k->k);
784
785         while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
786                 ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
787                                        &k, max_stale, initial);
788                 if (ret)
789                         break;
790
791                 bch2_btree_node_iter_advance(&iter, b);
792
793                 if (b->c.level) {
794                         bch2_bkey_buf_reassemble(&cur, c, k);
795
796                         ret = bch2_gc_check_topology(c, b, &prev, cur,
797                                         bch2_btree_node_iter_end(&iter));
798                         if (ret)
799                                 break;
800                 }
801         }
802
803         bch2_bkey_buf_exit(&cur, c);
804         bch2_bkey_buf_exit(&prev, c);
805         return ret;
806 }
807
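/*
 * Walk one btree with the normal btree iterator, marking each node; at
 * runtime (!initial), nodes with too many stale pointers are also rewritten.
 */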
808 static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
809                          bool initial, bool metadata_only)
810 {
811         struct bch_fs *c = trans->c;
812         struct btree_iter iter;
813         struct btree *b;
814         unsigned depth = metadata_only                  ? 1
815                 : bch2_expensive_debug_checks           ? 0
816                 : !btree_node_type_needs_gc(btree_id)   ? 1
817                 : 0;
818         u8 max_stale = 0;
819         int ret = 0;
820
821         gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
822
823         __for_each_btree_node(trans, iter, btree_id, POS_MIN,
824                               0, depth, BTREE_ITER_PREFETCH, b, ret) {
825                 bch2_verify_btree_nr_keys(b);
826
827                 gc_pos_set(c, gc_pos_btree_node(b));
828
829                 ret = btree_gc_mark_node(trans, b, &max_stale, initial);
830                 if (ret)
831                         break;
832
833                 if (!initial) {
834                         if (max_stale > 64)
835                                 bch2_btree_node_rewrite(trans, &iter, b,
836                                                 BTREE_INSERT_NOWAIT|
837                                                 BTREE_INSERT_GC_LOCK_HELD);
838                         else if (!bch2_btree_gc_rewrite_disabled &&
839                                  (bch2_btree_gc_always_rewrite || max_stale > 16))
840                                 bch2_btree_node_rewrite(trans, &iter,
841                                                 b, BTREE_INSERT_NOWAIT|
842                                                 BTREE_INSERT_GC_LOCK_HELD);
843                 }
844         }
845         bch2_trans_iter_exit(trans, &iter);
846
847         if (ret)
848                 return ret;
849
850         mutex_lock(&c->btree_root_lock);
851         b = c->btree_roots[btree_id].b;
852         if (!btree_node_fake(b)) {
853                 struct bkey_s_c k = bkey_i_to_s_c(&b->key);
854
855                 ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
856                                        &k, &max_stale, initial);
857         }
858         gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
859         mutex_unlock(&c->btree_root_lock);
860
861         return ret;
862 }
863
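/*
 * Initial GC recurses with btree_and_journal_iter so that keys which still
 * only exist in the journal (not yet replayed into the btree) are marked too.
 */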
864 static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
865                                       unsigned target_depth)
866 {
867         struct bch_fs *c = trans->c;
868         struct btree_and_journal_iter iter;
869         struct bkey_s_c k;
870         struct bkey_buf cur, prev;
871         u8 max_stale = 0;
872         char buf[200];
873         int ret = 0;
874
875         bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
876         bch2_bkey_buf_init(&prev);
877         bch2_bkey_buf_init(&cur);
878         bkey_init(&prev.k->k);
879
880         while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
881                 BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
882                 BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
883
884                 ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
885                                        &k, &max_stale, true);
886                 if (ret) {
887                         bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
888                         goto fsck_err;
889                 }
890
891                 if (b->c.level) {
892                         bch2_bkey_buf_reassemble(&cur, c, k);
893                         k = bkey_i_to_s_c(cur.k);
894
895                         bch2_btree_and_journal_iter_advance(&iter);
896
897                         ret = bch2_gc_check_topology(c, b,
898                                         &prev, cur,
899                                         !bch2_btree_and_journal_iter_peek(&iter).k);
900                         if (ret)
901                                 goto fsck_err;
902                 } else {
903                         bch2_btree_and_journal_iter_advance(&iter);
904                 }
905         }
906
907         if (b->c.level > target_depth) {
908                 bch2_btree_and_journal_iter_exit(&iter);
909                 bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
910
911                 while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
912                         struct btree *child;
913
914                         bch2_bkey_buf_reassemble(&cur, c, k);
915                         bch2_btree_and_journal_iter_advance(&iter);
916
917                         child = bch2_btree_node_get_noiter(c, cur.k,
918                                                 b->c.btree_id, b->c.level - 1,
919                                                 false);
920                         ret = PTR_ERR_OR_ZERO(child);
921
922                         if (ret == -EIO) {
923                                 bch2_topology_error(c);
924
925                                 if (__fsck_err(c,
926                                           FSCK_CAN_FIX|
927                                           FSCK_CAN_IGNORE|
928                                           FSCK_NO_RATELIMIT,
929                                           "Unreadable btree node at btree %s level %u:\n"
930                                           "  %s",
931                                           bch2_btree_ids[b->c.btree_id],
932                                           b->c.level - 1,
933                                           (bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(cur.k)), buf)) &&
934                                     !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
935                                         ret = FSCK_ERR_START_TOPOLOGY_REPAIR;
936                                         bch_info(c, "Halting mark and sweep to start topology repair pass");
937                                         goto fsck_err;
938                                 } else {
939                                         /* Continue marking when opted to not
940                                          * fix the error: */
941                                         ret = 0;
942                                         set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
943                                         continue;
944                                 }
945                         } else if (ret) {
946                                 bch_err(c, "%s: error %i getting btree node",
947                                         __func__, ret);
948                                 break;
949                         }
950
951                         ret = bch2_gc_btree_init_recurse(trans, child,
952                                                          target_depth);
953                         six_unlock_read(&child->c.lock);
954
955                         if (ret)
956                                 break;
957                 }
958         }
959 fsck_err:
960         bch2_bkey_buf_exit(&cur, c);
961         bch2_bkey_buf_exit(&prev, c);
962         bch2_btree_and_journal_iter_exit(&iter);
963         return ret;
964 }
965
966 static int bch2_gc_btree_init(struct btree_trans *trans,
967                               enum btree_id btree_id,
968                               bool metadata_only)
969 {
970         struct bch_fs *c = trans->c;
971         struct btree *b;
972         unsigned target_depth = metadata_only           ? 1
973                 : bch2_expensive_debug_checks           ? 0
974                 : !btree_node_type_needs_gc(btree_id)   ? 1
975                 : 0;
976         u8 max_stale = 0;
977         char buf[100];
978         int ret = 0;
979
980         b = c->btree_roots[btree_id].b;
981
982         if (btree_node_fake(b))
983                 return 0;
984
985         six_lock_read(&b->c.lock, NULL, NULL);
986         if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
987                         "btree root with incorrect min_key: %s",
988                         (bch2_bpos_to_text(&PBUF(buf), b->data->min_key), buf))) {
989                 bch_err(c, "repair unimplemented");
990                 ret = FSCK_ERR_EXIT;
991                 goto fsck_err;
992         }
993
994         if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
995                         "btree root with incorrect max_key: %s",
996                         (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
997                 bch_err(c, "repair unimplemented");
998                 ret = FSCK_ERR_EXIT;
999                 goto fsck_err;
1000         }
1001
1002         if (b->c.level >= target_depth)
1003                 ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
1004
1005         if (!ret) {
1006                 struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1007
1008                 ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
1009                                        &k, &max_stale, true);
1010         }
1011 fsck_err:
1012         six_unlock_read(&b->c.lock);
1013
1014         if (ret < 0)
1015                 bch_err(c, "%s: ret %i", __func__, ret);
1016         return ret;
1017 }
1018
1019 static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
1020 {
1021         return  (int) btree_id_to_gc_phase(l) -
1022                 (int) btree_id_to_gc_phase(r);
1023 }
1024
1025 static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
1026 {
1027         struct btree_trans trans;
1028         enum btree_id ids[BTREE_ID_NR];
1029         unsigned i;
1030         int ret = 0;
1031
1032         bch2_trans_init(&trans, c, 0, 0);
1033
1034         for (i = 0; i < BTREE_ID_NR; i++)
1035                 ids[i] = i;
1036         bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
1037
1038         for (i = 0; i < BTREE_ID_NR && !ret; i++)
1039                 ret = initial
1040                         ? bch2_gc_btree_init(&trans, ids[i], metadata_only)
1041                         : bch2_gc_btree(&trans, ids[i], initial, metadata_only);
1042
1043         if (ret < 0)
1044                 bch_err(c, "%s: ret %i", __func__, ret);
1045
1046         bch2_trans_exit(&trans);
1047         return ret;
1048 }
1049
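/* Mark a range of sectors on @ca as @type, one bucket at a time: */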
1050 static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
1051                                   u64 start, u64 end,
1052                                   enum bch_data_type type,
1053                                   unsigned flags)
1054 {
1055         u64 b = sector_to_bucket(ca, start);
1056
1057         do {
1058                 unsigned sectors =
1059                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1060
1061                 bch2_mark_metadata_bucket(c, ca, b, type, sectors,
1062                                           gc_phase(GC_PHASE_SB), flags);
1063                 b++;
1064                 start += sectors;
1065         } while (start < end);
1066 }
1067
1068 void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
1069                               unsigned flags)
1070 {
1071         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1072         unsigned i;
1073         u64 b;
1074
1075         /*
1076          * This conditional is kind of gross, but we may be called from the
1077          * device add path, before the new device has actually been added to the
1078          * running filesystem:
1079          */
1080         if (c) {
1081                 lockdep_assert_held(&c->sb_lock);
1082                 percpu_down_read(&c->mark_lock);
1083         }
1084
1085         for (i = 0; i < layout->nr_superblocks; i++) {
1086                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1087
1088                 if (offset == BCH_SB_SECTOR)
1089                         mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
1090                                               BCH_DATA_sb, flags);
1091
1092                 mark_metadata_sectors(c, ca, offset,
1093                                       offset + (1 << layout->sb_max_size_bits),
1094                                       BCH_DATA_sb, flags);
1095         }
1096
1097         for (i = 0; i < ca->journal.nr; i++) {
1098                 b = ca->journal.buckets[i];
1099                 bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
1100                                           ca->mi.bucket_size,
1101                                           gc_phase(GC_PHASE_SB), flags);
1102         }
1103
1104         if (c)
1105                 percpu_up_read(&c->mark_lock);
1106 }
1107
1108 static void bch2_mark_superblocks(struct bch_fs *c)
1109 {
1110         struct bch_dev *ca;
1111         unsigned i;
1112
1113         mutex_lock(&c->sb_lock);
1114         gc_pos_set(c, gc_phase(GC_PHASE_SB));
1115
1116         for_each_online_member(ca, c, i)
1117                 bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
1118         mutex_unlock(&c->sb_lock);
1119 }
1120
1121 #if 0
1122 /* Also see bch2_pending_btree_node_free_insert_done() */
1123 static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
1124 {
1125         struct btree_update *as;
1126         struct pending_btree_node_free *d;
1127
1128         mutex_lock(&c->btree_interior_update_lock);
1129         gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
1130
1131         for_each_pending_btree_node_free(c, as, d)
1132                 if (d->index_update_done)
1133                         bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
1134
1135         mutex_unlock(&c->btree_interior_update_lock);
1136 }
1137 #endif
1138
1139 static void bch2_gc_free(struct bch_fs *c)
1140 {
1141         struct bch_dev *ca;
1142         unsigned i;
1143
1144         genradix_free(&c->stripes[1]);
1145
1146         for_each_member_device(ca, c, i) {
1147                 kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
1148                         sizeof(struct bucket_array) +
1149                         ca->mi.nbuckets * sizeof(struct bucket));
1150                 ca->buckets[1] = NULL;
1151
1152                 free_percpu(ca->usage_gc);
1153                 ca->usage_gc = NULL;
1154         }
1155
1156         free_percpu(c->usage_gc);
1157         c->usage_gc = NULL;
1158 }
1159
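/*
 * Compare the stripe, bucket and usage counters recomputed by GC against the
 * live copies, report mismatches where we expect them to agree, and copy the
 * GC results over the live state.
 */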
1160 static int bch2_gc_done(struct bch_fs *c,
1161                         bool initial, bool metadata_only)
1162 {
1163         struct bch_dev *ca = NULL;
1164         bool verify = !metadata_only && (!initial ||
1165                        (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
1166         unsigned i, dev;
1167         int ret = 0;
1168
1169 #define copy_field(_f, _msg, ...)                                       \
1170         if (dst->_f != src->_f) {                                       \
1171                 if (verify)                                             \
1172                         fsck_err(c, _msg ": got %llu, should be %llu"   \
1173                                 , ##__VA_ARGS__, dst->_f, src->_f);     \
1174                 dst->_f = src->_f;                                      \
1175                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
1176         }
1177 #define copy_stripe_field(_f, _msg, ...)                                \
1178         if (dst->_f != src->_f) {                                       \
1179                 if (verify)                                             \
1180                         fsck_err(c, "stripe %zu has wrong "_msg         \
1181                                 ": got %u, should be %u",               \
1182                                 iter.pos, ##__VA_ARGS__,                \
1183                                 dst->_f, src->_f);                      \
1184                 dst->_f = src->_f;                                      \
1185                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
1186         }
1187 #define copy_bucket_field(_f)                                           \
1188         if (dst->b[b].mark._f != src->b[b].mark._f) {                   \
1189                 if (verify)                                             \
1190                         fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f  \
1191                                 ": got %u, should be %u", dev, b,       \
1192                                 dst->b[b].mark.gen,                     \
1193                                 bch2_data_types[dst->b[b].mark.data_type],\
1194                                 dst->b[b].mark._f, src->b[b].mark._f);  \
1195                 dst->b[b]._mark._f = src->b[b].mark._f;                 \
1196                 set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);            \
1197         }
1198 #define copy_dev_field(_f, _msg, ...)                                   \
1199         copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
1200 #define copy_fs_field(_f, _msg, ...)                                    \
1201         copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
1202
1203         if (!metadata_only) {
1204                 struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
1205                 struct stripe *dst, *src;
1206
1207                 while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
1208                         dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);
1209
1210                         if (dst->alive          != src->alive ||
1211                             dst->sectors        != src->sectors ||
1212                             dst->algorithm      != src->algorithm ||
1213                             dst->nr_blocks      != src->nr_blocks ||
1214                             dst->nr_redundant   != src->nr_redundant) {
1215                                 bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
1216                                 ret = -EINVAL;
1217                                 goto fsck_err;
1218                         }
1219
1220                         for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
1221                                 copy_stripe_field(block_sectors[i],
1222                                                   "block_sectors[%u]", i);
1223
1224                         dst->blocks_nonempty = 0;
1225                         for (i = 0; i < dst->nr_blocks; i++)
1226                                 dst->blocks_nonempty += dst->block_sectors[i] != 0;
1227
1228                         genradix_iter_advance(&iter, &c->stripes[1]);
1229                 }
1230         }
1231
1232         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
1233                 bch2_fs_usage_acc_to_base(c, i);
1234
1235         for_each_member_device(ca, c, dev) {
1236                 struct bucket_array *dst = __bucket_array(ca, 0);
1237                 struct bucket_array *src = __bucket_array(ca, 1);
1238                 size_t b;
1239
1240                 for (b = 0; b < src->nbuckets; b++) {
1241                         copy_bucket_field(gen);
1242                         copy_bucket_field(data_type);
1243                         copy_bucket_field(stripe);
1244                         copy_bucket_field(dirty_sectors);
1245                         copy_bucket_field(cached_sectors);
1246
1247                         dst->b[b].oldest_gen = src->b[b].oldest_gen;
1248                 }
1249
1250                 {
1251                         struct bch_dev_usage *dst = ca->usage_base;
1252                         struct bch_dev_usage *src = (void *)
1253                                 bch2_acc_percpu_u64s((void *) ca->usage_gc,
1254                                                      dev_usage_u64s());
1255
1256                         copy_dev_field(buckets_ec,              "buckets_ec");
1257                         copy_dev_field(buckets_unavailable,     "buckets_unavailable");
1258
1259                         for (i = 0; i < BCH_DATA_NR; i++) {
1260                                 copy_dev_field(d[i].buckets,    "%s buckets", bch2_data_types[i]);
1261                                 copy_dev_field(d[i].sectors,    "%s sectors", bch2_data_types[i]);
1262                                 copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
1263                         }
1264                 }
1265         }
1266
1267         {
1268                 unsigned nr = fs_usage_u64s(c);
1269                 struct bch_fs_usage *dst = c->usage_base;
1270                 struct bch_fs_usage *src = (void *)
1271                         bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
1272
1273                 copy_fs_field(hidden,           "hidden");
1274                 copy_fs_field(btree,            "btree");
1275
1276                 if (!metadata_only) {
1277                         copy_fs_field(data,     "data");
1278                         copy_fs_field(cached,   "cached");
1279                         copy_fs_field(reserved, "reserved");
1280                         copy_fs_field(nr_inodes,"nr_inodes");
1281
1282                         for (i = 0; i < BCH_REPLICAS_MAX; i++)
1283                                 copy_fs_field(persistent_reserved[i],
1284                                               "persistent_reserved[%i]", i);
1285                 }
1286
1287                 for (i = 0; i < c->replicas.nr; i++) {
1288                         struct bch_replicas_entry *e =
1289                                 cpu_replicas_entry(&c->replicas, i);
1290                         char buf[80];
1291
1292                         if (metadata_only &&
1293                             (e->data_type == BCH_DATA_user ||
1294                              e->data_type == BCH_DATA_cached))
1295                                 continue;
1296
1297                         bch2_replicas_entry_to_text(&PBUF(buf), e);
1298
1299                         copy_fs_field(replicas[i], "%s", buf);
1300                 }
1301         }
1302
1303 #undef copy_fs_field
1304 #undef copy_dev_field
1305 #undef copy_bucket_field
1306 #undef copy_stripe_field
1307 #undef copy_field
1308 fsck_err:
1309         if (ca)
1310                 percpu_ref_put(&ca->ref);
1311         if (ret)
1312                 bch_err(c, "%s: ret %i", __func__, ret);
1313         return ret;
1314 }
1315
1316 static int bch2_gc_start(struct bch_fs *c,
1317                          bool metadata_only)
1318 {
1319         struct bch_dev *ca = NULL;
1320         unsigned i;
1321         int ret;
1322
1323         BUG_ON(c->usage_gc);
1324
1325         c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
1326                                          sizeof(u64), GFP_KERNEL);
1327         if (!c->usage_gc) {
1328                 bch_err(c, "error allocating c->usage_gc");
1329                 return -ENOMEM;
1330         }
1331
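             /* allocate gc's shadow state for each device: a second bucket array and per-cpu usage counters */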
1332         for_each_member_device(ca, c, i) {
1333                 BUG_ON(ca->buckets[1]);
1334                 BUG_ON(ca->usage_gc);
1335
1336                 ca->buckets[1] = kvpmalloc(sizeof(struct bucket_array) +
1337                                 ca->mi.nbuckets * sizeof(struct bucket),
1338                                 GFP_KERNEL|__GFP_ZERO);
1339                 if (!ca->buckets[1]) {
1340                         percpu_ref_put(&ca->ref);
1341                         bch_err(c, "error allocating ca->buckets[gc]");
1342                         return -ENOMEM;
1343                 }
1344
1345                 ca->usage_gc = alloc_percpu(struct bch_dev_usage);
1346                 if (!ca->usage_gc) {
1347                         bch_err(c, "error allocating ca->usage_gc");
1348                         percpu_ref_put(&ca->ref);
1349                         return -ENOMEM;
1350                 }
1351         }
1352
1353         ret = bch2_ec_mem_alloc(c, true);
1354         if (ret) {
1355                 bch_err(c, "error allocating ec gc mem");
1356                 return ret;
1357         }
1358
1359         percpu_down_write(&c->mark_lock);
1360
1361         /*
1362          * indicate to stripe code that we need to allocate for the gc stripes
1363          * radix tree, too
1364          */
1365         gc_pos_set(c, gc_phase(GC_PHASE_START));
1366
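             /* seed gc's copy of each bucket with the current gen; other counters start from zero and are recomputed */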
1367         for_each_member_device(ca, c, i) {
1368                 struct bucket_array *dst = __bucket_array(ca, 1);
1369                 struct bucket_array *src = __bucket_array(ca, 0);
1370                 size_t b;
1371
1372                 dst->first_bucket       = src->first_bucket;
1373                 dst->nbuckets           = src->nbuckets;
1374
1375                 for (b = 0; b < src->nbuckets; b++) {
1376                         struct bucket *d = &dst->b[b];
1377                         struct bucket *s = &src->b[b];
1378
1379                         d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
1380                         d->gen_valid = s->gen_valid;
1381
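                             /* in metadata-only gc, user/cached data isn't rewalked, so carry those marks over unchanged */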
1382                         if (metadata_only &&
1383                             (s->mark.data_type == BCH_DATA_user ||
1384                              s->mark.data_type == BCH_DATA_cached))
1385                                 d->_mark = s->mark;
1386                 }
1387         }
1388
1389         percpu_up_write(&c->mark_lock);
1390
1391         return 0;
1392 }
1393
1394 static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
1395                                            struct bkey_s_c k)
1396 {
1397         struct bch_fs *c = trans->c;
1398         struct reflink_gc *r;
1399         const __le64 *refcount = bkey_refcount_c(k);
1400         char buf[200];
1401         int ret = 0;
1402
1403         if (!refcount)
1404                 return 0;
1405
1406         r = genradix_ptr(&c->reflink_gc_table, c->reflink_gc_idx++);
1407         if (!r ||
1408             r->offset != k.k->p.offset ||
1409             r->size != k.k->size) {
1413                 bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
1414                 return -EINVAL;
1415         }
1416
1417         if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
1418                         "reflink key has wrong refcount:\n"
1419                         "  %s\n"
1420                         "  should be %u",
1421                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
1422                         r->refcount)) {
1423                 struct bkey_i *new;
1424
1425                 new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
1426                 if (!new) {
1427                         ret = -ENOMEM;
1428                         goto fsck_err;
1429                 }
1430
1431                 bkey_reassemble(new, k);
1432
1433                 if (!r->refcount) {
1434                         new->k.type = KEY_TYPE_deleted;
1435                         new->k.size = 0;
1436                 } else {
1437                         *bkey_refcount(new) = cpu_to_le64(r->refcount);
1438                 }
1439
1440                 ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new);
1441                 if (ret)
1442                         kfree(new);
1443         }
1444 fsck_err:
1445         return ret;
1446 }
1447
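     /*
      * Compare the refcounts gc computed against the reflink btree, rewriting
      * any keys whose refcount is stale (and deleting keys whose refcount
      * dropped to zero):
      */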
1448 static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
1449                                 bool metadata_only)
1450 {
1451         struct btree_trans trans;
1452         struct btree_iter iter;
1453         struct bkey_s_c k;
1454         struct reflink_gc *r;
1455         size_t idx = 0;
1456         char buf[200];
1457         int ret = 0;
1458
1459         if (metadata_only)
1460                 return 0;
1461
1462         bch2_trans_init(&trans, c, 0, 0);
1463
1464         if (initial) {
1465                 c->reflink_gc_idx = 0;
1466
1467                 ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
1468                                 bch2_gc_reflink_done_initial_fn);
1469                 goto out;
1470         }
1471
1472         for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
1473                            BTREE_ITER_PREFETCH, k, ret) {
1474                 const __le64 *refcount = bkey_refcount_c(k);
1475
1476                 if (!refcount)
1477                         continue;
1478
1479                 r = genradix_ptr(&c->reflink_gc_table, idx);
1480                 if (!r ||
1481                     r->offset != k.k->p.offset ||
1482                     r->size != k.k->size) {
1483                         bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
1484                         ret = -EINVAL;
1485                         break;
1486                 }
1487
1488                 if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
1489                                 "reflink key has wrong refcount:\n"
1490                                 "  %s\n"
1491                                 "  should be %u",
1492                                 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
1493                                 r->refcount)) {
1494                         struct bkey_i *new;
1495
1496                         new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
1497                         if (!new) {
1498                                 ret = -ENOMEM;
1499                                 break;
1500                         }
1501
1502                         bkey_reassemble(new, k);
1503
1504                         if (!r->refcount)
1505                                 new->k.type = KEY_TYPE_deleted;
1506                         else
1507                                 *bkey_refcount(new) = cpu_to_le64(r->refcount);
1508
1509                         ret = __bch2_trans_do(&trans, NULL, NULL, 0,
1510                                         __bch2_btree_insert(&trans, BTREE_ID_reflink, new));
1511                         kfree(new);
1512
1513                         if (ret)
1514                                 break;
1515                 }
1516         }
1517 fsck_err:
1518         bch2_trans_iter_exit(&trans, &iter);
1519 out:
1520         genradix_free(&c->reflink_gc_table);
1521         c->reflink_gc_nr = 0;
1522         bch2_trans_exit(&trans);
1523         return ret;
1524 }
1525
1526 static int bch2_gc_reflink_start_initial_fn(struct btree_trans *trans,
1527                                             struct bkey_s_c k)
1528 {
1529
1530         struct bch_fs *c = trans->c;
1531         struct reflink_gc *r;
1532         const __le64 *refcount = bkey_refcount_c(k);
1533
1534         if (!refcount)
1535                 return 0;
1536
1537         r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
1538                                GFP_KERNEL);
1539         if (!r)
1540                 return -ENOMEM;
1541
1542         r->offset       = k.k->p.offset;
1543         r->size         = k.k->size;
1544         r->refcount     = 0;
1545         return 0;
1546 }
1547
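     /*
      * Build gc's index of reflink keys, with refcounts zeroed; the refcounts
      * are recomputed as gc marks extents that point into the reflink btree:
      */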
1548 static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
1549                                  bool metadata_only)
1550 {
1551         struct btree_trans trans;
1552         struct btree_iter iter;
1553         struct bkey_s_c k;
1554         struct reflink_gc *r;
1555         int ret = 0;
1556
1557         if (metadata_only)
1558                 return 0;
1559
1560         bch2_trans_init(&trans, c, 0, 0);
1561         genradix_free(&c->reflink_gc_table);
1562         c->reflink_gc_nr = 0;
1563
1564         if (initial) {
1565                 ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
1566                                                 bch2_gc_reflink_start_initial_fn);
1567                 goto out;
1568         }
1569
1570         for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
1571                            BTREE_ITER_PREFETCH, k, ret) {
1572                 const __le64 *refcount = bkey_refcount_c(k);
1573
1574                 if (!refcount)
1575                         continue;
1576
1577                 r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
1578                                        GFP_KERNEL);
1579                 if (!r) {
1580                         ret = -ENOMEM;
1581                         break;
1582                 }
1583
1584                 r->offset       = k.k->p.offset;
1585                 r->size         = k.k->size;
1586                 r->refcount     = 0;
1587         }
1588         bch2_trans_iter_exit(&trans, &iter);
1589 out:
1590         bch2_trans_exit(&trans);
1591         return ret;
1592 }
1593
1594 /**
1595  * bch2_gc - walk _all_ references to buckets, and recompute them:
1596  *
1597  * Order matters here:
1598  *  - Concurrent GC relies on the fact that we have a total ordering for
1599  *    everything that GC walks - see gc_will_visit_node(),
1600  *    gc_will_visit_root()
1601  *
1602  *  - also, references move around in the course of index updates and
1603  *    various other crap: everything needs to agree on the ordering
1604  *    references are allowed to move around in - e.g., we're allowed to
1605  *    start with a reference owned by an open_bucket (the allocator) and
1606  *    move it to the btree, but not the reverse.
1607  *
1608  *    This is necessary to ensure that gc doesn't miss references that
1609  *    move around - if references move backwards in the ordering GC
1610  *    uses, GC could skip past them
1611  */
1612 int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
1613 {
1614         struct bch_dev *ca;
1615         u64 start_time = local_clock();
1616         unsigned i, iter = 0;
1617         int ret;
1618
1619         lockdep_assert_held(&c->state_lock);
1620         trace_gc_start(c);
1621
1622         down_write(&c->gc_lock);
1623
1624         /* flush interior btree updates: */
1625         closure_wait_event(&c->btree_interior_update_wait,
1626                            !bch2_btree_interior_updates_nr_pending(c));
1627 again:
1628         ret   = bch2_gc_start(c, metadata_only) ?:
1629                 bch2_gc_reflink_start(c, initial, metadata_only);
1630         if (ret)
1631                 goto out;
1632
1633         bch2_mark_superblocks(c);
1634
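             /* repair topology errors noticed so far before walking the btrees (initial gc only, and only if repair is allowed) */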
1635         if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags) &&
1636             !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
1637             c->opts.fix_errors != FSCK_OPT_NO) {
1638                 bch_info(c, "starting topology repair pass");
1639                 ret = bch2_repair_topology(c);
1640                 if (ret)
1641                         goto out;
1642                 bch_info(c, "topology repair pass done");
1643
1644                 set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
1645         }
1646
1647         ret = bch2_gc_btrees(c, initial, metadata_only);
1648
1649         if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR &&
1650             !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
1651             !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
1652                 set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
1653                 ret = 0;
1654         }
1655
1656         if (ret == FSCK_ERR_START_TOPOLOGY_REPAIR)
1657                 ret = FSCK_ERR_EXIT;
1658
1659         if (ret)
1660                 goto out;
1661
1662 #if 0
1663         bch2_mark_pending_btree_node_frees(c);
1664 #endif
1665         c->gc_count++;
1666
1667         if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
1668             (!iter && bch2_test_restart_gc)) {
1669                 /*
1670                  * XXX: make sure gens we fixed got saved
1671                  */
1672                 if (iter++ <= 2) {
1673                         bch_info(c, "Second GC pass needed, restarting:");
1674                         clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
1675                         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
1676
1677                         percpu_down_write(&c->mark_lock);
1678                         bch2_gc_free(c);
1679                         percpu_up_write(&c->mark_lock);
1680                         /* flush fsck errors, reset counters */
1681                         bch2_flush_fsck_errs(c);
1682
1683                         goto again;
1684                 }
1685
1686                 bch_info(c, "Unable to fix bucket gens, looping");
1687                 ret = -EINVAL;
1688         }
1689 out:
1690         if (!ret) {
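                     /* block the journal while gc's reflink and usage results are copied over the live state */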
1691                 bch2_journal_block(&c->journal);
1692
1693                 percpu_down_write(&c->mark_lock);
1694                 ret   = bch2_gc_reflink_done(c, initial, metadata_only) ?:
1695                         bch2_gc_done(c, initial, metadata_only);
1696
1697                 bch2_journal_unblock(&c->journal);
1698         } else {
1699                 percpu_down_write(&c->mark_lock);
1700         }
1701
1702         /* Indicates that gc is no longer in progress: */
1703         __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
1704
1705         bch2_gc_free(c);
1706         percpu_up_write(&c->mark_lock);
1707
1708         up_write(&c->gc_lock);
1709
1710         trace_gc_end(c);
1711         bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
1712
1713         /*
1714          * Wake up allocator in case it was waiting for buckets
1715          * because of not being able to inc gens
1716          */
1717         for_each_member_device(ca, c, i)
1718                 bch2_wake_allocator(ca);
1719
1720         /*
1721          * At startup, allocations can happen directly instead of via the
1722          * allocator thread - issue wakeup in case they blocked on gc_lock:
1723          */
1724         closure_wake_up(&c->freelist_wait);
1725         return ret;
1726 }
1727
1728 static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
1729 {
1730         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1731         const struct bch_extent_ptr *ptr;
1732
1733         percpu_down_read(&c->mark_lock);
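             /* first pass: if any pointer's bucket gen is well ahead of the pointer's gen, the key needs rewriting */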
1734         bkey_for_each_ptr(ptrs, ptr) {
1735                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1736                 struct bucket *g = PTR_BUCKET(ca, ptr, false);
1737
1738                 if (gen_after(g->mark.gen, ptr->gen) > 16) {
1739                         percpu_up_read(&c->mark_lock);
1740                         return true;
1741                 }
1742         }
1743
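             /* second pass: lower each bucket's gc_gen to the oldest pointer gen still referencing it */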
1744         bkey_for_each_ptr(ptrs, ptr) {
1745                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1746                 struct bucket *g = PTR_BUCKET(ca, ptr, false);
1747
1748                 if (gen_after(g->gc_gen, ptr->gen))
1749                         g->gc_gen = ptr->gen;
1750         }
1751         percpu_up_read(&c->mark_lock);
1752
1753         return false;
1754 }
1755
1756 /*
1757  * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
1758  * node pointers currently never have cached pointers that can become stale:
1759  */
1760 static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
1761 {
1762         struct btree_trans trans;
1763         struct btree_iter iter;
1764         struct bkey_s_c k;
1765         struct bkey_buf sk;
1766         int ret = 0, commit_err = 0;
1767
1768         bch2_bkey_buf_init(&sk);
1769         bch2_trans_init(&trans, c, 0, 0);
1770
1771         bch2_trans_iter_init(&trans, &iter, btree_id, POS_MIN,
1772                              BTREE_ITER_PREFETCH|
1773                              BTREE_ITER_NOT_EXTENTS|
1774                              BTREE_ITER_ALL_SNAPSHOTS);
1775
1776         while ((bch2_trans_begin(&trans),
1777                 k = bch2_btree_iter_peek(&iter)).k) {
1778                 ret = bkey_err(k);
1779
1780                 if (ret == -EINTR)
1781                         continue;
1782                 if (ret)
1783                         break;
1784
1785                 c->gc_gens_pos = iter.pos;
1786
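                     /* rewrite the key so stale cached pointers get dropped by bch2_extent_normalize() */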
1787                 if (gc_btree_gens_key(c, k) && !commit_err) {
1788                         bch2_bkey_buf_reassemble(&sk, c, k);
1789                         bch2_extent_normalize(c, bkey_i_to_s(sk.k));
1790
1791                         commit_err =
1792                                 bch2_trans_update(&trans, &iter, sk.k, 0) ?:
1793                                 bch2_trans_commit(&trans, NULL, NULL,
1794                                                        BTREE_INSERT_NOWAIT|
1795                                                        BTREE_INSERT_NOFAIL);
1796                         if (commit_err == -EINTR) {
1797                                 commit_err = 0;
1798                                 continue;
1799                         }
1800                 }
1801
1802                 bch2_btree_iter_advance(&iter);
1803         }
1804         bch2_trans_iter_exit(&trans, &iter);
1805
1806         bch2_trans_exit(&trans);
1807         bch2_bkey_buf_exit(&sk, c);
1808
1809         return ret;
1810 }
1811
1812 int bch2_gc_gens(struct bch_fs *c)
1813 {
1814         struct bch_dev *ca;
1815         struct bucket_array *buckets;
1816         struct bucket *g;
1817         unsigned i;
1818         int ret = 0;
1819
1820         /*
1821          * Ideally we would be using state_lock and not gc_lock here, but that
1822          * introduces a deadlock in the RO path - we currently take the state
1823          * lock at the start of going RO, thus the gc thread may get stuck:
1824          */
1825         down_read(&c->gc_lock);
1826
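             /* start each bucket's gc_gen at its current gen; the key walk below lowers it to the oldest referenced gen */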
1827         for_each_member_device(ca, c, i) {
1828                 down_read(&ca->bucket_lock);
1829                 buckets = bucket_array(ca);
1830
1831                 for_each_bucket(g, buckets)
1832                         g->gc_gen = g->mark.gen;
1833                 up_read(&ca->bucket_lock);
1834         }
1835
1836         for (i = 0; i < BTREE_ID_NR; i++)
1837                 if ((1 << i) & BTREE_ID_HAS_PTRS) {
1838                         c->gc_gens_btree = i;
1839                         c->gc_gens_pos = POS_MIN;
1840                         ret = bch2_gc_btree_gens(c, i);
1841                         if (ret) {
1842                                 bch_err(c, "error recalculating oldest_gen: %i", ret);
1843                                 goto err;
1844                         }
1845                 }
1846
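             /* all live pointers have been seen: publish the recomputed gc_gen as oldest_gen */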
1847         for_each_member_device(ca, c, i) {
1848                 down_read(&ca->bucket_lock);
1849                 buckets = bucket_array(ca);
1850
1851                 for_each_bucket(g, buckets)
1852                         g->oldest_gen = g->gc_gen;
1853                 up_read(&ca->bucket_lock);
1854         }
1855
1856         c->gc_gens_btree        = 0;
1857         c->gc_gens_pos          = POS_MIN;
1858
1859         c->gc_count++;
1860 err:
1861         up_read(&c->gc_lock);
1862         return ret;
1863 }
1864
1865 static int bch2_gc_thread(void *arg)
1866 {
1867         struct bch_fs *c = arg;
1868         struct io_clock *clock = &c->io_clock[WRITE];
1869         unsigned long last = atomic64_read(&clock->now);
1870         unsigned last_kick = atomic_read(&c->kick_gc);
1871         int ret;
1872
1873         set_freezable();
1874
1875         while (1) {
1876                 while (1) {
1877                         set_current_state(TASK_INTERRUPTIBLE);
1878
1879                         if (kthread_should_stop()) {
1880                                 __set_current_state(TASK_RUNNING);
1881                                 return 0;
1882                         }
1883
1884                         if (atomic_read(&c->kick_gc) != last_kick)
1885                                 break;
1886
1887                         if (c->btree_gc_periodic) {
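                                     /* in periodic mode, wake up after ~1/16th of capacity worth of writes (by the write IO clock) */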
1888                                 unsigned long next = last + c->capacity / 16;
1889
1890                                 if (atomic64_read(&clock->now) >= next)
1891                                         break;
1892
1893                                 bch2_io_clock_schedule_timeout(clock, next);
1894                         } else {
1895                                 schedule();
1896                         }
1897
1898                         try_to_freeze();
1899                 }
1900                 __set_current_state(TASK_RUNNING);
1901
1902                 last = atomic64_read(&clock->now);
1903                 last_kick = atomic_read(&c->kick_gc);
1904
1905                 /*
1906                  * Full gc is currently incompatible with btree key cache:
1907                  */
1908 #if 0
1909                 ret = bch2_gc(c, false, false);
1910 #else
1911                 ret = bch2_gc_gens(c);
1912 #endif
1913                 if (ret < 0)
1914                         bch_err(c, "btree gc failed: %i", ret);
1915
1916                 debug_check_no_locks_held();
1917         }
1918
1919         return 0;
1920 }
1921
1922 void bch2_gc_thread_stop(struct bch_fs *c)
1923 {
1924         struct task_struct *p;
1925
1926         p = c->gc_thread;
1927         c->gc_thread = NULL;
1928
1929         if (p) {
1930                 kthread_stop(p);
1931                 put_task_struct(p);
1932         }
1933 }
1934
1935 int bch2_gc_thread_start(struct bch_fs *c)
1936 {
1937         struct task_struct *p;
1938
1939         if (c->gc_thread)
1940                 return 0;
1941
1942         p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
1943         if (IS_ERR(p)) {
1944                 bch_err(c, "error creating gc thread: %li", PTR_ERR(p));
1945                 return PTR_ERR(p);
1946         }
1947
1948         get_task_struct(p);
1949         c->gc_thread = p;
1950         wake_up_process(p);
1951         return 0;
1952 }