libbcachefs/backpointers.c
Update bcachefs sources to 8fd009dd76 bcachefs: Rip out code for storing backpointers...
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "bbpos.h"
4 #include "alloc_background.h"
5 #include "backpointers.h"
6 #include "btree_cache.h"
7 #include "btree_update.h"
8 #include "btree_write_buffer.h"
9 #include "error.h"
10
11 #include <linux/mm.h>
12
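/*
 * Check whether the extent or btree node key @k has a non-cached pointer
 * whose computed (bucket, backpointer) pair matches @bucket and @bp exactly.
 */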
13 static bool extent_matches_bp(struct bch_fs *c,
14                               enum btree_id btree_id, unsigned level,
15                               struct bkey_s_c k,
16                               struct bpos bucket,
17                               struct bch_backpointer bp)
18 {
19         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
20         const union bch_extent_entry *entry;
21         struct extent_ptr_decoded p;
22
23         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
24                 struct bpos bucket2;
25                 struct bch_backpointer bp2;
26
27                 if (p.ptr.cached)
28                         continue;
29
30                 bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
31                                       &bucket2, &bp2);
32                 if (bpos_eq(bucket, bucket2) &&
33                     !memcmp(&bp, &bp2, sizeof(bp)))
34                         return true;
35         }
36
37         return false;
38 }
39
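/*
 * Validate a KEY_TYPE_backpointer key: the value must be at least
 * sizeof(struct bch_backpointer), and the key's position must round-trip
 * through bp_pos_to_bucket()/bucket_pos_to_bp() with the stored bucket_offset.
 */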
40 int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
41                              unsigned flags, struct printbuf *err)
42 {
43         struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
44         struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
45
46         if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
47                 prt_str(err, "incorrect value size");
48                 return -BCH_ERR_invalid_bkey;
49         }
50
51         if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
52                 prt_str(err, "backpointer at wrong pos");
53                 return -BCH_ERR_invalid_bkey;
54         }
55
56         return 0;
57 }
58
59 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
60 {
61         prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
62                bch2_btree_ids[bp->btree_id],
63                bp->level,
64                (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
65                (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
66                bp->bucket_len);
67         bch2_bpos_to_text(out, bp->pos);
68 }
69
70 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
71 {
72         prt_str(out, "bucket=");
73         bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
74         prt_str(out, " ");
75
76         bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
77 }
78
79 void bch2_backpointer_swab(struct bkey_s k)
80 {
81         struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
82
83         bp.v->bucket_offset     = swab32(bp.v->bucket_offset);
84         bp.v->bucket_len        = swab32(bp.v->bucket_len);
85         bch2_bpos_swab(&bp.v->pos);
86 }
87
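/*
 * Called when a backpointer update doesn't find what it expects: an existing
 * key where we're inserting, or a missing/mismatched key where we're
 * deleting.  Once the backpointers fsck pass has completed this is a fatal
 * inconsistency; before that, the insert case is logged and 0 is returned.
 */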
88 static noinline int backpointer_mod_err(struct btree_trans *trans,
89                                         struct bch_backpointer bp,
90                                         struct bkey_s_c bp_k,
91                                         struct bkey_s_c orig_k,
92                                         bool insert)
93 {
94         struct bch_fs *c = trans->c;
95         struct printbuf buf = PRINTBUF;
96
97         if (insert) {
98                 prt_printf(&buf, "existing backpointer found when inserting ");
99                 bch2_backpointer_to_text(&buf, &bp);
100                 prt_newline(&buf);
101                 printbuf_indent_add(&buf, 2);
102
103                 prt_printf(&buf, "found ");
104                 bch2_bkey_val_to_text(&buf, c, bp_k);
105                 prt_newline(&buf);
106
107                 prt_printf(&buf, "for ");
108                 bch2_bkey_val_to_text(&buf, c, orig_k);
109
110                 bch_err(c, "%s", buf.buf);
111         } else if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
112                 prt_printf(&buf, "backpointer not found when deleting");
113                 prt_newline(&buf);
114                 printbuf_indent_add(&buf, 2);
115
116                 prt_printf(&buf, "searching for ");
117                 bch2_backpointer_to_text(&buf, &bp);
118                 prt_newline(&buf);
119
120                 prt_printf(&buf, "got ");
121                 bch2_bkey_val_to_text(&buf, c, bp_k);
122                 prt_newline(&buf);
123
124                 prt_printf(&buf, "for ");
125                 bch2_bkey_val_to_text(&buf, c, orig_k);
126
127                 bch_err(c, "%s", buf.buf);
128         }
129
130         printbuf_exit(&buf);
131
132         if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
133                 bch2_inconsistent_error(c);
134                 return -EIO;
135         } else {
136                 return 0;
137         }
138 }
139
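/*
 * Insert or delete a backpointer with a regular btree update, bypassing the
 * btree write buffer: the target slot is checked against what we expect to
 * find there, and mismatches are reported via backpointer_mod_err().
 */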
140 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
141                                 struct bpos bucket,
142                                 struct bch_backpointer bp,
143                                 struct bkey_s_c orig_k,
144                                 bool insert)
145 {
146         struct bch_fs *c = trans->c;
147         struct bkey_i_backpointer *bp_k;
148         struct btree_iter bp_iter;
149         struct bkey_s_c k;
150         int ret;
151
152         bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
153         ret = PTR_ERR_OR_ZERO(bp_k);
154         if (ret)
155                 return ret;
156
157         bkey_backpointer_init(&bp_k->k_i);
158         bp_k->k.p = bucket_pos_to_bp(c, bucket, bp.bucket_offset);
159         bp_k->v = bp;
160
161         if (!insert) {
162                 bp_k->k.type = KEY_TYPE_deleted;
163                 set_bkey_val_u64s(&bp_k->k, 0);
164         }
165
166         bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
167                              bp_k->k.p,
168                              BTREE_ITER_INTENT|
169                              BTREE_ITER_SLOTS|
170                              BTREE_ITER_WITH_UPDATES);
171         k = bch2_btree_iter_peek_slot(&bp_iter);
172         ret = bkey_err(k);
173         if (ret)
174                 goto err;
175
176         if (insert
177             ? k.k->type
178             : (k.k->type != KEY_TYPE_backpointer ||
179                memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
180                 ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
181                 if (ret)
182                         goto err;
183         }
184
185         ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
186 err:
187         bch2_trans_iter_exit(trans, &bp_iter);
188         return ret;
189 }
190
191 /*
192  * Find the next backpointer >= *bp_pos:
193  */
194 int bch2_get_next_backpointer(struct btree_trans *trans,
195                               struct bpos bucket, int gen,
196                               struct bpos *bp_pos,
197                               struct bch_backpointer *bp,
198                               unsigned iter_flags)
199 {
200         struct bch_fs *c = trans->c;
201         struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
202         struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
203         struct bkey_s_c k;
204         int ret = 0;
205
206         if (bpos_ge(*bp_pos, bp_end_pos))
207                 goto done;
208
209         if (gen >= 0) {
210                 bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
211                                      bucket, BTREE_ITER_CACHED|iter_flags);
212                 k = bch2_btree_iter_peek_slot(&alloc_iter);
213                 ret = bkey_err(k);
214                 if (ret)
215                         goto out;
216
217                 if (k.k->type != KEY_TYPE_alloc_v4 ||
218                     bkey_s_c_to_alloc_v4(k).v->gen != gen)
219                         goto done;
220         }
221
222         *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));
223
224         for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
225                                      *bp_pos, iter_flags, k, ret) {
226                 if (bpos_ge(k.k->p, bp_end_pos))
227                         break;
228
229                 *bp_pos = k.k->p;
230                 *bp = *bkey_s_c_to_backpointer(k).v;
231                 goto out;
232         }
233 done:
234         *bp_pos = SPOS_MAX;
235 out:
236         bch2_trans_iter_exit(trans, &bp_iter);
237         bch2_trans_iter_exit(trans, &alloc_iter);
238         return ret;
239 }
240
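/*
 * Report a backpointer that doesn't match the key it points to.  With the
 * btree write buffer in use this is expected (updates may simply not have
 * been flushed yet), so nothing is reported; with the write buffer disabled
 * it's a genuine inconsistency.
 */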
241 static void backpointer_not_found(struct btree_trans *trans,
242                                   struct bpos bp_pos,
243                                   struct bch_backpointer bp,
244                                   struct bkey_s_c k,
245                                   const char *thing_it_points_to)
246 {
247         struct bch_fs *c = trans->c;
248         struct printbuf buf = PRINTBUF;
249         struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
250
251         if (likely(!bch2_backpointers_no_use_write_buffer))
252                 return;
253
254         prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
255                    thing_it_points_to);
256         prt_printf(&buf, "bucket: ");
257         bch2_bpos_to_text(&buf, bucket);
258         prt_printf(&buf, "\n  ");
259
260         prt_printf(&buf, "backpointer pos: ");
261         bch2_bpos_to_text(&buf, bp_pos);
262         prt_printf(&buf, "\n  ");
263
264         bch2_backpointer_to_text(&buf, &bp);
265         prt_printf(&buf, "\n  ");
266         bch2_bkey_val_to_text(&buf, c, k);
267         if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
268                 bch_err_ratelimited(c, "%s", buf.buf);
269         else
270                 bch2_trans_inconsistent(trans, "%s", buf.buf);
271
272         printbuf_exit(&buf);
273 }
274
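/*
 * Given a backpointer, return the extent (or btree node key) it points to:
 * the key is returned, with @iter pointing at it, if it still has a pointer
 * matching the backpointer; otherwise the iterator is exited and
 * bkey_s_c_null (or an error) is returned.
 */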
275 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
276                                          struct btree_iter *iter,
277                                          struct bpos bp_pos,
278                                          struct bch_backpointer bp,
279                                          unsigned iter_flags)
280 {
281         struct bch_fs *c = trans->c;
282         struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
283         struct bkey_s_c k;
284
285         bch2_trans_node_iter_init(trans, iter,
286                                   bp.btree_id,
287                                   bp.pos,
288                                   0,
289                                   min(bp.level, c->btree_roots[bp.btree_id].level),
290                                   iter_flags);
291         k = bch2_btree_iter_peek_slot(iter);
292         if (bkey_err(k)) {
293                 bch2_trans_iter_exit(trans, iter);
294                 return k;
295         }
296
297         if (bp.level == c->btree_roots[bp.btree_id].level + 1)
298                 k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);
299
300         if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
301                 return k;
302
303         bch2_trans_iter_exit(trans, iter);
304
305         if (unlikely(bch2_backpointers_no_use_write_buffer)) {
306                 if (bp.level) {
307                         struct btree *b;
308
309                         /*
310                          * If a backpointer for a btree node wasn't found, it may be
311                          * because it was overwritten by a new btree node that hasn't
312                          * been written out yet - backpointer_get_node() checks for
313                          * this:
314                          */
315                         b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
316                         if (!IS_ERR_OR_NULL(b))
317                                 return bkey_i_to_s_c(&b->key);
318
319                         bch2_trans_iter_exit(trans, iter);
320
321                         if (IS_ERR(b))
322                                 return bkey_s_c_err(PTR_ERR(b));
323                         return bkey_s_c_null;
324                 }
325
326                 backpointer_not_found(trans, bp_pos, bp, k, "extent");
327         }
328
329         return bkey_s_c_null;
330 }
331
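/*
 * Like bch2_backpointer_get_key(), but for backpointers to btree nodes
 * (bp.level != 0): returns the btree node the backpointer points to, NULL if
 * it no longer matches, or an ERR_PTR(), in particular
 * -BCH_ERR_backpointer_to_overwritten_btree_node if the node has been
 * overwritten by a new node that hasn't been written out yet.
 */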
332 struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
333                                         struct btree_iter *iter,
334                                         struct bpos bp_pos,
335                                         struct bch_backpointer bp)
336 {
337         struct bch_fs *c = trans->c;
338         struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
339         struct btree *b;
340
341         BUG_ON(!bp.level);
342
343         bch2_trans_node_iter_init(trans, iter,
344                                   bp.btree_id,
345                                   bp.pos,
346                                   0,
347                                   bp.level - 1,
348                                   0);
349         b = bch2_btree_iter_peek_node(iter);
350         if (IS_ERR(b))
351                 goto err;
352
353         if (b && extent_matches_bp(c, bp.btree_id, bp.level,
354                                    bkey_i_to_s_c(&b->key),
355                                    bucket, bp))
356                 return b;
357
358         if (b && btree_node_will_make_reachable(b)) {
359                 b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
360         } else if (b) {
361                 backpointer_not_found(trans, bp_pos, bp,
362                                       bkey_i_to_s_c(&b->key), "btree node");
363                 b = NULL;
364         }
365 err:
366         bch2_trans_iter_exit(trans, iter);
367         return b;
368 }
369
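/*
 * A single step of bch2_check_btree_backpointers(): delete this backpointer
 * if its device doesn't exist or its bucket has no alloc_v4 key.
 */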
370 static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
371                                         struct bkey_s_c k)
372 {
373         struct bch_fs *c = trans->c;
374         struct btree_iter alloc_iter = { NULL };
375         struct bch_dev *ca;
376         struct bkey_s_c alloc_k;
377         struct printbuf buf = PRINTBUF;
378         int ret = 0;
379
380         if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
381                         "backpointer for missing device:\n%s",
382                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
383                 ret = bch2_btree_delete_at(trans, bp_iter, 0);
384                 goto out;
385         }
386
387         ca = bch_dev_bkey_exists(c, k.k->p.inode);
388
389         bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
390                              bp_pos_to_bucket(c, k.k->p), 0);
391
392         alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
393         ret = bkey_err(alloc_k);
394         if (ret)
395                 goto out;
396
397         if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
398                         "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
399                         alloc_iter.pos.inode, alloc_iter.pos.offset,
400                         (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
401                 ret = bch2_btree_delete_at(trans, bp_iter, 0);
402                 goto out;
403         }
404 out:
405 fsck_err:
406         bch2_trans_iter_exit(trans, &alloc_iter);
407         printbuf_exit(&buf);
408         return ret;
409 }
410
411 /* verify that every backpointer has a corresponding alloc key */
412 int bch2_check_btree_backpointers(struct bch_fs *c)
413 {
414         struct btree_iter iter;
415         struct bkey_s_c k;
416
417         return bch2_trans_run(c,
418                 for_each_btree_key_commit(&trans, iter,
419                         BTREE_ID_backpointers, POS_MIN, 0, k,
420                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
421                   bch2_check_btree_backpointer(&trans, &iter, k)));
422 }
423
424 struct bpos_level {
425         unsigned        level;
426         struct bpos     pos;
427 };
428
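/*
 * Check that backpointer @bp for @orig_k exists in the backpointers btree.
 * Buckets outside [bucket_start, bucket_end] belong to another pass and are
 * skipped.  Because backpointer updates go through the btree write buffer, a
 * missing entry first triggers a write buffer flush and a transaction
 * restart; only if it's still missing after the flush is it reported and
 * recreated.
 */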
429 static int check_bp_exists(struct btree_trans *trans,
430                            struct bpos bucket,
431                            struct bch_backpointer bp,
432                            struct bkey_s_c orig_k,
433                            struct bpos bucket_start,
434                            struct bpos bucket_end,
435                            struct bpos_level *last_flushed)
436 {
437         struct bch_fs *c = trans->c;
438         struct btree_iter bp_iter = { NULL };
439         struct printbuf buf = PRINTBUF;
440         struct bkey_s_c bp_k;
441         int ret = 0;
442
443         if (bpos_lt(bucket, bucket_start) ||
444             bpos_gt(bucket, bucket_end))
445                 return 0;
446
447         if (!bch2_dev_bucket_exists(c, bucket))
448                 goto missing;
449
450         bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
451                              bucket_pos_to_bp(c, bucket, bp.bucket_offset),
452                              0);
453         bp_k = bch2_btree_iter_peek_slot(&bp_iter);
454         ret = bkey_err(bp_k);
455         if (ret)
456                 goto err;
457
458         if (bp_k.k->type != KEY_TYPE_backpointer ||
459             memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
460                 if (last_flushed->level != bp.level ||
461                     !bpos_eq(last_flushed->pos, orig_k.k->p)) {
462                         last_flushed->level = bp.level;
463                         last_flushed->pos = orig_k.k->p;
464
465                         ret = bch2_btree_write_buffer_flush_sync(trans) ?:
466                                 -BCH_ERR_transaction_restart_write_buffer_flush;
467                         goto out;
468                 }
469                 goto missing;
470         }
471 out:
472 err:
473 fsck_err:
474         bch2_trans_iter_exit(trans, &bp_iter);
475         printbuf_exit(&buf);
476         return ret;
477 missing:
478         prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
479                bch2_btree_ids[bp.btree_id], bp.level);
480         bch2_bkey_val_to_text(&buf, c, orig_k);
481         prt_printf(&buf, "\nbp pos ");
482         bch2_bpos_to_text(&buf, bp_iter.pos);
483
484         if (c->sb.version < bcachefs_metadata_version_backpointers ||
485             c->opts.reconstruct_alloc ||
486             fsck_err(c, "%s", buf.buf))
487                 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
488
489         goto out;
490 }
491
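/*
 * For the key at @iter's current position, check via check_bp_exists() that
 * every non-cached pointer has a matching backpointer.
 */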
492 static int check_extent_to_backpointers(struct btree_trans *trans,
493                                         struct btree_iter *iter,
494                                         struct bpos bucket_start,
495                                         struct bpos bucket_end,
496                                         struct bpos_level *last_flushed)
497 {
498         struct bch_fs *c = trans->c;
499         struct bkey_ptrs_c ptrs;
500         const union bch_extent_entry *entry;
501         struct extent_ptr_decoded p;
502         struct bkey_s_c k;
503         int ret;
504
505         k = bch2_btree_iter_peek_all_levels(iter);
506         ret = bkey_err(k);
507         if (ret)
508                 return ret;
509         if (!k.k)
510                 return 0;
511
512         ptrs = bch2_bkey_ptrs_c(k);
513         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
514                 struct bpos bucket_pos;
515                 struct bch_backpointer bp;
516
517                 if (p.ptr.cached)
518                         continue;
519
520                 bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
521                                       k, p, &bucket_pos, &bp);
522
523                 ret = check_bp_exists(trans, bucket_pos, bp, k,
524                                       bucket_start, bucket_end,
525                                       last_flushed);
526                 if (ret)
527                         return ret;
528         }
529
530         return 0;
531 }
532
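/*
 * Btree root keys live in c->btree_roots rather than in a btree, so they
 * aren't seen by the normal key iteration; check their pointers' backpointers
 * here.
 */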
533 static int check_btree_root_to_backpointers(struct btree_trans *trans,
534                                             enum btree_id btree_id,
535                                             struct bpos bucket_start,
536                                             struct bpos bucket_end,
537                                             struct bpos_level *last_flushed)
538 {
539         struct bch_fs *c = trans->c;
540         struct btree_iter iter;
541         struct btree *b;
542         struct bkey_s_c k;
543         struct bkey_ptrs_c ptrs;
544         struct extent_ptr_decoded p;
545         const union bch_extent_entry *entry;
546         int ret;
547
548         bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
549                                   c->btree_roots[btree_id].level, 0);
550         b = bch2_btree_iter_peek_node(&iter);
551         ret = PTR_ERR_OR_ZERO(b);
552         if (ret)
553                 goto err;
554
555         BUG_ON(b != btree_node_root(c, b));
556
557         k = bkey_i_to_s_c(&b->key);
558         ptrs = bch2_bkey_ptrs_c(k);
559         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
560                 struct bpos bucket_pos;
561                 struct bch_backpointer bp;
562
563                 if (p.ptr.cached)
564                         continue;
565
566                 bch2_extent_ptr_to_bp(c, iter.btree_id, b->c.level + 1,
567                                       k, p, &bucket_pos, &bp);
568
569                 ret = check_bp_exists(trans, bucket_pos, bp, k,
570                                       bucket_start, bucket_end,
571                                       last_flushed);
572                 if (ret)
573                         goto err;
574         }
575 err:
576         bch2_trans_iter_exit(trans, &iter);
577         return ret;
578 }
579
580 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
581 {
582         return (struct bbpos) {
583                 .btree  = bp.btree_id,
584                 .pos    = bp.pos,
585         };
586 }
587
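/*
 * Rough estimate of how many btree nodes fit in half of system memory; used
 * to size the in-memory chunks the fsck passes below operate on.
 */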
588 static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
589 {
590         struct sysinfo i;
591         u64 mem_bytes;
592
593         si_meminfo(&i);
594         mem_bytes = i.totalram * i.mem_unit;
595         return div_u64(mem_bytes >> 1, btree_bytes(c));
596 }
597
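/*
 * Starting from @start, walk the btrees selected by @btree_leaf_mask and
 * @btree_interior_mask and compute @end, the furthest position such that the
 * btree nodes in [start, end] fit within the btree_nodes_fit_in_ram() budget.
 */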
598 int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
599                                  unsigned btree_leaf_mask,
600                                  unsigned btree_interior_mask,
601                                  struct bbpos start, struct bbpos *end)
602 {
603         struct btree_iter iter;
604         struct bkey_s_c k;
605         size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
606         enum btree_id btree;
607         int ret = 0;
608
609         for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
610                 unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;
611
612                 if (!((1U << btree) & btree_leaf_mask) &&
613                     !((1U << btree) & btree_interior_mask))
614                         continue;
615
616                 bch2_trans_node_iter_init(trans, &iter, btree,
617                                           btree == start.btree ? start.pos : POS_MIN,
618                                           0, depth, 0);
619                 /*
620                  * for_each_btree_key_continue() doesn't check the return value
621                  * from bch2_btree_iter_advance(), which is needed when
622                  * iterating over interior nodes where we'll see keys at
623                  * SPOS_MAX:
624                  */
625                 do {
626                         k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
627                         ret = bkey_err(k);
628                         if (!k.k || ret)
629                                 break;
630
631                         --btree_nodes;
632                         if (!btree_nodes) {
633                                 *end = BBPOS(btree, k.k->p);
634                                 bch2_trans_iter_exit(trans, &iter);
635                                 return 0;
636                         }
637                 } while (bch2_btree_iter_advance(&iter));
638                 bch2_trans_iter_exit(trans, &iter);
639         }
640
641         *end = BBPOS_MAX;
642         return ret;
643 }
644
645 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
646                                                    struct bpos bucket_start,
647                                                    struct bpos bucket_end)
648 {
649         struct btree_iter iter;
650         enum btree_id btree_id;
651         struct bpos_level last_flushed = { UINT_MAX };
652         int ret = 0;
653
654         for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
655                 unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
656
657                 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
658                                           depth,
659                                           BTREE_ITER_ALL_LEVELS|
660                                           BTREE_ITER_PREFETCH);
661
662                 do {
663                         ret = commit_do(trans, NULL, NULL,
664                                         BTREE_INSERT_LAZY_RW|
665                                         BTREE_INSERT_NOFAIL,
666                                         check_extent_to_backpointers(trans, &iter,
667                                                                 bucket_start, bucket_end,
668                                                                 &last_flushed));
669                         if (ret)
670                                 break;
671                 } while (!bch2_btree_iter_advance(&iter));
672
673                 bch2_trans_iter_exit(trans, &iter);
674
675                 if (ret)
676                         break;
677
678                 ret = commit_do(trans, NULL, NULL,
679                                 BTREE_INSERT_LAZY_RW|
680                                 BTREE_INSERT_NOFAIL,
681                                 check_btree_root_to_backpointers(trans, btree_id,
682                                                         bucket_start, bucket_end,
683                                                         &last_flushed));
684                 if (ret)
685                         break;
686         }
687         return ret;
688 }
689
690 static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
691                                          struct bpos bucket)
692 {
693         return bch2_dev_exists2(c, bucket.inode)
694                 ? bucket_pos_to_bp(c, bucket, 0)
695                 : bucket;
696 }
697
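/*
 * Like bch2_get_btree_in_memory_pos(), but for the alloc and backpointers
 * btrees together: walk both in lockstep from @start and compute @end such
 * that the nodes covering [start, end] fit in memory.
 */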
698 int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
699                                  struct bpos start, struct bpos *end)
700 {
701         struct btree_iter alloc_iter;
702         struct btree_iter bp_iter;
703         struct bkey_s_c alloc_k, bp_k;
704         size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
705         bool alloc_end = false, bp_end = false;
706         int ret = 0;
707
708         bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
709                                   start, 0, 1, 0);
710         bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
711                                   bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
712         while (1) {
713                 alloc_k = !alloc_end
714                         ? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
715                         : bkey_s_c_null;
716                 bp_k = !bp_end
717                         ? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
718                         : bkey_s_c_null;
719
720                 ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
721                 if ((!alloc_k.k && !bp_k.k) || ret) {
722                         *end = SPOS_MAX;
723                         break;
724                 }
725
726                 --btree_nodes;
727                 if (!btree_nodes) {
728                         *end = alloc_k.k->p;
729                         break;
730                 }
731
732                 if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
733                     bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
734                         if (!bch2_btree_iter_advance(&alloc_iter))
735                                 alloc_end = true;
736                 } else {
737                         if (!bch2_btree_iter_advance(&bp_iter))
738                                 bp_end = true;
739                 }
740         }
741         bch2_trans_iter_exit(trans, &bp_iter);
742         bch2_trans_iter_exit(trans, &alloc_iter);
743         return ret;
744 }
745
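/*
 * Check that every extent and btree node pointer has a backpointer, running
 * in multiple passes if the alloc info doesn't fit in memory.
 */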
746 int bch2_check_extents_to_backpointers(struct bch_fs *c)
747 {
748         struct btree_trans trans;
749         struct bpos start = POS_MIN, end;
750         int ret;
751
752         bch2_trans_init(&trans, c, 0, 0);
753         while (1) {
754                 ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
755                 if (ret)
756                         break;
757
758                 if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
759                         bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
760                                     __func__, btree_nodes_fit_in_ram(c));
761
762                 if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
763                         struct printbuf buf = PRINTBUF;
764
765                         prt_str(&buf, "check_extents_to_backpointers(): ");
766                         bch2_bpos_to_text(&buf, start);
767                         prt_str(&buf, "-");
768                         bch2_bpos_to_text(&buf, end);
769
770                         bch_verbose(c, "%s", buf.buf);
771                         printbuf_exit(&buf);
772                 }
773
774                 ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
775                 if (ret || bpos_eq(end, SPOS_MAX))
776                         break;
777
778                 start = bpos_successor(end);
779         }
780         bch2_trans_exit(&trans);
781
782         return ret;
783 }
784
785 static int check_one_backpointer(struct btree_trans *trans,
786                                  struct bbpos start,
787                                  struct bbpos end,
788                                  struct bkey_s_c_backpointer bp,
789                                  struct bpos *last_flushed_pos)
790 {
791         struct bch_fs *c = trans->c;
792         struct btree_iter iter;
793         struct bbpos pos = bp_to_bbpos(*bp.v);
794         struct bkey_s_c k;
795         struct printbuf buf = PRINTBUF;
796         int ret;
797
798         if (bbpos_cmp(pos, start) < 0 ||
799             bbpos_cmp(pos, end) > 0)
800                 return 0;
801
802         k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
803         ret = bkey_err(k);
804         if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
805                 return 0;
806         if (ret)
807                 return ret;
808
809         if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
810                 *last_flushed_pos = bp.k->p;
811                 ret = bch2_btree_write_buffer_flush_sync(trans) ?:
812                         -BCH_ERR_transaction_restart_write_buffer_flush;
813                 goto out;
814         }
815
816         if (fsck_err_on(!k.k, c,
817                         "backpointer for missing extent\n  %s",
818                         (bch2_backpointer_k_to_text(&buf, c, bp.s_c), buf.buf))) {
819                 ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
                    goto out;
            }
820 out:
821 fsck_err:
822         bch2_trans_iter_exit(trans, &iter);
823         printbuf_exit(&buf);
824         return ret;
825 }
826
827 static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
828                                                    struct bbpos start,
829                                                    struct bbpos end)
830 {
831         struct btree_iter iter;
832         struct bkey_s_c k;
833         struct bpos last_flushed_pos = SPOS_MAX;
834
835         return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
836                                   POS_MIN, BTREE_ITER_PREFETCH, k,
837                                   NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
838                 check_one_backpointer(trans, start, end,
839                                       bkey_s_c_to_backpointer(k),
840                                       &last_flushed_pos));
841 }
842
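/*
 * The reverse check: every backpointer must point to a live extent or btree
 * node, running in multiple passes if the extents/reflink btrees don't fit in
 * memory.
 */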
843 int bch2_check_backpointers_to_extents(struct bch_fs *c)
844 {
845         struct btree_trans trans;
846         struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
847         int ret;
848
849         bch2_trans_init(&trans, c, 0, 0);
850         while (1) {
851                 ret = bch2_get_btree_in_memory_pos(&trans,
852                                                    (1U << BTREE_ID_extents)|
853                                                    (1U << BTREE_ID_reflink),
854                                                    ~0,
855                                                    start, &end);
856                 if (ret)
857                         break;
858
859                 if (!bbpos_cmp(start, BBPOS_MIN) &&
860                     bbpos_cmp(end, BBPOS_MAX))
861                         bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
862                                     __func__, btree_nodes_fit_in_ram(c));
863
864                 if (bbpos_cmp(start, BBPOS_MIN) ||
865                     bbpos_cmp(end, BBPOS_MAX)) {
866                         struct printbuf buf = PRINTBUF;
867
868                         prt_str(&buf, "check_backpointers_to_extents(): ");
869                         bch2_bbpos_to_text(&buf, start);
870                         prt_str(&buf, "-");
871                         bch2_bbpos_to_text(&buf, end);
872
873                         bch_verbose(c, "%s", buf.buf);
874                         printbuf_exit(&buf);
875                 }
876
877                 ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
878                 if (ret || !bbpos_cmp(end, BBPOS_MAX))
879                         break;
880
881                 start = bbpos_successor(end);
882         }
883         bch2_trans_exit(&trans);
884
885         return ret;
886 }