// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "error.h"

#include <linux/mm.h>

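/*
 * Check whether the extent or btree node key @k has a non-cached pointer
 * whose (bucket, backpointer) pair, as computed by bch2_extent_ptr_to_bp(),
 * exactly matches @bucket and @bp:
 */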
static bool extent_matches_bp(struct bch_fs *c,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c k,
                              struct bpos bucket,
                              struct bch_backpointer bp)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket2;
                struct bch_backpointer bp2;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
                                      &bucket2, &bp2);
                if (bpos_eq(bucket, bucket2) &&
                    !memcmp(&bp, &bp2, sizeof(bp)))
                        return true;
        }

        return false;
}

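/*
 * A backpointer key is only valid if its position round-trips: converting it
 * to a bucket with bp_pos_to_bucket() and back with bucket_pos_to_bp() must
 * give back the original position:
 */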
int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
{
        struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
        int ret = 0;

        bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
                         c, err,
                         backpointer_pos_wrong,
                         "backpointer at wrong pos");
fsck_err:
        return ret;
}

void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
        prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
               bch2_btree_id_str(bp->btree_id),
               bp->level,
               (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
               (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
               bp->bucket_len);
        bch2_bpos_to_text(out, bp->pos);
}

void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        prt_str(out, "bucket=");
        bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
        prt_str(out, " ");

        bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}

void bch2_backpointer_swab(struct bkey_s k)
{
        struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

        bp.v->bucket_offset     = swab32(bp.v->bucket_offset);
        bp.v->bucket_len        = swab32(bp.v->bucket_len);
        bch2_bpos_swab(&bp.v->pos);
}

static noinline int backpointer_mod_err(struct btree_trans *trans,
                                        struct bch_backpointer bp,
                                        struct bkey_s_c bp_k,
                                        struct bkey_s_c orig_k,
                                        bool insert)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;

        if (insert) {
                prt_printf(&buf, "existing backpointer found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "found ");
                bch2_bkey_val_to_text(&buf, c, bp_k);
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
        } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
                prt_printf(&buf, "backpointer not found when deleting");
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "searching for ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);

                prt_printf(&buf, "got ");
                bch2_bkey_val_to_text(&buf, c, bp_k);
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
        }

        printbuf_exit(&buf);

        if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
                bch2_inconsistent_error(c);
                return -EIO;
        } else {
                return 0;
        }
}

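/*
 * Update (insert or delete) a backpointer key directly, without going through
 * the btree write buffer. The key already present at that position is checked
 * against what we expect to find; mismatches are reported via
 * backpointer_mod_err():
 */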
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
                                struct bkey_i_backpointer *bp_k,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k,
                                bool insert)
{
        struct btree_iter bp_iter;
        struct bkey_s_c k;
        int ret;

        k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
                               bp_k->k.p,
                               BTREE_ITER_INTENT|
                               BTREE_ITER_SLOTS|
                               BTREE_ITER_WITH_UPDATES);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (insert
            ? k.k->type
            : (k.k->type != KEY_TYPE_backpointer ||
               memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
                ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
                if (ret)
                        goto err;
        }

        ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
        bch2_trans_iter_exit(trans, &bp_iter);
        return ret;
}

/*
 * Find the next backpointer for @bucket at a position >= *bp_pos:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
                              struct bpos bucket, int gen,
                              struct bpos *bp_pos,
                              struct bch_backpointer *bp,
                              unsigned iter_flags)
{
        struct bch_fs *c = trans->c;
        struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
        struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
        struct bkey_s_c k;
        int ret = 0;

        if (bpos_ge(*bp_pos, bp_end_pos))
                goto done;

        if (gen >= 0) {
                k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
                                       bucket, BTREE_ITER_CACHED|iter_flags);
                ret = bkey_err(k);
                if (ret)
                        goto out;

                if (k.k->type != KEY_TYPE_alloc_v4 ||
                    bkey_s_c_to_alloc_v4(k).v->gen != gen)
                        goto done;
        }

        *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));

        for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
                                     *bp_pos, iter_flags, k, ret) {
                if (bpos_ge(k.k->p, bp_end_pos))
                        break;

                *bp_pos = k.k->p;
                *bp = *bkey_s_c_to_backpointer(k).v;
                goto out;
        }
done:
        *bp_pos = SPOS_MAX;
out:
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

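/*
 * Report a backpointer that doesn't match the extent or btree node it points
 * to. This only fires when backpointer updates bypass the btree write buffer
 * (bch2_backpointers_no_use_write_buffer); otherwise a mismatch may simply be
 * an update still sitting in the write buffer:
 */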
static void backpointer_not_found(struct btree_trans *trans,
                                  struct bpos bp_pos,
                                  struct bch_backpointer bp,
                                  struct bkey_s_c k,
                                  const char *thing_it_points_to)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);

        if (likely(!bch2_backpointers_no_use_write_buffer))
                return;

        prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
                   thing_it_points_to);
        prt_printf(&buf, "bucket: ");
        bch2_bpos_to_text(&buf, bucket);
        prt_printf(&buf, "\n  ");

        prt_printf(&buf, "backpointer pos: ");
        bch2_bpos_to_text(&buf, bp_pos);
        prt_printf(&buf, "\n  ");

        bch2_backpointer_to_text(&buf, &bp);
        prt_printf(&buf, "\n  ");
        bch2_bkey_val_to_text(&buf, c, k);
        if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
                bch_err_ratelimited(c, "%s", buf.buf);
        else
                bch2_trans_inconsistent(trans, "%s", buf.buf);

        printbuf_exit(&buf);
}

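/*
 * Look up the key a backpointer points to, leaving @iter pointing at it on
 * success; returns bkey_s_c_null if no matching key was found. Backpointers
 * to a btree root are handled specially, since the root's key isn't stored in
 * a parent node:
 */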
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
                                         struct btree_iter *iter,
                                         struct bpos bp_pos,
                                         struct bch_backpointer bp,
                                         unsigned iter_flags)
{
        struct bch_fs *c = trans->c;
        struct btree_root *r = bch2_btree_id_root(c, bp.btree_id);
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
        struct bkey_s_c k;

        bch2_trans_node_iter_init(trans, iter,
                                  bp.btree_id,
                                  bp.pos,
                                  0,
                                  min(bp.level, r->level),
                                  iter_flags);
        k = bch2_btree_iter_peek_slot(iter);
        if (bkey_err(k)) {
                bch2_trans_iter_exit(trans, iter);
                return k;
        }

        if (bp.level == r->level + 1)
                k = bkey_i_to_s_c(&r->key);

        if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
                return k;

        bch2_trans_iter_exit(trans, iter);

        if (unlikely(bch2_backpointers_no_use_write_buffer)) {
                if (bp.level) {
                        struct btree *b;

                        /*
                         * If a backpointer for a btree node wasn't found, it may be
                         * because it was overwritten by a new btree node that hasn't
                         * been written out yet - backpointer_get_node() checks for
                         * this:
                         */
                        b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
                        if (!IS_ERR_OR_NULL(b))
                                return bkey_i_to_s_c(&b->key);

                        bch2_trans_iter_exit(trans, iter);

                        if (IS_ERR(b))
                                return bkey_s_c_err(PTR_ERR(b));
                        return bkey_s_c_null;
                }

                backpointer_not_found(trans, bp_pos, bp, k, "extent");
        }

        return bkey_s_c_null;
}

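/*
 * Return the btree node a backpointer points to, or NULL if no matching node
 * was found. If the node has been overwritten by a new node that hasn't been
 * written out yet, returns
 * ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node):
 */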
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bpos bp_pos,
                                        struct bch_backpointer bp)
{
        struct bch_fs *c = trans->c;
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
        struct btree *b;

        BUG_ON(!bp.level);

        bch2_trans_node_iter_init(trans, iter,
                                  bp.btree_id,
                                  bp.pos,
                                  0,
                                  bp.level - 1,
                                  0);
        b = bch2_btree_iter_peek_node(iter);
        if (IS_ERR(b))
                goto err;

        if (b && extent_matches_bp(c, bp.btree_id, bp.level,
                                   bkey_i_to_s_c(&b->key),
                                   bucket, bp))
                return b;

        if (b && btree_node_will_make_reachable(b)) {
                b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
        } else {
                backpointer_not_found(trans, bp_pos, bp,
                                      bkey_i_to_s_c(&b->key), "btree node");
                b = NULL;
        }
err:
        bch2_trans_iter_exit(trans, iter);
        return b;
}

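/*
 * Check a single backpointer key: delete it if its device doesn't exist, or
 * if there's no alloc_v4 key for the bucket it refers to:
 */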
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
                                        struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter = { NULL };
        struct bkey_s_c alloc_k;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
                        backpointer_to_missing_device,
                        "backpointer for missing device:\n%s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }

        alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
                                     bp_pos_to_bucket(c, k.k->p), 0);
        ret = bkey_err(alloc_k);
        if (ret)
                goto out;

        if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
                        backpointer_to_missing_alloc,
                        "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
                        alloc_iter.pos.inode, alloc_iter.pos.offset,
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }
out:
fsck_err:
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
}

/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                        BTREE_ID_backpointers, POS_MIN, 0, k,
                        NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
                  bch2_check_btree_backpointer(trans, &iter, k)));
        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

struct bpos_level {
        unsigned        level;
        struct bpos     pos;
};

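/*
 * Check that backpointer @bp for @orig_k exists in the backpointers btree,
 * only considering buckets in [bucket_start, bucket_end]. On a mismatch we
 * first flush the btree write buffer (once per extent) and restart, since the
 * backpointer may still be buffered; if it's genuinely missing, it's
 * recreated:
 */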
static int check_bp_exists(struct btree_trans *trans,
                           struct bpos bucket,
                           struct bch_backpointer bp,
                           struct bkey_s_c orig_k,
                           struct bpos bucket_start,
                           struct bpos bucket_end,
                           struct bpos_level *last_flushed)
{
        struct bch_fs *c = trans->c;
        struct btree_iter bp_iter = { NULL };
        struct printbuf buf = PRINTBUF;
        struct bkey_s_c bp_k;
        int ret;

        if (bpos_lt(bucket, bucket_start) ||
            bpos_gt(bucket, bucket_end))
                return 0;

        if (!bch2_dev_bucket_exists(c, bucket))
                goto missing;

        bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
                                  bucket_pos_to_bp(c, bucket, bp.bucket_offset),
                                  0);
        ret = bkey_err(bp_k);
        if (ret)
                goto err;

        if (bp_k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
                if (last_flushed->level != bp.level ||
                    !bpos_eq(last_flushed->pos, orig_k.k->p)) {
                        last_flushed->level = bp.level;
                        last_flushed->pos = orig_k.k->p;

                        ret = bch2_btree_write_buffer_flush_sync(trans) ?:
                                -BCH_ERR_transaction_restart_write_buffer_flush;
                        goto out;
                }
                goto missing;
        }
out:
err:
fsck_err:
        bch2_trans_iter_exit(trans, &bp_iter);
        printbuf_exit(&buf);
        return ret;
missing:
        prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
               bch2_btree_id_str(bp.btree_id), bp.level);
        bch2_bkey_val_to_text(&buf, c, orig_k);
        prt_printf(&buf, "\nbp pos ");
        bch2_bpos_to_text(&buf, bp_iter.pos);

        if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
            c->opts.reconstruct_alloc ||
            fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
                ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);

        goto out;
}

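/*
 * For each non-cached pointer in the key at @iter, verify that a matching
 * backpointer exists:
 */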
static int check_extent_to_backpointers(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bpos bucket_start,
                                        struct bpos bucket_end,
                                        struct bpos_level *last_flushed)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs;
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bkey_s_c k;
        int ret;

        k = bch2_btree_iter_peek_all_levels(iter);
        ret = bkey_err(k);
        if (ret)
                return ret;
        if (!k.k)
                return 0;

        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k,
                                      bucket_start, bucket_end,
                                      last_flushed);
                if (ret)
                        return ret;
        }

        return 0;
}

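/*
 * A btree root's key isn't stored in any parent node, so the walk in
 * check_extent_to_backpointers() won't visit it; check backpointers for the
 * root's pointers here:
 */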
static int check_btree_root_to_backpointers(struct btree_trans *trans,
                                            enum btree_id btree_id,
                                            struct bpos bucket_start,
                                            struct bpos bucket_end,
                                            struct bpos_level *last_flushed)
{
        struct bch_fs *c = trans->c;
        struct btree_root *r = bch2_btree_id_root(c, btree_id);
        struct btree_iter iter;
        struct btree *b;
        struct bkey_s_c k;
        struct bkey_ptrs_c ptrs;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        int ret;

        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, r->level, 0);
        b = bch2_btree_iter_peek_node(&iter);
        ret = PTR_ERR_OR_ZERO(b);
        if (ret)
                goto err;

        BUG_ON(b != btree_node_root(c, b));

        k = bkey_i_to_s_c(&b->key);
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, iter.btree_id, b->c.level + 1,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k,
                                      bucket_start, bucket_end,
                                      last_flushed);
                if (ret)
                        goto err;
        }
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
        return (struct bbpos) {
                .btree  = bp.btree_id,
                .pos    = bp.pos,
        };
}

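/*
 * Memory budget for a single fsck pass: half of system memory, expressed as a
 * number of btree nodes:
 */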
static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
        struct sysinfo i;
        u64 mem_bytes;

        si_meminfo(&i);
        mem_bytes = i.totalram * i.mem_unit;
        return div_u64(mem_bytes >> 1, btree_bytes(c));
}

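/*
 * Starting from @start, walk the btrees selected by @btree_leaf_mask and
 * @btree_interior_mask and find how far we can get within the
 * btree_nodes_fit_in_ram() budget; the position reached is returned in *end:
 */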
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
                                        unsigned btree_leaf_mask,
                                        unsigned btree_interior_mask,
                                        struct bbpos start, struct bbpos *end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        enum btree_id btree;
        int ret = 0;

        for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
                unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

                if (!((1U << btree) & btree_leaf_mask) &&
                    !((1U << btree) & btree_interior_mask))
                        continue;

                bch2_trans_node_iter_init(trans, &iter, btree,
                                          btree == start.btree ? start.pos : POS_MIN,
                                          0, depth, 0);
                /*
                 * for_each_btree_key_continue() doesn't check the return value
                 * from bch2_btree_iter_advance(), which is needed when
                 * iterating over interior nodes where we'll see keys at
                 * SPOS_MAX:
                 */
                do {
                        k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
                        ret = bkey_err(k);
                        if (!k.k || ret)
                                break;

                        --btree_nodes;
                        if (!btree_nodes) {
                                *end = BBPOS(btree, k.k->p);
                                bch2_trans_iter_exit(trans, &iter);
                                return 0;
                        }
                } while (bch2_btree_iter_advance(&iter));
                bch2_trans_iter_exit(trans, &iter);
        }

        *end = BBPOS_MAX;
        return ret;
}

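/*
 * One extents -> backpointers pass: walk every btree that can contain
 * pointers and verify backpointers for buckets in [bucket_start, bucket_end]:
 */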
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
                                                   struct bpos bucket_start,
                                                   struct bpos bucket_end)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        enum btree_id btree_id;
        struct bpos_level last_flushed = { UINT_MAX, POS_MIN };
        int ret = 0;

        for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
                unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

                bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                          depth,
                                          BTREE_ITER_ALL_LEVELS|
                                          BTREE_ITER_PREFETCH);

                do {
                        ret = commit_do(trans, NULL, NULL,
                                        BTREE_INSERT_LAZY_RW|
                                        BTREE_INSERT_NOFAIL,
                                        check_extent_to_backpointers(trans, &iter,
                                                                bucket_start, bucket_end,
                                                                &last_flushed));
                        if (ret)
                                break;
                } while (!bch2_btree_iter_advance(&iter));

                bch2_trans_iter_exit(trans, &iter);

                if (ret)
                        break;

                ret = commit_do(trans, NULL, NULL,
                                BTREE_INSERT_LAZY_RW|
                                BTREE_INSERT_NOFAIL,
                                check_btree_root_to_backpointers(trans, btree_id,
                                                        bucket_start, bucket_end,
                                                        &last_flushed));
                if (ret)
                        break;
        }
        return ret;
}

static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
                                         struct bpos bucket)
{
        return bch2_dev_exists2(c, bucket.inode)
                ? bucket_pos_to_bp(c, bucket, 0)
                : bucket;
}

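/*
 * Walk the alloc and backpointers btrees in parallel from @start to find how
 * many buckets can be checked within the btree_nodes_fit_in_ram() budget; the
 * bucket position reached is returned in *end:
 */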
static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
                                        struct bpos start, struct bpos *end)
{
        struct btree_iter alloc_iter;
        struct btree_iter bp_iter;
        struct bkey_s_c alloc_k, bp_k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        bool alloc_end = false, bp_end = false;
        int ret = 0;

        bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                                  start, 0, 1, 0);
        bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                                  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
        while (1) {
                alloc_k = !alloc_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
                        : bkey_s_c_null;
                bp_k = !bp_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
                        : bkey_s_c_null;

                ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
                if ((!alloc_k.k && !bp_k.k) || ret) {
                        *end = SPOS_MAX;
                        break;
                }

                --btree_nodes;
                if (!btree_nodes) {
                        *end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
                        break;
                }

                if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
                    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
                        if (!bch2_btree_iter_advance(&alloc_iter))
                                alloc_end = true;
                } else {
                        if (!bch2_btree_iter_advance(&bp_iter))
                                bp_end = true;
                }
        }
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

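/*
 * Check that every extent and btree node pointer has a corresponding
 * backpointer: if the alloc info doesn't fit in memory, this runs in multiple
 * passes, each covering a range of buckets:
 */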
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bpos start = POS_MIN, end;
        int ret;

        while (1) {
                ret = bch2_get_alloc_in_memory_pos(trans, start, &end);
                if (ret)
                        break;

                if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
                        bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_extents_to_backpointers(): ");
                        bch2_bpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_extents_to_backpointers_pass(trans, start, end);
                if (ret || bpos_eq(end, SPOS_MAX))
                        break;

                start = bpos_successor(end);
        }
        bch2_trans_put(trans);

        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

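/*
 * Check that a single backpointer points to a live extent or btree node.
 * If the target is missing we first flush the btree write buffer and restart;
 * if it's still missing after that, the backpointer is deleted:
 */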
static int check_one_backpointer(struct btree_trans *trans,
                                 struct bbpos start,
                                 struct bbpos end,
                                 struct bkey_s_c_backpointer bp,
                                 struct bpos *last_flushed_pos)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bbpos pos = bp_to_bbpos(*bp.v);
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (bbpos_cmp(pos, start) < 0 ||
            bbpos_cmp(pos, end) > 0)
                return 0;

        k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
        ret = bkey_err(k);
        if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
                return 0;
        if (ret)
                return ret;

        if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
                *last_flushed_pos = bp.k->p;
                ret = bch2_btree_write_buffer_flush_sync(trans) ?:
                        -BCH_ERR_transaction_restart_write_buffer_flush;
                goto out;
        }

        if (fsck_err_on(!k.k, c,
                        backpointer_to_missing_ptr,
                        "backpointer for missing extent\n  %s",
                        (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
                ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
                goto out;
        }
out:
fsck_err:
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ret;
}

static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
                                                   struct bbpos start,
                                                   struct bbpos end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bpos last_flushed_pos = SPOS_MAX;

        return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
                                  POS_MIN, BTREE_ITER_PREFETCH, k,
                                  NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
                check_one_backpointer(trans, start, end,
                                      bkey_s_c_to_backpointer(k),
                                      &last_flushed_pos));
}

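/*
 * Check that every backpointer points to a live extent or btree node; as
 * above, runs in multiple passes if the extents and reflink btrees don't fit
 * in memory:
 */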
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
        int ret;

        while (1) {
                ret = bch2_get_btree_in_memory_pos(trans,
                                                   (1U << BTREE_ID_extents)|
                                                   (1U << BTREE_ID_reflink),
                                                   ~0,
                                                   start, &end);
                if (ret)
                        break;

                if (!bbpos_cmp(start, BBPOS_MIN) &&
                    bbpos_cmp(end, BBPOS_MAX))
                        bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (bbpos_cmp(start, BBPOS_MIN) ||
                    bbpos_cmp(end, BBPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_backpointers_to_extents(): ");
                        bch2_bbpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bbpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
                if (ret || !bbpos_cmp(end, BBPOS_MAX))
                        break;

                start = bbpos_successor(end);
        }
        bch2_trans_put(trans);

        if (ret)
                bch_err_fn(c, ret);
        return ret;
}