// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"

#include <linux/mm.h>

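/*
 * Check whether the extent or btree node key @k has a non-cached pointer that
 * maps to @bucket with a backpointer equal to @bp, i.e. whether the
 * backpointer still points at live data.
 */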
static bool extent_matches_bp(struct bch_fs *c,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c k,
                              struct bpos bucket,
                              struct bch_backpointer bp)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket2;
                struct bch_backpointer bp2;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
                                      &bucket2, &bp2);
                if (bpos_eq(bucket, bucket2) &&
                    !memcmp(&bp, &bp2, sizeof(bp)))
                        return true;
        }

        return false;
}

int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
{
        struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
        int ret = 0;

        bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
                         c, err,
                         backpointer_pos_wrong,
                         "backpointer at wrong pos");
fsck_err:
        return ret;
}

void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
        prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
                   bch2_btree_id_str(bp->btree_id),
                   bp->level,
                   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
                   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
                   bp->bucket_len);
        bch2_bpos_to_text(out, bp->pos);
}

void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        prt_str(out, "bucket=");
        bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
        prt_str(out, " ");

        bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}

void bch2_backpointer_swab(struct bkey_s k)
{
        struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

        bp.v->bucket_offset     = swab40(bp.v->bucket_offset);
        bp.v->bucket_len        = swab32(bp.v->bucket_len);
        bch2_bpos_swab(&bp.v->pos);
}

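/*
 * Log a mismatch hit while modifying a backpointer: an existing backpointer
 * found on insert, or a missing/mismatched one on delete. This is only
 * treated as a fatal inconsistency once the check_extents_to_backpointers
 * recovery pass has completed.
 */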
static noinline int backpointer_mod_err(struct btree_trans *trans,
                                        struct bch_backpointer bp,
                                        struct bkey_s_c bp_k,
                                        struct bkey_s_c orig_k,
                                        bool insert)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;

        if (insert) {
                prt_printf(&buf, "existing backpointer found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "found ");
                bch2_bkey_val_to_text(&buf, c, bp_k);
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
        } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
                prt_printf(&buf, "backpointer not found when deleting");
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "searching for ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);

                prt_printf(&buf, "got ");
                bch2_bkey_val_to_text(&buf, c, bp_k);
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
        }

        printbuf_exit(&buf);

        if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
                bch2_inconsistent_error(c);
                return -EIO;
        } else {
                return 0;
        }
}

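/*
 * Insert or delete a backpointer directly in the backpointers btree, without
 * going through the btree write buffer; the slot is checked against the
 * expected state first.
 */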
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
                                struct bpos bucket,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k,
                                bool insert)
{
        struct btree_iter bp_iter;
        struct bkey_s_c k;
        struct bkey_i_backpointer *bp_k;
        int ret;

        bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
        ret = PTR_ERR_OR_ZERO(bp_k);
        if (ret)
                return ret;

        bkey_backpointer_init(&bp_k->k_i);
        bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
        bp_k->v = bp;

        if (!insert) {
                bp_k->k.type = KEY_TYPE_deleted;
                set_bkey_val_u64s(&bp_k->k, 0);
        }

        k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
                               bp_k->k.p,
                               BTREE_ITER_INTENT|
                               BTREE_ITER_SLOTS|
                               BTREE_ITER_WITH_UPDATES);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (insert
            ? k.k->type
            : (k.k->type != KEY_TYPE_backpointer ||
               memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
                ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
                if (ret)
                        goto err;
        }

        ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
        bch2_trans_iter_exit(trans, &bp_iter);
        return ret;
}

/*
 * Find the next backpointer >= *bp_pos:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
                              struct bpos bucket, int gen,
                              struct bpos *bp_pos,
                              struct bch_backpointer *bp,
                              unsigned iter_flags)
{
        struct bch_fs *c = trans->c;
        struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
        struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
        struct bkey_s_c k;
        int ret = 0;

        if (bpos_ge(*bp_pos, bp_end_pos))
                goto done;

        if (gen >= 0) {
                k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
                                       bucket, BTREE_ITER_CACHED|iter_flags);
                ret = bkey_err(k);
                if (ret)
                        goto out;

                if (k.k->type != KEY_TYPE_alloc_v4 ||
                    bkey_s_c_to_alloc_v4(k).v->gen != gen)
                        goto done;
        }

        *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));

        for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
                                     *bp_pos, iter_flags, k, ret) {
                if (bpos_ge(k.k->p, bp_end_pos))
                        break;

                *bp_pos = k.k->p;
                *bp = *bkey_s_c_to_backpointer(k).v;
                goto out;
        }
done:
        *bp_pos = SPOS_MAX;
out:
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

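/* Report a backpointer whose target extent or btree node could not be found. */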
static void backpointer_not_found(struct btree_trans *trans,
                                  struct bpos bp_pos,
                                  struct bch_backpointer bp,
                                  struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);

        /*
         * If we're using the btree write buffer, the backpointer we were
         * looking at may have already been deleted - failure to find what it
         * pointed to is not an error:
         */
        if (likely(!bch2_backpointers_no_use_write_buffer))
                return;

        prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
                   bp.level ? "btree node" : "extent");
        prt_printf(&buf, "bucket: ");
        bch2_bpos_to_text(&buf, bucket);
        prt_printf(&buf, "\n  ");

        prt_printf(&buf, "backpointer pos: ");
        bch2_bpos_to_text(&buf, bp_pos);
        prt_printf(&buf, "\n  ");

        bch2_backpointer_to_text(&buf, &bp);
        prt_printf(&buf, "\n  ");
        bch2_bkey_val_to_text(&buf, c, k);
        if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
                bch_err_ratelimited(c, "%s", buf.buf);
        else
                bch2_trans_inconsistent(trans, "%s", buf.buf);

        printbuf_exit(&buf);
}

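/*
 * Return the extent or btree node key that @bp points to, or bkey_s_c_null if
 * it no longer exists; when a key is returned, @iter is left pointing at it
 * and must be exited by the caller.
 */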
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
                                         struct btree_iter *iter,
                                         struct bpos bp_pos,
                                         struct bch_backpointer bp,
                                         unsigned iter_flags)
{
        if (likely(!bp.level)) {
                struct bch_fs *c = trans->c;
                struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
                struct bkey_s_c k;

                bch2_trans_node_iter_init(trans, iter,
                                          bp.btree_id,
                                          bp.pos,
                                          0, 0,
                                          iter_flags);
                k = bch2_btree_iter_peek_slot(iter);
                if (bkey_err(k)) {
                        bch2_trans_iter_exit(trans, iter);
                        return k;
                }

                if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
                        return k;

                bch2_trans_iter_exit(trans, iter);
                backpointer_not_found(trans, bp_pos, bp, k);
                return bkey_s_c_null;
        } else {
                struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);

                if (IS_ERR_OR_NULL(b)) {
                        bch2_trans_iter_exit(trans, iter);
                        return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
                }
                return bkey_i_to_s_c(&b->key);
        }
}

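/*
 * Return the btree node that @bp points to (bp.level must be nonzero), NULL
 * if it no longer exists, or an error pointer.
 */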
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bpos bp_pos,
                                        struct bch_backpointer bp)
{
        struct bch_fs *c = trans->c;
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
        struct btree *b;

        BUG_ON(!bp.level);

        bch2_trans_node_iter_init(trans, iter,
                                  bp.btree_id,
                                  bp.pos,
                                  0,
                                  bp.level - 1,
                                  0);
        b = bch2_btree_iter_peek_node(iter);
        if (IS_ERR_OR_NULL(b))
                goto err;

        BUG_ON(b->c.level != bp.level - 1);

        if (extent_matches_bp(c, bp.btree_id, bp.level,
                              bkey_i_to_s_c(&b->key),
                              bucket, bp))
                return b;

        if (btree_node_will_make_reachable(b)) {
                b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
        } else {
                backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
                b = NULL;
        }
err:
        bch2_trans_iter_exit(trans, iter);
        return b;
}

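/*
 * Check a single backpointer key: delete it if it points to a nonexistent
 * device or to a bucket without an alloc_v4 key.
 */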
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
                                        struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter = { NULL };
        struct bkey_s_c alloc_k;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
                        backpointer_to_missing_device,
                        "backpointer for missing device:\n%s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }

        alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
                                     bp_pos_to_bucket(c, k.k->p), 0);
        ret = bkey_err(alloc_k);
        if (ret)
                goto out;

        if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
                        backpointer_to_missing_alloc,
                        "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
                        alloc_iter.pos.inode, alloc_iter.pos.offset,
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }
out:
fsck_err:
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
}

/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                        BTREE_ID_backpointers, POS_MIN, 0, k,
                        NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                        bch2_check_btree_backpointer(trans, &iter, k)));
        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

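/*
 * Check that the backpointer @bp for @orig_k exists in the backpointers
 * btree, restricted to the range of buckets currently being processed; if
 * it's missing or doesn't match, flush the btree write buffer and retry once
 * before creating it.
 */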
static int check_bp_exists(struct btree_trans *trans,
                           struct bpos bucket,
                           struct bch_backpointer bp,
                           struct bkey_s_c orig_k,
                           struct bpos bucket_start,
                           struct bpos bucket_end,
                           struct bkey_buf *last_flushed)
{
        struct bch_fs *c = trans->c;
        struct btree_iter bp_iter = { NULL };
        struct printbuf buf = PRINTBUF;
        struct bkey_s_c bp_k;
        struct bkey_buf tmp;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        if (bpos_lt(bucket, bucket_start) ||
            bpos_gt(bucket, bucket_end))
                return 0;

        if (!bch2_dev_bucket_exists(c, bucket))
                goto missing;

        bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
                                  bucket_pos_to_bp(c, bucket, bp.bucket_offset),
                                  0);
        ret = bkey_err(bp_k);
        if (ret)
                goto err;

        if (bp_k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
                if (!bpos_eq(orig_k.k->p, last_flushed->k->k.p) ||
                    bkey_bytes(orig_k.k) != bkey_bytes(&last_flushed->k->k) ||
                    memcmp(orig_k.v, &last_flushed->k->v, bkey_val_bytes(orig_k.k))) {
                        bch2_bkey_buf_reassemble(&tmp, c, orig_k);

                        if (bp.level) {
                                bch2_trans_unlock(trans);
                                bch2_btree_interior_updates_flush(c);
                        }

                        ret = bch2_btree_write_buffer_flush_sync(trans);
                        if (ret)
                                goto err;

                        bch2_bkey_buf_copy(last_flushed, c, tmp.k);
                        ret = -BCH_ERR_transaction_restart_write_buffer_flush;
                        goto out;
                }
                goto missing;
        }
out:
err:
fsck_err:
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_bkey_buf_exit(&tmp, c);
        printbuf_exit(&buf);
        return ret;
missing:
        prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
                   bch2_btree_id_str(bp.btree_id), bp.level);
        bch2_bkey_val_to_text(&buf, c, orig_k);
        prt_printf(&buf, "\nbp pos ");
        bch2_bpos_to_text(&buf, bp_iter.pos);

        if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
            c->opts.reconstruct_alloc ||
            fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
                ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);

        goto out;
}

static int check_extent_to_backpointers(struct btree_trans *trans,
                                        enum btree_id btree, unsigned level,
                                        struct bpos bucket_start,
                                        struct bpos bucket_end,
                                        struct bkey_buf *last_flushed,
                                        struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs;
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        int ret;

        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, btree, level,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k,
                                      bucket_start, bucket_end,
                                      last_flushed);
                if (ret)
                        return ret;
        }

        return 0;
}

static int check_btree_root_to_backpointers(struct btree_trans *trans,
                                            enum btree_id btree_id,
                                            struct bpos bucket_start,
                                            struct bpos bucket_end,
                                            struct bkey_buf *last_flushed,
                                            int *level)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct btree *b;
        struct bkey_s_c k;
        int ret;
retry:
        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
                                  0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
        b = bch2_btree_iter_peek_node(&iter);
        ret = PTR_ERR_OR_ZERO(b);
        if (ret)
                goto err;

        if (b != btree_node_root(c, b)) {
                bch2_trans_iter_exit(trans, &iter);
                goto retry;
        }

        *level = b->c.level;

        k = bkey_i_to_s_c(&b->key);
        ret = check_extent_to_backpointers(trans, btree_id, b->c.level + 1,
                                           bucket_start, bucket_end,
                                           last_flushed, k);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
        return (struct bbpos) {
                .btree  = bp.btree_id,
                .pos    = bp.pos,
        };
}

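/* Number of btree nodes that fit in half of system memory: */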
static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
        struct sysinfo i;
        u64 mem_bytes;

        si_meminfo(&i);
        mem_bytes = i.totalram * i.mem_unit;
        return div_u64(mem_bytes >> 1, btree_bytes(c));
}

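/*
 * Starting from @start, walk the btrees selected by @btree_leaf_mask and
 * @btree_interior_mask and find the position @end such that the btree nodes
 * seen so far fit in half of system memory; used for running checks in
 * multiple passes when metadata doesn't fit in ram.
 */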
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
                                        unsigned btree_leaf_mask,
                                        unsigned btree_interior_mask,
                                        struct bbpos start, struct bbpos *end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        enum btree_id btree;
        int ret = 0;

        for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
                unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

                if (!((1U << btree) & btree_leaf_mask) &&
                    !((1U << btree) & btree_interior_mask))
                        continue;

                bch2_trans_node_iter_init(trans, &iter, btree,
                                          btree == start.btree ? start.pos : POS_MIN,
                                          0, depth, 0);
                /*
                 * for_each_btree_key_continue() doesn't check the return value
                 * from bch2_btree_iter_advance(), which is needed when
                 * iterating over interior nodes where we'll see keys at
                 * SPOS_MAX:
                 */
                do {
                        k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
                        ret = bkey_err(k);
                        if (!k.k || ret)
                                break;

                        --btree_nodes;
                        if (!btree_nodes) {
                                *end = BBPOS(btree, k.k->p);
                                bch2_trans_iter_exit(trans, &iter);
                                return 0;
                        }
                } while (bch2_btree_iter_advance(&iter));
                bch2_trans_iter_exit(trans, &iter);
        }

        *end = BBPOS_MAX;
        return ret;
}

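/*
 * Walk every btree that points into data buckets and check that each extent
 * and btree node pointer in [bucket_start, bucket_end] has a matching
 * backpointer, starting from each btree root and working down one level at a
 * time.
 */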
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
                                                   struct bpos bucket_start,
                                                   struct bpos bucket_end)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        enum btree_id btree_id;
        struct bkey_s_c k;
        struct bkey_buf last_flushed;
        int ret = 0;

        bch2_bkey_buf_init(&last_flushed);
        bkey_init(&last_flushed.k->k);

        for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
                int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

                ret = commit_do(trans, NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc,
                                check_btree_root_to_backpointers(trans, btree_id,
                                                        bucket_start, bucket_end,
                                                        &last_flushed, &level));
                if (ret)
                        return ret;

                while (level >= depth) {
                        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                                  level,
                                                  BTREE_ITER_PREFETCH);
                        while (1) {
                                bch2_trans_begin(trans);
                                k = bch2_btree_iter_peek(&iter);
                                if (!k.k)
                                        break;
                                ret = bkey_err(k) ?:
                                        check_extent_to_backpointers(trans, btree_id, level,
                                                                     bucket_start, bucket_end,
                                                                     &last_flushed, k) ?:
                                        bch2_trans_commit(trans, NULL, NULL,
                                                          BCH_TRANS_COMMIT_no_enospc);
                                if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                                        ret = 0;
                                        continue;
                                }
                                if (ret)
                                        break;
                                if (bpos_eq(iter.pos, SPOS_MAX))
                                        break;
                                bch2_btree_iter_advance(&iter);
                        }
                        bch2_trans_iter_exit(trans, &iter);

                        if (ret)
                                return ret;

                        --level;
                }
        }

        bch2_bkey_buf_exit(&last_flushed, c);
        return 0;
}

static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
                                         struct bpos bucket)
{
        return bch2_dev_exists2(c, bucket.inode)
                ? bucket_pos_to_bp(c, bucket, 0)
                : bucket;
}

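/*
 * Walk the alloc and backpointers btrees in parallel from @start and find the
 * position @end such that the btree nodes seen so far fit in half of system
 * memory; used to run extents_to_backpointers in multiple passes.
 */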
static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
                                        struct bpos start, struct bpos *end)
{
        struct btree_iter alloc_iter;
        struct btree_iter bp_iter;
        struct bkey_s_c alloc_k, bp_k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        bool alloc_end = false, bp_end = false;
        int ret = 0;

        bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                                  start, 0, 1, 0);
        bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                                  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
        while (1) {
                alloc_k = !alloc_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
                        : bkey_s_c_null;
                bp_k = !bp_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
                        : bkey_s_c_null;

                ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
                if ((!alloc_k.k && !bp_k.k) || ret) {
                        *end = SPOS_MAX;
                        break;
                }

                --btree_nodes;
                if (!btree_nodes) {
                        *end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
                        break;
                }

                if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
                    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
                        if (!bch2_btree_iter_advance(&alloc_iter))
                                alloc_end = true;
                } else {
                        if (!bch2_btree_iter_advance(&bp_iter))
                                bp_end = true;
                }
        }
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

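/*
 * Check that every extent and btree node pointer has a corresponding
 * backpointer, running in multiple passes if the alloc info doesn't fit in
 * ram.
 */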
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bpos start = POS_MIN, end;
        int ret;

        while (1) {
                ret = bch2_get_alloc_in_memory_pos(trans, start, &end);
                if (ret)
                        break;

                if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
                        bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_extents_to_backpointers(): ");
                        bch2_bpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_extents_to_backpointers_pass(trans, start, end);
                if (ret || bpos_eq(end, SPOS_MAX))
                        break;

                start = bpos_successor(end);
        }
        bch2_trans_put(trans);

        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

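/*
 * Check a single backpointer in [start, end]: look up the extent or btree
 * node it points to, flushing the btree write buffer once if the lookup
 * fails, and delete the backpointer if its target is still missing.
 */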
static int check_one_backpointer(struct btree_trans *trans,
                                 struct bbpos start,
                                 struct bbpos end,
                                 struct bkey_s_c_backpointer bp,
                                 struct bpos *last_flushed_pos)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bbpos pos = bp_to_bbpos(*bp.v);
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (bbpos_cmp(pos, start) < 0 ||
            bbpos_cmp(pos, end) > 0)
                return 0;

        k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
        ret = bkey_err(k);
        if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
                return 0;
        if (ret)
                return ret;

        if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
                *last_flushed_pos = bp.k->p;
                ret = bch2_btree_write_buffer_flush_sync(trans) ?:
                        -BCH_ERR_transaction_restart_write_buffer_flush;
                goto out;
        }

        if (fsck_err_on(!k.k, c,
                        backpointer_to_missing_ptr,
                        "backpointer for missing %s\n  %s",
                        bp.v->level ? "btree node" : "extent",
                        (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
                ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
                goto out;
        }
out:
fsck_err:
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ret;
}

static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
                                                   struct bbpos start,
                                                   struct bbpos end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bpos last_flushed_pos = SPOS_MAX;

        return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
                                  POS_MIN, BTREE_ITER_PREFETCH, k,
                                  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                check_one_backpointer(trans, start, end,
                                      bkey_s_c_to_backpointer(k),
                                      &last_flushed_pos));
}

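/*
 * Check that every backpointer points to a live extent or btree node, running
 * in multiple passes if the extents and reflink btrees don't fit in ram.
 */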
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
        int ret;

        while (1) {
                ret = bch2_get_btree_in_memory_pos(trans,
                                                   (1U << BTREE_ID_extents)|
                                                   (1U << BTREE_ID_reflink),
                                                   ~0,
                                                   start, &end);
                if (ret)
                        break;

                if (!bbpos_cmp(start, BBPOS_MIN) &&
                    bbpos_cmp(end, BBPOS_MAX))
                        bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (bbpos_cmp(start, BBPOS_MIN) ||
                    bbpos_cmp(end, BBPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_backpointers_to_extents(): ");
                        bch2_bbpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bbpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
                if (ret || !bbpos_cmp(end, BBPOS_MAX))
                        break;

                start = bbpos_successor(end);
        }
        bch2_trans_put(trans);

        if (ret)
                bch_err_fn(c, ret);
        return ret;