1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "darray.h"
9 #include "dirent.h"
10 #include "error.h"
11 #include "fs-common.h"
12 #include "fsck.h"
13 #include "inode.h"
14 #include "keylist.h"
15 #include "recovery.h"
16 #include "snapshot.h"
17 #include "super.h"
18 #include "xattr.h"
19
20 #include <linux/bsearch.h>
21 #include <linux/dcache.h> /* struct qstr */
22
23 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
24
25 /*
26  * XXX: this handles transaction restarts without returning
27  * -BCH_ERR_transaction_restart_nested; this is not how we do things anymore:
28  */
29 static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
30                                     u32 snapshot)
31 {
32         struct btree_iter iter;
33         struct bkey_s_c k;
34         u64 sectors = 0;
35         int ret;
36
37         for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
38                                 SPOS(inum, 0, snapshot),
39                                 POS(inum, U64_MAX),
40                                 0, k, ret)
41                 if (bkey_extent_is_allocation(k.k))
42                         sectors += k.k->size;
43
44         bch2_trans_iter_exit(trans, &iter);
45
46         return ret ?: sectors;
47 }
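
/*
 * A minimal sketch of the pattern used elsewhere in this file for propagating
 * restarts (see check_i_sectors() below): snapshot trans->restart_count up
 * front and report a restart to the caller, roughly:
 *
 *	u32 restart_count = trans->restart_count;
 *	...
 *	return ret ?: trans_was_restarted(trans, restart_count);
 *
 * bch2_count_inode_sectors() and bch2_count_subdirs() (below) instead rely on
 * for_each_btree_key_upto() retrying internally, which is what the XXX
 * comment above refers to.
 */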
48
49 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
50                                     u32 snapshot)
51 {
52         struct btree_iter iter;
53         struct bkey_s_c k;
54         struct bkey_s_c_dirent d;
55         u64 subdirs = 0;
56         int ret;
57
58         for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
59                                 SPOS(inum, 0, snapshot),
60                                 POS(inum, U64_MAX),
61                                 0, k, ret) {
62                 if (k.k->type != KEY_TYPE_dirent)
63                         continue;
64
65                 d = bkey_s_c_to_dirent(k);
66                 if (d.v->d_type == DT_DIR)
67                         subdirs++;
68         }
69         bch2_trans_iter_exit(trans, &iter);
70
71         return ret ?: subdirs;
72 }
73
74 static int __snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
75                                     u32 *subvol)
76 {
77         struct bch_snapshot s;
78         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
79                                           POS(0, snapshot), 0,
80                                           snapshot, &s);
81         if (!ret)
82                 *subvol = le32_to_cpu(s.subvol);
83         else if (bch2_err_matches(ret, ENOENT))
84                 bch_err(trans->c, "snapshot %u not found", snapshot);
85         return ret;
86
87 }
88
89 static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
90                            u32 *snapshot, u64 *inum)
91 {
92         struct bch_subvolume s;
93         int ret;
94
95         ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
96
97         *snapshot = le32_to_cpu(s.snapshot);
98         *inum = le64_to_cpu(s.inode);
99         return ret;
100 }
101
102 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
103                          u32 *snapshot, u64 *inum)
104 {
105         return lockrestart_do(trans, __subvol_lookup(trans, subvol, snapshot, inum));
106 }
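
/*
 * Naming convention used throughout this file: the double-underscore helpers
 * (__subvol_lookup(), __lookup_inode(), ...) run inside the current btree
 * transaction and may return a transaction restart error; the un-prefixed
 * wrappers retry them via lockrestart_do(). A minimal usage sketch, with
 * hypothetical locals:
 *
 *	u32 snapshot;
 *	u64 inum;
 *	int ret = subvol_lookup(trans, subvol, &snapshot, &inum);
 */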
107
108 static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
109                               struct bch_inode_unpacked *inode)
110 {
111         struct btree_iter iter;
112         struct bkey_s_c k;
113         int ret;
114
115         bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
116                              POS(0, inode_nr),
117                              BTREE_ITER_ALL_SNAPSHOTS);
118         k = bch2_btree_iter_peek(&iter);
119         ret = bkey_err(k);
120         if (ret)
121                 goto err;
122
123         if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
124                 ret = -BCH_ERR_ENOENT_inode;
125                 goto err;
126         }
127
128         ret = bch2_inode_unpack(k, inode);
129 err:
130         bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
131         bch2_trans_iter_exit(trans, &iter);
132         return ret;
133 }
134
135 static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
136                           struct bch_inode_unpacked *inode,
137                           u32 *snapshot)
138 {
139         struct btree_iter iter;
140         struct bkey_s_c k;
141         int ret;
142
143         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
144                                SPOS(0, inode_nr, *snapshot), 0);
145         ret = bkey_err(k);
146         if (ret)
147                 goto err;
148
149         ret = bkey_is_inode(k.k)
150                 ? bch2_inode_unpack(k, inode)
151                 : -BCH_ERR_ENOENT_inode;
152         if (!ret)
153                 *snapshot = iter.pos.snapshot;
154 err:
155         bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
156         bch2_trans_iter_exit(trans, &iter);
157         return ret;
158 }
159
160 static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
161                         struct bch_inode_unpacked *inode,
162                         u32 *snapshot)
163 {
164         return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot));
165 }
166
167 static int __lookup_dirent(struct btree_trans *trans,
168                            struct bch_hash_info hash_info,
169                            subvol_inum dir, struct qstr *name,
170                            u64 *target, unsigned *type)
171 {
172         struct btree_iter iter;
173         struct bkey_s_c_dirent d;
174         int ret;
175
176         ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
177                                &hash_info, dir, name, 0);
178         if (ret)
179                 return ret;
180
181         d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
182         *target = le64_to_cpu(d.v->d_inum);
183         *type = d.v->d_type;
184         bch2_trans_iter_exit(trans, &iter);
185         return 0;
186 }
187
188 static int __write_inode(struct btree_trans *trans,
189                          struct bch_inode_unpacked *inode,
190                          u32 snapshot)
191 {
192         struct bkey_inode_buf *inode_p =
193                 bch2_trans_kmalloc(trans, sizeof(*inode_p));
194
195         if (IS_ERR(inode_p))
196                 return PTR_ERR(inode_p);
197
198         bch2_inode_pack(inode_p, inode);
199         inode_p->inode.k.p.snapshot = snapshot;
200
201         return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
202                                 &inode_p->inode.k_i,
203                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
204 }
205
206 static int fsck_write_inode(struct btree_trans *trans,
207                             struct bch_inode_unpacked *inode,
208                             u32 snapshot)
209 {
210         int ret = commit_do(trans, NULL, NULL,
211                                   BTREE_INSERT_NOFAIL|
212                                   BTREE_INSERT_LAZY_RW,
213                                   __write_inode(trans, inode, snapshot));
214         if (ret)
215                 bch_err_fn(trans->c, ret);
216         return ret;
217 }
218
219 static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
220 {
221         struct bch_fs *c = trans->c;
222         struct btree_iter iter;
223         struct bch_inode_unpacked dir_inode;
224         struct bch_hash_info dir_hash_info;
225         int ret;
226
227         ret = lookup_first_inode(trans, pos.inode, &dir_inode);
228         if (ret)
229                 goto err;
230
231         dir_hash_info = bch2_hash_info_init(c, &dir_inode);
232
233         bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
234
235         ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
236                                   &dir_hash_info, &iter,
237                                   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
238         bch2_trans_iter_exit(trans, &iter);
239 err:
240         bch_err_fn(c, ret);
241         return ret;
242 }
243
244 /* Get lost+found, create if it doesn't exist: */
245 static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
246                             struct bch_inode_unpacked *lostfound)
247 {
248         struct bch_fs *c = trans->c;
249         struct bch_inode_unpacked root;
250         struct bch_hash_info root_hash_info;
251         struct qstr lostfound_str = QSTR("lost+found");
252         subvol_inum root_inum = { .subvol = subvol };
253         u64 inum = 0;
254         unsigned d_type = 0;
255         u32 snapshot;
256         int ret;
257
258         ret = __subvol_lookup(trans, subvol, &snapshot, &root_inum.inum);
259         if (ret)
260                 return ret;
261
262         ret = __lookup_inode(trans, root_inum.inum, &root, &snapshot);
263         if (ret)
264                 return ret;
265
266         root_hash_info = bch2_hash_info_init(c, &root);
267
268         ret = __lookup_dirent(trans, root_hash_info, root_inum,
269                             &lostfound_str, &inum, &d_type);
270         if (bch2_err_matches(ret, ENOENT)) {
271                 bch_notice(c, "creating lost+found");
272                 goto create_lostfound;
273         }
274
275         bch_err_fn(c, ret);
276         if (ret)
277                 return ret;
278
279         if (d_type != DT_DIR) {
280                 bch_err(c, "error looking up lost+found: not a directory");
281                 return -BCH_ERR_ENOENT_not_directory;
282         }
283
284         /*
285          * The bch2_check_dirents pass has already run, so dangling dirents
286          * shouldn't exist here:
287          */
288         return __lookup_inode(trans, inum, lostfound, &snapshot);
289
290 create_lostfound:
291         bch2_inode_init_early(c, lostfound);
292
293         ret = bch2_create_trans(trans, root_inum, &root,
294                                 lostfound, &lostfound_str,
295                                 0, 0, S_IFDIR|0700, 0, NULL, NULL,
296                                 (subvol_inum) { }, 0);
297         bch_err_msg(c, ret, "creating lost+found");
298         return ret;
299 }
300
301 static int __reattach_inode(struct btree_trans *trans,
302                           struct bch_inode_unpacked *inode,
303                           u32 inode_snapshot)
304 {
305         struct bch_hash_info dir_hash;
306         struct bch_inode_unpacked lostfound;
307         char name_buf[20];
308         struct qstr name;
309         u64 dir_offset = 0;
310         u32 subvol;
311         int ret;
312
313         ret = __snapshot_lookup_subvol(trans, inode_snapshot, &subvol);
314         if (ret)
315                 return ret;
316
317         ret = lookup_lostfound(trans, subvol, &lostfound);
318         if (ret)
319                 return ret;
320
321         if (S_ISDIR(inode->bi_mode)) {
322                 lostfound.bi_nlink++;
323
324                 ret = __write_inode(trans, &lostfound, U32_MAX);
325                 if (ret)
326                         return ret;
327         }
328
329         dir_hash = bch2_hash_info_init(trans->c, &lostfound);
330
331         snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
332         name = (struct qstr) QSTR(name_buf);
333
334         ret = bch2_dirent_create(trans,
335                                  (subvol_inum) {
336                                         .subvol = subvol,
337                                         .inum = lostfound.bi_inum,
338                                  },
339                                  &dir_hash,
340                                  inode_d_type(inode),
341                                  &name, inode->bi_inum, &dir_offset,
342                                  BCH_HASH_SET_MUST_CREATE);
343         if (ret)
344                 return ret;
345
346         inode->bi_dir           = lostfound.bi_inum;
347         inode->bi_dir_offset    = dir_offset;
348
349         return __write_inode(trans, inode, inode_snapshot);
350 }
351
352 static int reattach_inode(struct btree_trans *trans,
353                           struct bch_inode_unpacked *inode,
354                           u32 inode_snapshot)
355 {
356         int ret = commit_do(trans, NULL, NULL,
357                                   BTREE_INSERT_LAZY_RW|
358                                   BTREE_INSERT_NOFAIL,
359                         __reattach_inode(trans, inode, inode_snapshot));
360         bch_err_msg(trans->c, ret, "reattaching inode %llu", inode->bi_inum);
361         return ret;
362 }
363
364 static int remove_backpointer(struct btree_trans *trans,
365                               struct bch_inode_unpacked *inode)
366 {
367         struct btree_iter iter;
368         struct bkey_s_c_dirent d;
369         int ret;
370
371         d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
372                                      POS(inode->bi_dir, inode->bi_dir_offset), 0,
373                                      dirent);
374         ret =   bkey_err(d) ?:
375                 __remove_dirent(trans, d.k->p);
376         bch2_trans_iter_exit(trans, &iter);
377         return ret;
378 }
379
380 struct snapshots_seen_entry {
381         u32                             id;
382         u32                             equiv;
383 };
384
385 struct snapshots_seen {
386         struct bpos                     pos;
387         DARRAY(struct snapshots_seen_entry) ids;
388 };
389
390 static inline void snapshots_seen_exit(struct snapshots_seen *s)
391 {
392         darray_exit(&s->ids);
393 }
394
395 static inline void snapshots_seen_init(struct snapshots_seen *s)
396 {
397         memset(s, 0, sizeof(*s));
398 }
399
400 static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
401 {
402         struct snapshots_seen_entry *i, n = {
403                 .id     = id,
404                 .equiv  = bch2_snapshot_equiv(c, id),
405         };
406         int ret = 0;
407
408         darray_for_each(s->ids, i) {
409                 if (i->id == id)
410                         return 0;
411                 if (i->id > id)
412                         break;
413         }
414
415         ret = darray_insert_item(&s->ids, i - s->ids.data, n);
416         if (ret)
417                 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
418                         s->ids.size);
419         return ret;
420 }
421
422 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
423                                  enum btree_id btree_id, struct bpos pos)
424 {
425         struct snapshots_seen_entry *i, n = {
426                 .id     = pos.snapshot,
427                 .equiv  = bch2_snapshot_equiv(c, pos.snapshot),
428         };
429         int ret = 0;
430
431         if (!bkey_eq(s->pos, pos))
432                 s->ids.nr = 0;
433
434         s->pos = pos;
435         s->pos.snapshot = n.equiv;
436
437         darray_for_each(s->ids, i) {
438                 if (i->id == n.id)
439                         return 0;
440
441                 /*
442                  * We don't currently track rigorously whether snapshot cleanup
443                  * needs to be run, so it shouldn't be a fsck error yet:
444                  */
445                 if (i->equiv == n.equiv) {
446                         bch_err(c, "snapshot deletion did not finish:\n"
447                                 "  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
448                                 bch2_btree_id_str(btree_id),
449                                 pos.inode, pos.offset,
450                                 i->id, n.id, n.equiv);
451                         set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
452                         return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
453                 }
454         }
455
456         ret = darray_push(&s->ids, n);
457         if (ret)
458                 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
459                         s->ids.size);
460         return ret;
461 }
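
/*
 * Invariants that can be read off the code above: s->ids is kept sorted by
 * snapshot id, it is cleared whenever the btree position changes, and
 * s->pos.snapshot is set to the equivalence class of the most recently seen
 * key - so key_visible_in_snapshot() below may assume the last entry in
 * s->ids corresponds to s->pos.snapshot.
 */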
462
463 /**
464  * key_visible_in_snapshot - returns true if @id is a descendant of @ancestor,
465  * and @ancestor hasn't been overwritten in @seen
466  *
467  * @c:          filesystem handle
468  * @seen:       list of snapshot ids already seen at current position
469  * @id:         descendant snapshot id
470  * @ancestor:   ancestor snapshot id
471  *
472  * Returns:     whether key in @ancestor snapshot is visible in @id snapshot
473  */
474 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
475                                     u32 id, u32 ancestor)
476 {
477         ssize_t i;
478
479         EBUG_ON(id > ancestor);
480         EBUG_ON(!bch2_snapshot_is_equiv(c, id));
481         EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
482
483         /* @ancestor should be the snapshot most recently added to @seen */
484         EBUG_ON(ancestor != seen->pos.snapshot);
485         EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
486
487         if (id == ancestor)
488                 return true;
489
490         if (!bch2_snapshot_is_ancestor(c, id, ancestor))
491                 return false;
492
493         /*
494          * We know that @id is a descendant of @ancestor, we're checking if
495          * we've seen a key that overwrote @ancestor - i.e. also a descendant of
496          * @ancestor and with @id as a descendant.
497          *
498          * But we already know that we're scanning IDs between @id and @ancestor
499          * numerically, since snapshot ID lists are kept sorted, so if we find
500          * an id that's an ancestor of @id we're done:
501          */
502
503         for (i = seen->ids.nr - 2;
504              i >= 0 && seen->ids.data[i].equiv >= id;
505              --i)
506                 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
507                         return false;
508
509         return true;
510 }
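
/*
 * Worked example with hypothetical snapshot ids (ancestors have larger ids
 * than descendants, per the EBUG_ON() above): let 4 be an ancestor with
 * children 3 and 2, and let 1 be a child of 3. If @seen holds keys in
 * snapshots [2, 4] at the current position, key_visible_in_snapshot(c, seen,
 * 1, 4) scans entry 2, finds that 2 is not an ancestor of 1, and returns
 * true: the key in snapshot 4 is still visible from snapshot 1. If 1 were
 * instead a child of 2, the key in snapshot 2 would have overwritten it and
 * the function would return false.
 */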
511
512 /**
513  * ref_visible - given a key with snapshot id @src that points to a key with
514  * snapshot id @dst, test whether there is some snapshot in which @dst is
515  * visible.
516  *
517  * @c:          filesystem handle
518  * @s:          list of snapshot IDs already seen at @src
519  * @src:        snapshot ID of src key
520  * @dst:        snapshot ID of dst key
521  * Returns:     true if there is some snapshot in which @dst is visible
522  *
523  * Assumes we're visiting @src keys in natural key order
524  */
525 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
526                         u32 src, u32 dst)
527 {
528         return dst <= src
529                 ? key_visible_in_snapshot(c, s, dst, src)
530                 : bch2_snapshot_is_ancestor(c, src, dst);
531 }
532
533 static int ref_visible2(struct bch_fs *c,
534                         u32 src, struct snapshots_seen *src_seen,
535                         u32 dst, struct snapshots_seen *dst_seen)
536 {
537         src = bch2_snapshot_equiv(c, src);
538         dst = bch2_snapshot_equiv(c, dst);
539
540         if (dst > src) {
541                 swap(dst, src);
542                 swap(dst_seen, src_seen);
543         }
544         return key_visible_in_snapshot(c, src_seen, dst, src);
545 }
546
547 #define for_each_visible_inode(_c, _s, _w, _snapshot, _i)                               \
548         for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&        \
549              (_i)->snapshot <= (_snapshot); _i++)                                       \
550                 if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
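
/*
 * Usage sketch, with hypothetical variables: given an inode_walker @_w
 * populated for the current inode number and a snapshots_seen list @_s, this
 * visits every cached inode version whose snapshot is visible from
 * @_snapshot:
 *
 *	struct inode_walker_entry *i;
 *	for_each_visible_inode(c, s, w, equiv.snapshot, i)
 *		i->count += k.k->size;
 */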
551
552 struct inode_walker_entry {
553         struct bch_inode_unpacked inode;
554         u32                     snapshot;
555         bool                    seen_this_pos;
556         u64                     count;
557 };
558
559 struct inode_walker {
560         bool                            first_this_inode;
561         bool                            recalculate_sums;
562         struct bpos                     last_pos;
563
564         DARRAY(struct inode_walker_entry) inodes;
565 };
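
/*
 * Rough picture, inferred from the helpers below: an inode_walker caches
 * every snapshot version of the inode number currently being walked.
 * get_inodes_all_snapshots() refills w->inodes when the walk crosses into a
 * new inode number, walk_inode() picks the entry matching a key's snapshot
 * (inserting a copy at the key's snapshot when the inode only exists in an
 * ancestor snapshot), and the per-entry count field accumulates sectors or
 * subdirectory counts for later comparison in check_i_sectors() and
 * check_subdir_count().
 */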
566
567 static void inode_walker_exit(struct inode_walker *w)
568 {
569         darray_exit(&w->inodes);
570 }
571
572 static struct inode_walker inode_walker_init(void)
573 {
574         return (struct inode_walker) { 0, };
575 }
576
577 static int add_inode(struct bch_fs *c, struct inode_walker *w,
578                      struct bkey_s_c inode)
579 {
580         struct bch_inode_unpacked u;
581
582         BUG_ON(bch2_inode_unpack(inode, &u));
583
584         return darray_push(&w->inodes, ((struct inode_walker_entry) {
585                 .inode          = u,
586                 .snapshot       = bch2_snapshot_equiv(c, inode.k->p.snapshot),
587         }));
588 }
589
590 static int get_inodes_all_snapshots(struct btree_trans *trans,
591                                     struct inode_walker *w, u64 inum)
592 {
593         struct bch_fs *c = trans->c;
594         struct btree_iter iter;
595         struct bkey_s_c k;
596         u32 restart_count = trans->restart_count;
597         int ret;
598
599         w->recalculate_sums = false;
600         w->inodes.nr = 0;
601
602         for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
603                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
604                 if (k.k->p.offset != inum)
605                         break;
606
607                 if (bkey_is_inode(k.k))
608                         add_inode(c, w, k);
609         }
610         bch2_trans_iter_exit(trans, &iter);
611
612         if (ret)
613                 return ret;
614
615         w->first_this_inode = true;
616
617         return trans_was_restarted(trans, restart_count);
618 }
619
620 static struct inode_walker_entry *
621 lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,
622                           u32 snapshot, bool is_whiteout)
623 {
624         struct inode_walker_entry *i;
625
626         snapshot = bch2_snapshot_equiv(c, snapshot);
627
628         darray_for_each(w->inodes, i)
629                 if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
630                         goto found;
631
632         return NULL;
633 found:
634         BUG_ON(snapshot > i->snapshot);
635
636         if (snapshot != i->snapshot && !is_whiteout) {
637                 struct inode_walker_entry new = *i;
638                 size_t pos;
639                 int ret;
640
641                 new.snapshot = snapshot;
642                 new.count = 0;
643
644                 bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
645                          w->last_pos.inode, snapshot, i->snapshot);
646
647                 while (i > w->inodes.data && i[-1].snapshot > snapshot)
648                         --i;
649
650                 pos = i - w->inodes.data;
651                 ret = darray_insert_item(&w->inodes, pos, new);
652                 if (ret)
653                         return ERR_PTR(ret);
654
655                 i = w->inodes.data + pos;
656         }
657
658         return i;
659 }
660
661 static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
662                                              struct inode_walker *w, struct bpos pos,
663                                              bool is_whiteout)
664 {
665         if (w->last_pos.inode != pos.inode) {
666                 int ret = get_inodes_all_snapshots(trans, w, pos.inode);
667                 if (ret)
668                         return ERR_PTR(ret);
669         } else if (bkey_cmp(w->last_pos, pos)) {
670                 struct inode_walker_entry *i;
671
672                 darray_for_each(w->inodes, i)
673                         i->seen_this_pos = false;
674
675         }
676
677         w->last_pos = pos;
678
679         return lookup_inode_for_snapshot(trans->c, w, pos.snapshot, is_whiteout);
680 }
681
682 static int __get_visible_inodes(struct btree_trans *trans,
683                                 struct inode_walker *w,
684                                 struct snapshots_seen *s,
685                                 u64 inum)
686 {
687         struct bch_fs *c = trans->c;
688         struct btree_iter iter;
689         struct bkey_s_c k;
690         int ret;
691
692         w->inodes.nr = 0;
693
694         for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
695                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
696                 u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
697
698                 if (k.k->p.offset != inum)
699                         break;
700
701                 if (!ref_visible(c, s, s->pos.snapshot, equiv))
702                         continue;
703
704                 if (bkey_is_inode(k.k))
705                         add_inode(c, w, k);
706
707                 if (equiv >= s->pos.snapshot)
708                         break;
709         }
710         bch2_trans_iter_exit(trans, &iter);
711
712         return ret;
713 }
714
715 static int check_key_has_snapshot(struct btree_trans *trans,
716                                   struct btree_iter *iter,
717                                   struct bkey_s_c k)
718 {
719         struct bch_fs *c = trans->c;
720         struct printbuf buf = PRINTBUF;
721         int ret = 0;
722
723         if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
724                         "key in missing snapshot: %s",
725                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
726                 ret = bch2_btree_delete_at(trans, iter,
727                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
728 fsck_err:
729         printbuf_exit(&buf);
730         return ret;
731 }
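
/*
 * Return convention, as relied on by the callers below: > 0 means the key
 * referenced a missing snapshot and was deleted (skip it), 0 means the key is
 * fine, < 0 is an error.
 */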
732
733 static int hash_redo_key(struct btree_trans *trans,
734                          const struct bch_hash_desc desc,
735                          struct bch_hash_info *hash_info,
736                          struct btree_iter *k_iter, struct bkey_s_c k)
737 {
738         struct bkey_i *delete;
739         struct bkey_i *tmp;
740
741         delete = bch2_trans_kmalloc(trans, sizeof(*delete));
742         if (IS_ERR(delete))
743                 return PTR_ERR(delete);
744
745         tmp = bch2_bkey_make_mut_noupdate(trans, k);
746         if (IS_ERR(tmp))
747                 return PTR_ERR(tmp);
748
749         bkey_init(&delete->k);
750         delete->k.p = k_iter->pos;
751         return  bch2_btree_iter_traverse(k_iter) ?:
752                 bch2_trans_update(trans, k_iter, delete, 0) ?:
753                 bch2_hash_set_snapshot(trans, desc, hash_info,
754                                        (subvol_inum) { 0, k.k->p.inode },
755                                        k.k->p.snapshot, tmp,
756                                        BCH_HASH_SET_MUST_CREATE,
757                                        BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
758                 bch2_trans_commit(trans, NULL, NULL,
759                                   BTREE_INSERT_NOFAIL|
760                                   BTREE_INSERT_LAZY_RW);
761 }
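
/*
 * In outline: the key was found at an offset that doesn't match its hash, so
 * it is deleted at its current position and re-inserted through
 * bch2_hash_set_snapshot() so it lands in the correct slot, all committed in
 * one transaction with BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW.
 */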
762
763 static int hash_check_key(struct btree_trans *trans,
764                           const struct bch_hash_desc desc,
765                           struct bch_hash_info *hash_info,
766                           struct btree_iter *k_iter, struct bkey_s_c hash_k)
767 {
768         struct bch_fs *c = trans->c;
769         struct btree_iter iter = { NULL };
770         struct printbuf buf = PRINTBUF;
771         struct bkey_s_c k;
772         u64 hash;
773         int ret = 0;
774
775         if (hash_k.k->type != desc.key_type)
776                 return 0;
777
778         hash = desc.hash_bkey(hash_info, hash_k);
779
780         if (likely(hash == hash_k.k->p.offset))
781                 return 0;
782
783         if (hash_k.k->p.offset < hash)
784                 goto bad_hash;
785
786         for_each_btree_key_norestart(trans, iter, desc.btree_id,
787                                      SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
788                                      BTREE_ITER_SLOTS, k, ret) {
789                 if (bkey_eq(k.k->p, hash_k.k->p))
790                         break;
791
792                 if (fsck_err_on(k.k->type == desc.key_type &&
793                                 !desc.cmp_bkey(k, hash_k), c,
794                                 "duplicate hash table keys:\n%s",
795                                 (printbuf_reset(&buf),
796                                  bch2_bkey_val_to_text(&buf, c, hash_k),
797                                  buf.buf))) {
798                         ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
799                         break;
800                 }
801
802                 if (bkey_deleted(k.k)) {
803                         bch2_trans_iter_exit(trans, &iter);
804                         goto bad_hash;
805                 }
806         }
807 out:
808         bch2_trans_iter_exit(trans, &iter);
809         printbuf_exit(&buf);
810         return ret;
811 bad_hash:
812         if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
813                      bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
814                      (printbuf_reset(&buf),
815                       bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
816                 ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
817                 bch_err_fn(c, ret);
818                 if (ret)
819                         return ret;
820                 ret = -BCH_ERR_transaction_restart_nested;
821         }
822 fsck_err:
823         goto out;
824 }
825
826 static int check_inode(struct btree_trans *trans,
827                        struct btree_iter *iter,
828                        struct bkey_s_c k,
829                        struct bch_inode_unpacked *prev,
830                        struct snapshots_seen *s,
831                        bool full)
832 {
833         struct bch_fs *c = trans->c;
834         struct bch_inode_unpacked u;
835         bool do_update = false;
836         int ret;
837
838         ret = check_key_has_snapshot(trans, iter, k);
839         if (ret < 0)
840                 goto err;
841         if (ret)
842                 return 0;
843
844         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
845         if (ret)
846                 goto err;
847
848         if (!bkey_is_inode(k.k))
849                 return 0;
850
851         BUG_ON(bch2_inode_unpack(k, &u));
852
853         if (!full &&
854             !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
855                             BCH_INODE_I_SECTORS_DIRTY|
856                             BCH_INODE_UNLINKED)))
857                 return 0;
858
859         if (prev->bi_inum != u.bi_inum)
860                 *prev = u;
861
862         if (fsck_err_on(prev->bi_hash_seed      != u.bi_hash_seed ||
863                         inode_d_type(prev)      != inode_d_type(&u), c,
864                         "inodes in different snapshots don't match")) {
865                 bch_err(c, "repair not implemented yet");
866                 return -EINVAL;
867         }
868
869         if ((u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED)) &&
870             bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
871                 struct bpos new_min_pos;
872
873                 ret = bch2_propagate_key_to_snapshot_leaves(trans, iter->btree_id, k, &new_min_pos);
874                 if (ret)
875                         goto err;
876
877                 u.bi_flags &= ~(BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED);
878
879                 ret = __write_inode(trans, &u, iter->pos.snapshot);
880                 bch_err_msg(c, ret, "in fsck updating inode");
881                 if (ret)
882                         return ret;
883
884                 if (!bpos_eq(new_min_pos, POS_MIN))
885                         bch2_btree_iter_set_pos(iter, bpos_predecessor(new_min_pos));
886                 return 0;
887         }
888
889         if (u.bi_flags & BCH_INODE_UNLINKED &&
890             (!c->sb.clean ||
891              fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
892                       u.bi_inum))) {
893                 bch2_trans_unlock(trans);
894                 bch2_fs_lazy_rw(c);
895
896                 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
897                 bch_err_msg(c, ret, "in fsck deleting inode");
898                 return ret;
899         }
900
901         if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
902             (!c->sb.clean ||
903              fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
904                       u.bi_inum))) {
905                 bch_verbose(c, "truncating inode %llu", u.bi_inum);
906
907                 bch2_trans_unlock(trans);
908                 bch2_fs_lazy_rw(c);
909
910                 /*
911                  * XXX: need to truncate partial blocks too here - or ideally
912                  * just switch units to bytes and that issue goes away
913                  */
914                 ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
915                                 SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
916                                      iter->pos.snapshot),
917                                 POS(u.bi_inum, U64_MAX),
918                                 0, NULL);
919                 bch_err_msg(c, ret, "in fsck truncating inode");
920                 if (ret)
921                         return ret;
922
923                 /*
924                  * We truncated without our normal sector accounting hook, so just
925                  * make sure we recalculate it:
926                  */
927                 u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
928
929                 u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
930                 do_update = true;
931         }
932
933         if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
934             (!c->sb.clean ||
935              fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
936                       u.bi_inum))) {
937                 s64 sectors;
938
939                 bch_verbose(c, "recounting sectors for inode %llu",
940                             u.bi_inum);
941
942                 sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
943                 if (sectors < 0) {
944                         bch_err_msg(c, sectors, "in fsck recounting inode sectors");
945                         return sectors;
946                 }
947
948                 u.bi_sectors = sectors;
949                 u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
950                 do_update = true;
951         }
952
953         if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
954                 u.bi_dir = 0;
955                 u.bi_dir_offset = 0;
956                 u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
957                 do_update = true;
958         }
959
960         if (do_update) {
961                 ret = __write_inode(trans, &u, iter->pos.snapshot);
962                 bch_err_msg(c, ret, "in fsck updating inode");
963                 if (ret)
964                         return ret;
965         }
966 err:
967 fsck_err:
968         bch_err_fn(c, ret);
969         return ret;
970 }
971
972 noinline_for_stack
973 int bch2_check_inodes(struct bch_fs *c)
974 {
975         bool full = c->opts.fsck;
976         struct btree_trans *trans = bch2_trans_get(c);
977         struct btree_iter iter;
978         struct bch_inode_unpacked prev = { 0 };
979         struct snapshots_seen s;
980         struct bkey_s_c k;
981         int ret;
982
983         snapshots_seen_init(&s);
984
985         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
986                         POS_MIN,
987                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
988                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
989                 check_inode(trans, &iter, k, &prev, &s, full));
990
991         snapshots_seen_exit(&s);
992         bch2_trans_put(trans);
993         bch_err_fn(c, ret);
994         return ret;
995 }
996
997 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
998                                                 struct btree_iter *iter,
999                                                 struct bpos pos)
1000 {
1001         return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
1002 }
1003
1004 static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
1005                                    struct bkey_s_c_dirent d)
1006 {
1007         return  inode->bi_dir           == d.k->p.inode &&
1008                 inode->bi_dir_offset    == d.k->p.offset;
1009 }
1010
1011 static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
1012                                    struct bch_inode_unpacked *inode)
1013 {
1014         return d.v->d_type == DT_SUBVOL
1015                 ? le32_to_cpu(d.v->d_child_subvol)      == inode->bi_subvol
1016                 : le64_to_cpu(d.v->d_inum)              == inode->bi_inum;
1017 }
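
/*
 * As these two helpers suggest, the directory structure is doubly linked: an
 * inode records the dirent pointing to it in bi_dir/bi_dir_offset, and the
 * dirent records its target inode number (or, for DT_SUBVOL dirents, the
 * child subvolume). inode_points_to_dirent() checks the inode-to-dirent
 * direction, dirent_points_to_inode() the reverse.
 */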
1018
1019 static int inode_backpointer_exists(struct btree_trans *trans,
1020                                     struct bch_inode_unpacked *inode,
1021                                     u32 snapshot)
1022 {
1023         struct btree_iter iter;
1024         struct bkey_s_c_dirent d;
1025         int ret;
1026
1027         d = dirent_get_by_pos(trans, &iter,
1028                         SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot));
1029         ret = bkey_err(d);
1030         if (ret)
1031                 return bch2_err_matches(ret, ENOENT) ? 0 : ret;
1032
1033         ret = dirent_points_to_inode(d, inode);
1034         bch2_trans_iter_exit(trans, &iter);
1035         return ret;
1036 }
1037
1038 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1039 {
1040         struct bch_fs *c = trans->c;
1041         struct inode_walker_entry *i;
1042         u32 restart_count = trans->restart_count;
1043         int ret = 0;
1044         s64 count2;
1045
1046         darray_for_each(w->inodes, i) {
1047                 if (i->inode.bi_sectors == i->count)
1048                         continue;
1049
1050                 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
1051
1052                 if (w->recalculate_sums)
1053                         i->count = count2;
1054
1055                 if (i->count != count2) {
1056                         bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
1057                                 w->last_pos.inode, i->snapshot, i->count, count2);
1058                         return -BCH_ERR_internal_fsck_err;
1059                 }
1060
1061                 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
1062                             "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1063                             w->last_pos.inode, i->snapshot,
1064                             i->inode.bi_sectors, i->count)) {
1065                         i->inode.bi_sectors = i->count;
1066                         ret = fsck_write_inode(trans, &i->inode, i->snapshot);
1067                         if (ret)
1068                                 break;
1069                 }
1070         }
1071 fsck_err:
1072         bch_err_fn(c, ret);
1073         return ret ?: trans_was_restarted(trans, restart_count);
1074 }
1075
1076 struct extent_end {
1077         u32                     snapshot;
1078         u64                     offset;
1079         struct snapshots_seen   seen;
1080 };
1081
1082 struct extent_ends {
1083         struct bpos                     last_pos;
1084         DARRAY(struct extent_end)       e;
1085 };
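
/*
 * Sketch of how this is used below: for the inode currently being walked,
 * extent_ends->e holds one entry per snapshot (sorted by snapshot id) with
 * the end offset of the last extent seen in that snapshot, plus a copy of the
 * snapshots_seen state at that point. check_overlapping_extents() compares
 * each new extent's start offset against these ends to detect extents that
 * overlap across visible snapshots; extent_ends_at() then records the new
 * extent's end.
 */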
1086
1087 static void extent_ends_reset(struct extent_ends *extent_ends)
1088 {
1089         struct extent_end *i;
1090
1091         darray_for_each(extent_ends->e, i)
1092                 snapshots_seen_exit(&i->seen);
1093
1094         extent_ends->e.nr = 0;
1095 }
1096
1097 static void extent_ends_exit(struct extent_ends *extent_ends)
1098 {
1099         extent_ends_reset(extent_ends);
1100         darray_exit(&extent_ends->e);
1101 }
1102
1103 static void extent_ends_init(struct extent_ends *extent_ends)
1104 {
1105         memset(extent_ends, 0, sizeof(*extent_ends));
1106 }
1107
1108 static int extent_ends_at(struct bch_fs *c,
1109                           struct extent_ends *extent_ends,
1110                           struct snapshots_seen *seen,
1111                           struct bkey_s_c k)
1112 {
1113         struct extent_end *i, n = (struct extent_end) {
1114                 .offset         = k.k->p.offset,
1115                 .snapshot       = k.k->p.snapshot,
1116                 .seen           = *seen,
1117         };
1118
1119         n.seen.ids.data = kmemdup(seen->ids.data,
1120                               sizeof(seen->ids.data[0]) * seen->ids.size,
1121                               GFP_KERNEL);
1122         if (!n.seen.ids.data)
1123                 return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
1124
1125         darray_for_each(extent_ends->e, i) {
1126                 if (i->snapshot == k.k->p.snapshot) {
1127                         snapshots_seen_exit(&i->seen);
1128                         *i = n;
1129                         return 0;
1130                 }
1131
1132                 if (i->snapshot >= k.k->p.snapshot)
1133                         break;
1134         }
1135
1136         return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
1137 }
1138
1139 static int overlapping_extents_found(struct btree_trans *trans,
1140                                      enum btree_id btree,
1141                                      struct bpos pos1, struct snapshots_seen *pos1_seen,
1142                                      struct bkey pos2,
1143                                      bool *fixed,
1144                                      struct extent_end *extent_end)
1145 {
1146         struct bch_fs *c = trans->c;
1147         struct printbuf buf = PRINTBUF;
1148         struct btree_iter iter1, iter2 = { NULL };
1149         struct bkey_s_c k1, k2;
1150         int ret;
1151
1152         BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1153
1154         bch2_trans_iter_init(trans, &iter1, btree, pos1,
1155                              BTREE_ITER_ALL_SNAPSHOTS|
1156                              BTREE_ITER_NOT_EXTENTS);
1157         k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
1158         ret = bkey_err(k1);
1159         if (ret)
1160                 goto err;
1161
1162         prt_str(&buf, "\n  ");
1163         bch2_bkey_val_to_text(&buf, c, k1);
1164
1165         if (!bpos_eq(pos1, k1.k->p)) {
1166                 prt_str(&buf, "\n  wanted\n  ");
1167                 bch2_bpos_to_text(&buf, pos1);
1168                 prt_str(&buf, "\n  ");
1169                 bch2_bkey_to_text(&buf, &pos2);
1170
1171                 bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
1172                         __func__, buf.buf);
1173                 ret = -BCH_ERR_internal_fsck_err;
1174                 goto err;
1175         }
1176
1177         bch2_trans_copy_iter(&iter2, &iter1);
1178
1179         while (1) {
1180                 bch2_btree_iter_advance(&iter2);
1181
1182                 k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
1183                 ret = bkey_err(k2);
1184                 if (ret)
1185                         goto err;
1186
1187                 if (bpos_ge(k2.k->p, pos2.p))
1188                         break;
1189         }
1190
1191         prt_str(&buf, "\n  ");
1192         bch2_bkey_val_to_text(&buf, c, k2);
1193
1194         if (bpos_gt(k2.k->p, pos2.p) ||
1195             pos2.size != k2.k->size) {
1196                 bch_err(c, "%s: error finding second overlapping extent when repairing%s",
1197                         __func__, buf.buf);
1198                 ret = -BCH_ERR_internal_fsck_err;
1199                 goto err;
1200         }
1201
1202         prt_printf(&buf, "\n  overwriting %s extent",
1203                    pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1204
1205         if (fsck_err(c, "overlapping extents%s", buf.buf)) {
1206                 struct btree_iter *old_iter = &iter1;
1207                 struct disk_reservation res = { 0 };
1208
1209                 if (pos1.snapshot < pos2.p.snapshot) {
1210                         old_iter = &iter2;
1211                         swap(k1, k2);
1212                 }
1213
1214                 trans->extra_journal_res += bch2_bkey_sectors_compressed(k2);
1215
1216                 ret =   bch2_trans_update_extent_overwrite(trans, old_iter,
1217                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
1218                                 k1, k2) ?:
1219                         bch2_trans_commit(trans, &res, NULL,
1220                                 BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL);
1221                 bch2_disk_reservation_put(c, &res);
1222
1223                 if (ret)
1224                         goto err;
1225
1226                 *fixed = true;
1227
1228                 if (pos1.snapshot == pos2.p.snapshot) {
1229                         /*
1230                          * We overwrote the first extent, and did the overwrite
1231                          * in the same snapshot:
1232                          */
1233                         extent_end->offset = bkey_start_offset(&pos2);
1234                 } else if (pos1.snapshot > pos2.p.snapshot) {
1235                         /*
1236                          * We overwrote the first extent in pos2's snapshot:
1237                          */
1238                         ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1239                 } else {
1240                         /*
1241                          * We overwrote the second extent - restart
1242                          * check_extent() from the top:
1243                          */
1244                         ret = -BCH_ERR_transaction_restart_nested;
1245                 }
1246         }
1247 fsck_err:
1248 err:
1249         bch2_trans_iter_exit(trans, &iter2);
1250         bch2_trans_iter_exit(trans, &iter1);
1251         printbuf_exit(&buf);
1252         return ret;
1253 }
1254
1255 static int check_overlapping_extents(struct btree_trans *trans,
1256                               struct snapshots_seen *seen,
1257                               struct extent_ends *extent_ends,
1258                               struct bkey_s_c k,
1259                               u32 equiv,
1260                               struct btree_iter *iter,
1261                               bool *fixed)
1262 {
1263         struct bch_fs *c = trans->c;
1264         struct extent_end *i;
1265         int ret = 0;
1266
1267         /* transaction restart, running again */
1268         if (bpos_eq(extent_ends->last_pos, k.k->p))
1269                 return 0;
1270
1271         if (extent_ends->last_pos.inode != k.k->p.inode)
1272                 extent_ends_reset(extent_ends);
1273
1274         darray_for_each(extent_ends->e, i) {
1275                 if (i->offset <= bkey_start_offset(k.k))
1276                         continue;
1277
1278                 if (!ref_visible2(c,
1279                                   k.k->p.snapshot, seen,
1280                                   i->snapshot, &i->seen))
1281                         continue;
1282
1283                 ret = overlapping_extents_found(trans, iter->btree_id,
1284                                                 SPOS(iter->pos.inode,
1285                                                      i->offset,
1286                                                      i->snapshot),
1287                                                 &i->seen,
1288                                                 *k.k, fixed, i);
1289                 if (ret)
1290                         goto err;
1291         }
1292
1293         ret = extent_ends_at(c, extent_ends, seen, k);
1294         if (ret)
1295                 goto err;
1296
1297         extent_ends->last_pos = k.k->p;
1298 err:
1299         return ret;
1300 }
1301
1302 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
1303                         struct bkey_s_c k,
1304                         struct inode_walker *inode,
1305                         struct snapshots_seen *s,
1306                         struct extent_ends *extent_ends)
1307 {
1308         struct bch_fs *c = trans->c;
1309         struct inode_walker_entry *i;
1310         struct printbuf buf = PRINTBUF;
1311         struct bpos equiv = k.k->p;
1312         int ret = 0;
1313
1314         equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1315
1316         ret = check_key_has_snapshot(trans, iter, k);
1317         if (ret) {
1318                 ret = ret < 0 ? ret : 0;
1319                 goto out;
1320         }
1321
1322         if (inode->last_pos.inode != k.k->p.inode) {
1323                 ret = check_i_sectors(trans, inode);
1324                 if (ret)
1325                         goto err;
1326         }
1327
1328         i = walk_inode(trans, inode, equiv, k.k->type == KEY_TYPE_whiteout);
1329         ret = PTR_ERR_OR_ZERO(i);
1330         if (ret)
1331                 goto err;
1332
1333         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1334         if (ret)
1335                 goto err;
1336
1337         if (k.k->type != KEY_TYPE_whiteout) {
1338                 if (fsck_err_on(!i, c,
1339                                 "extent in missing inode:\n  %s",
1340                                 (printbuf_reset(&buf),
1341                                  bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1342                         goto delete;
1343
1344                 if (fsck_err_on(i &&
1345                                 !S_ISREG(i->inode.bi_mode) &&
1346                                 !S_ISLNK(i->inode.bi_mode), c,
1347                                 "extent in non regular inode mode %o:\n  %s",
1348                                 i->inode.bi_mode,
1349                                 (printbuf_reset(&buf),
1350                                  bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1351                         goto delete;
1352
1353                 ret = check_overlapping_extents(trans, s, extent_ends, k,
1354                                                 equiv.snapshot, iter,
1355                                                 &inode->recalculate_sums);
1356                 if (ret)
1357                         goto err;
1358         }
1359
1360         /*
1361          * Check inodes in reverse order, from oldest snapshots to newest,
1362          * starting from the inode that matches this extent's snapshot. If we
1363          * didn't have one, iterate over all inodes:
1364          */
1365         if (!i)
1366                 i = inode->inodes.data + inode->inodes.nr - 1;
1367
1368         for (;
1369              inode->inodes.data && i >= inode->inodes.data;
1370              --i) {
1371                 if (i->snapshot > equiv.snapshot ||
1372                     !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
1373                         continue;
1374
1375                 if (k.k->type != KEY_TYPE_whiteout) {
1376                         if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
1377                                         k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
1378                                         !bkey_extent_is_reservation(k), c,
1379                                         "extent type past end of inode %llu:%u, i_size %llu\n  %s",
1380                                         i->inode.bi_inum, i->snapshot, i->inode.bi_size,
1381                                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1382                                 struct btree_iter iter2;
1383
1384                                 bch2_trans_copy_iter(&iter2, iter);
1385                                 bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
1386                                 ret =   bch2_btree_iter_traverse(&iter2) ?:
1387                                         bch2_btree_delete_at(trans, &iter2,
1388                                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1389                                 bch2_trans_iter_exit(trans, &iter2);
1390                                 if (ret)
1391                                         goto err;
1392
1393                                 iter->k.type = KEY_TYPE_whiteout;
1394                         }
1395
1396                         if (bkey_extent_is_allocation(k.k))
1397                                 i->count += k.k->size;
1398                 }
1399
1400                 i->seen_this_pos = true;
1401         }
1402 out:
1403 err:
1404 fsck_err:
1405         printbuf_exit(&buf);
1406         bch_err_fn(c, ret);
1407         return ret;
1408 delete:
1409         ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1410         goto out;
1411 }
1412
1413 /*
1414  * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1415  * that i_size and i_sectors are consistent
1416  */
1417 int bch2_check_extents(struct bch_fs *c)
1418 {
1419         struct inode_walker w = inode_walker_init();
1420         struct snapshots_seen s;
1421         struct btree_trans *trans = bch2_trans_get(c);
1422         struct btree_iter iter;
1423         struct bkey_s_c k;
1424         struct extent_ends extent_ends;
1425         struct disk_reservation res = { 0 };
1426         int ret = 0;
1427
1428         snapshots_seen_init(&s);
1429         extent_ends_init(&extent_ends);
1430
1431         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
1432                         POS(BCACHEFS_ROOT_INO, 0),
1433                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1434                         &res, NULL,
1435                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
1436                 bch2_disk_reservation_put(c, &res);
1437                 check_extent(trans, &iter, k, &w, &s, &extent_ends);
1438         })) ?:
1439         check_i_sectors(trans, &w);
1440
1441         bch2_disk_reservation_put(c, &res);
1442         extent_ends_exit(&extent_ends);
1443         inode_walker_exit(&w);
1444         snapshots_seen_exit(&s);
1445         bch2_trans_put(trans);
1446
1447         bch_err_fn(c, ret);
1448         return ret;
1449 }
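
/*
 * Pass structure, for orientation: for_each_btree_key_commit() walks every
 * extent in every snapshot, committing after each key with
 * BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL; check_extent() accumulates
 * per-snapshot sector counts in the inode_walker, and the trailing
 * check_i_sectors() call handles the final inode once the loop finishes.
 */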
1450
1451 static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
1452 {
1453         struct bch_fs *c = trans->c;
1454         struct inode_walker_entry *i;
1455         u32 restart_count = trans->restart_count;
1456         int ret = 0;
1457         s64 count2;
1458
1459         darray_for_each(w->inodes, i) {
1460                 if (i->inode.bi_nlink == i->count)
1461                         continue;
1462
1463                 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
1464                 if (count2 < 0)
1465                         return count2;
1466
1467                 if (i->count != count2) {
1468                         bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
1469                                 i->count, count2);
1470                         i->count = count2;
1471                         if (i->inode.bi_nlink == i->count)
1472                                 continue;
1473                 }
1474
1475                 if (fsck_err_on(i->inode.bi_nlink != i->count, c,
1476                                 "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
1477                                 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
1478                         i->inode.bi_nlink = i->count;
1479                         ret = fsck_write_inode(trans, &i->inode, i->snapshot);
1480                         if (ret)
1481                                 break;
1482                 }
1483         }
1484 fsck_err:
1485         bch_err_fn(c, ret);
1486         return ret ?: trans_was_restarted(trans, restart_count);
1487 }
1488
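/*
 * Verify that a dirent and the inode it points to agree: fill in a missing
 * backpointer (bi_dir/bi_dir_offset), remove extra links to directories,
 * repair a spurious i_nlink of 0, fix a stale backpointer, and correct the
 * dirent's d_type (and d_parent_subvol for subvolume dirents).
 */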
1489 static int check_dirent_target(struct btree_trans *trans,
1490                                struct btree_iter *iter,
1491                                struct bkey_s_c_dirent d,
1492                                struct bch_inode_unpacked *target,
1493                                u32 target_snapshot)
1494 {
1495         struct bch_fs *c = trans->c;
1496         struct bkey_i_dirent *n;
1497         bool backpointer_exists = true;
1498         struct printbuf buf = PRINTBUF;
1499         int ret = 0;
1500
1501         if (!target->bi_dir &&
1502             !target->bi_dir_offset) {
1503                 target->bi_dir          = d.k->p.inode;
1504                 target->bi_dir_offset   = d.k->p.offset;
1505
1506                 ret = __write_inode(trans, target, target_snapshot);
1507                 if (ret)
1508                         goto err;
1509         }
1510
1511         if (!inode_points_to_dirent(target, d)) {
1512                 ret = inode_backpointer_exists(trans, target, d.k->p.snapshot);
1513                 if (ret < 0)
1514                         goto err;
1515
1516                 backpointer_exists = ret;
1517                 ret = 0;
1518
1519                 if (fsck_err_on(S_ISDIR(target->bi_mode) &&
1520                                 backpointer_exists, c,
1521                                 "directory %llu with multiple links",
1522                                 target->bi_inum)) {
1523                         ret = __remove_dirent(trans, d.k->p);
1524                         goto out;
1525                 }
1526
1527                 if (fsck_err_on(backpointer_exists &&
1528                                 !target->bi_nlink, c,
1529                                 "inode %llu type %s has multiple links but i_nlink 0",
1530                                 target->bi_inum, bch2_d_types[d.v->d_type])) {
1531                         target->bi_nlink++;
1532                         target->bi_flags &= ~BCH_INODE_UNLINKED;
1533
1534                         ret = __write_inode(trans, target, target_snapshot);
1535                         if (ret)
1536                                 goto err;
1537                 }
1538
1539                 if (fsck_err_on(!backpointer_exists, c,
1540                                 "inode %llu:%u has wrong backpointer:\n"
1541                                 "got       %llu:%llu\n"
1542                                 "should be %llu:%llu",
1543                                 target->bi_inum, target_snapshot,
1544                                 target->bi_dir,
1545                                 target->bi_dir_offset,
1546                                 d.k->p.inode,
1547                                 d.k->p.offset)) {
1548                         target->bi_dir          = d.k->p.inode;
1549                         target->bi_dir_offset   = d.k->p.offset;
1550
1551                         ret = __write_inode(trans, target, target_snapshot);
1552                         if (ret)
1553                                 goto err;
1554                 }
1555         }
1556
1557         if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
1558                         "incorrect d_type: got %s, should be %s:\n%s",
1559                         bch2_d_type_str(d.v->d_type),
1560                         bch2_d_type_str(inode_d_type(target)),
1561                         (printbuf_reset(&buf),
1562                          bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1563                 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1564                 ret = PTR_ERR_OR_ZERO(n);
1565                 if (ret)
1566                         goto err;
1567
1568                 bkey_reassemble(&n->k_i, d.s_c);
1569                 n->v.d_type = inode_d_type(target);
1570
1571                 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1572                 if (ret)
1573                         goto err;
1574
1575                 d = dirent_i_to_s_c(n);
1576         }
1577
1578         if (d.v->d_type == DT_SUBVOL &&
1579             target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
1580             (c->sb.version < bcachefs_metadata_version_subvol_dirent ||
1581              fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
1582                       le32_to_cpu(d.v->d_parent_subvol),
1583                       target->bi_parent_subvol))) {
1584                 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1585                 ret = PTR_ERR_OR_ZERO(n);
1586                 if (ret)
1587                         goto err;
1588
1589                 bkey_reassemble(&n->k_i, d.s_c);
1590                 n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
1591
1592                 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1593                 if (ret)
1594                         goto err;
1595
1596                 d = dirent_i_to_s_c(n);
1597         }
1598 out:
1599 err:
1600 fsck_err:
1601         printbuf_exit(&buf);
1602         bch_err_fn(c, ret);
1603         return ret;
1604 }
1605
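/*
 * Check a single dirent: verify its snapshot and hash, that the directory it
 * lives in exists and is in fact a directory, and that it points at a visible
 * inode (or valid subvolume root); also tally subdirectory links for the
 * parent directory.
 */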
1606 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
1607                         struct bkey_s_c k,
1608                         struct bch_hash_info *hash_info,
1609                         struct inode_walker *dir,
1610                         struct inode_walker *target,
1611                         struct snapshots_seen *s)
1612 {
1613         struct bch_fs *c = trans->c;
1614         struct bkey_s_c_dirent d;
1615         struct inode_walker_entry *i;
1616         struct printbuf buf = PRINTBUF;
1617         struct bpos equiv;
1618         int ret = 0;
1619
1620         ret = check_key_has_snapshot(trans, iter, k);
1621         if (ret) {
1622                 ret = ret < 0 ? ret : 0;
1623                 goto out;
1624         }
1625
1626         equiv = k.k->p;
1627         equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1628
1629         ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1630         if (ret)
1631                 goto err;
1632
1633         if (k.k->type == KEY_TYPE_whiteout)
1634                 goto out;
1635
1636         if (dir->last_pos.inode != k.k->p.inode) {
1637                 ret = check_subdir_count(trans, dir);
1638                 if (ret)
1639                         goto err;
1640         }
1641
1642         BUG_ON(!iter->path->should_be_locked);
1643
1644         i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
1645         ret = PTR_ERR_OR_ZERO(i);
1646         if (ret < 0)
1647                 goto err;
1648
1649         if (dir->first_this_inode && dir->inodes.nr)
1650                 *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
1651         dir->first_this_inode = false;
1652
1653         if (fsck_err_on(!i, c,
1654                         "dirent in nonexistent directory:\n%s",
1655                         (printbuf_reset(&buf),
1656                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1657                 ret = bch2_btree_delete_at(trans, iter,
1658                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1659                 goto out;
1660         }
1661
1662         if (!i)
1663                 goto out;
1664
1665         if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
1666                         "dirent in non-directory inode type %s:\n%s",
1667                         bch2_d_type_str(inode_d_type(&i->inode)),
1668                         (printbuf_reset(&buf),
1669                          bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1670                 ret = bch2_btree_delete_at(trans, iter, 0);
1671                 goto out;
1672         }
1673
1674         ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
1675         if (ret < 0)
1676                 goto err;
1677         if (ret) {
1678                 /* dirent has been deleted */
1679                 ret = 0;
1680                 goto out;
1681         }
1682
1683         if (k.k->type != KEY_TYPE_dirent)
1684                 goto out;
1685
1686         d = bkey_s_c_to_dirent(k);
1687
1688         if (d.v->d_type == DT_SUBVOL) {
1689                 struct bch_inode_unpacked subvol_root;
1690                 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
1691                 u32 target_snapshot;
1692                 u64 target_inum;
1693
1694                 ret = __subvol_lookup(trans, target_subvol,
1695                                       &target_snapshot, &target_inum);
1696                 if (ret && !bch2_err_matches(ret, ENOENT))
1697                         goto err;
1698
1699                 if (fsck_err_on(ret, c,
1700                                 "dirent points to missing subvolume %u",
1701                                 le32_to_cpu(d.v->d_child_subvol))) {
1702                         ret = __remove_dirent(trans, d.k->p);
1703                         goto err;
1704                 }
1705
1706                 ret = __lookup_inode(trans, target_inum,
1707                                    &subvol_root, &target_snapshot);
1708                 if (ret && !bch2_err_matches(ret, ENOENT))
1709                         goto err;
1710
1711                 if (fsck_err_on(ret, c,
1712                                 "subvolume %u points to missing subvolume root %llu",
1713                                 target_subvol,
1714                                 target_inum)) {
1715                         bch_err(c, "repair not implemented yet");
1716                         ret = -EINVAL;
1717                         goto err;
1718                 }
1719
1720                 if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
1721                                 "subvol root %llu has wrong bi_subvol field: got %u, should be %u",
1722                                 target_inum,
1723                                 subvol_root.bi_subvol, target_subvol)) {
1724                         subvol_root.bi_subvol = target_subvol;
1725                         ret = __write_inode(trans, &subvol_root, target_snapshot);
1726                         if (ret)
1727                                 goto err;
1728                 }
1729
1730                 ret = check_dirent_target(trans, iter, d, &subvol_root,
1731                                           target_snapshot);
1732                 if (ret)
1733                         goto err;
1734         } else {
1735                 ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
1736                 if (ret)
1737                         goto err;
1738
1739                 if (fsck_err_on(!target->inodes.nr, c,
1740                                 "dirent points to missing inode (equiv %u):\n%s",
1741                                 equiv.snapshot,
1742                                 (printbuf_reset(&buf),
1743                                  bch2_bkey_val_to_text(&buf, c, k),
1744                                  buf.buf))) {
1745                         ret = __remove_dirent(trans, d.k->p);
1746                         if (ret)
1747                                 goto err;
1748                 }
1749
1750                 darray_for_each(target->inodes, i) {
1751                         ret = check_dirent_target(trans, iter, d,
1752                                                   &i->inode, i->snapshot);
1753                         if (ret)
1754                                 goto err;
1755                 }
1756         }
1757
1758         if (d.v->d_type == DT_DIR)
1759                 for_each_visible_inode(c, s, dir, equiv.snapshot, i)
1760                         i->count++;
1761
1762 out:
1763 err:
1764 fsck_err:
1765         printbuf_exit(&buf);
1766         bch_err_fn(c, ret);
1767         return ret;
1768 }
1769
1770 /*
1771  * Walk dirents: verify that each dirent lives in an existing S_ISDIR inode
1772  * and points at a valid target inode, and validate d_type
1773  */
1774 int bch2_check_dirents(struct bch_fs *c)
1775 {
1776         struct inode_walker dir = inode_walker_init();
1777         struct inode_walker target = inode_walker_init();
1778         struct snapshots_seen s;
1779         struct bch_hash_info hash_info;
1780         struct btree_trans *trans = bch2_trans_get(c);
1781         struct btree_iter iter;
1782         struct bkey_s_c k;
1783         int ret = 0;
1784
1785         snapshots_seen_init(&s);
1786
1787         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
1788                         POS(BCACHEFS_ROOT_INO, 0),
1789                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1790                         k,
1791                         NULL, NULL,
1792                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
1793                 check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s));
1794
1795         bch2_trans_put(trans);
1796         snapshots_seen_exit(&s);
1797         inode_walker_exit(&dir);
1798         inode_walker_exit(&target);
1799         bch_err_fn(c, ret);
1800         return ret;
1801 }
1802
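/*
 * Check a single xattr: verify its snapshot and hash, and delete xattrs whose
 * inode no longer exists.
 */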
1803 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
1804                        struct bkey_s_c k,
1805                        struct bch_hash_info *hash_info,
1806                        struct inode_walker *inode)
1807 {
1808         struct bch_fs *c = trans->c;
1809         struct inode_walker_entry *i;
1810         int ret;
1811
1812         ret = check_key_has_snapshot(trans, iter, k);
1813         if (ret)
1814                 return ret;
1815
1816         i = walk_inode(trans, inode, k.k->p, k.k->type == KEY_TYPE_whiteout);
1817         ret = PTR_ERR_OR_ZERO(i);
1818         if (ret)
1819                 return ret;
1820
1821         if (inode->first_this_inode && inode->inodes.nr)
1822                 *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
1823         inode->first_this_inode = false;
1824
1825         if (fsck_err_on(!i, c,
1826                         "xattr for missing inode %llu",
1827                         k.k->p.inode))
1828                 return bch2_btree_delete_at(trans, iter, 0);
1829
1830         if (!i)
1831                 return 0;
1832
1833         ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
1834 fsck_err:
1835         bch_err_fn(c, ret);
1836         return ret;
1837 }
1838
1839 /*
1840  * Walk xattrs: verify that they all have a corresponding inode
1841  */
1842 int bch2_check_xattrs(struct bch_fs *c)
1843 {
1844         struct inode_walker inode = inode_walker_init();
1845         struct bch_hash_info hash_info;
1846         struct btree_iter iter;
1847         struct bkey_s_c k;
1848         int ret = 0;
1849
1850         ret = bch2_trans_run(c,
1851                 for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
1852                         POS(BCACHEFS_ROOT_INO, 0),
1853                         BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1854                         k,
1855                         NULL, NULL,
1856                         BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
1857                 check_xattr(trans, &iter, k, &hash_info, &inode)));
1858         bch_err_fn(c, ret);
1859         return ret;
1860 }
1861
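/*
 * Check that the root subvolume and the root directory inode exist, creating
 * them if missing:
 */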
1862 static int check_root_trans(struct btree_trans *trans)
1863 {
1864         struct bch_fs *c = trans->c;
1865         struct bch_inode_unpacked root_inode;
1866         u32 snapshot;
1867         u64 inum;
1868         int ret;
1869
1870         ret = __subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
1871         if (ret && !bch2_err_matches(ret, ENOENT))
1872                 return ret;
1873
1874         if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
1875                 struct bkey_i_subvolume root_subvol;
1876
1877                 snapshot        = U32_MAX;
1878                 inum            = BCACHEFS_ROOT_INO;
1879
1880                 bkey_subvolume_init(&root_subvol.k_i);
1881                 root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
1882                 root_subvol.v.flags     = 0;
1883                 root_subvol.v.snapshot  = cpu_to_le32(snapshot);
1884                 root_subvol.v.inode     = cpu_to_le64(inum);
1885                 ret = commit_do(trans, NULL, NULL,
1886                                       BTREE_INSERT_NOFAIL|
1887                                       BTREE_INSERT_LAZY_RW,
1888                         bch2_btree_insert_trans(trans, BTREE_ID_subvolumes,
1889                                             &root_subvol.k_i, 0));
1890                 bch_err_msg(c, ret, "writing root subvol");
1891                 if (ret)
1892                         goto err;
1893
1894         }
1895
1896         ret = __lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
1897         if (ret && !bch2_err_matches(ret, ENOENT))
1898                 return ret;
1899
1900         if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
1901             mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
1902                                 "root inode not a directory")) {
1903                 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
1904                                 0, NULL);
1905                 root_inode.bi_inum = inum;
1906
1907                 ret = __write_inode(trans, &root_inode, snapshot);
1908                 bch_err_msg(c, ret, "writing root inode");
1909         }
1910 err:
1911 fsck_err:
1912         return ret;
1913 }
1914
1915 /* Get root directory, create if it doesn't exist: */
1916 int bch2_check_root(struct bch_fs *c)
1917 {
1918         int ret;
1919
1920         ret = bch2_trans_do(c, NULL, NULL,
1921                              BTREE_INSERT_NOFAIL|
1922                              BTREE_INSERT_LAZY_RW,
1923                 check_root_trans(trans));
1924         bch_err_fn(c, ret);
1925         return ret;
1926 }
1927
1928 struct pathbuf_entry {
1929         u64     inum;
1930         u32     snapshot;
1931 };
1932
1933 typedef DARRAY(struct pathbuf_entry) pathbuf;
1934
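/* True if this inode:snapshot is already on the path we're walking, i.e. a loop: */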
1935 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
1936 {
1937         struct pathbuf_entry *i;
1938
1939         darray_for_each(*p, i)
1940                 if (i->inum     == inum &&
1941                     i->snapshot == snapshot)
1942                         return true;
1943
1944         return false;
1945 }
1946
1947 static int path_down(struct bch_fs *c, pathbuf *p,
1948                      u64 inum, u32 snapshot)
1949 {
1950         int ret = darray_push(p, ((struct pathbuf_entry) {
1951                 .inum           = inum,
1952                 .snapshot       = snapshot,
1953         }));
1954
1955         if (ret)
1956                 bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
1957                         p->size);
1958         return ret;
1959 }
1960
1961 /*
1962  * Check that a given inode is reachable from the root:
1963  *
1964  * XXX: we should also be verifying that inodes are in the right subvolumes
1965  */
1966 static int check_path(struct btree_trans *trans,
1967                       pathbuf *p,
1968                       struct bch_inode_unpacked *inode,
1969                       u32 snapshot)
1970 {
1971         struct bch_fs *c = trans->c;
1972         int ret = 0;
1973
1974         snapshot = bch2_snapshot_equiv(c, snapshot);
1975         p->nr = 0;
1976
1977         while (!(inode->bi_inum == BCACHEFS_ROOT_INO &&
1978                  inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
1979                 struct btree_iter dirent_iter;
1980                 struct bkey_s_c_dirent d;
1981                 u32 parent_snapshot = snapshot;
1982
1983                 if (inode->bi_subvol) {
1984                         u64 inum;
1985
1986                         ret = subvol_lookup(trans, inode->bi_parent_subvol,
1987                                             &parent_snapshot, &inum);
1988                         if (ret)
1989                                 break;
1990                 }
1991
1992                 ret = lockrestart_do(trans,
1993                         PTR_ERR_OR_ZERO((d = dirent_get_by_pos(trans, &dirent_iter,
1994                                           SPOS(inode->bi_dir, inode->bi_dir_offset,
1995                                                parent_snapshot))).k));
1996                 if (ret && !bch2_err_matches(ret, ENOENT))
1997                         break;
1998
1999                 if (!ret && !dirent_points_to_inode(d, inode)) {
2000                         bch2_trans_iter_exit(trans, &dirent_iter);
2001                         ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
2002                 }
2003
2004                 if (bch2_err_matches(ret, ENOENT)) {
2005                         if (fsck_err(c, "unreachable inode %llu:%u, type %s, nlink %u, backptr %llu:%llu",
2006                                      inode->bi_inum, snapshot,
2007                                      bch2_d_type_str(inode_d_type(inode)),
2008                                      inode->bi_nlink,
2009                                      inode->bi_dir,
2010                                      inode->bi_dir_offset))
2011                                 ret = reattach_inode(trans, inode, snapshot);
2012                         break;
2013                 }
2014
2015                 bch2_trans_iter_exit(trans, &dirent_iter);
2016
2017                 if (!S_ISDIR(inode->bi_mode))
2018                         break;
2019
2020                 ret = path_down(c, p, inode->bi_inum, snapshot);
2021                 if (ret) {
2022                         bch_err(c, "memory allocation failure");
2023                         return ret;
2024                 }
2025
2026                 snapshot = parent_snapshot;
2027
2028                 ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
2029                 if (ret) {
2030                         /* Should have been caught in dirents pass */
2031                         bch_err(c, "error looking up parent directory: %i", ret);
2032                         break;
2033                 }
2034
2035                 if (path_is_dup(p, inode->bi_inum, snapshot)) {
2036                         struct pathbuf_entry *i;
2037
2038                         /* XXX print path */
2039                         bch_err(c, "directory structure loop");
2040
2041                         darray_for_each(*p, i)
2042                                 pr_err("%llu:%u", i->inum, i->snapshot);
2043                         pr_err("%llu:%u", inode->bi_inum, snapshot);
2044
2045                         if (!fsck_err(c, "directory structure loop"))
2046                                 return 0;
2047
2048                         ret = commit_do(trans, NULL, NULL,
2049                                               BTREE_INSERT_NOFAIL|
2050                                               BTREE_INSERT_LAZY_RW,
2051                                         remove_backpointer(trans, inode));
2052                         if (ret) {
2053                                 bch_err(c, "error removing dirent: %i", ret);
2054                                 break;
2055                         }
2056
2057                         ret = reattach_inode(trans, inode, snapshot);
2058                 }
2059         }
2060 fsck_err:
2061         bch_err_fn(c, ret);
2062         return ret;
2063 }
2064
2065 /*
2066  * Check for unreachable inodes, as well as loops in the directory structure:
2067  * after bch2_check_dirents(), an inode whose backpointer doesn't point back
2068  * to a valid dirent is unreachable.
2069  */
2070 int bch2_check_directory_structure(struct bch_fs *c)
2071 {
2072         struct btree_trans *trans = bch2_trans_get(c);
2073         struct btree_iter iter;
2074         struct bkey_s_c k;
2075         struct bch_inode_unpacked u;
2076         pathbuf path = { 0, };
2077         int ret;
2078
2079         for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
2080                            BTREE_ITER_INTENT|
2081                            BTREE_ITER_PREFETCH|
2082                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2083                 if (!bkey_is_inode(k.k))
2084                         continue;
2085
2086                 ret = bch2_inode_unpack(k, &u);
2087                 if (ret) {
2088                         /* Should have been caught earlier in fsck: */
2089                         bch_err(c, "error unpacking inode %llu: %i", k.k->p.offset, ret);
2090                         break;
2091                 }
2092
2093                 if (u.bi_flags & BCH_INODE_UNLINKED)
2094                         continue;
2095
2096                 ret = check_path(trans, &path, &u, iter.pos.snapshot);
2097                 if (ret)
2098                         break;
2099         }
2100         bch2_trans_iter_exit(trans, &iter);
2101         bch2_trans_put(trans);
2102         darray_exit(&path);
2103         bch_err_fn(c, ret);
2104         return ret;
2105 }
2106
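/*
 * i_nlink checking for hardlinked files: we build a sorted table of
 * (inum, snapshot) entries for inodes that can have hardlinks, count dirent
 * references into it, then compare the counts against each inode's i_nlink.
 */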
2107 struct nlink_table {
2108         size_t          nr;
2109         size_t          size;
2110
2111         struct nlink {
2112                 u64     inum;
2113                 u32     snapshot;
2114                 u32     count;
2115         }               *d;
2116 };
2117
2118 static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2119                      u64 inum, u32 snapshot)
2120 {
2121         if (t->nr == t->size) {
2122                 size_t new_size = max_t(size_t, 128UL, t->size * 2);
2123                 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2124
2125                 if (!d) {
2126                         bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2127                                 new_size);
2128                         return -BCH_ERR_ENOMEM_fsck_add_nlink;
2129                 }
2130
2131                 if (t->d)
2132                         memcpy(d, t->d, t->size * sizeof(t->d[0]));
2133                 kvfree(t->d);
2134
2135                 t->d = d;
2136                 t->size = new_size;
2137         }
2138
2139
2140         t->d[t->nr++] = (struct nlink) {
2141                 .inum           = inum,
2142                 .snapshot       = snapshot,
2143         };
2144
2145         return 0;
2146 }
2147
2148 static int nlink_cmp(const void *_l, const void *_r)
2149 {
2150         const struct nlink *l = _l;
2151         const struct nlink *r = _r;
2152
2153         return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot);
2154 }
2155
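/*
 * Count a dirent reference to @inum, if it falls within the range covered by
 * this pass: binary search the table, rewind to the first entry for this
 * inode, then bump the count on the entries whose snapshots can see this
 * reference (ref_visible()).
 */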
2156 static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2157                      struct nlink_table *links,
2158                      u64 range_start, u64 range_end, u64 inum, u32 snapshot)
2159 {
2160         struct nlink *link, key = {
2161                 .inum = inum, .snapshot = U32_MAX,
2162         };
2163
2164         if (inum < range_start || inum >= range_end)
2165                 return;
2166
2167         link = __inline_bsearch(&key, links->d, links->nr,
2168                                 sizeof(links->d[0]), nlink_cmp);
2169         if (!link)
2170                 return;
2171
2172         while (link > links->d && link[0].inum == link[-1].inum)
2173                 --link;
2174
2175         for (; link < links->d + links->nr && link->inum == inum; link++)
2176                 if (ref_visible(c, s, snapshot, link->snapshot)) {
2177                         link->count++;
2178                         if (link->snapshot >= snapshot)
2179                                 break;
2180                 }
2181 }
2182
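/*
 * First pass: collect every (inum, snapshot) that can have hardlinks
 * (non-directories with nonzero i_nlink). If we run out of memory for the
 * table, record where we stopped so the caller can continue from there in a
 * later pass.
 */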
2183 noinline_for_stack
2184 static int check_nlinks_find_hardlinks(struct bch_fs *c,
2185                                        struct nlink_table *t,
2186                                        u64 start, u64 *end)
2187 {
2188         struct btree_trans *trans = bch2_trans_get(c);
2189         struct btree_iter iter;
2190         struct bkey_s_c k;
2191         struct bch_inode_unpacked u;
2192         int ret = 0;
2193
2194         for_each_btree_key(trans, iter, BTREE_ID_inodes,
2195                            POS(0, start),
2196                            BTREE_ITER_INTENT|
2197                            BTREE_ITER_PREFETCH|
2198                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2199                 if (!bkey_is_inode(k.k))
2200                         continue;
2201
2202                 /* Should never fail, checked by bch2_inode_invalid: */
2203                 BUG_ON(bch2_inode_unpack(k, &u));
2204
2205                 /*
2206                  * Backpointer and directory structure checks are sufficient for
2207                  * directories, since they can't have hardlinks:
2208                  */
2209                 if (S_ISDIR(u.bi_mode))
2210                         continue;
2211
2212                 if (!u.bi_nlink)
2213                         continue;
2214
2215                 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2216                 if (ret) {
2217                         *end = k.k->p.offset;
2218                         ret = 0;
2219                         break;
2220                 }
2221
2222         }
2223         bch2_trans_iter_exit(trans, &iter);
2224         bch2_trans_put(trans);
2225
2226         if (ret)
2227                 bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
2228
2229         return ret;
2230 }
2231
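/*
 * Second pass: walk dirents and count the references to each inode in the
 * table; directory and subvolume dirents are skipped since those can't be
 * hardlinks.
 */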
2232 noinline_for_stack
2233 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2234                                      u64 range_start, u64 range_end)
2235 {
2236         struct btree_trans *trans = bch2_trans_get(c);
2237         struct snapshots_seen s;
2238         struct btree_iter iter;
2239         struct bkey_s_c k;
2240         struct bkey_s_c_dirent d;
2241         int ret;
2242
2243         snapshots_seen_init(&s);
2244
2245         for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
2246                            BTREE_ITER_INTENT|
2247                            BTREE_ITER_PREFETCH|
2248                            BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
2249                 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
2250                 if (ret)
2251                         break;
2252
2253                 switch (k.k->type) {
2254                 case KEY_TYPE_dirent:
2255                         d = bkey_s_c_to_dirent(k);
2256
2257                         if (d.v->d_type != DT_DIR &&
2258                             d.v->d_type != DT_SUBVOL)
2259                                 inc_link(c, &s, links, range_start, range_end,
2260                                          le64_to_cpu(d.v->d_inum),
2261                                          bch2_snapshot_equiv(c, d.k->p.snapshot));
2262                         break;
2263                 }
2264         }
2265         bch2_trans_iter_exit(trans, &iter);
2266
2267         if (ret)
2268                 bch_err(c, "error in fsck: btree error %i while walking dirents", ret);
2269
2270         bch2_trans_put(trans);
2271         snapshots_seen_exit(&s);
2272         return ret;
2273 }
2274
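/*
 * Compare an inode's i_nlink against the link count we tallied from dirents
 * and rewrite the inode if they disagree; returns 1 once the iterator reaches
 * range_end, so the caller stops there.
 */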
2275 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
2276                                      struct bkey_s_c k,
2277                                      struct nlink_table *links,
2278                                      size_t *idx, u64 range_end)
2279 {
2280         struct bch_fs *c = trans->c;
2281         struct bch_inode_unpacked u;
2282         struct nlink *link = &links->d[*idx];
2283         int ret = 0;
2284
2285         if (k.k->p.offset >= range_end)
2286                 return 1;
2287
2288         if (!bkey_is_inode(k.k))
2289                 return 0;
2290
2291         BUG_ON(bch2_inode_unpack(k, &u));
2292
2293         if (S_ISDIR(u.bi_mode))
2294                 return 0;
2295
2296         if (!u.bi_nlink)
2297                 return 0;
2298
2299         while ((cmp_int(link->inum, k.k->p.offset) ?:
2300                 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
2301                 BUG_ON(*idx == links->nr);
2302                 link = &links->d[++*idx];
2303         }
2304
2305         if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
2306                         "inode %llu type %s has wrong i_nlink (%u, should be %u)",
2307                         u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
2308                         bch2_inode_nlink_get(&u), link->count)) {
2309                 bch2_inode_nlink_set(&u, link->count);
2310                 ret = __write_inode(trans, &u, k.k->p.snapshot);
2311         }
2312 fsck_err:
2313         return ret;
2314 }
2315
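/*
 * Third pass: walk the inodes again and fix up any i_nlink that doesn't match
 * the counted links.
 */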
2316 noinline_for_stack
2317 static int check_nlinks_update_hardlinks(struct bch_fs *c,
2318                                struct nlink_table *links,
2319                                u64 range_start, u64 range_end)
2320 {
2321         struct btree_iter iter;
2322         struct bkey_s_c k;
2323         size_t idx = 0;
2324         int ret = 0;
2325
2326         ret = bch2_trans_run(c,
2327                 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
2328                                 POS(0, range_start),
2329                                 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
2330                                 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
2331                         check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
2332         if (ret < 0) {
2333                 bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
2334                 return ret;
2335         }
2336
2337         return 0;
2338 }
2339
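/*
 * Check i_nlink for inodes that can have hardlinks. If the link table won't
 * fit in memory all at once, this is done in multiple passes, each pass
 * continuing from the inode where the previous one had to stop.
 */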
2340 int bch2_check_nlinks(struct bch_fs *c)
2341 {
2342         struct nlink_table links = { 0 };
2343         u64 this_iter_range_start, next_iter_range_start = 0;
2344         int ret = 0;
2345
2346         do {
2347                 this_iter_range_start = next_iter_range_start;
2348                 next_iter_range_start = U64_MAX;
2349
2350                 ret =   check_nlinks_find_hardlinks(c, &links,
2351                                                     this_iter_range_start,
2352                                                     &next_iter_range_start) ?:
2353                         check_nlinks_walk_dirents(c, &links,
2354                                                   this_iter_range_start,
2355                                                   next_iter_range_start);
2356
2357                 if (ret)
2358                         break;
2359
2360                 ret = check_nlinks_update_hardlinks(c, &links,
2361                                          this_iter_range_start,
2362                                          next_iter_range_start);
2363                 if (ret)
2364                         break;
2365
2366                 links.nr = 0;
2367         } while (next_iter_range_start != U64_MAX);
2368
2369         kvfree(links.d);
2370         bch_err_fn(c, ret);
2371         return ret;
2372 }
2373
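/*
 * Only run on filesystems older than the reflink_p_fix metadata version
 * (checked in bch2_fix_reflink_p() below): clear the front_pad/back_pad
 * fields of reflink pointers, without running triggers.
 */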
2374 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2375                              struct bkey_s_c k)
2376 {
2377         struct bkey_s_c_reflink_p p;
2378         struct bkey_i_reflink_p *u;
2379         int ret;
2380
2381         if (k.k->type != KEY_TYPE_reflink_p)
2382                 return 0;
2383
2384         p = bkey_s_c_to_reflink_p(k);
2385
2386         if (!p.v->front_pad && !p.v->back_pad)
2387                 return 0;
2388
2389         u = bch2_trans_kmalloc(trans, sizeof(*u));
2390         ret = PTR_ERR_OR_ZERO(u);
2391         if (ret)
2392                 return ret;
2393
2394         bkey_reassemble(&u->k_i, k);
2395         u->v.front_pad  = 0;
2396         u->v.back_pad   = 0;
2397
2398         return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
2399 }
2400
2401 int bch2_fix_reflink_p(struct bch_fs *c)
2402 {
2403         struct btree_iter iter;
2404         struct bkey_s_c k;
2405         int ret;
2406
2407         if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
2408                 return 0;
2409
2410         ret = bch2_trans_run(c,
2411                 for_each_btree_key_commit(trans, iter,
2412                                 BTREE_ID_extents, POS_MIN,
2413                                 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
2414                                 BTREE_ITER_ALL_SNAPSHOTS, k,
2415                                 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
2416                         fix_reflink_p_key(trans, &iter, k)));
2417         bch_err_fn(c, ret);
2418         return ret;
2419 }