1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_key_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "errcode.h"
9 #include "error.h"
10 #include "fs.h"
11 #include "snapshot.h"
12
13 #include <linux/random.h>
14
15 /*
16  * Snapshot trees:
17  *
18  * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
19  * exist to provide a stable identifier for the whole lifetime of a snapshot
20  * tree.
21  */
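/*
 * Illustrative example (hypothetical IDs): a snapshot_tree key at POS(0, 3)
 * might record master_subvol 1 and root_snapshot 4294967294; every snapshot
 * node in that tree then has tree == 3, and if the root node is later deleted
 * the tree entry is re-pointed at the new root, so 3 remains a stable handle
 * for the whole tree.
 */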
22
23 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
24                                 struct bkey_s_c k)
25 {
26         struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
27
28         prt_printf(out, "subvol %u root snapshot %u",
29                    le32_to_cpu(t.v->master_subvol),
30                    le32_to_cpu(t.v->root_snapshot));
31 }
32
33 int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
34                                enum bkey_invalid_flags flags,
35                                struct printbuf *err)
36 {
37         int ret = 0;
38
39         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
40                          bkey_lt(k.k->p, POS(0, 1)), c, err,
41                          snapshot_tree_pos_bad,
42                          "bad pos");
43 fsck_err:
44         return ret;
45 }
46
47 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
48                               struct bch_snapshot_tree *s)
49 {
50         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
51                                           BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
52
53         if (bch2_err_matches(ret, ENOENT))
54                 ret = -BCH_ERR_ENOENT_snapshot_tree;
55         return ret;
56 }
57
58 struct bkey_i_snapshot_tree *
59 __bch2_snapshot_tree_create(struct btree_trans *trans)
60 {
61         struct btree_iter iter;
62         int ret = bch2_bkey_get_empty_slot(trans, &iter,
63                         BTREE_ID_snapshot_trees, POS(0, U32_MAX));
64         struct bkey_i_snapshot_tree *s_t;
65
66         if (ret == -BCH_ERR_ENOSPC_btree_slot)
67                 ret = -BCH_ERR_ENOSPC_snapshot_tree;
68         if (ret)
69                 return ERR_PTR(ret);
70
71         s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
72         ret = PTR_ERR_OR_ZERO(s_t);
73         bch2_trans_iter_exit(trans, &iter);
74         return ret ? ERR_PTR(ret) : s_t;
75 }
76
77 static int bch2_snapshot_tree_create(struct btree_trans *trans,
78                                 u32 root_id, u32 subvol_id, u32 *tree_id)
79 {
80         struct bkey_i_snapshot_tree *n_tree =
81                 __bch2_snapshot_tree_create(trans);
82
83         if (IS_ERR(n_tree))
84                 return PTR_ERR(n_tree);
85
86         n_tree->v.master_subvol = cpu_to_le32(subvol_id);
87         n_tree->v.root_snapshot = cpu_to_le32(root_id);
88         *tree_id = n_tree->k.p.offset;
89         return 0;
90 }
91
92 /* Snapshot nodes: */
93
94 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
95 {
96         struct snapshot_table *t;
97
98         rcu_read_lock();
99         t = rcu_dereference(c->snapshots);
100
101         while (id && id < ancestor)
102                 id = __snapshot_t(t, id)->parent;
103         rcu_read_unlock();
104
105         return id == ancestor;
106 }
107
108 static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
109 {
110         const struct snapshot_t *s = __snapshot_t(t, id);
111
112         if (s->skip[2] <= ancestor)
113                 return s->skip[2];
114         if (s->skip[1] <= ancestor)
115                 return s->skip[1];
116         if (s->skip[0] <= ancestor)
117                 return s->skip[0];
118         return s->parent;
119 }
120
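/*
 * Fast ancestor check: climb via the skiplist pointers until @id is within
 * IS_ANCESTOR_BITMAP of @ancestor, then test the per-node is_ancestor bitmap,
 * where bit n set means "id + n + 1 is an ancestor of id". Under EBUG_ON the
 * result is cross-checked against the plain parent walk in
 * bch2_snapshot_is_ancestor_early().
 */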
121 bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
122 {
123         struct snapshot_table *t;
124         bool ret;
125
126         EBUG_ON(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots);
127
128         rcu_read_lock();
129         t = rcu_dereference(c->snapshots);
130
131         while (id && id < ancestor - IS_ANCESTOR_BITMAP)
132                 id = get_ancestor_below(t, id, ancestor);
133
134         if (id && id < ancestor) {
135                 ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
136
137                 EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
138         } else {
139                 ret = id == ancestor;
140         }
141
142         rcu_read_unlock();
143
144         return ret;
145 }
146
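/*
 * Resize the RCU-protected snapshot table so it has a slot for @id: snapshot
 * IDs are allocated downwards from U32_MAX, so the table is indexed by
 * U32_MAX - id. The old contents are copied, the new table is published with
 * rcu_assign_pointer(), and the old one is freed after a grace period.
 */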
147 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
148 {
149         size_t idx = U32_MAX - id;
150         size_t new_size;
151         struct snapshot_table *new, *old;
152
153         new_size = max(16UL, roundup_pow_of_two(idx + 1));
154
155         new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
156         if (!new)
157                 return NULL;
158
159         old = rcu_dereference_protected(c->snapshots, true);
160         if (old)
161                 memcpy(new->s,
162                        rcu_dereference_protected(c->snapshots, true)->s,
163                        sizeof(new->s[0]) * c->snapshot_table_size);
164
165         rcu_assign_pointer(c->snapshots, new);
166         c->snapshot_table_size = new_size;
167         kvfree_rcu_mightsleep(old);
168
169         return &rcu_dereference_protected(c->snapshots, true)->s[idx];
170 }
171
172 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
173 {
174         size_t idx = U32_MAX - id;
175
176         lockdep_assert_held(&c->snapshot_table_lock);
177
178         if (likely(idx < c->snapshot_table_size))
179                 return &rcu_dereference_protected(c->snapshots, true)->s[idx];
180
181         return __snapshot_t_mut(c, id);
182 }
183
184 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
185                            struct bkey_s_c k)
186 {
187         struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
188
189         prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
190                BCH_SNAPSHOT_SUBVOL(s.v),
191                BCH_SNAPSHOT_DELETED(s.v),
192                le32_to_cpu(s.v->parent),
193                le32_to_cpu(s.v->children[0]),
194                le32_to_cpu(s.v->children[1]),
195                le32_to_cpu(s.v->subvol),
196                le32_to_cpu(s.v->tree));
197
198         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
199                 prt_printf(out, " depth %u skiplist %u %u %u",
200                            le32_to_cpu(s.v->depth),
201                            le32_to_cpu(s.v->skip[0]),
202                            le32_to_cpu(s.v->skip[1]),
203                            le32_to_cpu(s.v->skip[2]));
204 }
205
206 int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
207                           enum bkey_invalid_flags flags,
208                           struct printbuf *err)
209 {
210         struct bkey_s_c_snapshot s;
211         u32 i, id;
212         int ret = 0;
213
214         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
215                          bkey_lt(k.k->p, POS(0, 1)), c, err,
216                          snapshot_pos_bad,
217                          "bad pos");
218
219         s = bkey_s_c_to_snapshot(k);
220
221         id = le32_to_cpu(s.v->parent);
222         bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
223                          snapshot_parent_bad,
224                          "bad parent node (%u <= %llu)",
225                          id, k.k->p.offset);
226
227         bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
228                          snapshot_children_not_normalized,
229                          "children not normalized");
230
231         bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
232                          snapshot_child_duplicate,
233                          "duplicate child nodes");
234
235         for (i = 0; i < 2; i++) {
236                 id = le32_to_cpu(s.v->children[i]);
237
238                 bkey_fsck_err_on(id >= k.k->p.offset, c, err,
239                                  snapshot_child_bad,
240                                  "bad child node (%u >= %llu)",
241                                  id, k.k->p.offset);
242         }
243
244         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
245                 bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
246                                  le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
247                                  snapshot_skiplist_not_normalized,
248                                  "skiplist not normalized");
249
250                 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
251                         id = le32_to_cpu(s.v->skip[i]);
252
253                         bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
254                                          snapshot_skiplist_bad,
255                                          "bad skiplist node %u", id);
256                 }
257         }
258 fsck_err:
259         return ret;
260 }
261
262 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
263 {
264         struct snapshot_t *t = snapshot_t_mut(c, id);
265         u32 parent = id;
266
267         while ((parent = bch2_snapshot_parent_early(c, parent)) &&
268                parent - id - 1 < IS_ANCESTOR_BITMAP)
269                 __set_bit(parent - id - 1, t->is_ancestor);
270 }
271
272 static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
273 {
274         mutex_lock(&c->snapshot_table_lock);
275         __set_is_ancestor_bitmap(c, id);
276         mutex_unlock(&c->snapshot_table_lock);
277 }
278
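/*
 * Keep the in-memory snapshot table in sync with the snapshots btree: copy
 * the key's fields into the corresponding snapshot_t, rebuild its is_ancestor
 * bitmap, and kick off asynchronous cleanup if the node is flagged deleted.
 */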
279 int bch2_mark_snapshot(struct btree_trans *trans,
280                        enum btree_id btree, unsigned level,
281                        struct bkey_s_c old, struct bkey_s_c new,
282                        unsigned flags)
283 {
284         struct bch_fs *c = trans->c;
285         struct snapshot_t *t;
286         u32 id = new.k->p.offset;
287         int ret = 0;
288
289         mutex_lock(&c->snapshot_table_lock);
290
291         t = snapshot_t_mut(c, id);
292         if (!t) {
293                 ret = -BCH_ERR_ENOMEM_mark_snapshot;
294                 goto err;
295         }
296
297         if (new.k->type == KEY_TYPE_snapshot) {
298                 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
299
300                 t->parent       = le32_to_cpu(s.v->parent);
301                 t->children[0]  = le32_to_cpu(s.v->children[0]);
302                 t->children[1]  = le32_to_cpu(s.v->children[1]);
303                 t->subvol       = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
304                 t->tree         = le32_to_cpu(s.v->tree);
305
306                 if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
307                         t->depth        = le32_to_cpu(s.v->depth);
308                         t->skip[0]      = le32_to_cpu(s.v->skip[0]);
309                         t->skip[1]      = le32_to_cpu(s.v->skip[1]);
310                         t->skip[2]      = le32_to_cpu(s.v->skip[2]);
311                 } else {
312                         t->depth        = 0;
313                         t->skip[0]      = 0;
314                         t->skip[1]      = 0;
315                         t->skip[2]      = 0;
316                 }
317
318                 __set_is_ancestor_bitmap(c, id);
319
320                 if (BCH_SNAPSHOT_DELETED(s.v)) {
321                         set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
322                         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
323                                 bch2_delete_dead_snapshots_async(c);
324                 }
325         } else {
326                 memset(t, 0, sizeof(*t));
327         }
328 err:
329         mutex_unlock(&c->snapshot_table_lock);
330         return ret;
331 }
332
333 int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
334                          struct bch_snapshot *s)
335 {
336         return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
337                                        BTREE_ITER_WITH_UPDATES, snapshot, s);
338 }
339
340 static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
341 {
342         struct bch_snapshot v;
343         int ret;
344
345         if (!id)
346                 return 0;
347
348         ret = bch2_snapshot_lookup(trans, id, &v);
349         if (bch2_err_matches(ret, ENOENT))
350                 bch_err(trans->c, "snapshot node %u not found", id);
351         if (ret)
352                 return ret;
353
354         return !BCH_SNAPSHOT_DELETED(&v);
355 }
356
357 /*
358  * If @k is a snapshot with just one live child, it's part of a linear chain,
359  * which we consider to be an equivalence class: and then after snapshot
360  * deletion cleanup, there should only be a single key at a given position in
361  * this equivalence class.
362  *
363  * This sets the equivalence class of @k to be the child's equivalence class, if
364  * it's part of such a linear chain: this correctly sets equivalence classes on
365  * startup if we run leaf to root (i.e. in natural key order).
366  */
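/*
 * Worked example (hypothetical IDs): if snapshot 10 has the single live child
 * 7, and 7 has the single live child 3 (a leaf), then iterating in key order
 * visits 3, 7, 10 and sets equiv(3) = 3, equiv(7) = equiv(3) = 3, and
 * equiv(10) = equiv(7) = 3: the whole chain collapses onto the leaf node.
 */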
367 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
368 {
369         struct bch_fs *c = trans->c;
370         unsigned i, nr_live = 0, live_idx = 0;
371         struct bkey_s_c_snapshot snap;
372         u32 id = k.k->p.offset, child[2];
373
374         if (k.k->type != KEY_TYPE_snapshot)
375                 return 0;
376
377         snap = bkey_s_c_to_snapshot(k);
378
379         child[0] = le32_to_cpu(snap.v->children[0]);
380         child[1] = le32_to_cpu(snap.v->children[1]);
381
382         for (i = 0; i < 2; i++) {
383                 int ret = bch2_snapshot_live(trans, child[i]);
384
385                 if (ret < 0)
386                         return ret;
387
388                 if (ret)
389                         live_idx = i;
390                 nr_live += ret;
391         }
392
393         mutex_lock(&c->snapshot_table_lock);
394
395         snapshot_t_mut(c, id)->equiv = nr_live == 1
396                 ? snapshot_t_mut(c, child[live_idx])->equiv
397                 : id;
398
399         mutex_unlock(&c->snapshot_table_lock);
400
401         return 0;
402 }
403
404 /* fsck: */
405
406 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
407 {
408         return snapshot_t(c, id)->children[child];
409 }
410
411 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
412 {
413         return bch2_snapshot_child(c, id, 0);
414 }
415
416 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
417 {
418         return bch2_snapshot_child(c, id, 1);
419 }
420
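/*
 * Pre-order traversal of a snapshot tree: descend to the left child if there
 * is one, otherwise climb until a parent has an unvisited right child;
 * returns 0 once the whole tree has been visited.
 */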
421 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
422 {
423         u32 n, parent;
424
425         n = bch2_snapshot_left_child(c, id);
426         if (n)
427                 return n;
428
429         while ((parent = bch2_snapshot_parent(c, id))) {
430                 n = bch2_snapshot_right_child(c, parent);
431                 if (n && n != id)
432                         return n;
433                 id = parent;
434         }
435
436         return 0;
437 }
438
439 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
440 {
441         u32 id = snapshot_root;
442         u32 subvol = 0, s;
443
444         while (id) {
445                 s = snapshot_t(c, id)->subvol;
446
447                 if (s && (!subvol || s < subvol))
448                         subvol = s;
449
450                 id = bch2_snapshot_tree_next(c, id);
451         }
452
453         return subvol;
454 }
455
456 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
457                                             u32 snapshot_root, u32 *subvol_id)
458 {
459         struct bch_fs *c = trans->c;
460         struct btree_iter iter;
461         struct bkey_s_c k;
462         bool found = false;
463         int ret;
464
465         for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
466                                      0, k, ret) {
467                 if (k.k->type != KEY_TYPE_subvolume)
468                         continue;
469
470                 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
471                 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
472                         continue;
473                 if (!BCH_SUBVOLUME_SNAP(s.v)) {
474                         *subvol_id = s.k->p.offset;
475                         found = true;
476                         break;
477                 }
478         }
479
480         bch2_trans_iter_exit(trans, &iter);
481
482         if (!ret && !found) {
483                 struct bkey_i_subvolume *u;
484
485                 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
486
487                 u = bch2_bkey_get_mut_typed(trans, &iter,
488                                             BTREE_ID_subvolumes, POS(0, *subvol_id),
489                                             0, subvolume);
490                 ret = PTR_ERR_OR_ZERO(u);
491                 if (ret)
492                         return ret;
493
494                 SET_BCH_SUBVOLUME_SNAP(&u->v, false);
495         }
496
497         return ret;
498 }
499
500 static int check_snapshot_tree(struct btree_trans *trans,
501                                struct btree_iter *iter,
502                                struct bkey_s_c k)
503 {
504         struct bch_fs *c = trans->c;
505         struct bkey_s_c_snapshot_tree st;
506         struct bch_snapshot s;
507         struct bch_subvolume subvol;
508         struct printbuf buf = PRINTBUF;
509         u32 root_id;
510         int ret;
511
512         if (k.k->type != KEY_TYPE_snapshot_tree)
513                 return 0;
514
515         st = bkey_s_c_to_snapshot_tree(k);
516         root_id = le32_to_cpu(st.v->root_snapshot);
517
518         ret = bch2_snapshot_lookup(trans, root_id, &s);
519         if (ret && !bch2_err_matches(ret, ENOENT))
520                 goto err;
521
522         if (fsck_err_on(ret ||
523                         root_id != bch2_snapshot_root(c, root_id) ||
524                         st.k->p.offset != le32_to_cpu(s.tree),
525                         c, snapshot_tree_to_missing_snapshot,
526                         "snapshot tree points to missing/incorrect snapshot:\n  %s",
527                         (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
528                 ret = bch2_btree_delete_at(trans, iter, 0);
529                 goto err;
530         }
531
532         ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
533                                  false, 0, &subvol);
534         if (ret && !bch2_err_matches(ret, ENOENT))
535                 goto err;
536
537         if (fsck_err_on(ret,
538                         c, snapshot_tree_to_missing_subvol,
539                         "snapshot tree points to missing subvolume:\n  %s",
540                         (printbuf_reset(&buf),
541                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
542             fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
543                                                 le32_to_cpu(subvol.snapshot),
544                                                 root_id),
545                         c, snapshot_tree_to_wrong_subvol,
546                         "snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
547                         (printbuf_reset(&buf),
548                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
549             fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
550                         c, snapshot_tree_to_snapshot_subvol,
551                         "snapshot tree points to snapshot subvolume:\n  %s",
552                         (printbuf_reset(&buf),
553                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
554                 struct bkey_i_snapshot_tree *u;
555                 u32 subvol_id;
556
557                 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
558                 if (ret)
559                         goto err;
560
561                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
562                 ret = PTR_ERR_OR_ZERO(u);
563                 if (ret)
564                         goto err;
565
566                 u->v.master_subvol = cpu_to_le32(subvol_id);
567                 st = snapshot_tree_i_to_s_c(u);
568         }
569 err:
570 fsck_err:
571         printbuf_exit(&buf);
572         return ret;
573 }
574
575 /*
576  * For each snapshot_tree, make sure it points to the root of a snapshot tree
577  * and that snapshot entry points back to it, or delete it.
578  *
579  * And, make sure it points to a subvolume within that snapshot tree, or correct
580  * it to point to the oldest subvolume within that snapshot tree.
581  */
582 int bch2_check_snapshot_trees(struct bch_fs *c)
583 {
584         int ret = bch2_trans_run(c,
585                 for_each_btree_key_commit(trans, iter,
586                         BTREE_ID_snapshot_trees, POS_MIN,
587                         BTREE_ITER_PREFETCH, k,
588                         NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
589                 check_snapshot_tree(trans, &iter, k)));
590         bch_err_fn(c, ret);
591         return ret;
592 }
593
594 /*
595  * Look up snapshot tree for @tree_id and find root,
596  * make sure @snap_id is a descendant:
597  */
598 static int snapshot_tree_ptr_good(struct btree_trans *trans,
599                                   u32 snap_id, u32 tree_id)
600 {
601         struct bch_snapshot_tree s_t;
602         int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
603
604         if (bch2_err_matches(ret, ENOENT))
605                 return 0;
606         if (ret)
607                 return ret;
608
609         return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
610 }
611
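/*
 * Pick a skiplist target: returns @id unchanged if it is a tree root,
 * otherwise a node chosen at random from the path between @id and the root.
 * Callers pass the parent of a node whose skip[] entries are being
 * (re)generated, so every entry ends up pointing at some ancestor.
 */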
612 u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
613 {
614         const struct snapshot_t *s;
615
616         if (!id)
617                 return 0;
618
619         rcu_read_lock();
620         s = snapshot_t(c, id);
621         if (s->parent)
622                 id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
623         rcu_read_unlock();
624
625         return id;
626 }
627
628 static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
629 {
630         unsigned i;
631
632         for (i = 0; i < 3; i++)
633                 if (!s.parent) {
634                         if (s.skip[i])
635                                 return false;
636                 } else {
637                         if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
638                                 return false;
639                 }
640
641         return true;
642 }
643
644 /*
645  * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
646  * its snapshot_tree pointer is correct (allocate new one if necessary), then
647  * update this node's pointer to root node's pointer:
648  */
649 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
650                                     struct btree_iter *iter,
651                                     struct bkey_s_c k,
652                                     struct bch_snapshot *s)
653 {
654         struct bch_fs *c = trans->c;
655         struct btree_iter root_iter;
656         struct bch_snapshot_tree s_t;
657         struct bkey_s_c_snapshot root;
658         struct bkey_i_snapshot *u;
659         u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
660         int ret;
661
662         root = bch2_bkey_get_iter_typed(trans, &root_iter,
663                                BTREE_ID_snapshots, POS(0, root_id),
664                                BTREE_ITER_WITH_UPDATES, snapshot);
665         ret = bkey_err(root);
666         if (ret)
667                 goto err;
668
669         tree_id = le32_to_cpu(root.v->tree);
670
671         ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
672         if (ret && !bch2_err_matches(ret, ENOENT))
673                 return ret;
674
675         if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
676                 u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
677                 ret =   PTR_ERR_OR_ZERO(u) ?:
678                         bch2_snapshot_tree_create(trans, root_id,
679                                 bch2_snapshot_tree_oldest_subvol(c, root_id),
680                                 &tree_id);
681                 if (ret)
682                         goto err;
683
684                 u->v.tree = cpu_to_le32(tree_id);
685                 if (k.k->p.offset == root_id)
686                         *s = u->v;
687         }
688
689         if (k.k->p.offset != root_id) {
690                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
691                 ret = PTR_ERR_OR_ZERO(u);
692                 if (ret)
693                         goto err;
694
695                 u->v.tree = cpu_to_le32(tree_id);
696                 *s = u->v;
697         }
698 err:
699         bch2_trans_iter_exit(trans, &root_iter);
700         return ret;
701 }
702
703 static int check_snapshot(struct btree_trans *trans,
704                           struct btree_iter *iter,
705                           struct bkey_s_c k)
706 {
707         struct bch_fs *c = trans->c;
708         struct bch_snapshot s;
709         struct bch_subvolume subvol;
710         struct bch_snapshot v;
711         struct bkey_i_snapshot *u;
712         u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
713         u32 real_depth;
714         struct printbuf buf = PRINTBUF;
715         bool should_have_subvol;
716         u32 i, id;
717         int ret = 0;
718
719         if (k.k->type != KEY_TYPE_snapshot)
720                 return 0;
721
722         memset(&s, 0, sizeof(s));
723         memcpy(&s, k.v, bkey_val_bytes(k.k));
724
725         id = le32_to_cpu(s.parent);
726         if (id) {
727                 ret = bch2_snapshot_lookup(trans, id, &v);
728                 if (bch2_err_matches(ret, ENOENT))
729                         bch_err(c, "snapshot with nonexistent parent:\n  %s",
730                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
731                 if (ret)
732                         goto err;
733
734                 if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
735                     le32_to_cpu(v.children[1]) != k.k->p.offset) {
736                         bch_err(c, "snapshot parent %u missing pointer to child %llu",
737                                 id, k.k->p.offset);
738                         ret = -EINVAL;
739                         goto err;
740                 }
741         }
742
743         for (i = 0; i < 2 && s.children[i]; i++) {
744                 id = le32_to_cpu(s.children[i]);
745
746                 ret = bch2_snapshot_lookup(trans, id, &v);
747                 if (bch2_err_matches(ret, ENOENT))
748                         bch_err(c, "snapshot node %llu has nonexistent child %u",
749                                 k.k->p.offset, id);
750                 if (ret)
751                         goto err;
752
753                 if (le32_to_cpu(v.parent) != k.k->p.offset) {
754                         bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
755                                 id, le32_to_cpu(v.parent), k.k->p.offset);
756                         ret = -EINVAL;
757                         goto err;
758                 }
759         }
760
761         should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
762                 !BCH_SNAPSHOT_DELETED(&s);
763
764         if (should_have_subvol) {
765                 id = le32_to_cpu(s.subvol);
766                 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
767                 if (bch2_err_matches(ret, ENOENT))
768                         bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
769                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
770                 if (ret)
771                         goto err;
772
773                 if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
774                         bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
775                                 k.k->p.offset);
776                         ret = -EINVAL;
777                         goto err;
778                 }
779         } else {
780                 if (fsck_err_on(s.subvol,
781                                 c, snapshot_should_not_have_subvol,
782                                 "snapshot should not point to subvol:\n  %s",
783                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
784                         u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
785                         ret = PTR_ERR_OR_ZERO(u);
786                         if (ret)
787                                 goto err;
788
789                         u->v.subvol = 0;
790                         s = u->v;
791                 }
792         }
793
794         ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
795         if (ret < 0)
796                 goto err;
797
798         if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
799                         "snapshot points to missing/incorrect tree:\n  %s",
800                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
801                 ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
802                 if (ret)
803                         goto err;
804         }
805         ret = 0;
806
807         real_depth = bch2_snapshot_depth(c, parent_id);
808
809         if (le32_to_cpu(s.depth) != real_depth &&
810             (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
811              fsck_err(c, snapshot_bad_depth,
812                       "snapshot with incorrect depth field, should be %u:\n  %s",
813                       real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
814                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
815                 ret = PTR_ERR_OR_ZERO(u);
816                 if (ret)
817                         goto err;
818
819                 u->v.depth = cpu_to_le32(real_depth);
820                 s = u->v;
821         }
822
823         ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
824         if (ret < 0)
825                 goto err;
826
827         if (!ret &&
828             (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
829              fsck_err(c, snapshot_bad_skiplist,
830                       "snapshot with bad skiplist field:\n  %s",
831                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
832                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
833                 ret = PTR_ERR_OR_ZERO(u);
834                 if (ret)
835                         goto err;
836
837                 for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
838                         u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
839
840                 bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
841                 s = u->v;
842         }
843         ret = 0;
844 err:
845 fsck_err:
846         printbuf_exit(&buf);
847         return ret;
848 }
849
850 int bch2_check_snapshots(struct bch_fs *c)
851 {
852         /*
853          * We iterate backwards as checking/fixing the depth field requires that
854          * the parent's depth already be correct:
855          */
856         int ret = bch2_trans_run(c,
857                 for_each_btree_key_reverse_commit(trans, iter,
858                                 BTREE_ID_snapshots, POS_MAX,
859                                 BTREE_ITER_PREFETCH, k,
860                                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
861                         check_snapshot(trans, &iter, k)));
862         bch_err_fn(c, ret);
863         return ret;
864 }
865
866 /*
867  * Mark a snapshot as deleted, for future cleanup:
868  */
869 int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
870 {
871         struct btree_iter iter;
872         struct bkey_i_snapshot *s;
873         int ret = 0;
874
875         s = bch2_bkey_get_mut_typed(trans, &iter,
876                                     BTREE_ID_snapshots, POS(0, id),
877                                     0, snapshot);
878         ret = PTR_ERR_OR_ZERO(s);
879         if (unlikely(ret)) {
880                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
881                                         trans->c, "missing snapshot %u", id);
882                 return ret;
883         }
884
885         /* already deleted? */
886         if (BCH_SNAPSHOT_DELETED(&s->v))
887                 goto err;
888
889         SET_BCH_SNAPSHOT_DELETED(&s->v, true);
890         SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
891         s->v.subvol = 0;
892 err:
893         bch2_trans_iter_exit(trans, &iter);
894         return ret;
895 }
896
897 static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
898 {
899         if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
900                 swap(s->children[0], s->children[1]);
901 }
902
903 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
904 {
905         struct bch_fs *c = trans->c;
906         struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
907         struct btree_iter c_iter = (struct btree_iter) { NULL };
908         struct btree_iter tree_iter = (struct btree_iter) { NULL };
909         struct bkey_s_c_snapshot s;
910         u32 parent_id, child_id;
911         unsigned i;
912         int ret = 0;
913
914         s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
915                                      BTREE_ITER_INTENT, snapshot);
916         ret = bkey_err(s);
917         bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
918                                 "missing snapshot %u", id);
919
920         if (ret)
921                 goto err;
922
923         BUG_ON(s.v->children[1]);
924
925         parent_id = le32_to_cpu(s.v->parent);
926         child_id = le32_to_cpu(s.v->children[0]);
927
928         if (parent_id) {
929                 struct bkey_i_snapshot *parent;
930
931                 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
932                                      BTREE_ID_snapshots, POS(0, parent_id),
933                                      0, snapshot);
934                 ret = PTR_ERR_OR_ZERO(parent);
935                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
936                                         "missing snapshot %u", parent_id);
937                 if (unlikely(ret))
938                         goto err;
939
940                 /* find entry in parent->children for node being deleted */
941                 for (i = 0; i < 2; i++)
942                         if (le32_to_cpu(parent->v.children[i]) == id)
943                                 break;
944
945                 if (bch2_fs_inconsistent_on(i == 2, c,
946                                         "snapshot %u missing child pointer to %u",
947                                         parent_id, id))
948                         goto err;
949
950                 parent->v.children[i] = cpu_to_le32(child_id);
951
952                 normalize_snapshot_child_pointers(&parent->v);
953         }
954
955         if (child_id) {
956                 struct bkey_i_snapshot *child;
957
958                 child = bch2_bkey_get_mut_typed(trans, &c_iter,
959                                      BTREE_ID_snapshots, POS(0, child_id),
960                                      0, snapshot);
961                 ret = PTR_ERR_OR_ZERO(child);
962                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
963                                         "missing snapshot %u", child_id);
964                 if (unlikely(ret))
965                         goto err;
966
967                 child->v.parent = cpu_to_le32(parent_id);
968
969                 if (!child->v.parent) {
970                         child->v.skip[0] = 0;
971                         child->v.skip[1] = 0;
972                         child->v.skip[2] = 0;
973                 }
974         }
975
976         if (!parent_id) {
977                 /*
978                  * We're deleting the root of a snapshot tree: update the
979                  * snapshot_tree entry to point to the new root, or delete it if
980                  * this is the last snapshot ID in this tree:
981                  */
982                 struct bkey_i_snapshot_tree *s_t;
983
984                 BUG_ON(s.v->children[1]);
985
986                 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
987                                 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
988                                 0, snapshot_tree);
989                 ret = PTR_ERR_OR_ZERO(s_t);
990                 if (ret)
991                         goto err;
992
993                 if (s.v->children[0]) {
994                         s_t->v.root_snapshot = s.v->children[0];
995                 } else {
996                         s_t->k.type = KEY_TYPE_deleted;
997                         set_bkey_val_u64s(&s_t->k, 0);
998                 }
999         }
1000
1001         ret = bch2_btree_delete_at(trans, &iter, 0);
1002 err:
1003         bch2_trans_iter_exit(trans, &tree_iter);
1004         bch2_trans_iter_exit(trans, &p_iter);
1005         bch2_trans_iter_exit(trans, &c_iter);
1006         bch2_trans_iter_exit(trans, &iter);
1007         return ret;
1008 }
1009
1010 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
1011                           u32 *new_snapids,
1012                           u32 *snapshot_subvols,
1013                           unsigned nr_snapids)
1014 {
1015         struct bch_fs *c = trans->c;
1016         struct btree_iter iter;
1017         struct bkey_i_snapshot *n;
1018         struct bkey_s_c k;
1019         unsigned i, j;
1020         u32 depth = bch2_snapshot_depth(c, parent);
1021         int ret;
1022
1023         bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
1024                              POS_MIN, BTREE_ITER_INTENT);
1025         k = bch2_btree_iter_peek(&iter);
1026         ret = bkey_err(k);
1027         if (ret)
1028                 goto err;
1029
1030         for (i = 0; i < nr_snapids; i++) {
1031                 k = bch2_btree_iter_prev_slot(&iter);
1032                 ret = bkey_err(k);
1033                 if (ret)
1034                         goto err;
1035
1036                 if (!k.k || !k.k->p.offset) {
1037                         ret = -BCH_ERR_ENOSPC_snapshot_create;
1038                         goto err;
1039                 }
1040
1041                 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
1042                 ret = PTR_ERR_OR_ZERO(n);
1043                 if (ret)
1044                         goto err;
1045
1046                 n->v.flags      = 0;
1047                 n->v.parent     = cpu_to_le32(parent);
1048                 n->v.subvol     = cpu_to_le32(snapshot_subvols[i]);
1049                 n->v.tree       = cpu_to_le32(tree);
1050                 n->v.depth      = cpu_to_le32(depth);
1051
1052                 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
1053                         n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
1054
1055                 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
1056                 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
1057
1058                 ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
1059                                          bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
1060                 if (ret)
1061                         goto err;
1062
1063                 new_snapids[i]  = iter.pos.offset;
1064
1065                 mutex_lock(&c->snapshot_table_lock);
1066                 snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
1067                 mutex_unlock(&c->snapshot_table_lock);
1068         }
1069 err:
1070         bch2_trans_iter_exit(trans, &iter);
1071         return ret;
1072 }
1073
1074 /*
1075  * Create new snapshot IDs as children of an existing snapshot ID:
1076  */
1077 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
1078                               u32 *new_snapids,
1079                               u32 *snapshot_subvols,
1080                               unsigned nr_snapids)
1081 {
1082         struct btree_iter iter;
1083         struct bkey_i_snapshot *n_parent;
1084         int ret = 0;
1085
1086         n_parent = bch2_bkey_get_mut_typed(trans, &iter,
1087                         BTREE_ID_snapshots, POS(0, parent),
1088                         0, snapshot);
1089         ret = PTR_ERR_OR_ZERO(n_parent);
1090         if (unlikely(ret)) {
1091                 if (bch2_err_matches(ret, ENOENT))
1092                         bch_err(trans->c, "snapshot %u not found", parent);
1093                 return ret;
1094         }
1095
1096         if (n_parent->v.children[0] || n_parent->v.children[1]) {
1097                 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
1098                 ret = -EINVAL;
1099                 goto err;
1100         }
1101
1102         ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
1103                              new_snapids, snapshot_subvols, nr_snapids);
1104         if (ret)
1105                 goto err;
1106
1107         n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
1108         n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
1109         n_parent->v.subvol = 0;
1110         SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
1111 err:
1112         bch2_trans_iter_exit(trans, &iter);
1113         return ret;
1114 }
1115
1116 /*
1117  * Create a snapshot node that is the root of a new tree:
1118  */
1119 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
1120                               u32 *new_snapids,
1121                               u32 *snapshot_subvols,
1122                               unsigned nr_snapids)
1123 {
1124         struct bkey_i_snapshot_tree *n_tree;
1125         int ret;
1126
1127         n_tree = __bch2_snapshot_tree_create(trans);
1128         ret =   PTR_ERR_OR_ZERO(n_tree) ?:
1129                 create_snapids(trans, 0, n_tree->k.p.offset,
1130                              new_snapids, snapshot_subvols, nr_snapids);
1131         if (ret)
1132                 return ret;
1133
1134         n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
1135         n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
1136         return 0;
1137 }
1138
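/*
 * parent == 0 creates the root of a brand new snapshot tree and takes exactly
 * one new snapshot ID; a nonzero parent forks an existing snapshot and takes
 * exactly two (one per child), as the BUG_ON()s below enforce.
 */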
1139 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
1140                               u32 *new_snapids,
1141                               u32 *snapshot_subvols,
1142                               unsigned nr_snapids)
1143 {
1144         BUG_ON((parent == 0) != (nr_snapids == 1));
1145         BUG_ON((parent != 0) != (nr_snapids == 2));
1146
1147         return parent
1148                 ? bch2_snapshot_node_create_children(trans, parent,
1149                                 new_snapids, snapshot_subvols, nr_snapids)
1150                 : bch2_snapshot_node_create_tree(trans,
1151                                 new_snapids, snapshot_subvols, nr_snapids);
1152
1153 }
1154
1155 /*
1156  * If we have an unlinked inode in an internal snapshot node, and the inode
1157  * really has been deleted in all child snapshots, how does this get cleaned up?
1158  *
1159  * First, there is the problem of how keys that have been overwritten in all
1160  * child snapshots get deleted (currently unimplemented?), though inodes may
1161  * be a special case.
1162  *
1163  * Also: an unlinked inode in an internal snapshot appears not to be getting
1164  * deleted correctly if the inode doesn't exist in any leaf snapshot.
1165  *
1166  * Solution:
1167  *
1168  * For a key in an interior snapshot node that needs work to be done that
1169  * requires it to be mutated: iterate over all descendant leaf nodes and copy
1170  * that key to the leaf snapshot nodes, where we can mutate it.
1171  */
1172
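/*
 * Delete keys belonging to snapshots being deleted, and redundant keys within
 * an equivalence class: at a given position, only the first key seen in each
 * equivalence class (the one in the newest snapshot) is kept.
 */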
1173 static int snapshot_delete_key(struct btree_trans *trans,
1174                                struct btree_iter *iter,
1175                                struct bkey_s_c k,
1176                                snapshot_id_list *deleted,
1177                                snapshot_id_list *equiv_seen,
1178                                struct bpos *last_pos)
1179 {
1180         struct bch_fs *c = trans->c;
1181         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1182
1183         if (!bkey_eq(k.k->p, *last_pos))
1184                 equiv_seen->nr = 0;
1185         *last_pos = k.k->p;
1186
1187         if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
1188             snapshot_list_has_id(equiv_seen, equiv)) {
1189                 return bch2_btree_delete_at(trans, iter,
1190                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1191         } else {
1192                 return snapshot_list_add(c, equiv_seen, equiv);
1193         }
1194 }
1195
1196 static int move_key_to_correct_snapshot(struct btree_trans *trans,
1197                                struct btree_iter *iter,
1198                                struct bkey_s_c k)
1199 {
1200         struct bch_fs *c = trans->c;
1201         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1202
1203         /*
1204          * When we have a linear chain of snapshot nodes, we consider
1205          * those to form an equivalence class: we're going to collapse
1206          * them all down to a single node, and keep the leaf-most node -
1207          * which has the same id as the equivalence class id.
1208          *
1209          * If there are multiple keys in different snapshots at the same
1210          * position, we're only going to keep the one in the newest
1211          * snapshot - the rest have been overwritten and are redundant,
1212          * and for the key we're going to keep we need to move it to the
1213          * equivalence class ID if it's not there already.
1214          */
1215         if (equiv != k.k->p.snapshot) {
1216                 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1217                 struct btree_iter new_iter;
1218                 int ret;
1219
1220                 ret = PTR_ERR_OR_ZERO(new);
1221                 if (ret)
1222                         return ret;
1223
1224                 new->k.p.snapshot = equiv;
1225
1226                 bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
1227                                      BTREE_ITER_ALL_SNAPSHOTS|
1228                                      BTREE_ITER_CACHED|
1229                                      BTREE_ITER_INTENT);
1230
1231                 ret =   bch2_btree_iter_traverse(&new_iter) ?:
1232                         bch2_trans_update(trans, &new_iter, new,
1233                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
1234                         bch2_btree_delete_at(trans, iter,
1235                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1236                 bch2_trans_iter_exit(trans, &new_iter);
1237                 if (ret)
1238                         return ret;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
1245 {
1246         struct bkey_s_c_snapshot snap;
1247         u32 children[2];
1248         int ret;
1249
1250         if (k.k->type != KEY_TYPE_snapshot)
1251                 return 0;
1252
1253         snap = bkey_s_c_to_snapshot(k);
1254         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1255             BCH_SNAPSHOT_SUBVOL(snap.v))
1256                 return 0;
1257
1258         children[0] = le32_to_cpu(snap.v->children[0]);
1259         children[1] = le32_to_cpu(snap.v->children[1]);
1260
1261         ret   = bch2_snapshot_live(trans, children[0]) ?:
1262                 bch2_snapshot_live(trans, children[1]);
1263         if (ret < 0)
1264                 return ret;
1265         return !ret;
1266 }
1267
1268 /*
1269  * For a given snapshot, if it doesn't have a subvolume that points to it, and
1270  * it doesn't have child snapshot nodes - it's now redundant and we can mark it
1271  * as deleted.
1272  */
1273 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
1274 {
1275         int ret = bch2_snapshot_needs_delete(trans, k);
1276
1277         return ret <= 0
1278                 ? ret
1279                 : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
1280 }
1281
1282 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
1283                                                 snapshot_id_list *skip)
1284 {
1285         rcu_read_lock();
1286         while (snapshot_list_has_id(skip, id))
1287                 id = __bch2_snapshot_parent(c, id);
1288
1289         while (n--) {
1290                 do {
1291                         id = __bch2_snapshot_parent(c, id);
1292                 } while (snapshot_list_has_id(skip, id));
1293         }
1294         rcu_read_unlock();
1295
1296         return id;
1297 }
1298
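/*
 * A surviving snapshot may have had interior ancestors deleted: reduce its
 * depth by the number of deleted ancestors, and re-point any skip[] entries
 * that referred to deleted nodes at ancestors that are still live.
 */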
1299 static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
1300                                               struct btree_iter *iter, struct bkey_s_c k,
1301                                               snapshot_id_list *deleted)
1302 {
1303         struct bch_fs *c = trans->c;
1304         u32 nr_deleted_ancestors = 0;
1305         struct bkey_i_snapshot *s;
1306         int ret;
1307
1308         if (k.k->type != KEY_TYPE_snapshot)
1309                 return 0;
1310
1311         if (snapshot_list_has_id(deleted, k.k->p.offset))
1312                 return 0;
1313
1314         s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
1315         ret = PTR_ERR_OR_ZERO(s);
1316         if (ret)
1317                 return ret;
1318
1319         darray_for_each(*deleted, i)
1320                 nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
1321
1322         if (!nr_deleted_ancestors)
1323                 return 0;
1324
1325         le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
1326
1327         if (!s->v.depth) {
1328                 s->v.skip[0] = 0;
1329                 s->v.skip[1] = 0;
1330                 s->v.skip[2] = 0;
1331         } else {
1332                 u32 depth = le32_to_cpu(s->v.depth);
1333                 u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
1334
1335                 for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
1336                         u32 id = le32_to_cpu(s->v.skip[j]);
1337
1338                         if (snapshot_list_has_id(deleted, id)) {
1339                                 id = bch2_snapshot_nth_parent_skip(c,
1340                                                         parent,
1341                                                         depth > 1
1342                                                         ? get_random_u32_below(depth - 1)
1343                                                         : 0,
1344                                                         deleted);
1345                                 s->v.skip[j] = cpu_to_le32(id);
1346                         }
1347                 }
1348
1349                 bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
1350         }
1351
1352         return bch2_trans_update(trans, iter, &s->k_i, 0);
1353 }
1354
1355 int bch2_delete_dead_snapshots(struct bch_fs *c)
1356 {
1357         struct btree_trans *trans;
1358         snapshot_id_list deleted = { 0 };
1359         snapshot_id_list deleted_interior = { 0 };
1360         u32 id;
1361         int ret = 0;
1362
1363         if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
1364                 return 0;
1365
1366         if (!test_bit(BCH_FS_started, &c->flags)) {
1367                 ret = bch2_fs_read_write_early(c);
1368                 bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
1369                 if (ret)
1370                         return ret;
1371         }
1372
1373         trans = bch2_trans_get(c);
1374
1375         /*
1376          * For every snapshot node: If we have no live children and it's not
1377          * pointed to by a subvolume, delete it:
1378          */
1379         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
1380                         POS_MIN, 0, k,
1381                         NULL, NULL, 0,
1382                 bch2_delete_redundant_snapshot(trans, k));
1383         bch_err_msg(c, ret, "deleting redundant snapshots");
1384         if (ret)
1385                 goto err;
1386
1387         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1388                                  POS_MIN, 0, k,
1389                 bch2_snapshot_set_equiv(trans, k));
1390         bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
1391         if (ret)
1392                 goto err;
1393
1394         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1395                                  POS_MIN, 0, k, ({
1396                 if (k.k->type != KEY_TYPE_snapshot)
1397                         continue;
1398
1399                 BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
1400                         ? snapshot_list_add(c, &deleted, k.k->p.offset)
1401                         : 0;
1402         }));
1403         bch_err_msg(c, ret, "walking snapshots");
1404         if (ret)
1405                 goto err;
1406
1407         for (id = 0; id < BTREE_ID_NR; id++) {
1408                 struct bpos last_pos = POS_MIN;
1409                 snapshot_id_list equiv_seen = { 0 };
1410                 struct disk_reservation res = { 0 };
1411
1412                 if (!btree_type_has_snapshots(id))
1413                         continue;
1414
1415                 /*
1416                  * deleted inodes btree is maintained by a trigger on the inodes
1417                  * btree - no work for us to do here, and it's not safe to scan
1418                  * it because we'll see out of date keys due to the btree write
1419                  * buffer:
1420                  */
1421                 if (id == BTREE_ID_deleted_inodes)
1422                         continue;
1423
1424                 ret = for_each_btree_key_commit(trans, iter,
1425                                 id, POS_MIN,
1426                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1427                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1428                         snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
1429                       for_each_btree_key_commit(trans, iter,
1430                                 id, POS_MIN,
1431                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1432                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1433                         move_key_to_correct_snapshot(trans, &iter, k));
1434
1435                 bch2_disk_reservation_put(c, &res);
1436                 darray_exit(&equiv_seen);
1437
1438                 bch_err_msg(c, ret, "deleting keys from dying snapshots");
1439                 if (ret)
1440                         goto err;
1441         }
1442
1443         bch2_trans_unlock(trans);
1444         down_write(&c->snapshot_create_lock);
1445
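        /*
         * Interior nodes whose equivalence class representative is a
         * different node no longer hold any keys of their own; collect them
         * so they can be deleted below:
         */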
1446         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1447                                  POS_MIN, 0, k, ({
1448                 u32 snapshot = k.k->p.offset;
1449                 u32 equiv = bch2_snapshot_equiv(c, snapshot);
1450
1451                 equiv != snapshot
1452                         ? snapshot_list_add(c, &deleted_interior, snapshot)
1453                         : 0;
1454         }));
1455
1456         bch_err_msg(c, ret, "walking snapshots");
1457         if (ret)
1458                 goto err_create_lock;
1459
1460         /*
1461          * Fixing children of deleted snapshots can't be done completely
1462          * atomically: if we crash between here and when we delete the interior
1463          * nodes, some depth fields will be off:
1464          */
1465         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
1466                                   BTREE_ITER_INTENT, k,
1467                                   NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1468                 bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
1469         if (ret)
1470                 goto err_create_lock;
1471
1472         darray_for_each(deleted, i) {
1473                 ret = commit_do(trans, NULL, NULL, 0,
1474                         bch2_snapshot_node_delete(trans, *i));
1475                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1476                 if (ret)
1477                         goto err_create_lock;
1478         }
1479
1480         darray_for_each(deleted_interior, i) {
1481                 ret = commit_do(trans, NULL, NULL, 0,
1482                         bch2_snapshot_node_delete(trans, *i));
1483                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1484                 if (ret)
1485                         goto err_create_lock;
1486         }
1487 err_create_lock:
1488         up_write(&c->snapshot_create_lock);
1489 err:
1490         darray_exit(&deleted_interior);
1491         darray_exit(&deleted);
1492         bch2_trans_put(trans);
1493         bch_err_fn(c, ret);
1494         return ret;
1495 }
1496
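/*
 * Work item entry point for the background deletion path; drops the write ref
 * taken by bch2_delete_dead_snapshots_async() when the scan finishes.
 */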
1497 void bch2_delete_dead_snapshots_work(struct work_struct *work)
1498 {
1499         struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1500
1501         bch2_delete_dead_snapshots(c);
1502         bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1503 }
1504
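/*
 * Kick off dead snapshot deletion in the background: take a write ref so the
 * filesystem can't go read-only under the work item, and drop it again if the
 * work was already queued.
 */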
1505 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1506 {
1507         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1508             !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1509                 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1510 }
1511
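/*
 * Returns 1 if the key at @pos has been overwritten at the same position in a
 * descendant snapshot of @pos.snapshot, 0 if not, or a negative error code;
 * descendant snapshots have smaller IDs and thus sort first, hence the
 * backwards scan.
 */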
1512 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
1513                                        enum btree_id id,
1514                                        struct bpos pos)
1515 {
1516         struct bch_fs *c = trans->c;
1517         struct btree_iter iter;
1518         struct bkey_s_c k;
1519         int ret;
1520
1521         bch2_trans_iter_init(trans, &iter, id, pos,
1522                              BTREE_ITER_NOT_EXTENTS|
1523                              BTREE_ITER_ALL_SNAPSHOTS);
1524         while (1) {
1525                 k = bch2_btree_iter_prev(&iter);
1526                 ret = bkey_err(k);
1527                 if (ret)
1528                         break;
1529
1530                 if (!k.k)
1531                         break;
1532
1533                 if (!bkey_eq(pos, k.k->p))
1534                         break;
1535
1536                 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
1537                         ret = 1;
1538                         break;
1539                 }
1540         }
1541         bch2_trans_iter_exit(trans, &iter);
1542
1543         return ret;
1544 }
1545
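/*
 * The two helpers below walk down the tree, always taking the smaller child,
 * to reach a leaf in @id's subtree; children[] appears to be kept ordered
 * with the higher ID in slot 0, so children[1], when nonzero, is the smaller
 * child.
 */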
1546 static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
1547 {
1548         const struct snapshot_t *s = snapshot_t(c, id);
1549
1550         return s->children[1] ?: s->children[0];
1551 }
1552
1553 static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
1554 {
1555         u32 child;
1556
1557         while ((child = bch2_snapshot_smallest_child(c, id)))
1558                 id = child;
1559         return id;
1560 }
1561
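/*
 * Copy @interior_k down into leaf snapshot @leaf_id, unless the key has
 * already been overwritten on the way down to that leaf; the position of the
 * first copy created is recorded in *@new_min_pos, presumably so the caller
 * can restart its scan from there.
 */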
1562 static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
1563                                                enum btree_id btree,
1564                                                struct bkey_s_c interior_k,
1565                                                u32 leaf_id, struct bpos *new_min_pos)
1566 {
1567         struct btree_iter iter;
1568         struct bpos pos = interior_k.k->p;
1569         struct bkey_s_c k;
1570         struct bkey_i *new;
1571         int ret;
1572
1573         pos.snapshot = leaf_id;
1574
1575         bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
1576         k = bch2_btree_iter_peek_slot(&iter);
1577         ret = bkey_err(k);
1578         if (ret)
1579                 goto out;
1580
1581         /* key already overwritten in this snapshot? */
1582         if (k.k->p.snapshot != interior_k.k->p.snapshot)
1583                 goto out;
1584
1585         if (bpos_eq(*new_min_pos, POS_MIN)) {
1586                 *new_min_pos = k.k->p;
1587                 new_min_pos->snapshot = leaf_id;
1588         }
1589
1590         new = bch2_bkey_make_mut_noupdate(trans, interior_k);
1591         ret = PTR_ERR_OR_ZERO(new);
1592         if (ret)
1593                 goto out;
1594
1595         new->k.p.snapshot = leaf_id;
1596         ret = bch2_trans_update(trans, &iter, new, 0);
1597 out:
1598         bch2_trans_iter_exit(trans, &iter);
1599         return ret;
1600 }
1601
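/*
 * Give every leaf snapshot that still sees @k its own copy of the key,
 * committing as we go and retrying on transaction restart - presumably so the
 * interior version can afterwards be modified without changing what the leaf
 * snapshots see.
 */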
1602 int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
1603                                           enum btree_id btree,
1604                                           struct bkey_s_c k,
1605                                           struct bpos *new_min_pos)
1606 {
1607         struct bch_fs *c = trans->c;
1608         struct bkey_buf sk;
1609         u32 restart_count = trans->restart_count;
1610         int ret = 0;
1611
1612         bch2_bkey_buf_init(&sk);
1613         bch2_bkey_buf_reassemble(&sk, c, k);
1614         k = bkey_i_to_s_c(sk.k);
1615
1616         *new_min_pos = POS_MIN;
1617
1618         for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
1619              id < k.k->p.snapshot;
1620              id++) {
1621                 if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
1622                     !bch2_snapshot_is_leaf(c, id))
1623                         continue;
1624 again:
1625                 ret =   btree_trans_too_many_iters(trans) ?:
1626                         bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
1627                         bch2_trans_commit(trans, NULL, NULL, 0);
1628                 if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1629                         bch2_trans_begin(trans);
1630                         goto again;
1631                 }
1632
1633                 if (ret)
1634                         break;
1635         }
1636
1637         bch2_bkey_buf_exit(&sk, c);
1638
1639         return ret ?: trans_was_restarted(trans, restart_count);
1640 }
1641
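/*
 * Called for each snapshot key at startup: if any node is already marked
 * deleted, is redundant (its equivalence class points at another node), or
 * otherwise needs deleting, flag the filesystem so dead snapshot deletion is
 * run later.
 */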
1642 static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
1643 {
1644         struct bch_fs *c = trans->c;
1645         struct bkey_s_c_snapshot snap;
1646         int ret = 0;
1647
1648         if (k.k->type != KEY_TYPE_snapshot)
1649                 return 0;
1650
1651         snap = bkey_s_c_to_snapshot(k);
1652         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1653             bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
1654             (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
1655                 set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
1656                 return 0;
1657         }
1658
1659         return ret;
1660 }
1661
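/*
 * Build the in-memory snapshot table at startup: the first pass creates the
 * table entries, equivalence classes and deletion flags; the second fills in
 * the is_ancestor bitmaps, presumably because those are computed from parent
 * nodes that may not yet be in the table during the first pass.
 */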
1662 int bch2_snapshots_read(struct bch_fs *c)
1663 {
1664         int ret = bch2_trans_run(c,
1665                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1666                                    POS_MIN, 0, k,
1667                         bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
1668                         bch2_snapshot_set_equiv(trans, k) ?:
1669                         bch2_check_snapshot_needs_deletion(trans, k)) ?:
1670                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1671                                    POS_MIN, 0, k,
1672                            (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
1673         bch_err_fn(c, ret);
1674         return ret;
1675 }
1676
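/* Free the in-memory snapshot table at filesystem shutdown: */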
1677 void bch2_fs_snapshots_exit(struct bch_fs *c)
1678 {
1679         kfree(rcu_dereference_protected(c->snapshots, true));
1680 }