]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/snapshot.c
Update bcachefs sources to 0d63ed13ea3d closures: Fix race in closure_sync()
[bcachefs-tools-debian] / libbcachefs / snapshot.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_key_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "errcode.h"
9 #include "error.h"
10 #include "fs.h"
11 #include "snapshot.h"
12
13 #include <linux/random.h>
14
15 /*
16  * Snapshot trees:
17  *
18  * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
19  * exist to provide a stable identifier for the whole lifetime of a snapshot
20  * tree.
21  */
22
23 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
24                                 struct bkey_s_c k)
25 {
26         struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
27
28         prt_printf(out, "subvol %u root snapshot %u",
29                    le32_to_cpu(t.v->master_subvol),
30                    le32_to_cpu(t.v->root_snapshot));
31 }
32
33 int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
34                                enum bkey_invalid_flags flags,
35                                struct printbuf *err)
36 {
37         if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
38             bkey_lt(k.k->p, POS(0, 1))) {
39                 prt_printf(err, "bad pos");
40                 return -BCH_ERR_invalid_bkey;
41         }
42
43         return 0;
44 }
45
46 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
47                               struct bch_snapshot_tree *s)
48 {
49         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
50                                           BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
51
52         if (bch2_err_matches(ret, ENOENT))
53                 ret = -BCH_ERR_ENOENT_snapshot_tree;
54         return ret;
55 }
56
57 struct bkey_i_snapshot_tree *
58 __bch2_snapshot_tree_create(struct btree_trans *trans)
59 {
60         struct btree_iter iter;
61         int ret = bch2_bkey_get_empty_slot(trans, &iter,
62                         BTREE_ID_snapshot_trees, POS(0, U32_MAX));
63         struct bkey_i_snapshot_tree *s_t;
64
65         if (ret == -BCH_ERR_ENOSPC_btree_slot)
66                 ret = -BCH_ERR_ENOSPC_snapshot_tree;
67         if (ret)
68                 return ERR_PTR(ret);
69
70         s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
71         ret = PTR_ERR_OR_ZERO(s_t);
72         bch2_trans_iter_exit(trans, &iter);
73         return ret ? ERR_PTR(ret) : s_t;
74 }
75
76 static int bch2_snapshot_tree_create(struct btree_trans *trans,
77                                 u32 root_id, u32 subvol_id, u32 *tree_id)
78 {
79         struct bkey_i_snapshot_tree *n_tree =
80                 __bch2_snapshot_tree_create(trans);
81
82         if (IS_ERR(n_tree))
83                 return PTR_ERR(n_tree);
84
85         n_tree->v.master_subvol = cpu_to_le32(subvol_id);
86         n_tree->v.root_snapshot = cpu_to_le32(root_id);
87         *tree_id = n_tree->k.p.offset;
88         return 0;
89 }
90
91 /* Snapshot nodes: */
92
93 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
94 {
95         struct snapshot_table *t;
96
97         rcu_read_lock();
98         t = rcu_dereference(c->snapshots);
99
100         while (id && id < ancestor)
101                 id = __snapshot_t(t, id)->parent;
102         rcu_read_unlock();
103
104         return id == ancestor;
105 }
106
107 static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
108 {
109         const struct snapshot_t *s = __snapshot_t(t, id);
110
111         if (s->skip[2] <= ancestor)
112                 return s->skip[2];
113         if (s->skip[1] <= ancestor)
114                 return s->skip[1];
115         if (s->skip[0] <= ancestor)
116                 return s->skip[0];
117         return s->parent;
118 }
119
120 bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
121 {
122         struct snapshot_table *t;
123         bool ret;
124
125         EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);
126
127         rcu_read_lock();
128         t = rcu_dereference(c->snapshots);
129
130         while (id && id < ancestor - IS_ANCESTOR_BITMAP)
131                 id = get_ancestor_below(t, id, ancestor);
132
133         if (id && id < ancestor) {
134                 ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
135
136                 EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
137         } else {
138                 ret = id == ancestor;
139         }
140
141         rcu_read_unlock();
142
143         return ret;
144 }
145
146 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
147 {
148         size_t idx = U32_MAX - id;
149         size_t new_size;
150         struct snapshot_table *new, *old;
151
152         new_size = max(16UL, roundup_pow_of_two(idx + 1));
153
154         new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
155         if (!new)
156                 return NULL;
157
158         old = rcu_dereference_protected(c->snapshots, true);
159         if (old)
160                 memcpy(new->s,
161                        rcu_dereference_protected(c->snapshots, true)->s,
162                        sizeof(new->s[0]) * c->snapshot_table_size);
163
164         rcu_assign_pointer(c->snapshots, new);
165         c->snapshot_table_size = new_size;
166         kvfree_rcu_mightsleep(old);
167
168         return &rcu_dereference_protected(c->snapshots, true)->s[idx];
169 }
170
171 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
172 {
173         size_t idx = U32_MAX - id;
174
175         lockdep_assert_held(&c->snapshot_table_lock);
176
177         if (likely(idx < c->snapshot_table_size))
178                 return &rcu_dereference_protected(c->snapshots, true)->s[idx];
179
180         return __snapshot_t_mut(c, id);
181 }
182
183 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
184                            struct bkey_s_c k)
185 {
186         struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
187
188         prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
189                BCH_SNAPSHOT_SUBVOL(s.v),
190                BCH_SNAPSHOT_DELETED(s.v),
191                le32_to_cpu(s.v->parent),
192                le32_to_cpu(s.v->children[0]),
193                le32_to_cpu(s.v->children[1]),
194                le32_to_cpu(s.v->subvol),
195                le32_to_cpu(s.v->tree));
196
197         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
198                 prt_printf(out, " depth %u skiplist %u %u %u",
199                            le32_to_cpu(s.v->depth),
200                            le32_to_cpu(s.v->skip[0]),
201                            le32_to_cpu(s.v->skip[1]),
202                            le32_to_cpu(s.v->skip[2]));
203 }
204
205 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
206                           enum bkey_invalid_flags flags,
207                           struct printbuf *err)
208 {
209         struct bkey_s_c_snapshot s;
210         u32 i, id;
211
212         if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
213             bkey_lt(k.k->p, POS(0, 1))) {
214                 prt_printf(err, "bad pos");
215                 return -BCH_ERR_invalid_bkey;
216         }
217
218         s = bkey_s_c_to_snapshot(k);
219
220         id = le32_to_cpu(s.v->parent);
221         if (id && id <= k.k->p.offset) {
222                 prt_printf(err, "bad parent node (%u <= %llu)",
223                        id, k.k->p.offset);
224                 return -BCH_ERR_invalid_bkey;
225         }
226
227         if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
228                 prt_printf(err, "children not normalized");
229                 return -BCH_ERR_invalid_bkey;
230         }
231
232         if (s.v->children[0] &&
233             s.v->children[0] == s.v->children[1]) {
234                 prt_printf(err, "duplicate child nodes");
235                 return -BCH_ERR_invalid_bkey;
236         }
237
238         for (i = 0; i < 2; i++) {
239                 id = le32_to_cpu(s.v->children[i]);
240
241                 if (id >= k.k->p.offset) {
242                         prt_printf(err, "bad child node (%u >= %llu)",
243                                id, k.k->p.offset);
244                         return -BCH_ERR_invalid_bkey;
245                 }
246         }
247
248         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
249                 if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
250                     le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) {
251                         prt_printf(err, "skiplist not normalized");
252                         return -BCH_ERR_invalid_bkey;
253                 }
254
255                 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
256                         id = le32_to_cpu(s.v->skip[i]);
257
258                         if ((id && !s.v->parent) ||
259                             (id && id <= k.k->p.offset)) {
260                                 prt_printf(err, "bad skiplist node %u", id);
261                                 return -BCH_ERR_invalid_bkey;
262                         }
263                 }
264         }
265
266         return 0;
267 }
268
269 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
270 {
271         struct snapshot_t *t = snapshot_t_mut(c, id);
272         u32 parent = id;
273
274         while ((parent = bch2_snapshot_parent_early(c, parent)) &&
275                parent - id - 1 < IS_ANCESTOR_BITMAP)
276                 __set_bit(parent - id - 1, t->is_ancestor);
277 }
278
279 static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
280 {
281         mutex_lock(&c->snapshot_table_lock);
282         __set_is_ancestor_bitmap(c, id);
283         mutex_unlock(&c->snapshot_table_lock);
284 }
285
286 int bch2_mark_snapshot(struct btree_trans *trans,
287                        enum btree_id btree, unsigned level,
288                        struct bkey_s_c old, struct bkey_s_c new,
289                        unsigned flags)
290 {
291         struct bch_fs *c = trans->c;
292         struct snapshot_t *t;
293         u32 id = new.k->p.offset;
294         int ret = 0;
295
296         mutex_lock(&c->snapshot_table_lock);
297
298         t = snapshot_t_mut(c, id);
299         if (!t) {
300                 ret = -BCH_ERR_ENOMEM_mark_snapshot;
301                 goto err;
302         }
303
304         if (new.k->type == KEY_TYPE_snapshot) {
305                 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
306
307                 t->parent       = le32_to_cpu(s.v->parent);
308                 t->children[0]  = le32_to_cpu(s.v->children[0]);
309                 t->children[1]  = le32_to_cpu(s.v->children[1]);
310                 t->subvol       = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
311                 t->tree         = le32_to_cpu(s.v->tree);
312
313                 if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
314                         t->depth        = le32_to_cpu(s.v->depth);
315                         t->skip[0]      = le32_to_cpu(s.v->skip[0]);
316                         t->skip[1]      = le32_to_cpu(s.v->skip[1]);
317                         t->skip[2]      = le32_to_cpu(s.v->skip[2]);
318                 } else {
319                         t->depth        = 0;
320                         t->skip[0]      = 0;
321                         t->skip[1]      = 0;
322                         t->skip[2]      = 0;
323                 }
324
325                 __set_is_ancestor_bitmap(c, id);
326
327                 if (BCH_SNAPSHOT_DELETED(s.v)) {
328                         set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
329                         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
330                                 bch2_delete_dead_snapshots_async(c);
331                 }
332         } else {
333                 memset(t, 0, sizeof(*t));
334         }
335 err:
336         mutex_unlock(&c->snapshot_table_lock);
337         return ret;
338 }
339
340 int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
341                          struct bch_snapshot *s)
342 {
343         return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
344                                        BTREE_ITER_WITH_UPDATES, snapshot, s);
345 }
346
347 static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
348 {
349         struct bch_snapshot v;
350         int ret;
351
352         if (!id)
353                 return 0;
354
355         ret = bch2_snapshot_lookup(trans, id, &v);
356         if (bch2_err_matches(ret, ENOENT))
357                 bch_err(trans->c, "snapshot node %u not found", id);
358         if (ret)
359                 return ret;
360
361         return !BCH_SNAPSHOT_DELETED(&v);
362 }
363
364 /*
365  * If @k is a snapshot with just one live child, it's part of a linear chain,
366  * which we consider to be an equivalence class: and then after snapshot
367  * deletion cleanup, there should only be a single key at a given position in
368  * this equivalence class.
369  *
370  * This sets the equivalence class of @k to be the child's equivalence class, if
371  * it's part of such a linear chain: this correctly sets equivalence classes on
372  * startup if we run leaf to root (i.e. in natural key order).
373  */
374 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
375 {
376         struct bch_fs *c = trans->c;
377         unsigned i, nr_live = 0, live_idx = 0;
378         struct bkey_s_c_snapshot snap;
379         u32 id = k.k->p.offset, child[2];
380
381         if (k.k->type != KEY_TYPE_snapshot)
382                 return 0;
383
384         snap = bkey_s_c_to_snapshot(k);
385
386         child[0] = le32_to_cpu(snap.v->children[0]);
387         child[1] = le32_to_cpu(snap.v->children[1]);
388
389         for (i = 0; i < 2; i++) {
390                 int ret = bch2_snapshot_live(trans, child[i]);
391
392                 if (ret < 0)
393                         return ret;
394
395                 if (ret)
396                         live_idx = i;
397                 nr_live += ret;
398         }
399
400         mutex_lock(&c->snapshot_table_lock);
401
402         snapshot_t_mut(c, id)->equiv = nr_live == 1
403                 ? snapshot_t_mut(c, child[live_idx])->equiv
404                 : id;
405
406         mutex_unlock(&c->snapshot_table_lock);
407
408         return 0;
409 }
410
411 /* fsck: */
412
413 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
414 {
415         return snapshot_t(c, id)->children[child];
416 }
417
418 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
419 {
420         return bch2_snapshot_child(c, id, 0);
421 }
422
423 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
424 {
425         return bch2_snapshot_child(c, id, 1);
426 }
427
428 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
429 {
430         u32 n, parent;
431
432         n = bch2_snapshot_left_child(c, id);
433         if (n)
434                 return n;
435
436         while ((parent = bch2_snapshot_parent(c, id))) {
437                 n = bch2_snapshot_right_child(c, parent);
438                 if (n && n != id)
439                         return n;
440                 id = parent;
441         }
442
443         return 0;
444 }
445
446 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
447 {
448         u32 id = snapshot_root;
449         u32 subvol = 0, s;
450
451         while (id) {
452                 s = snapshot_t(c, id)->subvol;
453
454                 if (s && (!subvol || s < subvol))
455                         subvol = s;
456
457                 id = bch2_snapshot_tree_next(c, id);
458         }
459
460         return subvol;
461 }
462
463 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
464                                             u32 snapshot_root, u32 *subvol_id)
465 {
466         struct bch_fs *c = trans->c;
467         struct btree_iter iter;
468         struct bkey_s_c k;
469         struct bkey_s_c_subvolume s;
470         bool found = false;
471         int ret;
472
473         for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
474                                      0, k, ret) {
475                 if (k.k->type != KEY_TYPE_subvolume)
476                         continue;
477
478                 s = bkey_s_c_to_subvolume(k);
479                 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
480                         continue;
481                 if (!BCH_SUBVOLUME_SNAP(s.v)) {
482                         *subvol_id = s.k->p.offset;
483                         found = true;
484                         break;
485                 }
486         }
487
488         bch2_trans_iter_exit(trans, &iter);
489
490         if (!ret && !found) {
491                 struct bkey_i_subvolume *u;
492
493                 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
494
495                 u = bch2_bkey_get_mut_typed(trans, &iter,
496                                             BTREE_ID_subvolumes, POS(0, *subvol_id),
497                                             0, subvolume);
498                 ret = PTR_ERR_OR_ZERO(u);
499                 if (ret)
500                         return ret;
501
502                 SET_BCH_SUBVOLUME_SNAP(&u->v, false);
503         }
504
505         return ret;
506 }
507
508 static int check_snapshot_tree(struct btree_trans *trans,
509                                struct btree_iter *iter,
510                                struct bkey_s_c k)
511 {
512         struct bch_fs *c = trans->c;
513         struct bkey_s_c_snapshot_tree st;
514         struct bch_snapshot s;
515         struct bch_subvolume subvol;
516         struct printbuf buf = PRINTBUF;
517         u32 root_id;
518         int ret;
519
520         if (k.k->type != KEY_TYPE_snapshot_tree)
521                 return 0;
522
523         st = bkey_s_c_to_snapshot_tree(k);
524         root_id = le32_to_cpu(st.v->root_snapshot);
525
526         ret = bch2_snapshot_lookup(trans, root_id, &s);
527         if (ret && !bch2_err_matches(ret, ENOENT))
528                 goto err;
529
530         if (fsck_err_on(ret ||
531                         root_id != bch2_snapshot_root(c, root_id) ||
532                         st.k->p.offset != le32_to_cpu(s.tree),
533                         c,
534                         "snapshot tree points to missing/incorrect snapshot:\n  %s",
535                         (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
536                 ret = bch2_btree_delete_at(trans, iter, 0);
537                 goto err;
538         }
539
540         ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
541                                  false, 0, &subvol);
542         if (ret && !bch2_err_matches(ret, ENOENT))
543                 goto err;
544
545         if (fsck_err_on(ret, c,
546                         "snapshot tree points to missing subvolume:\n  %s",
547                         (printbuf_reset(&buf),
548                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
549             fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
550                                                 le32_to_cpu(subvol.snapshot),
551                                                 root_id), c,
552                         "snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
553                         (printbuf_reset(&buf),
554                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
555             fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
556                         "snapshot tree points to snapshot subvolume:\n  %s",
557                         (printbuf_reset(&buf),
558                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
559                 struct bkey_i_snapshot_tree *u;
560                 u32 subvol_id;
561
562                 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
563                 if (ret)
564                         goto err;
565
566                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
567                 ret = PTR_ERR_OR_ZERO(u);
568                 if (ret)
569                         goto err;
570
571                 u->v.master_subvol = cpu_to_le32(subvol_id);
572                 st = snapshot_tree_i_to_s_c(u);
573         }
574 err:
575 fsck_err:
576         printbuf_exit(&buf);
577         return ret;
578 }
579
580 /*
581  * For each snapshot_tree, make sure it points to the root of a snapshot tree
582  * and that snapshot entry points back to it, or delete it.
583  *
584  * And, make sure it points to a subvolume within that snapshot tree, or correct
585  * it to point to the oldest subvolume within that snapshot tree.
586  */
587 int bch2_check_snapshot_trees(struct bch_fs *c)
588 {
589         struct btree_iter iter;
590         struct bkey_s_c k;
591         int ret;
592
593         ret = bch2_trans_run(c,
594                 for_each_btree_key_commit(trans, iter,
595                         BTREE_ID_snapshot_trees, POS_MIN,
596                         BTREE_ITER_PREFETCH, k,
597                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
598                 check_snapshot_tree(trans, &iter, k)));
599
600         if (ret)
601                 bch_err(c, "error %i checking snapshot trees", ret);
602         return ret;
603 }
604
605 /*
606  * Look up snapshot tree for @tree_id and find root,
607  * make sure @snap_id is a descendent:
608  */
609 static int snapshot_tree_ptr_good(struct btree_trans *trans,
610                                   u32 snap_id, u32 tree_id)
611 {
612         struct bch_snapshot_tree s_t;
613         int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
614
615         if (bch2_err_matches(ret, ENOENT))
616                 return 0;
617         if (ret)
618                 return ret;
619
620         return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
621 }
622
623 u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
624 {
625         const struct snapshot_t *s;
626
627         if (!id)
628                 return 0;
629
630         rcu_read_lock();
631         s = snapshot_t(c, id);
632         if (s->parent)
633                 id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
634         rcu_read_unlock();
635
636         return id;
637 }
638
639 static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
640 {
641         unsigned i;
642
643         for (i = 0; i < 3; i++)
644                 if (!s.parent) {
645                         if (s.skip[i])
646                                 return false;
647                 } else {
648                         if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
649                                 return false;
650                 }
651
652         return true;
653 }
654
655 /*
656  * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
657  * its snapshot_tree pointer is correct (allocate new one if necessary), then
658  * update this node's pointer to root node's pointer:
659  */
660 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
661                                     struct btree_iter *iter,
662                                     struct bkey_s_c k,
663                                     struct bch_snapshot *s)
664 {
665         struct bch_fs *c = trans->c;
666         struct btree_iter root_iter;
667         struct bch_snapshot_tree s_t;
668         struct bkey_s_c_snapshot root;
669         struct bkey_i_snapshot *u;
670         u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
671         int ret;
672
673         root = bch2_bkey_get_iter_typed(trans, &root_iter,
674                                BTREE_ID_snapshots, POS(0, root_id),
675                                BTREE_ITER_WITH_UPDATES, snapshot);
676         ret = bkey_err(root);
677         if (ret)
678                 goto err;
679
680         tree_id = le32_to_cpu(root.v->tree);
681
682         ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
683         if (ret && !bch2_err_matches(ret, ENOENT))
684                 return ret;
685
686         if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
687                 u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
688                 ret =   PTR_ERR_OR_ZERO(u) ?:
689                         bch2_snapshot_tree_create(trans, root_id,
690                                 bch2_snapshot_tree_oldest_subvol(c, root_id),
691                                 &tree_id);
692                 if (ret)
693                         goto err;
694
695                 u->v.tree = cpu_to_le32(tree_id);
696                 if (k.k->p.offset == root_id)
697                         *s = u->v;
698         }
699
700         if (k.k->p.offset != root_id) {
701                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
702                 ret = PTR_ERR_OR_ZERO(u);
703                 if (ret)
704                         goto err;
705
706                 u->v.tree = cpu_to_le32(tree_id);
707                 *s = u->v;
708         }
709 err:
710         bch2_trans_iter_exit(trans, &root_iter);
711         return ret;
712 }
713
714 static int check_snapshot(struct btree_trans *trans,
715                           struct btree_iter *iter,
716                           struct bkey_s_c k)
717 {
718         struct bch_fs *c = trans->c;
719         struct bch_snapshot s;
720         struct bch_subvolume subvol;
721         struct bch_snapshot v;
722         struct bkey_i_snapshot *u;
723         u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
724         u32 real_depth;
725         struct printbuf buf = PRINTBUF;
726         bool should_have_subvol;
727         u32 i, id;
728         int ret = 0;
729
730         if (k.k->type != KEY_TYPE_snapshot)
731                 return 0;
732
733         memset(&s, 0, sizeof(s));
734         memcpy(&s, k.v, bkey_val_bytes(k.k));
735
736         id = le32_to_cpu(s.parent);
737         if (id) {
738                 ret = bch2_snapshot_lookup(trans, id, &v);
739                 if (bch2_err_matches(ret, ENOENT))
740                         bch_err(c, "snapshot with nonexistent parent:\n  %s",
741                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
742                 if (ret)
743                         goto err;
744
745                 if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
746                     le32_to_cpu(v.children[1]) != k.k->p.offset) {
747                         bch_err(c, "snapshot parent %u missing pointer to child %llu",
748                                 id, k.k->p.offset);
749                         ret = -EINVAL;
750                         goto err;
751                 }
752         }
753
754         for (i = 0; i < 2 && s.children[i]; i++) {
755                 id = le32_to_cpu(s.children[i]);
756
757                 ret = bch2_snapshot_lookup(trans, id, &v);
758                 if (bch2_err_matches(ret, ENOENT))
759                         bch_err(c, "snapshot node %llu has nonexistent child %u",
760                                 k.k->p.offset, id);
761                 if (ret)
762                         goto err;
763
764                 if (le32_to_cpu(v.parent) != k.k->p.offset) {
765                         bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
766                                 id, le32_to_cpu(v.parent), k.k->p.offset);
767                         ret = -EINVAL;
768                         goto err;
769                 }
770         }
771
772         should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
773                 !BCH_SNAPSHOT_DELETED(&s);
774
775         if (should_have_subvol) {
776                 id = le32_to_cpu(s.subvol);
777                 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
778                 if (bch2_err_matches(ret, ENOENT))
779                         bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
780                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
781                 if (ret)
782                         goto err;
783
784                 if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
785                         bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
786                                 k.k->p.offset);
787                         ret = -EINVAL;
788                         goto err;
789                 }
790         } else {
791                 if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n  %s",
792                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
793                         u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
794                         ret = PTR_ERR_OR_ZERO(u);
795                         if (ret)
796                                 goto err;
797
798                         u->v.subvol = 0;
799                         s = u->v;
800                 }
801         }
802
803         ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
804         if (ret < 0)
805                 goto err;
806
807         if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n  %s",
808                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
809                 ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
810                 if (ret)
811                         goto err;
812         }
813         ret = 0;
814
815         real_depth = bch2_snapshot_depth(c, parent_id);
816
817         if (le32_to_cpu(s.depth) != real_depth &&
818             (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
819              fsck_err(c, "snapshot with incorrect depth field, should be %u:\n  %s",
820                       real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
821                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
822                 ret = PTR_ERR_OR_ZERO(u);
823                 if (ret)
824                         goto err;
825
826                 u->v.depth = cpu_to_le32(real_depth);
827                 s = u->v;
828         }
829
830         ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
831         if (ret < 0)
832                 goto err;
833
834         if (!ret &&
835             (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
836              fsck_err(c, "snapshot with bad skiplist field:\n  %s",
837                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
838                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
839                 ret = PTR_ERR_OR_ZERO(u);
840                 if (ret)
841                         goto err;
842
843                 for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
844                         u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
845
846                 bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
847                 s = u->v;
848         }
849         ret = 0;
850 err:
851 fsck_err:
852         printbuf_exit(&buf);
853         return ret;
854 }
855
856 int bch2_check_snapshots(struct bch_fs *c)
857 {
858         struct btree_iter iter;
859         struct bkey_s_c k;
860         int ret;
861
862         /*
863          * We iterate backwards as checking/fixing the depth field requires that
864          * the parent's depth already be correct:
865          */
866         ret = bch2_trans_run(c,
867                 for_each_btree_key_reverse_commit(trans, iter,
868                         BTREE_ID_snapshots, POS_MAX,
869                         BTREE_ITER_PREFETCH, k,
870                         NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
871                 check_snapshot(trans, &iter, k)));
872         if (ret)
873                 bch_err_fn(c, ret);
874         return ret;
875 }
876
877 /*
878  * Mark a snapshot as deleted, for future cleanup:
879  */
880 int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
881 {
882         struct btree_iter iter;
883         struct bkey_i_snapshot *s;
884         int ret = 0;
885
886         s = bch2_bkey_get_mut_typed(trans, &iter,
887                                     BTREE_ID_snapshots, POS(0, id),
888                                     0, snapshot);
889         ret = PTR_ERR_OR_ZERO(s);
890         if (unlikely(ret)) {
891                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
892                                         trans->c, "missing snapshot %u", id);
893                 return ret;
894         }
895
896         /* already deleted? */
897         if (BCH_SNAPSHOT_DELETED(&s->v))
898                 goto err;
899
900         SET_BCH_SNAPSHOT_DELETED(&s->v, true);
901         SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
902         s->v.subvol = 0;
903 err:
904         bch2_trans_iter_exit(trans, &iter);
905         return ret;
906 }
907
908 static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
909 {
910         if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
911                 swap(s->children[0], s->children[1]);
912 }
913
914 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
915 {
916         struct bch_fs *c = trans->c;
917         struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
918         struct btree_iter c_iter = (struct btree_iter) { NULL };
919         struct btree_iter tree_iter = (struct btree_iter) { NULL };
920         struct bkey_s_c_snapshot s;
921         u32 parent_id, child_id;
922         unsigned i;
923         int ret = 0;
924
925         s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
926                                      BTREE_ITER_INTENT, snapshot);
927         ret = bkey_err(s);
928         bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
929                                 "missing snapshot %u", id);
930
931         if (ret)
932                 goto err;
933
934         BUG_ON(s.v->children[1]);
935
936         parent_id = le32_to_cpu(s.v->parent);
937         child_id = le32_to_cpu(s.v->children[0]);
938
939         if (parent_id) {
940                 struct bkey_i_snapshot *parent;
941
942                 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
943                                      BTREE_ID_snapshots, POS(0, parent_id),
944                                      0, snapshot);
945                 ret = PTR_ERR_OR_ZERO(parent);
946                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
947                                         "missing snapshot %u", parent_id);
948                 if (unlikely(ret))
949                         goto err;
950
951                 /* find entry in parent->children for node being deleted */
952                 for (i = 0; i < 2; i++)
953                         if (le32_to_cpu(parent->v.children[i]) == id)
954                                 break;
955
956                 if (bch2_fs_inconsistent_on(i == 2, c,
957                                         "snapshot %u missing child pointer to %u",
958                                         parent_id, id))
959                         goto err;
960
961                 parent->v.children[i] = le32_to_cpu(child_id);
962
963                 normalize_snapshot_child_pointers(&parent->v);
964         }
965
966         if (child_id) {
967                 struct bkey_i_snapshot *child;
968
969                 child = bch2_bkey_get_mut_typed(trans, &c_iter,
970                                      BTREE_ID_snapshots, POS(0, child_id),
971                                      0, snapshot);
972                 ret = PTR_ERR_OR_ZERO(child);
973                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
974                                         "missing snapshot %u", child_id);
975                 if (unlikely(ret))
976                         goto err;
977
978                 child->v.parent = cpu_to_le32(parent_id);
979
980                 if (!child->v.parent) {
981                         child->v.skip[0] = 0;
982                         child->v.skip[1] = 0;
983                         child->v.skip[2] = 0;
984                 }
985         }
986
987         if (!parent_id) {
988                 /*
989                  * We're deleting the root of a snapshot tree: update the
990                  * snapshot_tree entry to point to the new root, or delete it if
991                  * this is the last snapshot ID in this tree:
992                  */
993                 struct bkey_i_snapshot_tree *s_t;
994
995                 BUG_ON(s.v->children[1]);
996
997                 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
998                                 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
999                                 0, snapshot_tree);
1000                 ret = PTR_ERR_OR_ZERO(s_t);
1001                 if (ret)
1002                         goto err;
1003
1004                 if (s.v->children[0]) {
1005                         s_t->v.root_snapshot = s.v->children[0];
1006                 } else {
1007                         s_t->k.type = KEY_TYPE_deleted;
1008                         set_bkey_val_u64s(&s_t->k, 0);
1009                 }
1010         }
1011
1012         ret = bch2_btree_delete_at(trans, &iter, 0);
1013 err:
1014         bch2_trans_iter_exit(trans, &tree_iter);
1015         bch2_trans_iter_exit(trans, &p_iter);
1016         bch2_trans_iter_exit(trans, &c_iter);
1017         bch2_trans_iter_exit(trans, &iter);
1018         return ret;
1019 }
1020
1021 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
1022                           u32 *new_snapids,
1023                           u32 *snapshot_subvols,
1024                           unsigned nr_snapids)
1025 {
1026         struct bch_fs *c = trans->c;
1027         struct btree_iter iter;
1028         struct bkey_i_snapshot *n;
1029         struct bkey_s_c k;
1030         unsigned i, j;
1031         u32 depth = bch2_snapshot_depth(c, parent);
1032         int ret;
1033
1034         bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
1035                              POS_MIN, BTREE_ITER_INTENT);
1036         k = bch2_btree_iter_peek(&iter);
1037         ret = bkey_err(k);
1038         if (ret)
1039                 goto err;
1040
1041         for (i = 0; i < nr_snapids; i++) {
1042                 k = bch2_btree_iter_prev_slot(&iter);
1043                 ret = bkey_err(k);
1044                 if (ret)
1045                         goto err;
1046
1047                 if (!k.k || !k.k->p.offset) {
1048                         ret = -BCH_ERR_ENOSPC_snapshot_create;
1049                         goto err;
1050                 }
1051
1052                 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
1053                 ret = PTR_ERR_OR_ZERO(n);
1054                 if (ret)
1055                         goto err;
1056
1057                 n->v.flags      = 0;
1058                 n->v.parent     = cpu_to_le32(parent);
1059                 n->v.subvol     = cpu_to_le32(snapshot_subvols[i]);
1060                 n->v.tree       = cpu_to_le32(tree);
1061                 n->v.depth      = cpu_to_le32(depth);
1062
1063                 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
1064                         n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
1065
1066                 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
1067                 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
1068
1069                 ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
1070                                          bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
1071                 if (ret)
1072                         goto err;
1073
1074                 new_snapids[i]  = iter.pos.offset;
1075
1076                 mutex_lock(&c->snapshot_table_lock);
1077                 snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
1078                 mutex_unlock(&c->snapshot_table_lock);
1079         }
1080 err:
1081         bch2_trans_iter_exit(trans, &iter);
1082         return ret;
1083 }
1084
1085 /*
1086  * Create new snapshot IDs as children of an existing snapshot ID:
1087  */
1088 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
1089                               u32 *new_snapids,
1090                               u32 *snapshot_subvols,
1091                               unsigned nr_snapids)
1092 {
1093         struct btree_iter iter;
1094         struct bkey_i_snapshot *n_parent;
1095         int ret = 0;
1096
1097         n_parent = bch2_bkey_get_mut_typed(trans, &iter,
1098                         BTREE_ID_snapshots, POS(0, parent),
1099                         0, snapshot);
1100         ret = PTR_ERR_OR_ZERO(n_parent);
1101         if (unlikely(ret)) {
1102                 if (bch2_err_matches(ret, ENOENT))
1103                         bch_err(trans->c, "snapshot %u not found", parent);
1104                 return ret;
1105         }
1106
1107         if (n_parent->v.children[0] || n_parent->v.children[1]) {
1108                 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
1109                 ret = -EINVAL;
1110                 goto err;
1111         }
1112
1113         ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
1114                              new_snapids, snapshot_subvols, nr_snapids);
1115         if (ret)
1116                 goto err;
1117
1118         n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
1119         n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
1120         n_parent->v.subvol = 0;
1121         SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
1122 err:
1123         bch2_trans_iter_exit(trans, &iter);
1124         return ret;
1125 }
1126
1127 /*
1128  * Create a snapshot node that is the root of a new tree:
1129  */
1130 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
1131                               u32 *new_snapids,
1132                               u32 *snapshot_subvols,
1133                               unsigned nr_snapids)
1134 {
1135         struct bkey_i_snapshot_tree *n_tree;
1136         int ret;
1137
1138         n_tree = __bch2_snapshot_tree_create(trans);
1139         ret =   PTR_ERR_OR_ZERO(n_tree) ?:
1140                 create_snapids(trans, 0, n_tree->k.p.offset,
1141                              new_snapids, snapshot_subvols, nr_snapids);
1142         if (ret)
1143                 return ret;
1144
1145         n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
1146         n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
1147         return 0;
1148 }
1149
1150 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
1151                               u32 *new_snapids,
1152                               u32 *snapshot_subvols,
1153                               unsigned nr_snapids)
1154 {
1155         BUG_ON((parent == 0) != (nr_snapids == 1));
1156         BUG_ON((parent != 0) != (nr_snapids == 2));
1157
1158         return parent
1159                 ? bch2_snapshot_node_create_children(trans, parent,
1160                                 new_snapids, snapshot_subvols, nr_snapids)
1161                 : bch2_snapshot_node_create_tree(trans,
1162                                 new_snapids, snapshot_subvols, nr_snapids);
1163
1164 }
1165
1166 /*
1167  * If we have an unlinked inode in an internal snapshot node, and the inode
1168  * really has been deleted in all child snapshots, how does this get cleaned up?
1169  *
1170  * first there is the problem of how keys that have been overwritten in all
1171  * child snapshots get deleted (unimplemented?), but inodes may perhaps be
1172  * special?
1173  *
1174  * also: unlinked inode in internal snapshot appears to not be getting deleted
1175  * correctly if inode doesn't exist in leaf snapshots
1176  *
1177  * solution:
1178  *
1179  * for a key in an interior snapshot node that needs work to be done that
1180  * requires it to be mutated: iterate over all descendent leaf nodes and copy
1181  * that key to snapshot leaf nodes, where we can mutate it
1182  */
1183
1184 static int snapshot_delete_key(struct btree_trans *trans,
1185                                struct btree_iter *iter,
1186                                struct bkey_s_c k,
1187                                snapshot_id_list *deleted,
1188                                snapshot_id_list *equiv_seen,
1189                                struct bpos *last_pos)
1190 {
1191         struct bch_fs *c = trans->c;
1192         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1193
1194         if (!bkey_eq(k.k->p, *last_pos))
1195                 equiv_seen->nr = 0;
1196         *last_pos = k.k->p;
1197
1198         if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
1199             snapshot_list_has_id(equiv_seen, equiv)) {
1200                 return bch2_btree_delete_at(trans, iter,
1201                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1202         } else {
1203                 return snapshot_list_add(c, equiv_seen, equiv);
1204         }
1205 }
1206
1207 static int move_key_to_correct_snapshot(struct btree_trans *trans,
1208                                struct btree_iter *iter,
1209                                struct bkey_s_c k)
1210 {
1211         struct bch_fs *c = trans->c;
1212         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1213
1214         /*
1215          * When we have a linear chain of snapshot nodes, we consider
1216          * those to form an equivalence class: we're going to collapse
1217          * them all down to a single node, and keep the leaf-most node -
1218          * which has the same id as the equivalence class id.
1219          *
1220          * If there are multiple keys in different snapshots at the same
1221          * position, we're only going to keep the one in the newest
1222          * snapshot - the rest have been overwritten and are redundant,
1223          * and for the key we're going to keep we need to move it to the
1224          * equivalance class ID if it's not there already.
1225          */
1226         if (equiv != k.k->p.snapshot) {
1227                 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1228                 struct btree_iter new_iter;
1229                 int ret;
1230
1231                 ret = PTR_ERR_OR_ZERO(new);
1232                 if (ret)
1233                         return ret;
1234
1235                 new->k.p.snapshot = equiv;
1236
1237                 bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
1238                                      BTREE_ITER_ALL_SNAPSHOTS|
1239                                      BTREE_ITER_CACHED|
1240                                      BTREE_ITER_INTENT);
1241
1242                 ret =   bch2_btree_iter_traverse(&new_iter) ?:
1243                         bch2_trans_update(trans, &new_iter, new,
1244                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
1245                         bch2_btree_delete_at(trans, iter,
1246                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1247                 bch2_trans_iter_exit(trans, &new_iter);
1248                 if (ret)
1249                         return ret;
1250         }
1251
1252         return 0;
1253 }
1254
1255 static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
1256 {
1257         struct bkey_s_c_snapshot snap;
1258         u32 children[2];
1259         int ret;
1260
1261         if (k.k->type != KEY_TYPE_snapshot)
1262                 return 0;
1263
1264         snap = bkey_s_c_to_snapshot(k);
1265         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1266             BCH_SNAPSHOT_SUBVOL(snap.v))
1267                 return 0;
1268
1269         children[0] = le32_to_cpu(snap.v->children[0]);
1270         children[1] = le32_to_cpu(snap.v->children[1]);
1271
1272         ret   = bch2_snapshot_live(trans, children[0]) ?:
1273                 bch2_snapshot_live(trans, children[1]);
1274         if (ret < 0)
1275                 return ret;
1276         return !ret;
1277 }
1278
1279 /*
1280  * For a given snapshot, if it doesn't have a subvolume that points to it, and
1281  * it doesn't have child snapshot nodes - it's now redundant and we can mark it
1282  * as deleted.
1283  */
1284 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
1285 {
1286         int ret = bch2_snapshot_needs_delete(trans, k);
1287
1288         return ret <= 0
1289                 ? ret
1290                 : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
1291 }
1292
1293 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
1294                                                 snapshot_id_list *skip)
1295 {
1296         rcu_read_lock();
1297         while (snapshot_list_has_id(skip, id))
1298                 id = __bch2_snapshot_parent(c, id);
1299
1300         while (n--) {
1301                 do {
1302                         id = __bch2_snapshot_parent(c, id);
1303                 } while (snapshot_list_has_id(skip, id));
1304         }
1305         rcu_read_unlock();
1306
1307         return id;
1308 }
1309
1310 static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
1311                                               struct btree_iter *iter, struct bkey_s_c k,
1312                                               snapshot_id_list *deleted)
1313 {
1314         struct bch_fs *c = trans->c;
1315         u32 nr_deleted_ancestors = 0;
1316         struct bkey_i_snapshot *s;
1317         u32 *i;
1318         int ret;
1319
1320         if (k.k->type != KEY_TYPE_snapshot)
1321                 return 0;
1322
1323         if (snapshot_list_has_id(deleted, k.k->p.offset))
1324                 return 0;
1325
1326         s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
1327         ret = PTR_ERR_OR_ZERO(s);
1328         if (ret)
1329                 return ret;
1330
1331         darray_for_each(*deleted, i)
1332                 nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
1333
1334         if (!nr_deleted_ancestors)
1335                 return 0;
1336
1337         le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
1338
1339         if (!s->v.depth) {
1340                 s->v.skip[0] = 0;
1341                 s->v.skip[1] = 0;
1342                 s->v.skip[2] = 0;
1343         } else {
1344                 u32 depth = le32_to_cpu(s->v.depth);
1345                 u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
1346
1347                 for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
1348                         u32 id = le32_to_cpu(s->v.skip[j]);
1349
1350                         if (snapshot_list_has_id(deleted, id)) {
1351                                 id = depth > 1
1352                                         ? bch2_snapshot_nth_parent_skip(c,
1353                                                         parent,
1354                                                         get_random_u32_below(depth - 1),
1355                                                         deleted)
1356                                         : parent;
1357                                 s->v.skip[j] = cpu_to_le32(id);
1358                         }
1359                 }
1360
1361                 bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
1362         }
1363
1364         return bch2_trans_update(trans, iter, &s->k_i, 0);
1365 }
1366
1367 int bch2_delete_dead_snapshots(struct bch_fs *c)
1368 {
1369         struct btree_trans *trans;
1370         struct btree_iter iter;
1371         struct bkey_s_c k;
1372         struct bkey_s_c_snapshot snap;
1373         snapshot_id_list deleted = { 0 };
1374         snapshot_id_list deleted_interior = { 0 };
1375         u32 *i, id;
1376         int ret = 0;
1377
1378         if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
1379                 return 0;
1380
1381         if (!test_bit(BCH_FS_STARTED, &c->flags)) {
1382                 ret = bch2_fs_read_write_early(c);
1383                 if (ret) {
1384                         bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
1385                         return ret;
1386                 }
1387         }
1388
1389         trans = bch2_trans_get(c);
1390
1391         /*
1392          * For every snapshot node: If we have no live children and it's not
1393          * pointed to by a subvolume, delete it:
1394          */
1395         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
1396                         POS_MIN, 0, k,
1397                         NULL, NULL, 0,
1398                 bch2_delete_redundant_snapshot(trans, k));
1399         if (ret) {
1400                 bch_err_msg(c, ret, "deleting redundant snapshots");
1401                 goto err;
1402         }
1403
1404         ret = for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
1405                                   POS_MIN, 0, k,
1406                 bch2_snapshot_set_equiv(trans, k));
1407         if (ret) {
1408                 bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
1409                 goto err;
1410         }
1411
1412         for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1413                            POS_MIN, 0, k, ret) {
1414                 if (k.k->type != KEY_TYPE_snapshot)
1415                         continue;
1416
1417                 snap = bkey_s_c_to_snapshot(k);
1418                 if (BCH_SNAPSHOT_DELETED(snap.v)) {
1419                         ret = snapshot_list_add(c, &deleted, k.k->p.offset);
1420                         if (ret)
1421                                 break;
1422                 }
1423         }
1424         bch2_trans_iter_exit(trans, &iter);
1425
1426         if (ret) {
1427                 bch_err_msg(c, ret, "walking snapshots");
1428                 goto err;
1429         }
1430
1431         for (id = 0; id < BTREE_ID_NR; id++) {
1432                 struct bpos last_pos = POS_MIN;
1433                 snapshot_id_list equiv_seen = { 0 };
1434                 struct disk_reservation res = { 0 };
1435
1436                 if (!btree_type_has_snapshots(id))
1437                         continue;
1438
1439                 ret = for_each_btree_key_commit(trans, iter,
1440                                 id, POS_MIN,
1441                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1442                                 &res, NULL, BTREE_INSERT_NOFAIL,
1443                         snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
1444                       for_each_btree_key_commit(trans, iter,
1445                                 id, POS_MIN,
1446                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1447                                 &res, NULL, BTREE_INSERT_NOFAIL,
1448                         move_key_to_correct_snapshot(trans, &iter, k));
1449
1450                 bch2_disk_reservation_put(c, &res);
1451                 darray_exit(&equiv_seen);
1452
1453                 if (ret) {
1454                         bch_err_msg(c, ret, "deleting keys from dying snapshots");
1455                         goto err;
1456                 }
1457         }
1458
1459         bch2_trans_unlock(trans);
1460         down_write(&c->snapshot_create_lock);
1461
1462         for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1463                            POS_MIN, 0, k, ret) {
1464                 u32 snapshot = k.k->p.offset;
1465                 u32 equiv = bch2_snapshot_equiv(c, snapshot);
1466
1467                 if (equiv != snapshot) {
1468                         ret = snapshot_list_add(c, &deleted_interior, snapshot);
                             if (ret)
                                     break;
                     }
1469         }
1470         bch2_trans_iter_exit(trans, &iter);
1471
1472         if (ret)
1473                 goto err_create_lock;
1474
1475         /*
1476          * Fixing children of deleted snapshots can't be done completely
1477          * atomically; if we crash between here and when we delete the
1478          * interior nodes, some depth fields will be off:
1479          */
1480         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
1481                                   BTREE_ITER_INTENT, k,
1482                                   NULL, NULL, BTREE_INSERT_NOFAIL,
1483                 bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
1484         if (ret)
1485                 goto err_create_lock;
1486
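             /*
              * Finally, delete the snapshot nodes themselves: first those
              * flagged as deleted, then the redundant interior nodes:
              */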
1487         darray_for_each(deleted, i) {
1488                 ret = commit_do(trans, NULL, NULL, 0,
1489                         bch2_snapshot_node_delete(trans, *i));
1490                 if (ret) {
1491                         bch_err_msg(c, ret, "deleting snapshot %u", *i);
1492                         goto err_create_lock;
1493                 }
1494         }
1495
1496         darray_for_each(deleted_interior, i) {
1497                 ret = commit_do(trans, NULL, NULL, 0,
1498                         bch2_snapshot_node_delete(trans, *i));
1499                 if (ret) {
1500                         bch_err_msg(c, ret, "deleting snapshot %u", *i);
1501                         goto err_create_lock;
1502                 }
1503         }
1504 err_create_lock:
1505         up_write(&c->snapshot_create_lock);
1506 err:
1507         darray_exit(&deleted_interior);
1508         darray_exit(&deleted);
1509         bch2_trans_put(trans);
1510         if (ret)
1511                 bch_err_fn(c, ret);
1512         return ret;
1513 }
1514
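     /*
      * Workqueue entry point: run dead snapshot deletion, then drop the write
      * ref taken when the work was queued.
      */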
1515 void bch2_delete_dead_snapshots_work(struct work_struct *work)
1516 {
1517         struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1518
1519         bch2_delete_dead_snapshots(c);
1520         bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1521 }
1522
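     /*
      * Queue dead snapshot deletion in the background: take a write ref so we
      * don't run after the filesystem has gone read-only, and drop it again
      * if the work item was already queued.
      */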
1523 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1524 {
1525         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1526             !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1527                 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1528 }
1529
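     /*
      * Returns 1 if a version of the key at @pos exists in a snapshot that is
      * a descendant of pos.snapshot (i.e. the key has been overwritten in a
      * child snapshot), 0 if not, or a negative error code.
      */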
1530 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
1531                                        enum btree_id id,
1532                                        struct bpos pos)
1533 {
1534         struct bch_fs *c = trans->c;
1535         struct btree_iter iter;
1536         struct bkey_s_c k;
1537         int ret;
1538
1539         bch2_trans_iter_init(trans, &iter, id, pos,
1540                              BTREE_ITER_NOT_EXTENTS|
1541                              BTREE_ITER_ALL_SNAPSHOTS);
1542         while (1) {
1543                 k = bch2_btree_iter_prev(&iter);
1544                 ret = bkey_err(k);
1545                 if (ret)
1546                         break;
1547
1548                 if (!k.k)
1549                         break;
1550
1551                 if (!bkey_eq(pos, k.k->p))
1552                         break;
1553
1554                 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
1555                         ret = 1;
1556                         break;
1557                 }
1558         }
1559         bch2_trans_iter_exit(trans, &iter);
1560
1561         return ret;
1562 }
1563
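     /* Return the child of @id with the smallest id, or 0 if @id is a leaf: */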
1564 static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
1565 {
1566         const struct snapshot_t *s = snapshot_t(c, id);
1567
1568         return s->children[1] ?: s->children[0];
1569 }
1570
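     /*
      * Follow smallest children down to the lowest-numbered leaf in the
      * subtree rooted at @id (which may be @id itself, if it's a leaf):
      */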
1571 static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
1572 {
1573         u32 child;
1574
1575         while ((child = bch2_snapshot_smallest_child(c, id)))
1576                 id = child;
1577         return id;
1578 }
1579
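     /*
      * Copy @interior_k into the leaf snapshot @leaf_id, unless the leaf
      * already has its own version of the key; records the copy's position in
      * *new_min_pos if it's the first one created.
      */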
1580 static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
1581                                                enum btree_id btree,
1582                                                struct bkey_s_c interior_k,
1583                                                u32 leaf_id, struct bpos *new_min_pos)
1584 {
1585         struct btree_iter iter;
1586         struct bpos pos = interior_k.k->p;
1587         struct bkey_s_c k;
1588         struct bkey_i *new;
1589         int ret;
1590
1591         pos.snapshot = leaf_id;
1592
1593         bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
1594         k = bch2_btree_iter_peek_slot(&iter);
1595         ret = bkey_err(k);
1596         if (ret)
1597                 goto out;
1598
1599         /* key already overwritten in this snapshot? */
1600         if (k.k->p.snapshot != interior_k.k->p.snapshot)
1601                 goto out;
1602
1603         if (bpos_eq(*new_min_pos, POS_MIN)) {
1604                 *new_min_pos = k.k->p;
1605                 new_min_pos->snapshot = leaf_id;
1606         }
1607
1608         new = bch2_bkey_make_mut_noupdate(trans, interior_k);
1609         ret = PTR_ERR_OR_ZERO(new);
1610         if (ret)
1611                 goto out;
1612
1613         new->k.p.snapshot = leaf_id;
1614         ret = bch2_trans_update(trans, &iter, new, 0);
1615 out:
1616         bch2_trans_iter_exit(trans, &iter);
1617         return ret;
1618 }
1619
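     /*
      * Propagate @k to every leaf snapshot that is a descendant of the
      * snapshot it was written in, committing after each copy; *new_min_pos
      * is set to the position of the first copy created, or left at POS_MIN
      * if no copies were needed.
      */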
1620 int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
1621                                           enum btree_id btree,
1622                                           struct bkey_s_c k,
1623                                           struct bpos *new_min_pos)
1624 {
1625         struct bch_fs *c = trans->c;
1626         struct bkey_buf sk;
1627         u32 restart_count = trans->restart_count;
1628         int ret = 0;
1629
1630         bch2_bkey_buf_init(&sk);
1631         bch2_bkey_buf_reassemble(&sk, c, k);
1632         k = bkey_i_to_s_c(sk.k);
1633
1634         *new_min_pos = POS_MIN;
1635
1636         for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
1637              id < k.k->p.snapshot;
1638              id++) {
1639                 if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
1640                     !bch2_snapshot_is_leaf(c, id))
1641                         continue;
1642 again:
1643                 ret =   btree_trans_too_many_iters(trans) ?:
1644                         bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
1645                         bch2_trans_commit(trans, NULL, NULL, 0);
1646                 if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1647                         bch2_trans_begin(trans);
1648                         goto again;
1649                 }
1650
1651                 if (ret)
1652                         break;
1653         }
1654
1655         bch2_bkey_buf_exit(&sk, c);
1656
1657         return ret ?: trans_was_restarted(trans, restart_count);
1658 }
1659
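     /*
      * If this snapshot node is flagged as deleted, is not its own
      * equivalence class representative, or bch2_snapshot_needs_delete()
      * says it should go, flag the filesystem so dead snapshot deletion runs
      * later:
      */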
1660 static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
1661 {
1662         struct bch_fs *c = trans->c;
1663         struct bkey_s_c_snapshot snap;
1664         int ret = 0;
1665
1666         if (k.k->type != KEY_TYPE_snapshot)
1667                 return 0;
1668
1669         snap = bkey_s_c_to_snapshot(k);
1670         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1671             bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
1672             (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
1673                 set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
1674                 return 0;
1675         }
1676
1677         return ret;
1678 }
1679
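     /*
      * Read the snapshots btree at startup: build the in-memory snapshot
      * table, set up equivalence classes and ancestor bitmaps, and check
      * whether dead snapshot deletion still needs to run:
      */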
1680 int bch2_snapshots_read(struct bch_fs *c)
1681 {
1682         struct btree_iter iter;
1683         struct bkey_s_c k;
1684         int ret = 0;
1685
1686         ret = bch2_trans_run(c,
1687                 for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
1688                            POS_MIN, 0, k,
1689                         bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
1690                         bch2_snapshot_set_equiv(trans, k) ?:
1691                         bch2_check_snapshot_needs_deletion(trans, k)) ?:
1692                 for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
1693                            POS_MIN, 0, k,
1694                            (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
1695         if (ret)
1696                 bch_err_fn(c, ret);
1697         return ret;
1698 }
1699
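     /* Free the in-memory snapshot table on filesystem shutdown: */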
1700 void bch2_fs_snapshots_exit(struct bch_fs *c)
1701 {
1702         kfree(rcu_dereference_protected(c->snapshots, true));
1703 }