1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_key_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "errcode.h"
9 #include "error.h"
10 #include "fs.h"
11 #include "snapshot.h"
12
13 #include <linux/random.h>
14
15 /*
16  * Snapshot trees:
17  *
18  * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
19  * exist to provide a stable identifier for the whole lifetime of a snapshot
20  * tree.
21  */
22
23 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
24                                 struct bkey_s_c k)
25 {
26         struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
27
28         prt_printf(out, "subvol %u root snapshot %u",
29                    le32_to_cpu(t.v->master_subvol),
30                    le32_to_cpu(t.v->root_snapshot));
31 }
32
33 int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
34                                enum bkey_invalid_flags flags,
35                                struct printbuf *err)
36 {
37         int ret = 0;
38
39         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
40                          bkey_lt(k.k->p, POS(0, 1)), c, err,
41                          snapshot_tree_pos_bad,
42                          "bad pos");
43 fsck_err:
44         return ret;
45 }
46
47 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
48                               struct bch_snapshot_tree *s)
49 {
50         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
51                                           BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
52
53         if (bch2_err_matches(ret, ENOENT))
54                 ret = -BCH_ERR_ENOENT_snapshot_tree;
55         return ret;
56 }
57
58 struct bkey_i_snapshot_tree *
59 __bch2_snapshot_tree_create(struct btree_trans *trans)
60 {
61         struct btree_iter iter;
62         int ret = bch2_bkey_get_empty_slot(trans, &iter,
63                         BTREE_ID_snapshot_trees, POS(0, U32_MAX));
64         struct bkey_i_snapshot_tree *s_t;
65
66         if (ret == -BCH_ERR_ENOSPC_btree_slot)
67                 ret = -BCH_ERR_ENOSPC_snapshot_tree;
68         if (ret)
69                 return ERR_PTR(ret);
70
71         s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
72         ret = PTR_ERR_OR_ZERO(s_t);
73         bch2_trans_iter_exit(trans, &iter);
74         return ret ? ERR_PTR(ret) : s_t;
75 }
76
77 static int bch2_snapshot_tree_create(struct btree_trans *trans,
78                                 u32 root_id, u32 subvol_id, u32 *tree_id)
79 {
80         struct bkey_i_snapshot_tree *n_tree =
81                 __bch2_snapshot_tree_create(trans);
82
83         if (IS_ERR(n_tree))
84                 return PTR_ERR(n_tree);
85
86         n_tree->v.master_subvol = cpu_to_le32(subvol_id);
87         n_tree->v.root_snapshot = cpu_to_le32(root_id);
88         *tree_id = n_tree->k.p.offset;
89         return 0;
90 }
91
92 /* Snapshot nodes: */
93
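/*
 * A snapshot node's parent always has a higher ID than the node itself (new
 * IDs are allocated counting down from the top of the keyspace, and
 * bch2_snapshot_invalid() enforces the ordering), so walking parent pointers
 * always moves towards higher IDs.
 *
 * bch2_snapshot_is_ancestor_early() walks parent pointers one at a time; it is
 * the variant that's safe to use before the check_snapshots recovery pass has
 * run and the skiplist/is_ancestor metadata can be trusted - see the EBUG_ON()
 * in __bch2_snapshot_is_ancestor() below.
 */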
94 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
95 {
96         struct snapshot_table *t;
97
98         rcu_read_lock();
99         t = rcu_dereference(c->snapshots);
100
101         while (id && id < ancestor)
102                 id = __snapshot_t(t, id)->parent;
103         rcu_read_unlock();
104
105         return id == ancestor;
106 }
107
108 static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
109 {
110         const struct snapshot_t *s = __snapshot_t(t, id);
111
112         if (s->skip[2] <= ancestor)
113                 return s->skip[2];
114         if (s->skip[1] <= ancestor)
115                 return s->skip[1];
116         if (s->skip[0] <= ancestor)
117                 return s->skip[0];
118         return s->parent;
119 }
120
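/*
 * Fast ancestor check: each node has a set of skip pointers to more distant
 * ancestors, plus an is_ancestor bitmap covering the IS_ANCESTOR_BITMAP IDs
 * immediately above it, recording which of those IDs really are ancestors.
 *
 * get_ancestor_below() jumps to the furthest skip pointer that doesn't
 * overshoot @ancestor; once @id is within IS_ANCESTOR_BITMAP of @ancestor the
 * query is answered with a single bitmap test.
 */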
121 bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
122 {
123         struct snapshot_table *t;
124         bool ret;
125
126         EBUG_ON(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots);
127
128         rcu_read_lock();
129         t = rcu_dereference(c->snapshots);
130
131         while (id && id < ancestor - IS_ANCESTOR_BITMAP)
132                 id = get_ancestor_below(t, id, ancestor);
133
134         if (id && id < ancestor) {
135                 ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
136
137                 EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
138         } else {
139                 ret = id == ancestor;
140         }
141
142         rcu_read_unlock();
143
144         return ret;
145 }
146
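/*
 * The in-memory snapshot table is a flat RCU-protected array indexed by
 * U32_MAX - id (IDs are allocated from the top of the keyspace down, which
 * keeps the table dense). This is the slow path that grows the table:
 * allocate a larger array, copy the old contents, publish it with
 * rcu_assign_pointer() and free the old table after a grace period.
 */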
147 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
148 {
149         size_t idx = U32_MAX - id;
150         size_t new_size;
151         struct snapshot_table *new, *old;
152
153         new_size = max(16UL, roundup_pow_of_two(idx + 1));
154
155         new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
156         if (!new)
157                 return NULL;
158
159         old = rcu_dereference_protected(c->snapshots, true);
160         if (old)
161                 memcpy(new->s,
162                        rcu_dereference_protected(c->snapshots, true)->s,
163                        sizeof(new->s[0]) * c->snapshot_table_size);
164
165         rcu_assign_pointer(c->snapshots, new);
166         c->snapshot_table_size = new_size;
167         kvfree_rcu_mightsleep(old);
168
169         return &rcu_dereference_protected(c->snapshots, true)->s[idx];
170 }
171
172 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
173 {
174         size_t idx = U32_MAX - id;
175
176         lockdep_assert_held(&c->snapshot_table_lock);
177
178         if (likely(idx < c->snapshot_table_size))
179                 return &rcu_dereference_protected(c->snapshots, true)->s[idx];
180
181         return __snapshot_t_mut(c, id);
182 }
183
184 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
185                            struct bkey_s_c k)
186 {
187         struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
188
189         prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
190                BCH_SNAPSHOT_SUBVOL(s.v),
191                BCH_SNAPSHOT_DELETED(s.v),
192                le32_to_cpu(s.v->parent),
193                le32_to_cpu(s.v->children[0]),
194                le32_to_cpu(s.v->children[1]),
195                le32_to_cpu(s.v->subvol),
196                le32_to_cpu(s.v->tree));
197
198         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
199                 prt_printf(out, " depth %u skiplist %u %u %u",
200                            le32_to_cpu(s.v->depth),
201                            le32_to_cpu(s.v->skip[0]),
202                            le32_to_cpu(s.v->skip[1]),
203                            le32_to_cpu(s.v->skip[2]));
204 }
205
206 int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
207                           enum bkey_invalid_flags flags,
208                           struct printbuf *err)
209 {
210         struct bkey_s_c_snapshot s;
211         u32 i, id;
212         int ret = 0;
213
214         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
215                          bkey_lt(k.k->p, POS(0, 1)), c, err,
216                          snapshot_pos_bad,
217                          "bad pos");
218
219         s = bkey_s_c_to_snapshot(k);
220
221         id = le32_to_cpu(s.v->parent);
222         bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
223                          snapshot_parent_bad,
224                          "bad parent node (%u <= %llu)",
225                          id, k.k->p.offset);
226
227         bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
228                          snapshot_children_not_normalized,
229                          "children not normalized");
230
231         bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
232                          snapshot_child_duplicate,
233                          "duplicate child nodes");
234
235         for (i = 0; i < 2; i++) {
236                 id = le32_to_cpu(s.v->children[i]);
237
238                 bkey_fsck_err_on(id >= k.k->p.offset, c, err,
239                                  snapshot_child_bad,
240                                  "bad child node (%u >= %llu)",
241                                  id, k.k->p.offset);
242         }
243
244         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
245                 bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
246                                  le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
247                                  snapshot_skiplist_not_normalized,
248                                  "skiplist not normalized");
249
250                 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
251                         id = le32_to_cpu(s.v->skip[i]);
252
253                         bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
254                                          snapshot_skiplist_bad,
255                                          "bad skiplist node %u", id);
256                 }
257         }
258 fsck_err:
259         return ret;
260 }
261
262 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
263 {
264         struct snapshot_t *t = snapshot_t_mut(c, id);
265         u32 parent = id;
266
267         while ((parent = bch2_snapshot_parent_early(c, parent)) &&
268                parent - id - 1 < IS_ANCESTOR_BITMAP)
269                 __set_bit(parent - id - 1, t->is_ancestor);
270 }
271
272 static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
273 {
274         mutex_lock(&c->snapshot_table_lock);
275         __set_is_ancestor_bitmap(c, id);
276         mutex_unlock(&c->snapshot_table_lock);
277 }
278
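/*
 * Trigger for the snapshots btree: keep the in-memory snapshot table in sync
 * with the btree by copying the parent/children/subvol/tree/depth/skip fields
 * into the corresponding snapshot_t, rebuilding the is_ancestor bitmap, and
 * kicking off asynchronous cleanup when a node has been marked deleted.
 */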
279 static int __bch2_mark_snapshot(struct btree_trans *trans,
280                        enum btree_id btree, unsigned level,
281                        struct bkey_s_c old, struct bkey_s_c new,
282                        unsigned flags)
283 {
284         struct bch_fs *c = trans->c;
285         struct snapshot_t *t;
286         u32 id = new.k->p.offset;
287         int ret = 0;
288
289         mutex_lock(&c->snapshot_table_lock);
290
291         t = snapshot_t_mut(c, id);
292         if (!t) {
293                 ret = -BCH_ERR_ENOMEM_mark_snapshot;
294                 goto err;
295         }
296
297         if (new.k->type == KEY_TYPE_snapshot) {
298                 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
299
300                 t->parent       = le32_to_cpu(s.v->parent);
301                 t->children[0]  = le32_to_cpu(s.v->children[0]);
302                 t->children[1]  = le32_to_cpu(s.v->children[1]);
303                 t->subvol       = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
304                 t->tree         = le32_to_cpu(s.v->tree);
305
306                 if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
307                         t->depth        = le32_to_cpu(s.v->depth);
308                         t->skip[0]      = le32_to_cpu(s.v->skip[0]);
309                         t->skip[1]      = le32_to_cpu(s.v->skip[1]);
310                         t->skip[2]      = le32_to_cpu(s.v->skip[2]);
311                 } else {
312                         t->depth        = 0;
313                         t->skip[0]      = 0;
314                         t->skip[1]      = 0;
315                         t->skip[2]      = 0;
316                 }
317
318                 __set_is_ancestor_bitmap(c, id);
319
320                 if (BCH_SNAPSHOT_DELETED(s.v)) {
321                         set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
322                         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
323                                 bch2_delete_dead_snapshots_async(c);
324                 }
325         } else {
326                 memset(t, 0, sizeof(*t));
327         }
328 err:
329         mutex_unlock(&c->snapshot_table_lock);
330         return ret;
331 }
332
333 int bch2_mark_snapshot(struct btree_trans *trans,
334                        enum btree_id btree, unsigned level,
335                        struct bkey_s_c old, struct bkey_s new,
336                        unsigned flags)
337 {
338         return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
339 }
340
341 int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
342                          struct bch_snapshot *s)
343 {
344         return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
345                                        BTREE_ITER_WITH_UPDATES, snapshot, s);
346 }
347
348 static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
349 {
350         struct bch_snapshot v;
351         int ret;
352
353         if (!id)
354                 return 0;
355
356         ret = bch2_snapshot_lookup(trans, id, &v);
357         if (bch2_err_matches(ret, ENOENT))
358                 bch_err(trans->c, "snapshot node %u not found", id);
359         if (ret)
360                 return ret;
361
362         return !BCH_SNAPSHOT_DELETED(&v);
363 }
364
365 /*
366  * If @k is a snapshot with just one live child, it's part of a linear chain,
367  * which we consider to be an equivalence class: after snapshot deletion
368  * cleanup, there should only be a single key at a given position in
369  * this equivalence class.
370  *
371  * This sets the equivalence class of @k to be the child's equivalence class, if
372  * it's part of such a linear chain: this correctly sets equivalence classes on
373  * startup if we run leaf to root (i.e. in natural key order).
374  */
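/*
 * Example: given a linear chain 30 -> 20 -> 10 (each interior node has
 * exactly one live child), keys are visited in increasing ID order:
 * equiv(10) = 10, then equiv(20) = equiv(10) = 10, then
 * equiv(30) = equiv(20) = 10 - the whole chain collapses into class 10.
 */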
375 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
376 {
377         struct bch_fs *c = trans->c;
378         unsigned i, nr_live = 0, live_idx = 0;
379         struct bkey_s_c_snapshot snap;
380         u32 id = k.k->p.offset, child[2];
381
382         if (k.k->type != KEY_TYPE_snapshot)
383                 return 0;
384
385         snap = bkey_s_c_to_snapshot(k);
386
387         child[0] = le32_to_cpu(snap.v->children[0]);
388         child[1] = le32_to_cpu(snap.v->children[1]);
389
390         for (i = 0; i < 2; i++) {
391                 int ret = bch2_snapshot_live(trans, child[i]);
392
393                 if (ret < 0)
394                         return ret;
395
396                 if (ret)
397                         live_idx = i;
398                 nr_live += ret;
399         }
400
401         mutex_lock(&c->snapshot_table_lock);
402
403         snapshot_t_mut(c, id)->equiv = nr_live == 1
404                 ? snapshot_t_mut(c, child[live_idx])->equiv
405                 : id;
406
407         mutex_unlock(&c->snapshot_table_lock);
408
409         return 0;
410 }
411
412 /* fsck: */
413
414 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
415 {
416         return snapshot_t(c, id)->children[child];
417 }
418
419 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
420 {
421         return bch2_snapshot_child(c, id, 0);
422 }
423
424 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
425 {
426         return bch2_snapshot_child(c, id, 1);
427 }
428
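/*
 * Walk to the next node of a snapshot tree in depth-first order: descend to
 * the left child if there is one, otherwise climb until we reach an ancestor
 * whose right child hasn't been visited yet. Returns 0 once the whole tree
 * has been visited.
 */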
429 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
430 {
431         u32 n, parent;
432
433         n = bch2_snapshot_left_child(c, id);
434         if (n)
435                 return n;
436
437         while ((parent = bch2_snapshot_parent(c, id))) {
438                 n = bch2_snapshot_right_child(c, parent);
439                 if (n && n != id)
440                         return n;
441                 id = parent;
442         }
443
444         return 0;
445 }
446
447 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
448 {
449         u32 id = snapshot_root;
450         u32 subvol = 0, s;
451
452         while (id) {
453                 s = snapshot_t(c, id)->subvol;
454
455                 if (s && (!subvol || s < subvol))
456                         subvol = s;
457
458                 id = bch2_snapshot_tree_next(c, id);
459         }
460
461         return subvol;
462 }
463
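/*
 * Find the master subvolume for a snapshot tree: scan the subvolumes btree
 * for a non-snapshot subvolume whose snapshot lives in this tree. If there is
 * none, fall back to the oldest subvolume in the tree and clear its
 * BCH_SUBVOLUME_SNAP flag so it can serve as the master.
 */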
464 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
465                                             u32 snapshot_root, u32 *subvol_id)
466 {
467         struct bch_fs *c = trans->c;
468         struct btree_iter iter;
469         struct bkey_s_c k;
470         bool found = false;
471         int ret;
472
473         for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
474                                      0, k, ret) {
475                 if (k.k->type != KEY_TYPE_subvolume)
476                         continue;
477
478                 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
479                 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
480                         continue;
481                 if (!BCH_SUBVOLUME_SNAP(s.v)) {
482                         *subvol_id = s.k->p.offset;
483                         found = true;
484                         break;
485                 }
486         }
487
488         bch2_trans_iter_exit(trans, &iter);
489
490         if (!ret && !found) {
491                 struct bkey_i_subvolume *u;
492
493                 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
494
495                 u = bch2_bkey_get_mut_typed(trans, &iter,
496                                             BTREE_ID_subvolumes, POS(0, *subvol_id),
497                                             0, subvolume);
498                 ret = PTR_ERR_OR_ZERO(u);
499                 if (ret)
500                         return ret;
501
502                 SET_BCH_SUBVOLUME_SNAP(&u->v, false);
503         }
504
505         return ret;
506 }
507
508 static int check_snapshot_tree(struct btree_trans *trans,
509                                struct btree_iter *iter,
510                                struct bkey_s_c k)
511 {
512         struct bch_fs *c = trans->c;
513         struct bkey_s_c_snapshot_tree st;
514         struct bch_snapshot s;
515         struct bch_subvolume subvol;
516         struct printbuf buf = PRINTBUF;
517         u32 root_id;
518         int ret;
519
520         if (k.k->type != KEY_TYPE_snapshot_tree)
521                 return 0;
522
523         st = bkey_s_c_to_snapshot_tree(k);
524         root_id = le32_to_cpu(st.v->root_snapshot);
525
526         ret = bch2_snapshot_lookup(trans, root_id, &s);
527         if (ret && !bch2_err_matches(ret, ENOENT))
528                 goto err;
529
530         if (fsck_err_on(ret ||
531                         root_id != bch2_snapshot_root(c, root_id) ||
532                         st.k->p.offset != le32_to_cpu(s.tree),
533                         c, snapshot_tree_to_missing_snapshot,
534                         "snapshot tree points to missing/incorrect snapshot:\n  %s",
535                         (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
536                 ret = bch2_btree_delete_at(trans, iter, 0);
537                 goto err;
538         }
539
540         ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
541                                  false, 0, &subvol);
542         if (ret && !bch2_err_matches(ret, ENOENT))
543                 goto err;
544
545         if (fsck_err_on(ret,
546                         c, snapshot_tree_to_missing_subvol,
547                         "snapshot tree points to missing subvolume:\n  %s",
548                         (printbuf_reset(&buf),
549                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
550             fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
551                                                 le32_to_cpu(subvol.snapshot),
552                                                 root_id),
553                         c, snapshot_tree_to_wrong_subvol,
554                         "snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
555                         (printbuf_reset(&buf),
556                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
557             fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
558                         c, snapshot_tree_to_snapshot_subvol,
559                         "snapshot tree points to snapshot subvolume:\n  %s",
560                         (printbuf_reset(&buf),
561                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
562                 struct bkey_i_snapshot_tree *u;
563                 u32 subvol_id;
564
565                 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
566                 if (ret)
567                         goto err;
568
569                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
570                 ret = PTR_ERR_OR_ZERO(u);
571                 if (ret)
572                         goto err;
573
574                 u->v.master_subvol = cpu_to_le32(subvol_id);
575                 st = snapshot_tree_i_to_s_c(u);
576         }
577 err:
578 fsck_err:
579         printbuf_exit(&buf);
580         return ret;
581 }
582
583 /*
584  * For each snapshot_tree, make sure it points to the root of a snapshot tree
585  * and that the root snapshot points back to it, or delete it.
586  *
587  * And, make sure it points to a subvolume within that snapshot tree, or correct
588  * it to point to the oldest subvolume within that snapshot tree.
589  */
590 int bch2_check_snapshot_trees(struct bch_fs *c)
591 {
592         int ret = bch2_trans_run(c,
593                 for_each_btree_key_commit(trans, iter,
594                         BTREE_ID_snapshot_trees, POS_MIN,
595                         BTREE_ITER_PREFETCH, k,
596                         NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
597                 check_snapshot_tree(trans, &iter, k)));
598         bch_err_fn(c, ret);
599         return ret;
600 }
601
602 /*
603  * Look up snapshot tree for @tree_id and find root,
604  * make sure @snap_id is a descendent:
605  */
606 static int snapshot_tree_ptr_good(struct btree_trans *trans,
607                                   u32 snap_id, u32 tree_id)
608 {
609         struct bch_snapshot_tree s_t;
610         int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
611
612         if (bch2_err_matches(ret, ENOENT))
613                 return 0;
614         if (ret)
615                 return ret;
616
617         return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
618 }
619
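/*
 * Pick a skiplist entry for a new node: returns a node chosen at random from
 * the path between @id and the root (callers pass the new node's parent).
 * The randomized skip pointers are what let get_ancestor_below() skip most of
 * the walk up the tree, keeping ancestor queries cheap on average.
 */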
620 u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
621 {
622         const struct snapshot_t *s;
623
624         if (!id)
625                 return 0;
626
627         rcu_read_lock();
628         s = snapshot_t(c, id);
629         if (s->parent)
630                 id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
631         rcu_read_unlock();
632
633         return id;
634 }
635
636 static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
637 {
638         unsigned i;
639
640         for (i = 0; i < 3; i++)
641                 if (!s.parent) {
642                         if (s.skip[i])
643                                 return false;
644                 } else {
645                         if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
646                                 return false;
647                 }
648
649         return true;
650 }
651
652 /*
653  * The snapshot_tree pointer was incorrect: look up the root snapshot node,
654  * make sure its snapshot_tree pointer is correct (allocating a new one if
655  * necessary), then update this node's pointer to match the root's:
656  */
657 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
658                                     struct btree_iter *iter,
659                                     struct bkey_s_c k,
660                                     struct bch_snapshot *s)
661 {
662         struct bch_fs *c = trans->c;
663         struct btree_iter root_iter;
664         struct bch_snapshot_tree s_t;
665         struct bkey_s_c_snapshot root;
666         struct bkey_i_snapshot *u;
667         u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
668         int ret;
669
670         root = bch2_bkey_get_iter_typed(trans, &root_iter,
671                                BTREE_ID_snapshots, POS(0, root_id),
672                                BTREE_ITER_WITH_UPDATES, snapshot);
673         ret = bkey_err(root);
674         if (ret)
675                 goto err;
676
677         tree_id = le32_to_cpu(root.v->tree);
678
679         ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
680         if (ret && !bch2_err_matches(ret, ENOENT))
681                 return ret;
682
683         if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
684                 u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
685                 ret =   PTR_ERR_OR_ZERO(u) ?:
686                         bch2_snapshot_tree_create(trans, root_id,
687                                 bch2_snapshot_tree_oldest_subvol(c, root_id),
688                                 &tree_id);
689                 if (ret)
690                         goto err;
691
692                 u->v.tree = cpu_to_le32(tree_id);
693                 if (k.k->p.offset == root_id)
694                         *s = u->v;
695         }
696
697         if (k.k->p.offset != root_id) {
698                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
699                 ret = PTR_ERR_OR_ZERO(u);
700                 if (ret)
701                         goto err;
702
703                 u->v.tree = cpu_to_le32(tree_id);
704                 *s = u->v;
705         }
706 err:
707         bch2_trans_iter_exit(trans, &root_iter);
708         return ret;
709 }
710
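/*
 * Check a single snapshot node: verify that its parent and children point
 * back at it, that its subvol field and BCH_SNAPSHOT_SUBVOL flag are
 * consistent, and repair the snapshot_tree pointer, depth and skiplist fields
 * if they're wrong.
 */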
711 static int check_snapshot(struct btree_trans *trans,
712                           struct btree_iter *iter,
713                           struct bkey_s_c k)
714 {
715         struct bch_fs *c = trans->c;
716         struct bch_snapshot s;
717         struct bch_subvolume subvol;
718         struct bch_snapshot v;
719         struct bkey_i_snapshot *u;
720         u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
721         u32 real_depth;
722         struct printbuf buf = PRINTBUF;
723         bool should_have_subvol;
724         u32 i, id;
725         int ret = 0;
726
727         if (k.k->type != KEY_TYPE_snapshot)
728                 return 0;
729
730         memset(&s, 0, sizeof(s));
731         memcpy(&s, k.v, bkey_val_bytes(k.k));
732
733         id = le32_to_cpu(s.parent);
734         if (id) {
735                 ret = bch2_snapshot_lookup(trans, id, &v);
736                 if (bch2_err_matches(ret, ENOENT))
737                         bch_err(c, "snapshot with nonexistent parent:\n  %s",
738                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
739                 if (ret)
740                         goto err;
741
742                 if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
743                     le32_to_cpu(v.children[1]) != k.k->p.offset) {
744                         bch_err(c, "snapshot parent %u missing pointer to child %llu",
745                                 id, k.k->p.offset);
746                         ret = -EINVAL;
747                         goto err;
748                 }
749         }
750
751         for (i = 0; i < 2 && s.children[i]; i++) {
752                 id = le32_to_cpu(s.children[i]);
753
754                 ret = bch2_snapshot_lookup(trans, id, &v);
755                 if (bch2_err_matches(ret, ENOENT))
756                         bch_err(c, "snapshot node %llu has nonexistent child %u",
757                                 k.k->p.offset, id);
758                 if (ret)
759                         goto err;
760
761                 if (le32_to_cpu(v.parent) != k.k->p.offset) {
762                         bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
763                                 id, le32_to_cpu(v.parent), k.k->p.offset);
764                         ret = -EINVAL;
765                         goto err;
766                 }
767         }
768
769         should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
770                 !BCH_SNAPSHOT_DELETED(&s);
771
772         if (should_have_subvol) {
773                 id = le32_to_cpu(s.subvol);
774                 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
775                 if (bch2_err_matches(ret, ENOENT))
776                         bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
777                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
778                 if (ret)
779                         goto err;
780
781                 if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
782                         bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
783                                 k.k->p.offset);
784                         ret = -EINVAL;
785                         goto err;
786                 }
787         } else {
788                 if (fsck_err_on(s.subvol,
789                                 c, snapshot_should_not_have_subvol,
790                                 "snapshot should not point to subvol:\n  %s",
791                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
792                         u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
793                         ret = PTR_ERR_OR_ZERO(u);
794                         if (ret)
795                                 goto err;
796
797                         u->v.subvol = 0;
798                         s = u->v;
799                 }
800         }
801
802         ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
803         if (ret < 0)
804                 goto err;
805
806         if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
807                         "snapshot points to missing/incorrect tree:\n  %s",
808                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
809                 ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
810                 if (ret)
811                         goto err;
812         }
813         ret = 0;
814
815         real_depth = bch2_snapshot_depth(c, parent_id);
816
817         if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
818                         c, snapshot_bad_depth,
819                         "snapshot with incorrect depth field, should be %u:\n  %s",
820                         real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
821                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
822                 ret = PTR_ERR_OR_ZERO(u);
823                 if (ret)
824                         goto err;
825
826                 u->v.depth = cpu_to_le32(real_depth);
827                 s = u->v;
828         }
829
830         ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
831         if (ret < 0)
832                 goto err;
833
834         if (fsck_err_on(!ret, c, snapshot_bad_skiplist,
835                         "snapshot with bad skiplist field:\n  %s",
836                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
837                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
838                 ret = PTR_ERR_OR_ZERO(u);
839                 if (ret)
840                         goto err;
841
842                 for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
843                         u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
844
845                 bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
846                 s = u->v;
847         }
848         ret = 0;
849 err:
850 fsck_err:
851         printbuf_exit(&buf);
852         return ret;
853 }
854
855 int bch2_check_snapshots(struct bch_fs *c)
856 {
857         /*
858          * We iterate backwards as checking/fixing the depth field requires that
859          * the parent's depth already be correct:
860          */
861         int ret = bch2_trans_run(c,
862                 for_each_btree_key_reverse_commit(trans, iter,
863                                 BTREE_ID_snapshots, POS_MAX,
864                                 BTREE_ITER_PREFETCH, k,
865                                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
866                         check_snapshot(trans, &iter, k)));
867         bch_err_fn(c, ret);
868         return ret;
869 }
870
871 /*
872  * Mark a snapshot as deleted, for future cleanup:
873  */
874 int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
875 {
876         struct btree_iter iter;
877         struct bkey_i_snapshot *s;
878         int ret = 0;
879
880         s = bch2_bkey_get_mut_typed(trans, &iter,
881                                     BTREE_ID_snapshots, POS(0, id),
882                                     0, snapshot);
883         ret = PTR_ERR_OR_ZERO(s);
884         if (unlikely(ret)) {
885                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
886                                         trans->c, "missing snapshot %u", id);
887                 return ret;
888         }
889
890         /* already deleted? */
891         if (BCH_SNAPSHOT_DELETED(&s->v))
892                 goto err;
893
894         SET_BCH_SNAPSHOT_DELETED(&s->v, true);
895         SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
896         s->v.subvol = 0;
897 err:
898         bch2_trans_iter_exit(trans, &iter);
899         return ret;
900 }
901
902 static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
903 {
904         if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
905                 swap(s->children[0], s->children[1]);
906 }
907
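/*
 * Unlink a snapshot node (which must have at most one child) from the tree:
 * point the parent's child pointer at our child, point the child's parent at
 * our parent (clearing its skiplist if it becomes a root), update or delete
 * the snapshot_tree entry if we were the root, then delete the key itself.
 */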
908 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
909 {
910         struct bch_fs *c = trans->c;
911         struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
912         struct btree_iter c_iter = (struct btree_iter) { NULL };
913         struct btree_iter tree_iter = (struct btree_iter) { NULL };
914         struct bkey_s_c_snapshot s;
915         u32 parent_id, child_id;
916         unsigned i;
917         int ret = 0;
918
919         s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
920                                      BTREE_ITER_INTENT, snapshot);
921         ret = bkey_err(s);
922         bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
923                                 "missing snapshot %u", id);
924
925         if (ret)
926                 goto err;
927
928         BUG_ON(s.v->children[1]);
929
930         parent_id = le32_to_cpu(s.v->parent);
931         child_id = le32_to_cpu(s.v->children[0]);
932
933         if (parent_id) {
934                 struct bkey_i_snapshot *parent;
935
936                 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
937                                      BTREE_ID_snapshots, POS(0, parent_id),
938                                      0, snapshot);
939                 ret = PTR_ERR_OR_ZERO(parent);
940                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
941                                         "missing snapshot %u", parent_id);
942                 if (unlikely(ret))
943                         goto err;
944
945                 /* find entry in parent->children for node being deleted */
946                 for (i = 0; i < 2; i++)
947                         if (le32_to_cpu(parent->v.children[i]) == id)
948                                 break;
949
950                 if (bch2_fs_inconsistent_on(i == 2, c,
951                                         "snapshot %u missing child pointer to %u",
952                                         parent_id, id))
953                         goto err;
954
955                 parent->v.children[i] = cpu_to_le32(child_id);
956
957                 normalize_snapshot_child_pointers(&parent->v);
958         }
959
960         if (child_id) {
961                 struct bkey_i_snapshot *child;
962
963                 child = bch2_bkey_get_mut_typed(trans, &c_iter,
964                                      BTREE_ID_snapshots, POS(0, child_id),
965                                      0, snapshot);
966                 ret = PTR_ERR_OR_ZERO(child);
967                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
968                                         "missing snapshot %u", child_id);
969                 if (unlikely(ret))
970                         goto err;
971
972                 child->v.parent = cpu_to_le32(parent_id);
973
974                 if (!child->v.parent) {
975                         child->v.skip[0] = 0;
976                         child->v.skip[1] = 0;
977                         child->v.skip[2] = 0;
978                 }
979         }
980
981         if (!parent_id) {
982                 /*
983                  * We're deleting the root of a snapshot tree: update the
984                  * snapshot_tree entry to point to the new root, or delete it if
985                  * this is the last snapshot ID in this tree:
986                  */
987                 struct bkey_i_snapshot_tree *s_t;
988
989                 BUG_ON(s.v->children[1]);
990
991                 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
992                                 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
993                                 0, snapshot_tree);
994                 ret = PTR_ERR_OR_ZERO(s_t);
995                 if (ret)
996                         goto err;
997
998                 if (s.v->children[0]) {
999                         s_t->v.root_snapshot = s.v->children[0];
1000                 } else {
1001                         s_t->k.type = KEY_TYPE_deleted;
1002                         set_bkey_val_u64s(&s_t->k, 0);
1003                 }
1004         }
1005
1006         ret = bch2_btree_delete_at(trans, &iter, 0);
1007 err:
1008         bch2_trans_iter_exit(trans, &tree_iter);
1009         bch2_trans_iter_exit(trans, &p_iter);
1010         bch2_trans_iter_exit(trans, &c_iter);
1011         bch2_trans_iter_exit(trans, &iter);
1012         return ret;
1013 }
1014
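/*
 * Allocate @nr_snapids new snapshot IDs just below the lowest existing ID, so
 * that children always have lower IDs than their parent; initialize each new
 * node's parent, subvol, tree, depth and skiplist fields and add it to the
 * in-memory snapshot table.
 */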
1015 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
1016                           u32 *new_snapids,
1017                           u32 *snapshot_subvols,
1018                           unsigned nr_snapids)
1019 {
1020         struct bch_fs *c = trans->c;
1021         struct btree_iter iter;
1022         struct bkey_i_snapshot *n;
1023         struct bkey_s_c k;
1024         unsigned i, j;
1025         u32 depth = bch2_snapshot_depth(c, parent);
1026         int ret;
1027
1028         bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
1029                              POS_MIN, BTREE_ITER_INTENT);
1030         k = bch2_btree_iter_peek(&iter);
1031         ret = bkey_err(k);
1032         if (ret)
1033                 goto err;
1034
1035         for (i = 0; i < nr_snapids; i++) {
1036                 k = bch2_btree_iter_prev_slot(&iter);
1037                 ret = bkey_err(k);
1038                 if (ret)
1039                         goto err;
1040
1041                 if (!k.k || !k.k->p.offset) {
1042                         ret = -BCH_ERR_ENOSPC_snapshot_create;
1043                         goto err;
1044                 }
1045
1046                 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
1047                 ret = PTR_ERR_OR_ZERO(n);
1048                 if (ret)
1049                         goto err;
1050
1051                 n->v.flags      = 0;
1052                 n->v.parent     = cpu_to_le32(parent);
1053                 n->v.subvol     = cpu_to_le32(snapshot_subvols[i]);
1054                 n->v.tree       = cpu_to_le32(tree);
1055                 n->v.depth      = cpu_to_le32(depth);
1056                 n->v.btime.lo   = cpu_to_le64(bch2_current_time(c));
1057                 n->v.btime.hi   = 0;
1058
1059                 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
1060                         n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
1061
1062                 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
1063                 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
1064
1065                 ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
1066                                          bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
1067                 if (ret)
1068                         goto err;
1069
1070                 new_snapids[i]  = iter.pos.offset;
1071
1072                 mutex_lock(&c->snapshot_table_lock);
1073                 snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
1074                 mutex_unlock(&c->snapshot_table_lock);
1075         }
1076 err:
1077         bch2_trans_iter_exit(trans, &iter);
1078         return ret;
1079 }
1080
1081 /*
1082  * Create new snapshot IDs as children of an existing snapshot ID:
1083  */
1084 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
1085                               u32 *new_snapids,
1086                               u32 *snapshot_subvols,
1087                               unsigned nr_snapids)
1088 {
1089         struct btree_iter iter;
1090         struct bkey_i_snapshot *n_parent;
1091         int ret = 0;
1092
1093         n_parent = bch2_bkey_get_mut_typed(trans, &iter,
1094                         BTREE_ID_snapshots, POS(0, parent),
1095                         0, snapshot);
1096         ret = PTR_ERR_OR_ZERO(n_parent);
1097         if (unlikely(ret)) {
1098                 if (bch2_err_matches(ret, ENOENT))
1099                         bch_err(trans->c, "snapshot %u not found", parent);
1100                 return ret;
1101         }
1102
1103         if (n_parent->v.children[0] || n_parent->v.children[1]) {
1104                 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
1105                 ret = -EINVAL;
1106                 goto err;
1107         }
1108
1109         ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
1110                              new_snapids, snapshot_subvols, nr_snapids);
1111         if (ret)
1112                 goto err;
1113
1114         n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
1115         n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
1116         n_parent->v.subvol = 0;
1117         SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
1118 err:
1119         bch2_trans_iter_exit(trans, &iter);
1120         return ret;
1121 }
1122
1123 /*
1124  * Create a snapshot node that is the root of a new tree:
1125  */
1126 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
1127                               u32 *new_snapids,
1128                               u32 *snapshot_subvols,
1129                               unsigned nr_snapids)
1130 {
1131         struct bkey_i_snapshot_tree *n_tree;
1132         int ret;
1133
1134         n_tree = __bch2_snapshot_tree_create(trans);
1135         ret =   PTR_ERR_OR_ZERO(n_tree) ?:
1136                 create_snapids(trans, 0, n_tree->k.p.offset,
1137                              new_snapids, snapshot_subvols, nr_snapids);
1138         if (ret)
1139                 return ret;
1140
1141         n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
1142         n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
1143         return 0;
1144 }
1145
1146 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
1147                               u32 *new_snapids,
1148                               u32 *snapshot_subvols,
1149                               unsigned nr_snapids)
1150 {
1151         BUG_ON((parent == 0) != (nr_snapids == 1));
1152         BUG_ON((parent != 0) != (nr_snapids == 2));
1153
1154         return parent
1155                 ? bch2_snapshot_node_create_children(trans, parent,
1156                                 new_snapids, snapshot_subvols, nr_snapids)
1157                 : bch2_snapshot_node_create_tree(trans,
1158                                 new_snapids, snapshot_subvols, nr_snapids);
1159
1160 }
1161
1162 /*
1163  * If we have an unlinked inode in an internal snapshot node, and the inode
1164  * really has been deleted in all child snapshots, how does this get cleaned up?
1165  *
1166  * First, there is the problem of how keys that have been overwritten in all
1167  * child snapshots get deleted (unimplemented?), but inodes may perhaps be
1168  * special?
1169  *
1170  * Also: an unlinked inode in an internal snapshot node appears not to get
1171  * deleted correctly if the inode doesn't exist in the leaf snapshots.
1172  *
1173  * Solution:
1174  *
1175  * For a key in an interior snapshot node that needs work that requires it to
1176  * be mutated: iterate over all descendant leaf nodes and copy that key to the
1177  * leaf snapshot nodes, where we can mutate it.
1178  */
1179
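/*
 * Delete a key belonging to a dying snapshot, or drop it if it's redundant:
 * keys in a snapshot on the @deleted list are removed outright; when several
 * keys at the same position fall in the same equivalence class, only the
 * first one seen (the one in the newest snapshot) is kept.
 */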
1180 static int snapshot_delete_key(struct btree_trans *trans,
1181                                struct btree_iter *iter,
1182                                struct bkey_s_c k,
1183                                snapshot_id_list *deleted,
1184                                snapshot_id_list *equiv_seen,
1185                                struct bpos *last_pos)
1186 {
1187         struct bch_fs *c = trans->c;
1188         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1189
1190         if (!bkey_eq(k.k->p, *last_pos))
1191                 equiv_seen->nr = 0;
1192         *last_pos = k.k->p;
1193
1194         if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
1195             snapshot_list_has_id(equiv_seen, equiv)) {
1196                 return bch2_btree_delete_at(trans, iter,
1197                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1198         } else {
1199                 return snapshot_list_add(c, equiv_seen, equiv);
1200         }
1201 }
1202
1203 static int move_key_to_correct_snapshot(struct btree_trans *trans,
1204                                struct btree_iter *iter,
1205                                struct bkey_s_c k)
1206 {
1207         struct bch_fs *c = trans->c;
1208         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1209
1210         /*
1211          * When we have a linear chain of snapshot nodes, we consider
1212          * those to form an equivalence class: we're going to collapse
1213          * them all down to a single node, and keep the leaf-most node -
1214          * which has the same id as the equivalence class id.
1215          *
1216          * If there are multiple keys in different snapshots at the same
1217          * position, we're only going to keep the one in the newest
1218          * snapshot - the rest have been overwritten and are redundant,
1219          * and for the key we're going to keep we need to move it to the
1220          * equivalence class ID if it's not there already.
1221          */
1222         if (equiv != k.k->p.snapshot) {
1223                 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1224                 struct btree_iter new_iter;
1225                 int ret;
1226
1227                 ret = PTR_ERR_OR_ZERO(new);
1228                 if (ret)
1229                         return ret;
1230
1231                 new->k.p.snapshot = equiv;
1232
1233                 bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
1234                                      BTREE_ITER_ALL_SNAPSHOTS|
1235                                      BTREE_ITER_CACHED|
1236                                      BTREE_ITER_INTENT);
1237
1238                 ret =   bch2_btree_iter_traverse(&new_iter) ?:
1239                         bch2_trans_update(trans, &new_iter, new,
1240                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
1241                         bch2_btree_delete_at(trans, iter,
1242                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1243                 bch2_trans_iter_exit(trans, &new_iter);
1244                 if (ret)
1245                         return ret;
1246         }
1247
1248         return 0;
1249 }
1250
1251 static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
1252 {
1253         struct bkey_s_c_snapshot snap;
1254         u32 children[2];
1255         int ret;
1256
1257         if (k.k->type != KEY_TYPE_snapshot)
1258                 return 0;
1259
1260         snap = bkey_s_c_to_snapshot(k);
1261         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1262             BCH_SNAPSHOT_SUBVOL(snap.v))
1263                 return 0;
1264
1265         children[0] = le32_to_cpu(snap.v->children[0]);
1266         children[1] = le32_to_cpu(snap.v->children[1]);
1267
1268         ret   = bch2_snapshot_live(trans, children[0]) ?:
1269                 bch2_snapshot_live(trans, children[1]);
1270         if (ret < 0)
1271                 return ret;
1272         return !ret;
1273 }
1274
1275 /*
1276  * For a given snapshot, if it doesn't have a subvolume that points to it, and
1277  * it doesn't have live child snapshot nodes - it's now redundant and we can
1278  * mark it as deleted.
1279  */
1280 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
1281 {
1282         int ret = bch2_snapshot_needs_delete(trans, k);
1283
1284         return ret <= 0
1285                 ? ret
1286                 : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
1287 }
1288
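/*
 * Like bch2_snapshot_nth_parent(), but IDs on the @skip list (the interior
 * nodes about to be deleted) are not counted and are stepped over.
 */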
1289 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
1290                                                 snapshot_id_list *skip)
1291 {
1292         rcu_read_lock();
1293         while (snapshot_list_has_id(skip, id))
1294                 id = __bch2_snapshot_parent(c, id);
1295
1296         while (n--) {
1297                 do {
1298                         id = __bch2_snapshot_parent(c, id);
1299                 } while (snapshot_list_has_id(skip, id));
1300         }
1301         rcu_read_unlock();
1302
1303         return id;
1304 }
1305
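/*
 * A surviving snapshot node may have ancestors that are about to be deleted:
 * reduce its depth by the number of deleted ancestors, and replace any
 * skiplist entries that point into the deleted set with ancestors that will
 * survive.
 */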
1306 static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
1307                                               struct btree_iter *iter, struct bkey_s_c k,
1308                                               snapshot_id_list *deleted)
1309 {
1310         struct bch_fs *c = trans->c;
1311         u32 nr_deleted_ancestors = 0;
1312         struct bkey_i_snapshot *s;
1313         int ret;
1314
1315         if (k.k->type != KEY_TYPE_snapshot)
1316                 return 0;
1317
1318         if (snapshot_list_has_id(deleted, k.k->p.offset))
1319                 return 0;
1320
1321         s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
1322         ret = PTR_ERR_OR_ZERO(s);
1323         if (ret)
1324                 return ret;
1325
1326         darray_for_each(*deleted, i)
1327                 nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
1328
1329         if (!nr_deleted_ancestors)
1330                 return 0;
1331
1332         le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
1333
1334         if (!s->v.depth) {
1335                 s->v.skip[0] = 0;
1336                 s->v.skip[1] = 0;
1337                 s->v.skip[2] = 0;
1338         } else {
1339                 u32 depth = le32_to_cpu(s->v.depth);
1340                 u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
1341
1342                 for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
1343                         u32 id = le32_to_cpu(s->v.skip[j]);
1344
1345                         if (snapshot_list_has_id(deleted, id)) {
1346                                 id = bch2_snapshot_nth_parent_skip(c,
1347                                                         parent,
1348                                                         depth > 1
1349                                                         ? get_random_u32_below(depth - 1)
1350                                                         : 0,
1351                                                         deleted);
1352                                 s->v.skip[j] = cpu_to_le32(id);
1353                         }
1354                 }
1355
1356                 bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
1357         }
1358
1359         return bch2_trans_update(trans, iter, &s->k_i, 0);
1360 }
1361
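/*
 * Delete no-longer-needed snapshot nodes, in several passes:
 *
 * 1. Mark redundant nodes (not pointed to by a subvolume, no live children)
 *    as deleted.
 * 2. Compute equivalence classes for all nodes.
 * 3. Collect the set of nodes marked deleted.
 * 4. For every btree with snapshots enabled, delete keys in deleted snapshots
 *    and move the keys we're keeping to their equivalence class ID.
 * 5. Collect the interior nodes being collapsed (equiv != id), and fix the
 *    depth and skiplist fields of the nodes surviving them.
 * 6. Delete the snapshot nodes themselves - both the explicitly deleted ones
 *    and the collapsed interior nodes.
 */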
1362 int bch2_delete_dead_snapshots(struct bch_fs *c)
1363 {
1364         struct btree_trans *trans;
1365         snapshot_id_list deleted = { 0 };
1366         snapshot_id_list deleted_interior = { 0 };
1367         u32 id;
1368         int ret = 0;
1369
1370         if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
1371                 return 0;
1372
1373         if (!test_bit(BCH_FS_started, &c->flags)) {
1374                 ret = bch2_fs_read_write_early(c);
1375                 bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
1376                 if (ret)
1377                         return ret;
1378         }
1379
1380         trans = bch2_trans_get(c);
1381
1382         /*
1383          * For every snapshot node: If we have no live children and it's not
1384          * pointed to by a subvolume, delete it:
1385          */
1386         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
1387                         POS_MIN, 0, k,
1388                         NULL, NULL, 0,
1389                 bch2_delete_redundant_snapshot(trans, k));
1390         bch_err_msg(c, ret, "deleting redundant snapshots");
1391         if (ret)
1392                 goto err;
1393
1394         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1395                                  POS_MIN, 0, k,
1396                 bch2_snapshot_set_equiv(trans, k));
1397         bch_err_msg(c, ret, "in bch2_snapshot_set_equiv");
1398         if (ret)
1399                 goto err;
1400
1401         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1402                                  POS_MIN, 0, k, ({
1403                 if (k.k->type != KEY_TYPE_snapshot)
1404                         continue;
1405
1406                 BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
1407                         ? snapshot_list_add(c, &deleted, k.k->p.offset)
1408                         : 0;
1409         }));
1410         bch_err_msg(c, ret, "walking snapshots");
1411         if (ret)
1412                 goto err;
1413
1414         for (id = 0; id < BTREE_ID_NR; id++) {
1415                 struct bpos last_pos = POS_MIN;
1416                 snapshot_id_list equiv_seen = { 0 };
1417                 struct disk_reservation res = { 0 };
1418
1419                 if (!btree_type_has_snapshots(id))
1420                         continue;
1421
1422                 /*
1423                  * deleted inodes btree is maintained by a trigger on the inodes
1424                  * btree - no work for us to do here, and it's not safe to scan
1425                  * it because we'll see out of date keys due to the btree write
1426                  * buffer:
1427                  */
1428                 if (id == BTREE_ID_deleted_inodes)
1429                         continue;
1430
1431                 ret = for_each_btree_key_commit(trans, iter,
1432                                 id, POS_MIN,
1433                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1434                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1435                         snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
1436                       for_each_btree_key_commit(trans, iter,
1437                                 id, POS_MIN,
1438                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1439                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1440                         move_key_to_correct_snapshot(trans, &iter, k));
1441
1442                 bch2_disk_reservation_put(c, &res);
1443                 darray_exit(&equiv_seen);
1444
1445                 bch_err_msg(c, ret, "deleting keys from dying snapshots");
1446                 if (ret)
1447                         goto err;
1448         }
1449
1450         bch2_trans_unlock(trans);
1451         down_write(&c->snapshot_create_lock);
1452
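        /*
         * Collect interior snapshot nodes - nodes that are no longer their
         * own equivalence class representative; they're deleted last, after
         * their children have been fixed up below:
         */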
1453         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1454                                  POS_MIN, 0, k, ({
1455                 u32 snapshot = k.k->p.offset;
1456                 u32 equiv = bch2_snapshot_equiv(c, snapshot);
1457
1458                 equiv != snapshot
1459                         ? snapshot_list_add(c, &deleted_interior, snapshot)
1460                         : 0;
1461         }));
1462
1463         bch_err_msg(c, ret, "walking snapshots");
1464         if (ret)
1465                 goto err_create_lock;
1466
1467         /*
1468          * Fixing children of deleted snapshots can't be done completely
1469          * atomically; if we crash between here and when we delete the interior
1470          * nodes, some depth fields will be off:
1471          */
1472         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
1473                                   BTREE_ITER_INTENT, k,
1474                                   NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1475                 bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
1476         if (ret)
1477                 goto err_create_lock;
1478
1479         darray_for_each(deleted, i) {
1480                 ret = commit_do(trans, NULL, NULL, 0,
1481                         bch2_snapshot_node_delete(trans, *i));
1482                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1483                 if (ret)
1484                         goto err_create_lock;
1485         }
1486
1487         darray_for_each(deleted_interior, i) {
1488                 ret = commit_do(trans, NULL, NULL, 0,
1489                         bch2_snapshot_node_delete(trans, *i));
1490                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1491                 if (ret)
1492                         goto err_create_lock;
1493         }
1494 err_create_lock:
1495         up_write(&c->snapshot_create_lock);
1496 err:
1497         darray_exit(&deleted_interior);
1498         darray_exit(&deleted);
1499         bch2_trans_put(trans);
1500         bch_err_fn(c, ret);
1501         return ret;
1502 }
1503
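/*
 * Work item for deleting dead snapshots in the background; drops the write
 * ref taken by bch2_delete_dead_snapshots_async() when it finishes.
 */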
1504 void bch2_delete_dead_snapshots_work(struct work_struct *work)
1505 {
1506         struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1507
1508         bch2_delete_dead_snapshots(c);
1509         bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1510 }
1511
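/*
 * Kick off dead snapshot deletion on the background workqueue; the write ref
 * is dropped immediately if the work item was already queued.
 *
 * Illustrative usage (hypothetical caller), after flagging that a deletion
 * pass is needed:
 *
 *        set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
 *        bch2_delete_dead_snapshots_async(c);
 */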
1512 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1513 {
1514         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1515             !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1516                 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1517 }
1518
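/*
 * Returns 1 if the key at @pos has been overwritten at the same position in a
 * snapshot descended from @pos.snapshot, 0 if not, or a negative error code:
 */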
1519 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
1520                                        enum btree_id id,
1521                                        struct bpos pos)
1522 {
1523         struct bch_fs *c = trans->c;
1524         struct btree_iter iter;
1525         struct bkey_s_c k;
1526         int ret;
1527
1528         bch2_trans_iter_init(trans, &iter, id, pos,
1529                              BTREE_ITER_NOT_EXTENTS|
1530                              BTREE_ITER_ALL_SNAPSHOTS);
1531         while (1) {
1532                 k = bch2_btree_iter_prev(&iter);
1533                 ret = bkey_err(k);
1534                 if (ret)
1535                         break;
1536
1537                 if (!k.k)
1538                         break;
1539
1540                 if (!bkey_eq(pos, k.k->p))
1541                         break;
1542
1543                 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
1544                         ret = 1;
1545                         break;
1546                 }
1547         }
1548         bch2_trans_iter_exit(trans, &iter);
1549
1550         return ret;
1551 }
1552
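/* Return the smallest (lowest-numbered) child of @id, or 0 if @id is a leaf: */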
1553 static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
1554 {
1555         const struct snapshot_t *s = snapshot_t(c, id);
1556
1557         return s->children[1] ?: s->children[0];
1558 }
1559
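/*
 * Walk down the snapshot tree, always taking the smallest child, to find the
 * lowest-numbered leaf in the subtree rooted at @id:
 */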
1560 static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
1561 {
1562         u32 child;
1563
1564         while ((child = bch2_snapshot_smallest_child(c, id)))
1565                 id = child;
1566         return id;
1567 }
1568
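/*
 * Copy @interior_k to the same position in leaf snapshot @leaf_id, unless the
 * key has already been overwritten in that snapshot; *@new_min_pos is set to
 * the position of the first key updated:
 */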
1569 static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
1570                                                enum btree_id btree,
1571                                                struct bkey_s_c interior_k,
1572                                                u32 leaf_id, struct bpos *new_min_pos)
1573 {
1574         struct btree_iter iter;
1575         struct bpos pos = interior_k.k->p;
1576         struct bkey_s_c k;
1577         struct bkey_i *new;
1578         int ret;
1579
1580         pos.snapshot = leaf_id;
1581
1582         bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
1583         k = bch2_btree_iter_peek_slot(&iter);
1584         ret = bkey_err(k);
1585         if (ret)
1586                 goto out;
1587
1588         /* key already overwritten in this snapshot? */
1589         if (k.k->p.snapshot != interior_k.k->p.snapshot)
1590                 goto out;
1591
1592         if (bpos_eq(*new_min_pos, POS_MIN)) {
1593                 *new_min_pos = k.k->p;
1594                 new_min_pos->snapshot = leaf_id;
1595         }
1596
1597         new = bch2_bkey_make_mut_noupdate(trans, interior_k);
1598         ret = PTR_ERR_OR_ZERO(new);
1599         if (ret)
1600                 goto out;
1601
1602         new->k.p.snapshot = leaf_id;
1603         ret = bch2_trans_update(trans, &iter, new, 0);
1604 out:
1605         bch2_trans_iter_exit(trans, &iter);
1606         return ret;
1607 }
1608
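/*
 * For every leaf snapshot descended from @k's snapshot, copy @k to that leaf
 * unless it has already been overwritten there, committing after each leaf
 * and handling transaction restarts internally:
 */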
1609 int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
1610                                           enum btree_id btree,
1611                                           struct bkey_s_c k,
1612                                           struct bpos *new_min_pos)
1613 {
1614         struct bch_fs *c = trans->c;
1615         struct bkey_buf sk;
1616         u32 restart_count = trans->restart_count;
1617         int ret = 0;
1618
1619         bch2_bkey_buf_init(&sk);
1620         bch2_bkey_buf_reassemble(&sk, c, k);
1621         k = bkey_i_to_s_c(sk.k);
1622
1623         *new_min_pos = POS_MIN;
1624
1625         for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
1626              id < k.k->p.snapshot;
1627              id++) {
1628                 if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
1629                     !bch2_snapshot_is_leaf(c, id))
1630                         continue;
1631 again:
1632                 ret =   btree_trans_too_many_iters(trans) ?:
1633                         bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
1634                         bch2_trans_commit(trans, NULL, NULL, 0);
1635                 if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1636                         bch2_trans_begin(trans);
1637                         goto again;
1638                 }
1639
1640                 if (ret)
1641                         break;
1642         }
1643
1644         bch2_bkey_buf_exit(&sk, c);
1645
1646         return ret ?: trans_was_restarted(trans, restart_count);
1647 }
1648
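/*
 * If @k is a snapshot node that has been marked deleted, is no longer its own
 * equivalence class representative, or otherwise needs deleting (per
 * bch2_snapshot_needs_delete()), flag the filesystem so that a dead snapshot
 * deletion pass gets run:
 */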
1649 static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
1650 {
1651         struct bch_fs *c = trans->c;
1652         struct bkey_s_c_snapshot snap;
1653         int ret = 0;
1654
1655         if (k.k->type != KEY_TYPE_snapshot)
1656                 return 0;
1657
1658         snap = bkey_s_c_to_snapshot(k);
1659         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1660             bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
1661             (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
1662                 set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
1663                 return 0;
1664         }
1665
1666         return ret;
1667 }
1668
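/*
 * Walk the snapshots btree at startup to build the in-memory snapshot table,
 * set up snapshot equivalence classes and flag whether a dead snapshot
 * deletion pass is needed; a second pass then fills in the ancestor bitmaps:
 */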
1669 int bch2_snapshots_read(struct bch_fs *c)
1670 {
1671         int ret = bch2_trans_run(c,
1672                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1673                                    POS_MIN, 0, k,
1674                         __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
1675                         bch2_snapshot_set_equiv(trans, k) ?:
1676                         bch2_check_snapshot_needs_deletion(trans, k)) ?:
1677                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1678                                    POS_MIN, 0, k,
1679                            (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
1680         bch_err_fn(c, ret);
1681         return ret;
1682 }
1683
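/* Free the in-memory snapshot table at filesystem shutdown: */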
1684 void bch2_fs_snapshots_exit(struct bch_fs *c)
1685 {
1686         kvfree(rcu_dereference_protected(c->snapshots, true));
1687 }