libbcachefs/snapshot.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_key_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "errcode.h"
9 #include "error.h"
10 #include "fs.h"
11 #include "snapshot.h"
12
13 #include <linux/random.h>
14
15 /*
16  * Snapshot trees:
17  *
18  * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
19  * exist to provide a stable identifier for the whole lifetime of a snapshot
20  * tree.
21  */
22
23 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
24                                 struct bkey_s_c k)
25 {
26         struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
27
28         prt_printf(out, "subvol %u root snapshot %u",
29                    le32_to_cpu(t.v->master_subvol),
30                    le32_to_cpu(t.v->root_snapshot));
31 }
32
33 int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
34                                enum bkey_invalid_flags flags,
35                                struct printbuf *err)
36 {
37         int ret = 0;
38
39         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
40                          bkey_lt(k.k->p, POS(0, 1)), c, err,
41                          snapshot_tree_pos_bad,
42                          "bad pos");
43 fsck_err:
44         return ret;
45 }
46
47 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
48                               struct bch_snapshot_tree *s)
49 {
50         int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
51                                           BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
52
53         if (bch2_err_matches(ret, ENOENT))
54                 ret = -BCH_ERR_ENOENT_snapshot_tree;
55         return ret;
56 }
57
58 struct bkey_i_snapshot_tree *
59 __bch2_snapshot_tree_create(struct btree_trans *trans)
60 {
61         struct btree_iter iter;
62         int ret = bch2_bkey_get_empty_slot(trans, &iter,
63                         BTREE_ID_snapshot_trees, POS(0, U32_MAX));
64         struct bkey_i_snapshot_tree *s_t;
65
66         if (ret == -BCH_ERR_ENOSPC_btree_slot)
67                 ret = -BCH_ERR_ENOSPC_snapshot_tree;
68         if (ret)
69                 return ERR_PTR(ret);
70
71         s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
72         ret = PTR_ERR_OR_ZERO(s_t);
73         bch2_trans_iter_exit(trans, &iter);
74         return ret ? ERR_PTR(ret) : s_t;
75 }
76
77 static int bch2_snapshot_tree_create(struct btree_trans *trans,
78                                 u32 root_id, u32 subvol_id, u32 *tree_id)
79 {
80         struct bkey_i_snapshot_tree *n_tree =
81                 __bch2_snapshot_tree_create(trans);
82
83         if (IS_ERR(n_tree))
84                 return PTR_ERR(n_tree);
85
86         n_tree->v.master_subvol = cpu_to_le32(subvol_id);
87         n_tree->v.root_snapshot = cpu_to_le32(root_id);
88         *tree_id = n_tree->k.p.offset;
89         return 0;
90 }
91
92 /* Snapshot nodes: */
93
94 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
95 {
96         struct snapshot_table *t;
97
98         rcu_read_lock();
99         t = rcu_dereference(c->snapshots);
100
101         while (id && id < ancestor)
102                 id = __snapshot_t(t, id)->parent;
103         rcu_read_unlock();
104
105         return id == ancestor;
106 }
107
108 static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
109 {
110         const struct snapshot_t *s = __snapshot_t(t, id);
111
112         if (s->skip[2] <= ancestor)
113                 return s->skip[2];
114         if (s->skip[1] <= ancestor)
115                 return s->skip[1];
116         if (s->skip[0] <= ancestor)
117                 return s->skip[0];
118         return s->parent;
119 }
120
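/*
 * Returns true if @ancestor is an ancestor of @id: walks up from @id via the
 * skiplist entries (get_ancestor_below()) until the remaining ID distance to
 * @ancestor fits in IS_ANCESTOR_BITMAP, then finishes with a single test_bit()
 * in that node's is_ancestor bitmap.
 */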
121 bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
122 {
123         struct snapshot_table *t;
124         bool ret;
125
126         EBUG_ON(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots);
127
128         rcu_read_lock();
129         t = rcu_dereference(c->snapshots);
130
131         while (id && id < ancestor - IS_ANCESTOR_BITMAP)
132                 id = get_ancestor_below(t, id, ancestor);
133
134         if (id && id < ancestor) {
135                 ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
136
137                 EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
138         } else {
139                 ret = id == ancestor;
140         }
141
142         rcu_read_unlock();
143
144         return ret;
145 }
146
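/*
 * c->snapshots is an RCU-protected flat table of struct snapshot_t, indexed by
 * U32_MAX - id. Slow path: grow the table to the next power of two, copy the
 * old contents, publish the new table with rcu_assign_pointer() and free the
 * old one after a grace period.
 */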
147 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
148 {
149         size_t idx = U32_MAX - id;
150         size_t new_size;
151         struct snapshot_table *new, *old;
152
153         new_size = max(16UL, roundup_pow_of_two(idx + 1));
154
155         new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
156         if (!new)
157                 return NULL;
158
159         old = rcu_dereference_protected(c->snapshots, true);
160         if (old)
161                 memcpy(new->s,
162                        rcu_dereference_protected(c->snapshots, true)->s,
163                        sizeof(new->s[0]) * c->snapshot_table_size);
164
165         rcu_assign_pointer(c->snapshots, new);
166         c->snapshot_table_size = new_size;
167         kvfree_rcu_mightsleep(old);
168
169         return &rcu_dereference_protected(c->snapshots, true)->s[idx];
170 }
171
172 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
173 {
174         size_t idx = U32_MAX - id;
175
176         lockdep_assert_held(&c->snapshot_table_lock);
177
178         if (likely(idx < c->snapshot_table_size))
179                 return &rcu_dereference_protected(c->snapshots, true)->s[idx];
180
181         return __snapshot_t_mut(c, id);
182 }
183
184 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
185                            struct bkey_s_c k)
186 {
187         struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
188
189         prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
190                BCH_SNAPSHOT_SUBVOL(s.v),
191                BCH_SNAPSHOT_DELETED(s.v),
192                le32_to_cpu(s.v->parent),
193                le32_to_cpu(s.v->children[0]),
194                le32_to_cpu(s.v->children[1]),
195                le32_to_cpu(s.v->subvol),
196                le32_to_cpu(s.v->tree));
197
198         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
199                 prt_printf(out, " depth %u skiplist %u %u %u",
200                            le32_to_cpu(s.v->depth),
201                            le32_to_cpu(s.v->skip[0]),
202                            le32_to_cpu(s.v->skip[1]),
203                            le32_to_cpu(s.v->skip[2]));
204 }
205
206 int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
207                           enum bkey_invalid_flags flags,
208                           struct printbuf *err)
209 {
210         struct bkey_s_c_snapshot s;
211         u32 i, id;
212         int ret = 0;
213
214         bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
215                          bkey_lt(k.k->p, POS(0, 1)), c, err,
216                          snapshot_pos_bad,
217                          "bad pos");
218
219         s = bkey_s_c_to_snapshot(k);
220
221         id = le32_to_cpu(s.v->parent);
222         bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
223                          snapshot_parent_bad,
224                          "bad parent node (%u <= %llu)",
225                          id, k.k->p.offset);
226
227         bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
228                          snapshot_children_not_normalized,
229                          "children not normalized");
230
231         bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
232                          snapshot_child_duplicate,
233                          "duplicate child nodes");
234
235         for (i = 0; i < 2; i++) {
236                 id = le32_to_cpu(s.v->children[i]);
237
238                 bkey_fsck_err_on(id >= k.k->p.offset, c, err,
239                                  snapshot_child_bad,
240                                  "bad child node (%u >= %llu)",
241                                  id, k.k->p.offset);
242         }
243
244         if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
245                 bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
246                                  le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
247                                  snapshot_skiplist_not_normalized,
248                                  "skiplist not normalized");
249
250                 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
251                         id = le32_to_cpu(s.v->skip[i]);
252
253                         bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
254                                          snapshot_skiplist_bad,
255                                          "bad skiplist node %u", id);
256                 }
257         }
258 fsck_err:
259         return ret;
260 }
261
262 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
263 {
264         struct snapshot_t *t = snapshot_t_mut(c, id);
265         u32 parent = id;
266
267         while ((parent = bch2_snapshot_parent_early(c, parent)) &&
268                parent - id - 1 < IS_ANCESTOR_BITMAP)
269                 __set_bit(parent - id - 1, t->is_ancestor);
270 }
271
272 static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
273 {
274         mutex_lock(&c->snapshot_table_lock);
275         __set_is_ancestor_bitmap(c, id);
276         mutex_unlock(&c->snapshot_table_lock);
277 }
278
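/*
 * Trigger for the snapshots btree: keeps the in-memory snapshot table in sync
 * with the on-disk snapshot keys, and kicks off asynchronous dead snapshot
 * deletion when a node that has been marked deleted is seen.
 */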
279 static int __bch2_mark_snapshot(struct btree_trans *trans,
280                        enum btree_id btree, unsigned level,
281                        struct bkey_s_c old, struct bkey_s_c new,
282                        unsigned flags)
283 {
284         struct bch_fs *c = trans->c;
285         struct snapshot_t *t;
286         u32 id = new.k->p.offset;
287         int ret = 0;
288
289         mutex_lock(&c->snapshot_table_lock);
290
291         t = snapshot_t_mut(c, id);
292         if (!t) {
293                 ret = -BCH_ERR_ENOMEM_mark_snapshot;
294                 goto err;
295         }
296
297         if (new.k->type == KEY_TYPE_snapshot) {
298                 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
299
300                 t->parent       = le32_to_cpu(s.v->parent);
301                 t->children[0]  = le32_to_cpu(s.v->children[0]);
302                 t->children[1]  = le32_to_cpu(s.v->children[1]);
303                 t->subvol       = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
304                 t->tree         = le32_to_cpu(s.v->tree);
305
306                 if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
307                         t->depth        = le32_to_cpu(s.v->depth);
308                         t->skip[0]      = le32_to_cpu(s.v->skip[0]);
309                         t->skip[1]      = le32_to_cpu(s.v->skip[1]);
310                         t->skip[2]      = le32_to_cpu(s.v->skip[2]);
311                 } else {
312                         t->depth        = 0;
313                         t->skip[0]      = 0;
314                         t->skip[1]      = 0;
315                         t->skip[2]      = 0;
316                 }
317
318                 __set_is_ancestor_bitmap(c, id);
319
320                 if (BCH_SNAPSHOT_DELETED(s.v)) {
321                         set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
322                         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
323                                 bch2_delete_dead_snapshots_async(c);
324                 }
325         } else {
326                 memset(t, 0, sizeof(*t));
327         }
328 err:
329         mutex_unlock(&c->snapshot_table_lock);
330         return ret;
331 }
332
333 int bch2_mark_snapshot(struct btree_trans *trans,
334                        enum btree_id btree, unsigned level,
335                        struct bkey_s_c old, struct bkey_s new,
336                        unsigned flags)
337 {
338         return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
339 }
340
341 int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
342                          struct bch_snapshot *s)
343 {
344         return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
345                                        BTREE_ITER_WITH_UPDATES, snapshot, s);
346 }
347
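/*
 * Returns 1 if @id refers to a snapshot node that exists and isn't marked
 * deleted, 0 if @id is zero, or a negative error code.
 */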
348 static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
349 {
350         struct bch_snapshot v;
351         int ret;
352
353         if (!id)
354                 return 0;
355
356         ret = bch2_snapshot_lookup(trans, id, &v);
357         if (bch2_err_matches(ret, ENOENT))
358                 bch_err(trans->c, "snapshot node %u not found", id);
359         if (ret)
360                 return ret;
361
362         return !BCH_SNAPSHOT_DELETED(&v);
363 }
364
365 /*
366  * If @k is a snapshot with just one live child, it's part of a linear chain,
367  * which we consider to be an equivalence class: after snapshot deletion
368  * cleanup, there should be only a single key at a given position within this
369  * equivalence class.
370  *
371  * This sets the equivalence class of @k to be the child's equivalence class, if
372  * it's part of such a linear chain: this correctly sets equivalence classes on
373  * startup if we run leaf to root (i.e. in natural key order).
374  */
375 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
376 {
377         struct bch_fs *c = trans->c;
378         unsigned i, nr_live = 0, live_idx = 0;
379         struct bkey_s_c_snapshot snap;
380         u32 id = k.k->p.offset, child[2];
381
382         if (k.k->type != KEY_TYPE_snapshot)
383                 return 0;
384
385         snap = bkey_s_c_to_snapshot(k);
386
387         child[0] = le32_to_cpu(snap.v->children[0]);
388         child[1] = le32_to_cpu(snap.v->children[1]);
389
390         for (i = 0; i < 2; i++) {
391                 int ret = bch2_snapshot_live(trans, child[i]);
392
393                 if (ret < 0)
394                         return ret;
395
396                 if (ret)
397                         live_idx = i;
398                 nr_live += ret;
399         }
400
401         mutex_lock(&c->snapshot_table_lock);
402
403         snapshot_t_mut(c, id)->equiv = nr_live == 1
404                 ? snapshot_t_mut(c, child[live_idx])->equiv
405                 : id;
406
407         mutex_unlock(&c->snapshot_table_lock);
408
409         return 0;
410 }
411
412 /* fsck: */
413
414 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
415 {
416         return snapshot_t(c, id)->children[child];
417 }
418
419 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
420 {
421         return bch2_snapshot_child(c, id, 0);
422 }
423
424 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
425 {
426         return bch2_snapshot_child(c, id, 1);
427 }
428
429 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
430 {
431         u32 n, parent;
432
433         n = bch2_snapshot_left_child(c, id);
434         if (n)
435                 return n;
436
437         while ((parent = bch2_snapshot_parent(c, id))) {
438                 n = bch2_snapshot_right_child(c, parent);
439                 if (n && n != id)
440                         return n;
441                 id = parent;
442         }
443
444         return 0;
445 }
446
447 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
448 {
449         u32 id = snapshot_root;
450         u32 subvol = 0, s;
451
452         while (id) {
453                 s = snapshot_t(c, id)->subvol;
454
455                 if (s && (!subvol || s < subvol))
456                         subvol = s;
457
458                 id = bch2_snapshot_tree_next(c, id);
459         }
460
461         return subvol;
462 }
463
464 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
465                                             u32 snapshot_root, u32 *subvol_id)
466 {
467         struct bch_fs *c = trans->c;
468         struct btree_iter iter;
469         struct bkey_s_c k;
470         bool found = false;
471         int ret;
472
473         for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
474                                      0, k, ret) {
475                 if (k.k->type != KEY_TYPE_subvolume)
476                         continue;
477
478                 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
479                 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
480                         continue;
481                 if (!BCH_SUBVOLUME_SNAP(s.v)) {
482                         *subvol_id = s.k->p.offset;
483                         found = true;
484                         break;
485                 }
486         }
487
488         bch2_trans_iter_exit(trans, &iter);
489
490         if (!ret && !found) {
491                 struct bkey_i_subvolume *u;
492
493                 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
494
495                 u = bch2_bkey_get_mut_typed(trans, &iter,
496                                             BTREE_ID_subvolumes, POS(0, *subvol_id),
497                                             0, subvolume);
498                 ret = PTR_ERR_OR_ZERO(u);
499                 if (ret)
500                         return ret;
501
502                 SET_BCH_SUBVOLUME_SNAP(&u->v, false);
503         }
504
505         return ret;
506 }
507
508 static int check_snapshot_tree(struct btree_trans *trans,
509                                struct btree_iter *iter,
510                                struct bkey_s_c k)
511 {
512         struct bch_fs *c = trans->c;
513         struct bkey_s_c_snapshot_tree st;
514         struct bch_snapshot s;
515         struct bch_subvolume subvol;
516         struct printbuf buf = PRINTBUF;
517         u32 root_id;
518         int ret;
519
520         if (k.k->type != KEY_TYPE_snapshot_tree)
521                 return 0;
522
523         st = bkey_s_c_to_snapshot_tree(k);
524         root_id = le32_to_cpu(st.v->root_snapshot);
525
526         ret = bch2_snapshot_lookup(trans, root_id, &s);
527         if (ret && !bch2_err_matches(ret, ENOENT))
528                 goto err;
529
530         if (fsck_err_on(ret ||
531                         root_id != bch2_snapshot_root(c, root_id) ||
532                         st.k->p.offset != le32_to_cpu(s.tree),
533                         c, snapshot_tree_to_missing_snapshot,
534                         "snapshot tree points to missing/incorrect snapshot:\n  %s",
535                         (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
536                 ret = bch2_btree_delete_at(trans, iter, 0);
537                 goto err;
538         }
539
540         ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
541                                  false, 0, &subvol);
542         if (ret && !bch2_err_matches(ret, ENOENT))
543                 goto err;
544
545         if (fsck_err_on(ret,
546                         c, snapshot_tree_to_missing_subvol,
547                         "snapshot tree points to missing subvolume:\n  %s",
548                         (printbuf_reset(&buf),
549                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
550             fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
551                                                 le32_to_cpu(subvol.snapshot),
552                                                 root_id),
553                         c, snapshot_tree_to_wrong_subvol,
554                         "snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
555                         (printbuf_reset(&buf),
556                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
557             fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
558                         c, snapshot_tree_to_snapshot_subvol,
559                         "snapshot tree points to snapshot subvolume:\n  %s",
560                         (printbuf_reset(&buf),
561                          bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
562                 struct bkey_i_snapshot_tree *u;
563                 u32 subvol_id;
564
565                 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
566                 if (ret)
567                         goto err;
568
569                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
570                 ret = PTR_ERR_OR_ZERO(u);
571                 if (ret)
572                         goto err;
573
574                 u->v.master_subvol = cpu_to_le32(subvol_id);
575                 st = snapshot_tree_i_to_s_c(u);
576         }
577 err:
578 fsck_err:
579         printbuf_exit(&buf);
580         return ret;
581 }
582
583 /*
584  * For each snapshot_tree, make sure it points to the root of a snapshot tree
585  * and that snapshot entry points back to it, or delete it.
586  *
587  * And, make sure it points to a subvolume within that snapshot tree, or correct
588  * it to point to the oldest subvolume within that snapshot tree.
589  */
590 int bch2_check_snapshot_trees(struct bch_fs *c)
591 {
592         int ret = bch2_trans_run(c,
593                 for_each_btree_key_commit(trans, iter,
594                         BTREE_ID_snapshot_trees, POS_MIN,
595                         BTREE_ITER_PREFETCH, k,
596                         NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
597                 check_snapshot_tree(trans, &iter, k)));
598         bch_err_fn(c, ret);
599         return ret;
600 }
601
602 /*
603  * Look up snapshot tree for @tree_id and find root,
604  * make sure @snap_id is a descendant:
605  */
606 static int snapshot_tree_ptr_good(struct btree_trans *trans,
607                                   u32 snap_id, u32 tree_id)
608 {
609         struct bch_snapshot_tree s_t;
610         int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
611
612         if (bch2_err_matches(ret, ENOENT))
613                 return 0;
614         if (ret)
615                 return ret;
616
617         return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
618 }
619
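/*
 * Returns a pseudorandomly chosen ancestor of @id (possibly @id itself), used
 * for populating skiplist entries; returns @id unchanged if it's a tree root.
 */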
620 u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
621 {
622         const struct snapshot_t *s;
623
624         if (!id)
625                 return 0;
626
627         rcu_read_lock();
628         s = snapshot_t(c, id);
629         if (s->parent)
630                 id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
631         rcu_read_unlock();
632
633         return id;
634 }
635
636 static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
637 {
638         unsigned i;
639
640         for (i = 0; i < 3; i++)
641                 if (!s.parent) {
642                         if (s.skip[i])
643                                 return false;
644                 } else {
645                         if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
646                                 return false;
647                 }
648
649         return true;
650 }
651
652 /*
653  * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
654  * its snapshot_tree pointer is correct (allocate new one if necessary), then
655  * update this node's pointer to root node's pointer:
656  */
657 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
658                                     struct btree_iter *iter,
659                                     struct bkey_s_c k,
660                                     struct bch_snapshot *s)
661 {
662         struct bch_fs *c = trans->c;
663         struct btree_iter root_iter;
664         struct bch_snapshot_tree s_t;
665         struct bkey_s_c_snapshot root;
666         struct bkey_i_snapshot *u;
667         u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
668         int ret;
669
670         root = bch2_bkey_get_iter_typed(trans, &root_iter,
671                                BTREE_ID_snapshots, POS(0, root_id),
672                                BTREE_ITER_WITH_UPDATES, snapshot);
673         ret = bkey_err(root);
674         if (ret)
675                 goto err;
676
677         tree_id = le32_to_cpu(root.v->tree);
678
679         ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
680         if (ret && !bch2_err_matches(ret, ENOENT))
681                 return ret;
682
683         if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
684                 u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
685                 ret =   PTR_ERR_OR_ZERO(u) ?:
686                         bch2_snapshot_tree_create(trans, root_id,
687                                 bch2_snapshot_tree_oldest_subvol(c, root_id),
688                                 &tree_id);
689                 if (ret)
690                         goto err;
691
692                 u->v.tree = cpu_to_le32(tree_id);
693                 if (k.k->p.offset == root_id)
694                         *s = u->v;
695         }
696
697         if (k.k->p.offset != root_id) {
698                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
699                 ret = PTR_ERR_OR_ZERO(u);
700                 if (ret)
701                         goto err;
702
703                 u->v.tree = cpu_to_le32(tree_id);
704                 *s = u->v;
705         }
706 err:
707         bch2_trans_iter_exit(trans, &root_iter);
708         return ret;
709 }
710
711 static int check_snapshot(struct btree_trans *trans,
712                           struct btree_iter *iter,
713                           struct bkey_s_c k)
714 {
715         struct bch_fs *c = trans->c;
716         struct bch_snapshot s;
717         struct bch_subvolume subvol;
718         struct bch_snapshot v;
719         struct bkey_i_snapshot *u;
720         u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
721         u32 real_depth;
722         struct printbuf buf = PRINTBUF;
723         bool should_have_subvol;
724         u32 i, id;
725         int ret = 0;
726
727         if (k.k->type != KEY_TYPE_snapshot)
728                 return 0;
729
730         memset(&s, 0, sizeof(s));
731         memcpy(&s, k.v, bkey_val_bytes(k.k));
732
733         id = le32_to_cpu(s.parent);
734         if (id) {
735                 ret = bch2_snapshot_lookup(trans, id, &v);
736                 if (bch2_err_matches(ret, ENOENT))
737                         bch_err(c, "snapshot with nonexistent parent:\n  %s",
738                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
739                 if (ret)
740                         goto err;
741
742                 if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
743                     le32_to_cpu(v.children[1]) != k.k->p.offset) {
744                         bch_err(c, "snapshot parent %u missing pointer to child %llu",
745                                 id, k.k->p.offset);
746                         ret = -EINVAL;
747                         goto err;
748                 }
749         }
750
751         for (i = 0; i < 2 && s.children[i]; i++) {
752                 id = le32_to_cpu(s.children[i]);
753
754                 ret = bch2_snapshot_lookup(trans, id, &v);
755                 if (bch2_err_matches(ret, ENOENT))
756                         bch_err(c, "snapshot node %llu has nonexistent child %u",
757                                 k.k->p.offset, id);
758                 if (ret)
759                         goto err;
760
761                 if (le32_to_cpu(v.parent) != k.k->p.offset) {
762                         bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
763                                 id, le32_to_cpu(v.parent), k.k->p.offset);
764                         ret = -EINVAL;
765                         goto err;
766                 }
767         }
768
769         should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
770                 !BCH_SNAPSHOT_DELETED(&s);
771
772         if (should_have_subvol) {
773                 id = le32_to_cpu(s.subvol);
774                 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
775                 if (bch2_err_matches(ret, ENOENT))
776                         bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
777                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
778                 if (ret)
779                         goto err;
780
781                 if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
782                         bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
783                                 k.k->p.offset);
784                         ret = -EINVAL;
785                         goto err;
786                 }
787         } else {
788                 if (fsck_err_on(s.subvol,
789                                 c, snapshot_should_not_have_subvol,
790                                 "snapshot should not point to subvol:\n  %s",
791                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
792                         u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
793                         ret = PTR_ERR_OR_ZERO(u);
794                         if (ret)
795                                 goto err;
796
797                         u->v.subvol = 0;
798                         s = u->v;
799                 }
800         }
801
802         ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
803         if (ret < 0)
804                 goto err;
805
806         if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
807                         "snapshot points to missing/incorrect tree:\n  %s",
808                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
809                 ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
810                 if (ret)
811                         goto err;
812         }
813         ret = 0;
814
815         real_depth = bch2_snapshot_depth(c, parent_id);
816
817         if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
818                         c, snapshot_bad_depth,
819                         "snapshot with incorrect depth field, should be %u:\n  %s",
820                         real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
821                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
822                 ret = PTR_ERR_OR_ZERO(u);
823                 if (ret)
824                         goto err;
825
826                 u->v.depth = cpu_to_le32(real_depth);
827                 s = u->v;
828         }
829
830         ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
831         if (ret < 0)
832                 goto err;
833
834         if (fsck_err_on(!ret, c, snapshot_bad_skiplist,
835                         "snapshot with bad skiplist field:\n  %s",
836                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
837                 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
838                 ret = PTR_ERR_OR_ZERO(u);
839                 if (ret)
840                         goto err;
841
842                 for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
843                         u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
844
845                 bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
846                 s = u->v;
847         }
848         ret = 0;
849 err:
850 fsck_err:
851         printbuf_exit(&buf);
852         return ret;
853 }
854
855 int bch2_check_snapshots(struct bch_fs *c)
856 {
857         /*
858          * We iterate backwards as checking/fixing the depth field requires that
859          * the parent's depth already be correct:
860          */
861         int ret = bch2_trans_run(c,
862                 for_each_btree_key_reverse_commit(trans, iter,
863                                 BTREE_ID_snapshots, POS_MAX,
864                                 BTREE_ITER_PREFETCH, k,
865                                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
866                         check_snapshot(trans, &iter, k)));
867         bch_err_fn(c, ret);
868         return ret;
869 }
870
871 /*
872  * Mark a snapshot as deleted, for future cleanup:
873  */
874 int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
875 {
876         struct btree_iter iter;
877         struct bkey_i_snapshot *s;
878         int ret = 0;
879
880         s = bch2_bkey_get_mut_typed(trans, &iter,
881                                     BTREE_ID_snapshots, POS(0, id),
882                                     0, snapshot);
883         ret = PTR_ERR_OR_ZERO(s);
884         if (unlikely(ret)) {
885                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
886                                         trans->c, "missing snapshot %u", id);
887                 return ret;
888         }
889
890         /* already deleted? */
891         if (BCH_SNAPSHOT_DELETED(&s->v))
892                 goto err;
893
894         SET_BCH_SNAPSHOT_DELETED(&s->v, true);
895         SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
896         s->v.subvol = 0;
897 err:
898         bch2_trans_iter_exit(trans, &iter);
899         return ret;
900 }
901
902 static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
903 {
904         if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
905                 swap(s->children[0], s->children[1]);
906 }
907
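/*
 * Delete a snapshot node with at most one child: splice it out of the tree by
 * pointing its parent at its child (and vice versa); if it was the root of its
 * tree, update the snapshot_tree entry to the new root, or delete it if this
 * was the last node in the tree.
 */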
908 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
909 {
910         struct bch_fs *c = trans->c;
911         struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
912         struct btree_iter c_iter = (struct btree_iter) { NULL };
913         struct btree_iter tree_iter = (struct btree_iter) { NULL };
914         struct bkey_s_c_snapshot s;
915         u32 parent_id, child_id;
916         unsigned i;
917         int ret = 0;
918
919         s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
920                                      BTREE_ITER_INTENT, snapshot);
921         ret = bkey_err(s);
922         bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
923                                 "missing snapshot %u", id);
924
925         if (ret)
926                 goto err;
927
928         BUG_ON(s.v->children[1]);
929
930         parent_id = le32_to_cpu(s.v->parent);
931         child_id = le32_to_cpu(s.v->children[0]);
932
933         if (parent_id) {
934                 struct bkey_i_snapshot *parent;
935
936                 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
937                                      BTREE_ID_snapshots, POS(0, parent_id),
938                                      0, snapshot);
939                 ret = PTR_ERR_OR_ZERO(parent);
940                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
941                                         "missing snapshot %u", parent_id);
942                 if (unlikely(ret))
943                         goto err;
944
945                 /* find entry in parent->children for node being deleted */
946                 for (i = 0; i < 2; i++)
947                         if (le32_to_cpu(parent->v.children[i]) == id)
948                                 break;
949
950                 if (bch2_fs_inconsistent_on(i == 2, c,
951                                         "snapshot %u missing child pointer to %u",
952                                         parent_id, id))
953                         goto err;
954
955                 parent->v.children[i] = cpu_to_le32(child_id);
956
957                 normalize_snapshot_child_pointers(&parent->v);
958         }
959
960         if (child_id) {
961                 struct bkey_i_snapshot *child;
962
963                 child = bch2_bkey_get_mut_typed(trans, &c_iter,
964                                      BTREE_ID_snapshots, POS(0, child_id),
965                                      0, snapshot);
966                 ret = PTR_ERR_OR_ZERO(child);
967                 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
968                                         "missing snapshot %u", child_id);
969                 if (unlikely(ret))
970                         goto err;
971
972                 child->v.parent = cpu_to_le32(parent_id);
973
974                 if (!child->v.parent) {
975                         child->v.skip[0] = 0;
976                         child->v.skip[1] = 0;
977                         child->v.skip[2] = 0;
978                 }
979         }
980
981         if (!parent_id) {
982                 /*
983                  * We're deleting the root of a snapshot tree: update the
984                  * snapshot_tree entry to point to the new root, or delete it if
985                  * this is the last snapshot ID in this tree:
986                  */
987                 struct bkey_i_snapshot_tree *s_t;
988
989                 BUG_ON(s.v->children[1]);
990
991                 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
992                                 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
993                                 0, snapshot_tree);
994                 ret = PTR_ERR_OR_ZERO(s_t);
995                 if (ret)
996                         goto err;
997
998                 if (s.v->children[0]) {
999                         s_t->v.root_snapshot = s.v->children[0];
1000                 } else {
1001                         s_t->k.type = KEY_TYPE_deleted;
1002                         set_bkey_val_u64s(&s_t->k, 0);
1003                 }
1004         }
1005
1006         ret = bch2_btree_delete_at(trans, &iter, 0);
1007 err:
1008         bch2_trans_iter_exit(trans, &tree_iter);
1009         bch2_trans_iter_exit(trans, &p_iter);
1010         bch2_trans_iter_exit(trans, &c_iter);
1011         bch2_trans_iter_exit(trans, &iter);
1012         return ret;
1013 }
1014
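/*
 * Allocate @nr_snapids new snapshot nodes in empty slots just below the lowest
 * existing snapshot ID, and initialize them (parent, tree, depth, skiplist,
 * subvolume).
 */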
1015 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
1016                           u32 *new_snapids,
1017                           u32 *snapshot_subvols,
1018                           unsigned nr_snapids)
1019 {
1020         struct bch_fs *c = trans->c;
1021         struct btree_iter iter;
1022         struct bkey_i_snapshot *n;
1023         struct bkey_s_c k;
1024         unsigned i, j;
1025         u32 depth = bch2_snapshot_depth(c, parent);
1026         int ret;
1027
1028         bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
1029                              POS_MIN, BTREE_ITER_INTENT);
1030         k = bch2_btree_iter_peek(&iter);
1031         ret = bkey_err(k);
1032         if (ret)
1033                 goto err;
1034
1035         for (i = 0; i < nr_snapids; i++) {
1036                 k = bch2_btree_iter_prev_slot(&iter);
1037                 ret = bkey_err(k);
1038                 if (ret)
1039                         goto err;
1040
1041                 if (!k.k || !k.k->p.offset) {
1042                         ret = -BCH_ERR_ENOSPC_snapshot_create;
1043                         goto err;
1044                 }
1045
1046                 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
1047                 ret = PTR_ERR_OR_ZERO(n);
1048                 if (ret)
1049                         goto err;
1050
1051                 n->v.flags      = 0;
1052                 n->v.parent     = cpu_to_le32(parent);
1053                 n->v.subvol     = cpu_to_le32(snapshot_subvols[i]);
1054                 n->v.tree       = cpu_to_le32(tree);
1055                 n->v.depth      = cpu_to_le32(depth);
1056
1057                 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
1058                         n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
1059
1060                 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
1061                 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
1062
1063                 ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
1064                                          bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
1065                 if (ret)
1066                         goto err;
1067
1068                 new_snapids[i]  = iter.pos.offset;
1069
1070                 mutex_lock(&c->snapshot_table_lock);
1071                 snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
1072                 mutex_unlock(&c->snapshot_table_lock);
1073         }
1074 err:
1075         bch2_trans_iter_exit(trans, &iter);
1076         return ret;
1077 }
1078
1079 /*
1080  * Create new snapshot IDs as children of an existing snapshot ID:
1081  */
1082 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
1083                               u32 *new_snapids,
1084                               u32 *snapshot_subvols,
1085                               unsigned nr_snapids)
1086 {
1087         struct btree_iter iter;
1088         struct bkey_i_snapshot *n_parent;
1089         int ret = 0;
1090
1091         n_parent = bch2_bkey_get_mut_typed(trans, &iter,
1092                         BTREE_ID_snapshots, POS(0, parent),
1093                         0, snapshot);
1094         ret = PTR_ERR_OR_ZERO(n_parent);
1095         if (unlikely(ret)) {
1096                 if (bch2_err_matches(ret, ENOENT))
1097                         bch_err(trans->c, "snapshot %u not found", parent);
1098                 return ret;
1099         }
1100
1101         if (n_parent->v.children[0] || n_parent->v.children[1]) {
1102                 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
1103                 ret = -EINVAL;
1104                 goto err;
1105         }
1106
1107         ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
1108                              new_snapids, snapshot_subvols, nr_snapids);
1109         if (ret)
1110                 goto err;
1111
1112         n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
1113         n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
1114         n_parent->v.subvol = 0;
1115         SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
1116 err:
1117         bch2_trans_iter_exit(trans, &iter);
1118         return ret;
1119 }
1120
1121 /*
1122  * Create a snapshot node that is the root of a new tree:
1123  */
1124 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
1125                               u32 *new_snapids,
1126                               u32 *snapshot_subvols,
1127                               unsigned nr_snapids)
1128 {
1129         struct bkey_i_snapshot_tree *n_tree;
1130         int ret;
1131
1132         n_tree = __bch2_snapshot_tree_create(trans);
1133         ret =   PTR_ERR_OR_ZERO(n_tree) ?:
1134                 create_snapids(trans, 0, n_tree->k.p.offset,
1135                              new_snapids, snapshot_subvols, nr_snapids);
1136         if (ret)
1137                 return ret;
1138
1139         n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
1140         n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
1141         return 0;
1142 }
1143
1144 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
1145                               u32 *new_snapids,
1146                               u32 *snapshot_subvols,
1147                               unsigned nr_snapids)
1148 {
1149         BUG_ON((parent == 0) != (nr_snapids == 1));
1150         BUG_ON((parent != 0) != (nr_snapids == 2));
1151
1152         return parent
1153                 ? bch2_snapshot_node_create_children(trans, parent,
1154                                 new_snapids, snapshot_subvols, nr_snapids)
1155                 : bch2_snapshot_node_create_tree(trans,
1156                                 new_snapids, snapshot_subvols, nr_snapids);
1157
1158 }
1159
1160 /*
1161  * If we have an unlinked inode in an internal snapshot node, and the inode
1162  * really has been deleted in all child snapshots, how does this get cleaned up?
1163  *
1164  * first there is the problem of how keys that have been overwritten in all
1165  * child snapshots get deleted (unimplemented?), but inodes may perhaps be
1166  * special?
1167  *
1168  * also: unlinked inode in internal snapshot appears to not be getting deleted
1169  * correctly if inode doesn't exist in leaf snapshots
1170  *
1171  * solution:
1172  *
1173  * for a key in an interior snapshot node that needs work to be done that
1174  * requires it to be mutated: iterate over all descendent leaf nodes and copy
1175  * that key to snapshot leaf nodes, where we can mutate it
1176  */
1177
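/*
 * Delete keys that belong to snapshots being deleted, and redundant keys
 * within an equivalence class: at a given position, only the first key seen in
 * each class is kept (@equiv_seen is reset whenever the position changes).
 */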
1178 static int snapshot_delete_key(struct btree_trans *trans,
1179                                struct btree_iter *iter,
1180                                struct bkey_s_c k,
1181                                snapshot_id_list *deleted,
1182                                snapshot_id_list *equiv_seen,
1183                                struct bpos *last_pos)
1184 {
1185         struct bch_fs *c = trans->c;
1186         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1187
1188         if (!bkey_eq(k.k->p, *last_pos))
1189                 equiv_seen->nr = 0;
1190         *last_pos = k.k->p;
1191
1192         if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
1193             snapshot_list_has_id(equiv_seen, equiv)) {
1194                 return bch2_btree_delete_at(trans, iter,
1195                                             BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1196         } else {
1197                 return snapshot_list_add(c, equiv_seen, equiv);
1198         }
1199 }
1200
1201 static int move_key_to_correct_snapshot(struct btree_trans *trans,
1202                                struct btree_iter *iter,
1203                                struct bkey_s_c k)
1204 {
1205         struct bch_fs *c = trans->c;
1206         u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
1207
1208         /*
1209          * When we have a linear chain of snapshot nodes, we consider
1210          * those to form an equivalence class: we're going to collapse
1211          * them all down to a single node, and keep the leaf-most node -
1212          * which has the same id as the equivalence class id.
1213          *
1214          * If there are multiple keys in different snapshots at the same
1215          * position, we're only going to keep the one in the newest
1216          * snapshot - the rest have been overwritten and are redundant,
1217          * and for the key we're going to keep we need to move it to the
1218          * equivalence class ID if it's not there already.
1219          */
1220         if (equiv != k.k->p.snapshot) {
1221                 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1222                 struct btree_iter new_iter;
1223                 int ret;
1224
1225                 ret = PTR_ERR_OR_ZERO(new);
1226                 if (ret)
1227                         return ret;
1228
1229                 new->k.p.snapshot = equiv;
1230
1231                 bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
1232                                      BTREE_ITER_ALL_SNAPSHOTS|
1233                                      BTREE_ITER_CACHED|
1234                                      BTREE_ITER_INTENT);
1235
1236                 ret =   bch2_btree_iter_traverse(&new_iter) ?:
1237                         bch2_trans_update(trans, &new_iter, new,
1238                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
1239                         bch2_btree_delete_at(trans, iter,
1240                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1241                 bch2_trans_iter_exit(trans, &new_iter);
1242                 if (ret)
1243                         return ret;
1244         }
1245
1246         return 0;
1247 }
1248
1249 static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
1250 {
1251         struct bkey_s_c_snapshot snap;
1252         u32 children[2];
1253         int ret;
1254
1255         if (k.k->type != KEY_TYPE_snapshot)
1256                 return 0;
1257
1258         snap = bkey_s_c_to_snapshot(k);
1259         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1260             BCH_SNAPSHOT_SUBVOL(snap.v))
1261                 return 0;
1262
1263         children[0] = le32_to_cpu(snap.v->children[0]);
1264         children[1] = le32_to_cpu(snap.v->children[1]);
1265
1266         ret   = bch2_snapshot_live(trans, children[0]) ?:
1267                 bch2_snapshot_live(trans, children[1]);
1268         if (ret < 0)
1269                 return ret;
1270         return !ret;
1271 }
1272
1273 /*
1274  * For a given snapshot, if it doesn't have a subvolume that points to it, and
1275  * it doesn't have child snapshot nodes - it's now redundant and we can mark it
1276  * as deleted.
1277  */
1278 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
1279 {
1280         int ret = bch2_snapshot_needs_delete(trans, k);
1281
1282         return ret <= 0
1283                 ? ret
1284                 : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
1285 }
1286
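/*
 * Like bch2_snapshot_nth_parent(), but snapshot IDs in @skip don't count: used
 * for regenerating skiplist entries while interior nodes are being deleted.
 */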
1287 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
1288                                                 snapshot_id_list *skip)
1289 {
1290         rcu_read_lock();
1291         while (snapshot_list_has_id(skip, id))
1292                 id = __bch2_snapshot_parent(c, id);
1293
1294         while (n--) {
1295                 do {
1296                         id = __bch2_snapshot_parent(c, id);
1297                 } while (snapshot_list_has_id(skip, id));
1298         }
1299         rcu_read_unlock();
1300
1301         return id;
1302 }
1303
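/*
 * The interior nodes in @deleted are about to be removed: for each surviving
 * node, reduce its depth by the number of its ancestors being deleted and
 * replace any skiplist entries that point to a deleted node with a live
 * ancestor.
 */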
1304 static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
1305                                               struct btree_iter *iter, struct bkey_s_c k,
1306                                               snapshot_id_list *deleted)
1307 {
1308         struct bch_fs *c = trans->c;
1309         u32 nr_deleted_ancestors = 0;
1310         struct bkey_i_snapshot *s;
1311         int ret;
1312
1313         if (k.k->type != KEY_TYPE_snapshot)
1314                 return 0;
1315
1316         if (snapshot_list_has_id(deleted, k.k->p.offset))
1317                 return 0;
1318
1319         s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
1320         ret = PTR_ERR_OR_ZERO(s);
1321         if (ret)
1322                 return ret;
1323
1324         darray_for_each(*deleted, i)
1325                 nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
1326
1327         if (!nr_deleted_ancestors)
1328                 return 0;
1329
1330         le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
1331
1332         if (!s->v.depth) {
1333                 s->v.skip[0] = 0;
1334                 s->v.skip[1] = 0;
1335                 s->v.skip[2] = 0;
1336         } else {
1337                 u32 depth = le32_to_cpu(s->v.depth);
1338                 u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
1339
1340                 for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
1341                         u32 id = le32_to_cpu(s->v.skip[j]);
1342
1343                         if (snapshot_list_has_id(deleted, id)) {
1344                                 id = bch2_snapshot_nth_parent_skip(c,
1345                                                         parent,
1346                                                         depth > 1
1347                                                         ? get_random_u32_below(depth - 1)
1348                                                         : 0,
1349                                                         deleted);
1350                                 s->v.skip[j] = cpu_to_le32(id);
1351                         }
1352                 }
1353
1354                 bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
1355         }
1356
1357         return bch2_trans_update(trans, iter, &s->k_i, 0);
1358 }
1359
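/*
 * Dead snapshot deletion runs in phases: mark redundant nodes as deleted,
 * recompute equivalence classes, delete/move keys in every snapshots-enabled
 * btree, fix up the depth and skiplist fields of surviving children, and
 * finally delete the snapshot nodes themselves.
 */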
1360 int bch2_delete_dead_snapshots(struct bch_fs *c)
1361 {
1362         struct btree_trans *trans;
1363         snapshot_id_list deleted = { 0 };
1364         snapshot_id_list deleted_interior = { 0 };
1365         u32 id;
1366         int ret = 0;
1367
1368         if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
1369                 return 0;
1370
1371         if (!test_bit(BCH_FS_started, &c->flags)) {
1372                 ret = bch2_fs_read_write_early(c);
1373                 bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
1374                 if (ret)
1375                         return ret;
1376         }
1377
1378         trans = bch2_trans_get(c);
1379
1380         /*
1381          * For every snapshot node: if it has no live children and isn't
1382          * pointed to by a subvolume, delete it:
1383          */
1384         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
1385                         POS_MIN, 0, k,
1386                         NULL, NULL, 0,
1387                 bch2_delete_redundant_snapshot(trans, k));
1388         bch_err_msg(c, ret, "deleting redundant snapshots");
1389         if (ret)
1390                 goto err;
1391
1392         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1393                                  POS_MIN, 0, k,
1394                 bch2_snapshot_set_equiv(trans, k));
1395         bch_err_msg(c, ret, "in bch2_snapshot_set_equiv");
1396         if (ret)
1397                 goto err;
1398
1399         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1400                                  POS_MIN, 0, k, ({
1401                 if (k.k->type != KEY_TYPE_snapshot)
1402                         continue;
1403
1404                 BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
1405                         ? snapshot_list_add(c, &deleted, k.k->p.offset)
1406                         : 0;
1407         }));
1408         bch_err_msg(c, ret, "walking snapshots");
1409         if (ret)
1410                 goto err;
1411
1412         for (id = 0; id < BTREE_ID_NR; id++) {
1413                 struct bpos last_pos = POS_MIN;
1414                 snapshot_id_list equiv_seen = { 0 };
1415                 struct disk_reservation res = { 0 };
1416
1417                 if (!btree_type_has_snapshots(id))
1418                         continue;
1419
1420                 /*
1421                  * The deleted inodes btree is maintained by a trigger on the inodes
1422                  * btree - no work for us to do here, and it's not safe to scan
1423                  * it because we'll see out of date keys due to the btree write
1424                  * buffer:
1425                  */
1426                 if (id == BTREE_ID_deleted_inodes)
1427                         continue;
1428
1429                 ret = for_each_btree_key_commit(trans, iter,
1430                                 id, POS_MIN,
1431                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1432                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1433                         snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
1434                       for_each_btree_key_commit(trans, iter,
1435                                 id, POS_MIN,
1436                                 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1437                                 &res, NULL, BCH_TRANS_COMMIT_no_enospc,
1438                         move_key_to_correct_snapshot(trans, &iter, k));
1439
1440                 bch2_disk_reservation_put(c, &res);
1441                 darray_exit(&equiv_seen);
1442
1443                 bch_err_msg(c, ret, "deleting keys from dying snapshots");
1444                 if (ret)
1445                         goto err;
1446         }
1447
1448         bch2_trans_unlock(trans);
1449         down_write(&c->snapshot_create_lock);
1450
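        /*
         * Collect interior snapshot nodes - those whose equivalence-class
         * representative is some other node - so they can be deleted as well:
         */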
1451         ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1452                                  POS_MIN, 0, k, ({
1453                 u32 snapshot = k.k->p.offset;
1454                 u32 equiv = bch2_snapshot_equiv(c, snapshot);
1455
1456                 equiv != snapshot
1457                         ? snapshot_list_add(c, &deleted_interior, snapshot)
1458                         : 0;
1459         }));
1460
1461         bch_err_msg(c, ret, "walking snapshots");
1462         if (ret)
1463                 goto err_create_lock;
1464
1465         /*
1466          * Fixing children of deleted snapshots can't be done completely
1467          * atomically; if we crash between here and when we delete the interior
1468          * nodes, some depth fields will be off:
1469          */
1470         ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
1471                                   BTREE_ITER_INTENT, k,
1472                                   NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1473                 bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
1474         if (ret)
1475                 goto err_create_lock;
1476
1477         darray_for_each(deleted, i) {
1478                 ret = commit_do(trans, NULL, NULL, 0,
1479                         bch2_snapshot_node_delete(trans, *i));
1480                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1481                 if (ret)
1482                         goto err_create_lock;
1483         }
1484
1485         darray_for_each(deleted_interior, i) {
1486                 ret = commit_do(trans, NULL, NULL, 0,
1487                         bch2_snapshot_node_delete(trans, *i));
1488                 bch_err_msg(c, ret, "deleting snapshot %u", *i);
1489                 if (ret)
1490                         goto err_create_lock;
1491         }
1492 err_create_lock:
1493         up_write(&c->snapshot_create_lock);
1494 err:
1495         darray_exit(&deleted_interior);
1496         darray_exit(&deleted);
1497         bch2_trans_put(trans);
1498         bch_err_fn(c, ret);
1499         return ret;
1500 }
1501
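/*
 * Workqueue entry point: run dead snapshot deletion, then drop the write ref
 * taken by bch2_delete_dead_snapshots_async():
 */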
1502 void bch2_delete_dead_snapshots_work(struct work_struct *work)
1503 {
1504         struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1505
1506         bch2_delete_dead_snapshots(c);
1507         bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1508 }
1509
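/*
 * Kick off dead snapshot deletion from a workqueue; if the work item was
 * already queued, the extra write ref is dropped immediately:
 */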
1510 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1511 {
1512         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1513             !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1514                 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
1515 }
1516
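/*
 * Returns 1 if a key exists at the same inode:offset as @pos in an ancestor of
 * @pos.snapshot, 0 if not, or a negative error code:
 */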
1517 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
1518                                        enum btree_id id,
1519                                        struct bpos pos)
1520 {
1521         struct bch_fs *c = trans->c;
1522         struct btree_iter iter;
1523         struct bkey_s_c k;
1524         int ret;
1525
1526         bch2_trans_iter_init(trans, &iter, id, pos,
1527                              BTREE_ITER_NOT_EXTENTS|
1528                              BTREE_ITER_ALL_SNAPSHOTS);
1529         while (1) {
1530                 k = bch2_btree_iter_prev(&iter);
1531                 ret = bkey_err(k);
1532                 if (ret)
1533                         break;
1534
1535                 if (!k.k)
1536                         break;
1537
1538                 if (!bkey_eq(pos, k.k->p))
1539                         break;
1540
1541                 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
1542                         ret = 1;
1543                         break;
1544                 }
1545         }
1546         bch2_trans_iter_exit(trans, &iter);
1547
1548         return ret;
1549 }
1550
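/* Return the id of a node's smallest child, or 0 if it's a leaf: */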
1551 static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
1552 {
1553         const struct snapshot_t *s = snapshot_t(c, id);
1554
1555         return s->children[1] ?: s->children[0];
1556 }
1557
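/* Walk down smallest children to the smallest-numbered leaf descendent of @id: */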
1558 static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
1559 {
1560         u32 child;
1561
1562         while ((child = bch2_snapshot_smallest_child(c, id)))
1563                 id = child;
1564         return id;
1565 }
1566
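/*
 * Copy @interior_k into leaf snapshot @leaf_id, unless that leaf already has
 * its own version of the key; *new_min_pos records the first (lowest) position
 * written to:
 */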
1567 static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
1568                                                enum btree_id btree,
1569                                                struct bkey_s_c interior_k,
1570                                                u32 leaf_id, struct bpos *new_min_pos)
1571 {
1572         struct btree_iter iter;
1573         struct bpos pos = interior_k.k->p;
1574         struct bkey_s_c k;
1575         struct bkey_i *new;
1576         int ret;
1577
1578         pos.snapshot = leaf_id;
1579
1580         bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
1581         k = bch2_btree_iter_peek_slot(&iter);
1582         ret = bkey_err(k);
1583         if (ret)
1584                 goto out;
1585
1586         /* key already overwritten in this snapshot? */
1587         if (k.k->p.snapshot != interior_k.k->p.snapshot)
1588                 goto out;
1589
1590         if (bpos_eq(*new_min_pos, POS_MIN)) {
1591                 *new_min_pos = k.k->p;
1592                 new_min_pos->snapshot = leaf_id;
1593         }
1594
1595         new = bch2_bkey_make_mut_noupdate(trans, interior_k);
1596         ret = PTR_ERR_OR_ZERO(new);
1597         if (ret)
1598                 goto out;
1599
1600         new->k.p.snapshot = leaf_id;
1601         ret = bch2_trans_update(trans, &iter, new, 0);
1602 out:
1603         bch2_trans_iter_exit(trans, &iter);
1604         return ret;
1605 }
1606
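/*
 * Copy @k to every leaf snapshot that descends from k's snapshot and doesn't
 * already have its own version of the key, committing as we go; on return,
 * *new_min_pos is the lowest position written (or POS_MIN if none were):
 */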
1607 int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
1608                                           enum btree_id btree,
1609                                           struct bkey_s_c k,
1610                                           struct bpos *new_min_pos)
1611 {
1612         struct bch_fs *c = trans->c;
1613         struct bkey_buf sk;
1614         u32 restart_count = trans->restart_count;
1615         int ret = 0;
1616
1617         bch2_bkey_buf_init(&sk);
1618         bch2_bkey_buf_reassemble(&sk, c, k);
1619         k = bkey_i_to_s_c(sk.k);
1620
1621         *new_min_pos = POS_MIN;
1622
1623         for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
1624              id < k.k->p.snapshot;
1625              id++) {
1626                 if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
1627                     !bch2_snapshot_is_leaf(c, id))
1628                         continue;
1629 again:
1630                 ret =   btree_trans_too_many_iters(trans) ?:
1631                         bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
1632                         bch2_trans_commit(trans, NULL, NULL, 0);
1633                 if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1634                         bch2_trans_begin(trans);
1635                         goto again;
1636                 }
1637
1638                 if (ret)
1639                         break;
1640         }
1641
1642         bch2_bkey_buf_exit(&sk, c);
1643
1644         return ret ?: trans_was_restarted(trans, restart_count);
1645 }
1646
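/*
 * If this snapshot is marked deleted, isn't its own equivalence-class
 * representative, or bch2_snapshot_needs_delete() says it should go, flag the
 * filesystem so dead snapshot deletion gets run:
 */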
1647 static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
1648 {
1649         struct bch_fs *c = trans->c;
1650         struct bkey_s_c_snapshot snap;
1651         int ret = 0;
1652
1653         if (k.k->type != KEY_TYPE_snapshot)
1654                 return 0;
1655
1656         snap = bkey_s_c_to_snapshot(k);
1657         if (BCH_SNAPSHOT_DELETED(snap.v) ||
1658             bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
1659             (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
1660                 set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
1661                 return 0;
1662         }
1663
1664         return ret;
1665 }
1666
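/*
 * Initialize the in-memory snapshot table from BTREE_ID_snapshots at startup:
 * run __bch2_mark_snapshot() on each node, compute equivalence classes, check
 * whether dead snapshot deletion is needed, then fill in the is_ancestor
 * bitmaps in a second pass:
 */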
1667 int bch2_snapshots_read(struct bch_fs *c)
1668 {
1669         int ret = bch2_trans_run(c,
1670                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1671                                    POS_MIN, 0, k,
1672                         __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
1673                         bch2_snapshot_set_equiv(trans, k) ?:
1674                         bch2_check_snapshot_needs_deletion(trans, k)) ?:
1675                 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
1676                                    POS_MIN, 0, k,
1677                            (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
1678         bch_err_fn(c, ret);
1679         return ret;
1680 }
1681
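/* Free the in-memory snapshot table at filesystem shutdown: */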
1682 void bch2_fs_snapshots_exit(struct bch_fs *c)
1683 {
1684         kfree(rcu_dereference_protected(c->snapshots, true));
1685 }