1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
11 #include <linux/random.h>
13 static int bch2_subvolume_delete(struct btree_trans *, u32);
/*
 * One step of the skiplist walk towards @ancestor: pick the farthest skip
 * pointer of @id that does not overshoot @ancestor.  The checks go from
 * skip[2] down to skip[0] so the longest admissible jump wins.
 * (Return statements for each branch are not visible in this excerpt.)
 */
15 static inline u32 get_ancestor_below(struct bch_fs *c, u32 id, u32 ancestor)
17 struct snapshot_t *s = snapshot_t(c, id);
19 if (s->skip[2] <= ancestor)
21 if (s->skip[1] <= ancestor)
23 if (s->skip[0] <= ancestor)
/*
 * Returns true if @ancestor is an ancestor of @id (or equal to it) in the
 * snapshot tree.  Uses the in-memory skiplist (get_ancestor_below()), so it
 * must only run after the skiplists have been validated — hence the
 * EBUG_ON against running before the check_snapshots recovery pass.
 * Relies on the invariant that ancestors have higher IDs than descendants.
 */
28 bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
30 EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);
32 while (id && id < ancestor)
33 id = get_ancestor_below(c, id, ancestor);
35 return id == ancestor;
/*
 * Like bch2_snapshot_is_ancestor(), but walks plain parent pointers one at
 * a time instead of the skiplist — safe to use early in recovery, before
 * skiplist fields have been checked/repaired.
 */
38 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
40 while (id && id < ancestor)
41 id = snapshot_t(c, id)->parent;
43 return id == ancestor;
/* Pretty-print a snapshot_tree key: its master subvolume and root snapshot. */
48 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
51 struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
53 prt_printf(out, "subvol %u root snapshot %u",
54 le32_to_cpu(t.v->master_subvol),
55 le32_to_cpu(t.v->root_snapshot));
/*
 * Validate a snapshot_tree key: its position must lie in (0, U32_MAX]
 * with inode 0.  On failure, an explanation is written to @err.
 */
58 int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
59 enum bkey_invalid_flags flags,
62 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
63 bkey_lt(k.k->p, POS(0, 1))) {
64 prt_printf(err, "bad pos");
65 return -BCH_ERR_invalid_bkey;
/*
 * Look up the snapshot_tree entry @id and copy its value into @s.
 * A generic ENOENT from the btree lookup is translated into the more
 * specific -BCH_ERR_ENOENT_snapshot_tree for callers to match on.
 */
71 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
72 struct bch_snapshot_tree *s)
74 int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
75 BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
77 if (bch2_err_matches(ret, ENOENT))
78 ret = -BCH_ERR_ENOENT_snapshot_tree;
/*
 * Allocate a new (empty) snapshot_tree key: find a free slot at or below
 * U32_MAX and reserve a zeroed snapshot_tree value in the transaction.
 * Returns the new key, or an ERR_PTR; "no free slot" is translated to the
 * snapshot-tree-specific ENOSPC error.
 */
82 static struct bkey_i_snapshot_tree *
83 __snapshot_tree_create(struct btree_trans *trans)
85 struct btree_iter iter;
86 int ret = bch2_bkey_get_empty_slot(trans, &iter,
87 BTREE_ID_snapshot_trees, POS(0, U32_MAX));
88 struct bkey_i_snapshot_tree *s_t;
90 if (ret == -BCH_ERR_ENOSPC_btree_slot)
91 ret = -BCH_ERR_ENOSPC_snapshot_tree;
95 s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
96 ret = PTR_ERR_OR_ZERO(s_t);
	/* iter is done regardless of outcome; exit before returning: */
97 bch2_trans_iter_exit(trans, &iter);
98 return ret ? ERR_PTR(ret) : s_t;
/*
 * Create a new snapshot_tree entry rooted at snapshot @root_id with master
 * subvolume @subvol_id; the allocated tree's ID is returned via @tree_id.
 */
101 static int snapshot_tree_create(struct btree_trans *trans,
102 u32 root_id, u32 subvol_id, u32 *tree_id)
104 struct bkey_i_snapshot_tree *n_tree =
105 __snapshot_tree_create(trans);
108 return PTR_ERR(n_tree);
110 n_tree->v.master_subvol = cpu_to_le32(subvol_id);
111 n_tree->v.root_snapshot = cpu_to_le32(root_id);
112 *tree_id = n_tree->k.p.offset;
116 /* Snapshot nodes: */
/* Pretty-print a snapshot key: flags, parent, children, subvol and tree. */
118 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
121 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
123 prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
124 BCH_SNAPSHOT_SUBVOL(s.v),
125 BCH_SNAPSHOT_DELETED(s.v),
126 le32_to_cpu(s.v->parent),
127 le32_to_cpu(s.v->children[0]),
128 le32_to_cpu(s.v->children[1]),
129 le32_to_cpu(s.v->subvol),
130 le32_to_cpu(s.v->tree));
/*
 * Validate a snapshot key:
 *  - position must lie in (0, U32_MAX] with inode 0
 *  - parent ID, if set, must be greater than this node's ID
 *    (ancestors always have higher IDs)
 *  - children must be normalized (children[0] >= children[1]), distinct,
 *    and have lower IDs than this node
 *  - if the value is large enough to carry a skiplist, skip[] must be
 *    sorted ascending, and each entry must be consistent with the parent
 *    pointer and greater than this node's ID
 * Writes a human-readable reason to @err on failure.
 */
133 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
134 enum bkey_invalid_flags flags,
135 struct printbuf *err)
137 struct bkey_s_c_snapshot s;
140 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
141 bkey_lt(k.k->p, POS(0, 1))) {
142 prt_printf(err, "bad pos");
143 return -BCH_ERR_invalid_bkey;
146 s = bkey_s_c_to_snapshot(k);
148 id = le32_to_cpu(s.v->parent);
149 if (id && id <= k.k->p.offset) {
150 prt_printf(err, "bad parent node (%u <= %llu)",
152 return -BCH_ERR_invalid_bkey;
	/* children[] is kept sorted descending: */
155 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
156 prt_printf(err, "children not normalized");
157 return -BCH_ERR_invalid_bkey;
160 if (s.v->children[0] &&
161 s.v->children[0] == s.v->children[1]) {
162 prt_printf(err, "duplicate child nodes");
163 return -BCH_ERR_invalid_bkey;
166 for (i = 0; i < 2; i++) {
167 id = le32_to_cpu(s.v->children[i]);
169 if (id >= k.k->p.offset) {
170 prt_printf(err, "bad child node (%u >= %llu)",
172 return -BCH_ERR_invalid_bkey;
	/* skiplist fields only exist in newer, larger snapshot values: */
176 if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
177 if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
178 le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) {
179 prt_printf(err, "skiplist not normalized");
180 return -BCH_ERR_invalid_bkey;
183 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
184 id = le32_to_cpu(s.v->skip[i]);
186 if (!id != !s.v->parent ||
188 id <= k.k->p.offset)) {
189 prt_printf(err, "bad skiplist node %u", id);
190 return -BCH_ERR_invalid_bkey;
/*
 * Trigger run when a snapshot key is created/updated: mirror the on-disk
 * snapshot into the in-memory snapshot table (c->snapshots genradix).
 * The radix is indexed by U32_MAX - id so that new (high) snapshot IDs
 * land at low indices.  If the node is flagged deleted, remember that
 * cleanup work is pending via BCH_FS_HAVE_DELETED_SNAPSHOTS.
 */
198 int bch2_mark_snapshot(struct btree_trans *trans,
199 enum btree_id btree, unsigned level,
200 struct bkey_s_c old, struct bkey_s_c new,
203 struct bch_fs *c = trans->c;
204 struct snapshot_t *t;
206 t = genradix_ptr_alloc(&c->snapshots,
207 U32_MAX - new.k->p.offset,
210 return -BCH_ERR_ENOMEM_mark_snapshot;
212 if (new.k->type == KEY_TYPE_snapshot) {
213 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
	/* copy every on-disk field into the in-memory node: */
215 t->parent = le32_to_cpu(s.v->parent);
216 t->skip[0] = le32_to_cpu(s.v->skip[0]);
217 t->skip[1] = le32_to_cpu(s.v->skip[1]);
218 t->skip[2] = le32_to_cpu(s.v->skip[2]);
219 t->depth = le32_to_cpu(s.v->depth);
220 t->children[0] = le32_to_cpu(s.v->children[0]);
221 t->children[1] = le32_to_cpu(s.v->children[1]);
222 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
223 t->tree = le32_to_cpu(s.v->tree);
225 if (BCH_SNAPSHOT_DELETED(s.v))
226 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/* Look up snapshot @id and copy its value into @s; returns 0 or an error. */
238 static int snapshot_lookup(struct btree_trans *trans, u32 id,
239 struct bch_snapshot *s)
241 return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
242 BTREE_ITER_WITH_UPDATES, snapshot, s);
/*
 * Returns 1 if snapshot @id exists and is not flagged deleted, 0 if it is
 * deleted, or a negative error.  A missing node is logged as an error
 * (error propagation for that case is outside this excerpt).
 */
245 static int snapshot_live(struct btree_trans *trans, u32 id)
247 struct bch_snapshot v;
253 ret = snapshot_lookup(trans, id, &v);
254 if (bch2_err_matches(ret, ENOENT))
255 bch_err(trans->c, "snapshot node %u not found", id);
259 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot node @k: nodes with exactly
 * one live child are interior/redundant, so they share the equivalence
 * class of that child; otherwise the node is its own class.  Results are
 * stored in the in-memory snapshot table (snapshot_t()->equiv).
 */
262 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
264 struct bch_fs *c = trans->c;
265 unsigned i, nr_live = 0, live_idx = 0;
266 struct bkey_s_c_snapshot snap;
267 u32 id = k.k->p.offset, child[2];
269 if (k.k->type != KEY_TYPE_snapshot)
272 snap = bkey_s_c_to_snapshot(k);
274 child[0] = le32_to_cpu(snap.v->children[0]);
275 child[1] = le32_to_cpu(snap.v->children[1]);
	/* count live children; remember which one, for the 1-child case: */
277 for (i = 0; i < 2; i++) {
278 int ret = snapshot_live(trans, child[i]);
288 snapshot_t(c, id)->equiv = nr_live == 1
289 ? snapshot_t(c, child[live_idx])->equiv
/* Accessors for a snapshot node's children, from the in-memory table: */
296 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
298 return snapshot_t(c, id)->children[child];
/* children[0] is the "left" child: */
301 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
303 return bch2_snapshot_child(c, id, 0);
/* children[1] is the "right" child: */
306 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
308 return bch2_snapshot_child(c, id, 1);
/*
 * Pre-order traversal step over a snapshot tree: descend to the left child
 * if one exists, otherwise walk up through parents looking for the first
 * ancestor whose right child we haven't visited yet.
 */
311 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
315 n = bch2_snapshot_left_child(c, id);
319 while ((parent = bch2_snapshot_parent(c, id))) {
320 n = bch2_snapshot_right_child(c, parent);
/*
 * Walk the whole snapshot tree under @snapshot_root and return the lowest
 * (i.e. oldest) subvolume ID found on any node, or 0 if none.
 */
329 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
331 u32 id = snapshot_root;
335 s = snapshot_t(c, id)->subvol;
337 if (s && (!subvol || s < subvol))
340 id = bch2_snapshot_tree_next(c, id);
/*
 * Find a master subvolume for the snapshot tree rooted at @snapshot_root:
 * scan all subvolumes for one inside this tree that is not itself a
 * snapshot (!BCH_SUBVOLUME_SNAP).  If none is found, promote the oldest
 * subvolume in the tree to master by clearing its SNAP flag.
 */
346 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
347 u32 snapshot_root, u32 *subvol_id)
349 struct bch_fs *c = trans->c;
350 struct btree_iter iter;
352 struct bkey_s_c_subvolume s;
356 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
358 if (k.k->type != KEY_TYPE_subvolume)
361 s = bkey_s_c_to_subvolume(k);
	/* only consider subvolumes that live inside this snapshot tree: */
362 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
364 if (!BCH_SUBVOLUME_SNAP(s.v)) {
365 *subvol_id = s.k->p.offset;
371 bch2_trans_iter_exit(trans, &iter);
373 if (!ret && !found) {
374 struct bkey_i_subvolume *s;
	/* no non-snapshot subvolume: promote the oldest one to master */
376 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
378 s = bch2_bkey_get_mut_typed(trans, &iter,
379 BTREE_ID_subvolumes, POS(0, *subvol_id),
381 ret = PTR_ERR_OR_ZERO(s);
385 SET_BCH_SUBVOLUME_SNAP(&s->v, false);
/*
 * fsck pass for one snapshot_tree key:
 *  - if it points to a missing snapshot, a non-root snapshot, or a snapshot
 *    that doesn't point back at this tree, delete it;
 *  - if its master subvolume is missing, outside this tree, or a snapshot
 *    subvolume, repair the master_subvol pointer via
 *    bch2_snapshot_tree_master_subvol().
 */
391 static int check_snapshot_tree(struct btree_trans *trans,
392 struct btree_iter *iter,
395 struct bch_fs *c = trans->c;
396 struct bkey_s_c_snapshot_tree st;
397 struct bch_snapshot s;
398 struct bch_subvolume subvol;
399 struct printbuf buf = PRINTBUF;
403 if (k.k->type != KEY_TYPE_snapshot_tree)
406 st = bkey_s_c_to_snapshot_tree(k);
407 root_id = le32_to_cpu(st.v->root_snapshot);
409 ret = snapshot_lookup(trans, root_id, &s);
410 if (ret && !bch2_err_matches(ret, ENOENT))
	/* root missing, not actually a root, or doesn't point back: delete */
413 if (fsck_err_on(ret ||
414 root_id != bch2_snapshot_root(c, root_id) ||
415 st.k->p.offset != le32_to_cpu(s.tree),
417 "snapshot tree points to missing/incorrect snapshot:\n %s",
418 (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
419 ret = bch2_btree_delete_at(trans, iter, 0);
423 ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
425 if (ret && !bch2_err_matches(ret, ENOENT))
	/* any of these means master_subvol is wrong — repair it: */
428 if (fsck_err_on(ret, c,
429 "snapshot tree points to missing subvolume:\n %s",
430 (printbuf_reset(&buf),
431 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
432 fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
433 le32_to_cpu(subvol.snapshot),
435 "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
436 (printbuf_reset(&buf),
437 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
438 fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
439 "snapshot tree points to snapshot subvolume:\n %s",
440 (printbuf_reset(&buf),
441 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
442 struct bkey_i_snapshot_tree *u;
445 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
449 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
450 ret = PTR_ERR_OR_ZERO(u);
454 u->v.master_subvol = cpu_to_le32(subvol_id);
455 st = snapshot_tree_i_to_s_c(u);
464 * For each snapshot_tree, make sure it points to the root of a snapshot tree
465 * and that snapshot entry points back to it, or delete it.
467 * And, make sure it points to a subvolume within that snapshot tree, or correct
468 * it to point to the oldest subvolume within that snapshot tree.
/* Runs check_snapshot_tree() on every snapshot_tree key, committing fixes. */
470 int bch2_check_snapshot_trees(struct bch_fs *c)
472 struct btree_iter iter;
476 ret = bch2_trans_run(c,
477 for_each_btree_key_commit(&trans, iter,
478 BTREE_ID_snapshot_trees, POS_MIN,
479 BTREE_ITER_PREFETCH, k,
480 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
481 check_snapshot_tree(&trans, &iter, k)));
	/* NOTE(review): other messages here use bch2_err_str(); %i works but
	 * is inconsistent with the rest of the file. */
484 bch_err(c, "error %i checking snapshot trees", ret);
489 * Look up snapshot tree for @tree_id and find root,
490 * make sure @snap_id is a descendant:
/*
 * Returns 1 if @snap_id's tree pointer is consistent, 0 if the tree is
 * missing or @snap_id isn't under its root, or a negative error.
 */
492 static int snapshot_tree_ptr_good(struct btree_trans *trans,
493 u32 snap_id, u32 tree_id)
495 struct bch_snapshot_tree s_t;
496 int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
498 if (bch2_err_matches(ret, ENOENT))
503 return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
/*
 * Pick a uniformly random ancestor of @id (used to populate skiplist
 * entries): choose a random depth in [0, depth) and return that ancestor.
 */
506 static u32 snapshot_rand_ancestor_get(struct bch_fs *c, u32 id)
508 struct snapshot_t *s;
513 s = snapshot_t(c, id);
517 return bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
/*
 * Validate a snapshot's skiplist entries: each skip[i] must be set iff the
 * node has a parent, must refer to an existing snapshot, and that snapshot
 * must belong to the same tree.  Returns truthy when all entries are good.
 */
520 static int snapshot_rand_ancestor_good(struct btree_trans *trans,
521 struct bch_snapshot s)
523 struct bch_snapshot a;
527 for (i = 0; i < 3; i++) {
	/* skip entries and parent pointer must be set/unset together: */
528 if (!s.parent != !s.skip[i])
534 ret = snapshot_lookup(trans, le32_to_cpu(s.skip[i]), &a);
535 if (bch2_err_matches(ret, ENOENT))
540 if (a.tree != s.tree)
548 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
549 * its snapshot_tree pointer is correct (allocate new one if necessary), then
550 * update this node's pointer to root node's pointer:
552 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
553 struct btree_iter *iter,
555 struct bch_snapshot *s)
557 struct bch_fs *c = trans->c;
558 struct btree_iter root_iter;
559 struct bch_snapshot_tree s_t;
560 struct bkey_s_c_snapshot root;
561 struct bkey_i_snapshot *u;
562 u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
565 root = bch2_bkey_get_iter_typed(trans, &root_iter,
566 BTREE_ID_snapshots, POS(0, root_id),
567 BTREE_ITER_WITH_UPDATES, snapshot);
568 ret = bkey_err(root);
572 tree_id = le32_to_cpu(root.v->tree);
574 ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
575 if (ret && !bch2_err_matches(ret, ENOENT))
	/* root's tree is missing or points elsewhere: create a fresh tree */
578 if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
579 u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
580 ret = PTR_ERR_OR_ZERO(u) ?:
581 snapshot_tree_create(trans, root_id,
582 bch2_snapshot_tree_oldest_subvol(c, root_id),
587 u->v.tree = cpu_to_le32(tree_id);
588 if (k.k->p.offset == root_id)
	/* non-root node: copy the (now correct) root's tree pointer */
592 if (k.k->p.offset != root_id) {
593 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
594 ret = PTR_ERR_OR_ZERO(u);
598 u->v.tree = cpu_to_le32(tree_id);
602 bch2_trans_iter_exit(trans, &root_iter);
/*
 * fsck pass for one snapshot key; checks (and repairs where marked fsck_err):
 *  - parent exists and points back at this node as a child
 *  - each child exists and points back at this node as parent
 *  - subvol/SUBVOL-flag consistency with the subvolume it references
 *  - tree pointer resolves to a tree rooted above this node
 *  - depth equals parent's depth + 1 (requires parents checked first,
 *    hence the reverse iteration in bch2_check_snapshots())
 *  - skiplist entries are valid, regenerating them randomly if not
 */
606 static int check_snapshot(struct btree_trans *trans,
607 struct btree_iter *iter,
610 struct bch_fs *c = trans->c;
611 struct bch_snapshot s;
612 struct bch_subvolume subvol;
613 struct bch_snapshot v;
614 struct bkey_i_snapshot *u;
615 u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
616 struct snapshot_t *parent = parent_id
617 ? snapshot_t(c, parent_id)
619 struct printbuf buf = PRINTBUF;
620 bool should_have_subvol;
624 if (k.k->type != KEY_TYPE_snapshot)
	/* copy value; zero-fill first since older keys may be shorter: */
627 memset(&s, 0, sizeof(s));
628 memcpy(&s, k.v, bkey_val_bytes(k.k));
630 id = le32_to_cpu(s.parent);
632 ret = snapshot_lookup(trans, id, &v);
633 if (bch2_err_matches(ret, ENOENT))
634 bch_err(c, "snapshot with nonexistent parent:\n %s",
635 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
639 if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
640 le32_to_cpu(v.children[1]) != k.k->p.offset) {
641 bch_err(c, "snapshot parent %u missing pointer to child %llu",
648 for (i = 0; i < 2 && s.children[i]; i++) {
649 id = le32_to_cpu(s.children[i]);
651 ret = snapshot_lookup(trans, id, &v);
652 if (bch2_err_matches(ret, ENOENT))
653 bch_err(c, "snapshot node %llu has nonexistent child %u",
658 if (le32_to_cpu(v.parent) != k.k->p.offset) {
659 bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
660 id, le32_to_cpu(v.parent), k.k->p.offset);
	/* a node should reference a subvol iff flagged SUBVOL and not deleted */
666 should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
667 !BCH_SNAPSHOT_DELETED(&s);
669 if (should_have_subvol) {
670 id = le32_to_cpu(s.subvol);
671 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
672 if (bch2_err_matches(ret, ENOENT))
673 bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
674 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
678 if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
679 bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
685 if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n %s",
686 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
687 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
688 ret = PTR_ERR_OR_ZERO(u);
697 ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
701 if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n %s",
702 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
703 ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
709 if (fsck_err_on(le32_to_cpu(s.depth) != (parent ? parent->depth + 1 : 0), c,
710 "snapshot with incorrect depth fields, should be %u:\n %s",
712 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
713 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
714 ret = PTR_ERR_OR_ZERO(u);
718 u->v.depth = cpu_to_le32(parent ? parent->depth + 1 : 0);
722 ret = snapshot_rand_ancestor_good(trans, s);
726 if (fsck_err_on(!ret, c, "snapshot with bad rand_ancestor field:\n %s",
727 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
728 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
729 ret = PTR_ERR_OR_ZERO(u);
	/* regenerate skiplist from random ancestors of the parent: */
733 for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
734 u->v.skip[i] = cpu_to_le32(snapshot_rand_ancestor_get(c, parent_id));
736 bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_int);
/* Runs check_snapshot() on every snapshot key, committing fixes. */
746 int bch2_check_snapshots(struct bch_fs *c)
748 struct btree_iter iter;
753 * We iterate backwards as checking/fixing the depth field requires that
754 * the parent's depth already be correct:
756 ret = bch2_trans_run(c,
757 for_each_btree_key_reverse_commit(&trans, iter,
758 BTREE_ID_snapshots, POS_MAX,
759 BTREE_ITER_PREFETCH, k,
760 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
761 check_snapshot(&trans, &iter, k)));
/*
 * fsck pass for one subvolume key:
 *  - its snapshot must exist
 *  - unlinked subvolumes are deleted (then the transaction restarts,
 *    since deletion touches multiple keys)
 *  - a non-snapshot subvolume must be its tree's master subvolume;
 *    otherwise mark it as a snapshot subvolume.
 */
767 static int check_subvol(struct btree_trans *trans,
768 struct btree_iter *iter,
771 struct bch_fs *c = trans->c;
772 struct bkey_s_c_subvolume subvol;
773 struct bch_snapshot snapshot;
777 if (k.k->type != KEY_TYPE_subvolume)
780 subvol = bkey_s_c_to_subvolume(k);
781 snapid = le32_to_cpu(subvol.v->snapshot);
782 ret = snapshot_lookup(trans, snapid, &snapshot);
784 if (bch2_err_matches(ret, ENOENT))
785 bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
786 k.k->p.offset, snapid);
790 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
793 ret = bch2_subvolume_delete(trans, iter->pos.offset);
795 bch_err(c, "error deleting subvolume %llu: %s",
796 iter->pos.offset, bch2_err_str(ret));
	/* deletion spans many keys: force a nested transaction restart */
797 return ret ?: -BCH_ERR_transaction_restart_nested;
800 if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
801 u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
802 u32 snapshot_tree = snapshot_t(c, snapshot_root)->tree;
803 struct bch_snapshot_tree st;
805 ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
807 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
808 "%s: snapshot tree %u not found", __func__, snapshot_tree);
813 if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
814 "subvolume %llu is not set as snapshot but is not master subvolume",
816 struct bkey_i_subvolume *s =
817 bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
818 ret = PTR_ERR_OR_ZERO(s);
822 SET_BCH_SUBVOLUME_SNAP(&s->v, true);
/* Runs check_subvol() on every subvolume key, committing fixes. */
830 int bch2_check_subvols(struct bch_fs *c)
832 struct btree_iter iter;
836 ret = bch2_trans_run(c,
837 for_each_btree_key_commit(&trans, iter,
838 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
839 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
840 check_subvol(&trans, &iter, k)));
/* Free the in-memory snapshot table on filesystem shutdown. */
846 void bch2_fs_snapshots_exit(struct bch_fs *c)
848 genradix_free(&c->snapshots);
/*
 * Startup: populate the in-memory snapshot table from the snapshots btree
 * (via the mark trigger) and compute equivalence classes.
 */
851 int bch2_snapshots_read(struct bch_fs *c)
853 struct btree_iter iter;
857 ret = bch2_trans_run(c,
858 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
860 bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
861 bch2_snapshot_set_equiv(&trans, k)));
868 * Mark a snapshot as deleted, for future cleanup:
870 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
872 struct btree_iter iter;
873 struct bkey_i_snapshot *s;
876 s = bch2_bkey_get_mut_typed(trans, &iter,
877 BTREE_ID_snapshots, POS(0, id),
879 ret = PTR_ERR_OR_ZERO(s);
	/* a missing snapshot node here indicates fs corruption: */
881 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
882 trans->c, "missing snapshot %u", id);
886 /* already deleted? */
887 if (BCH_SNAPSHOT_DELETED(&s->v))
	/* flag deleted and drop the subvol association: */
890 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
891 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
894 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically delete a snapshot node that was previously flagged deleted:
 * unlink it from its parent's children[] (renormalizing their order), or —
 * if it was the tree root — repoint the snapshot_tree entry at the sole
 * surviving child, deleting the tree entry when no child remains.
 */
898 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
900 struct bch_fs *c = trans->c;
901 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
902 struct btree_iter tree_iter = (struct btree_iter) { NULL };
903 struct bkey_s_c_snapshot s;
908 s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
909 BTREE_ITER_INTENT, snapshot);
911 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
912 "missing snapshot %u", id);
	/* caller must have marked it deleted first: */
917 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
918 parent_id = le32_to_cpu(s.v->parent);
921 struct bkey_i_snapshot *parent;
923 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
924 BTREE_ID_snapshots, POS(0, parent_id),
926 ret = PTR_ERR_OR_ZERO(parent);
928 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
929 "missing snapshot %u", parent_id);
	/* find and clear our slot in the parent's children[]: */
933 for (i = 0; i < 2; i++)
934 if (le32_to_cpu(parent->v.children[i]) == id)
938 bch_err(c, "snapshot %u missing child pointer to %u",
941 parent->v.children[i] = 0;
	/* keep children[] normalized (descending): */
943 if (le32_to_cpu(parent->v.children[0]) <
944 le32_to_cpu(parent->v.children[1]))
945 swap(parent->v.children[0],
946 parent->v.children[1]);
949 * We're deleting the root of a snapshot tree: update the
950 * snapshot_tree entry to point to the new root, or delete it if
951 * this is the last snapshot ID in this tree:
953 struct bkey_i_snapshot_tree *s_t;
955 BUG_ON(s.v->children[1]);
957 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
958 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
960 ret = PTR_ERR_OR_ZERO(s_t);
964 if (s.v->children[0]) {
965 s_t->v.root_snapshot = s.v->children[0];
967 s_t->k.type = KEY_TYPE_deleted;
968 set_bkey_val_u64s(&s_t->k, 0);
972 ret = bch2_btree_delete_at(trans, &iter, 0);
974 bch2_trans_iter_exit(trans, &tree_iter);
975 bch2_trans_iter_exit(trans, &p_iter);
976 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent (0 for a
 * new root) in tree @tree.  New IDs are taken from the top of the keyspace
 * downwards (iterating prev from POS_MIN wraps to the high end); each new
 * node gets the given subvol, a depth of parent+1, and randomly generated,
 * sorted skiplist entries.  The allocated IDs are returned in @new_snapids.
 */
980 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
982 u32 *snapshot_subvols,
985 struct bch_fs *c = trans->c;
986 struct btree_iter iter;
987 struct bkey_i_snapshot *n;
990 u32 depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
993 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
994 POS_MIN, BTREE_ITER_INTENT);
995 k = bch2_btree_iter_peek(&iter);
1000 for (i = 0; i < nr_snapids; i++) {
1001 k = bch2_btree_iter_prev_slot(&iter);
	/* ran out of free slots below the previous snapshot ID: */
1006 if (!k.k || !k.k->p.offset) {
1007 ret = -BCH_ERR_ENOSPC_snapshot_create;
1011 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
1012 ret = PTR_ERR_OR_ZERO(n);
1017 n->v.parent = cpu_to_le32(parent);
1018 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
1019 n->v.tree = cpu_to_le32(tree);
1020 n->v.depth = cpu_to_le32(depth);
1022 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
1023 n->v.skip[j] = cpu_to_le32(snapshot_rand_ancestor_get(c, parent));
	/* skiplist must be kept sorted (see bch2_snapshot_invalid()): */
1025 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_int);
1026 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
	/* update the in-memory table immediately, before commit: */
1028 ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
1029 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
1033 new_snapids[i] = iter.pos.offset;
1036 bch2_trans_iter_exit(trans, &iter);
1041 * Create new snapshot IDs as children of an existing snapshot ID:
1043 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
1045 u32 *snapshot_subvols,
1046 unsigned nr_snapids)
1048 struct btree_iter iter;
1049 struct bkey_i_snapshot *n_parent;
1052 n_parent = bch2_bkey_get_mut_typed(trans, &iter,
1053 BTREE_ID_snapshots, POS(0, parent),
1055 ret = PTR_ERR_OR_ZERO(n_parent);
1056 if (unlikely(ret)) {
1057 if (bch2_err_matches(ret, ENOENT))
1058 bch_err(trans->c, "snapshot %u not found", parent);
	/* a parent may only gain children while it's still a leaf: */
1062 if (n_parent->v.children[0] || n_parent->v.children[1]) {
1063 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
1068 ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
1069 new_snapids, snapshot_subvols, nr_snapids);
	/* parent becomes interior: record children, drop subvol association */
1073 n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
1074 n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
1075 n_parent->v.subvol = 0;
1076 SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
1078 bch2_trans_iter_exit(trans, &iter);
1083 * Create a snapshot node that is the root of a new tree:
1085 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
1087 u32 *snapshot_subvols,
1088 unsigned nr_snapids)
1090 struct bkey_i_snapshot_tree *n_tree;
1093 n_tree = __snapshot_tree_create(trans);
1094 ret = PTR_ERR_OR_ZERO(n_tree) ?:
1095 create_snapids(trans, 0, n_tree->k.p.offset,
1096 new_snapids, snapshot_subvols, nr_snapids);
	/* link the new tree to its root snapshot and master subvolume: */
1100 n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
1101 n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
/*
 * Public entry point: create snapshot nodes.  With parent == 0, creates a
 * single node rooting a brand-new tree (nr_snapids must be 1); otherwise
 * creates two children under @parent (nr_snapids must be 2).
 */
1105 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
1107 u32 *snapshot_subvols,
1108 unsigned nr_snapids)
1110 BUG_ON((parent == 0) != (nr_snapids == 1));
1111 BUG_ON((parent != 0) != (nr_snapids == 2));
1114 ? bch2_snapshot_node_create_children(trans, parent,
1115 new_snapids, snapshot_subvols, nr_snapids)
1116 : bch2_snapshot_node_create_tree(trans,
1117 new_snapids, snapshot_subvols, nr_snapids)
/*
 * Per-key step of dead-snapshot deletion: delete this key if its snapshot
 * is in @deleted, or if an equivalent key (same pos, same equivalence
 * class) has already been kept at this position; otherwise record the
 * class in @equiv_seen.  @last_pos tracks position changes so equiv_seen
 * can be reset per position (reset logic not visible in this excerpt).
 */
1121 static int snapshot_delete_key(struct btree_trans *trans,
1122 struct btree_iter *iter,
1124 snapshot_id_list *deleted,
1125 snapshot_id_list *equiv_seen,
1126 struct bpos *last_pos)
1128 struct bch_fs *c = trans->c;
1129 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
1131 if (!bkey_eq(k.k->p, *last_pos))
1135 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
1136 snapshot_list_has_id(equiv_seen, equiv)) {
1137 return bch2_btree_delete_at(trans, iter,
1138 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1140 return snapshot_list_add(c, equiv_seen, equiv);
/*
 * Flag a snapshot node deleted if it's redundant: not already deleted, not
 * referenced by a subvolume, and (per the snapshot_live() checks on its
 * children) safe to drop.
 */
1144 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
1147 struct bkey_s_c_snapshot snap;
1151 if (k.k->type != KEY_TYPE_snapshot)
1154 snap = bkey_s_c_to_snapshot(k);
1155 if (BCH_SNAPSHOT_DELETED(snap.v) ||
1156 BCH_SNAPSHOT_SUBVOL(snap.v))
1159 children[0] = le32_to_cpu(snap.v->children[0]);
1160 children[1] = le32_to_cpu(snap.v->children[1]);
1162 ret = snapshot_live(trans, children[0]) ?:
1163 snapshot_live(trans, children[1]);
1168 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Garbage-collect snapshots flagged deleted:
 *  1. flag redundant snapshot nodes deleted (bch2_delete_redundant_snapshot)
 *  2. recompute equivalence classes
 *  3. collect all deleted snapshot IDs
 *  4. for every snapshot-aware btree, delete keys belonging to deleted or
 *     duplicate-equivalent snapshots (snapshot_delete_key)
 *  5. physically delete the snapshot nodes themselves
 * Only runs when BCH_FS_HAVE_DELETED_SNAPSHOTS is set; goes read-write
 * early if the fs hasn't started yet.
 *
 * Fixes vs. original: log-string typo "deleleting" -> "deleting", and the
 * set_equiv error message named a nonexistent function
 * ("bch2_snapshots_set_equiv" -> "bch2_snapshot_set_equiv").
 */
1172 int bch2_delete_dead_snapshots(struct bch_fs *c)
1174 struct btree_trans trans;
1175 struct btree_iter iter;
1177 struct bkey_s_c_snapshot snap;
1178 snapshot_id_list deleted = { 0 };
1182 if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
1185 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
1186 ret = bch2_fs_read_write_early(c);
1188 bch_err(c, "error deleting dead snapshots: error going rw: %s", bch2_err_str(ret));
1193 bch2_trans_init(&trans, c, 0, 0);
1196 * For every snapshot node: If we have no live children and it's not
1197 * pointed to by a subvolume, delete it:
1199 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
1202 bch2_delete_redundant_snapshot(&trans, &iter, k));
1204 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
1208 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
1210 bch2_snapshot_set_equiv(&trans, k));
1212 bch_err(c, "error in bch2_snapshot_set_equiv: %s", bch2_err_str(ret));
1216 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
1217 POS_MIN, 0, k, ret) {
1218 if (k.k->type != KEY_TYPE_snapshot)
1221 snap = bkey_s_c_to_snapshot(k);
1222 if (BCH_SNAPSHOT_DELETED(snap.v)) {
1223 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
1228 bch2_trans_iter_exit(&trans, &iter);
1231 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
	/* scrub keys from every btree that carries per-snapshot keys: */
1235 for (id = 0; id < BTREE_ID_NR; id++) {
1236 struct bpos last_pos = POS_MIN;
1237 snapshot_id_list equiv_seen = { 0 };
1239 if (!btree_type_has_snapshots(id))
1242 ret = for_each_btree_key_commit(&trans, iter,
1244 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1245 NULL, NULL, BTREE_INSERT_NOFAIL,
1246 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
1248 darray_exit(&equiv_seen);
1251 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
1256 for (i = 0; i < deleted.nr; i++) {
1257 ret = commit_do(&trans, NULL, NULL, 0,
1258 bch2_snapshot_node_delete(&trans, deleted.data[i]));
1260 bch_err(c, "error deleting snapshot %u: %s",
1261 deleted.data[i], bch2_err_str(ret));
1266 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1268 darray_exit(&deleted);
1269 bch2_trans_exit(&trans);
/* Workqueue entry point: run deletion, then drop the write ref taken by
 * bch2_delete_dead_snapshots_async(). */
1275 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
1277 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1279 bch2_delete_dead_snapshots(c);
1280 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Queue dead-snapshot deletion on the write-ref workqueue.  The write ref
 * is taken here and released by the work item; if queue_work() says the
 * work was already pending, release the extra ref immediately.
 */
1283 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1285 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1286 !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1287 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction-commit hook: note that deleted snapshots now exist and kick
 * off async cleanup — unless we're still in recovery before the
 * delete_dead_snapshots pass, which will handle it itself.
 */
1290 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
1291 struct btree_trans_commit_hook *h)
1293 struct bch_fs *c = trans->c;
1295 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1297 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots)
1300 bch2_delete_dead_snapshots_async(c);
/* Validate a subvolume key: position must lie within the subvolume range. */
1306 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
1307 unsigned flags, struct printbuf *err)
1309 if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
1310 bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
1311 prt_printf(err, "invalid pos");
1312 return -BCH_ERR_invalid_bkey;
/* Pretty-print a subvolume key; the parent field only exists on newer,
 * larger values, so print it conditionally. */
1318 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
1321 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1323 prt_printf(out, "root %llu snapshot id %u",
1324 le64_to_cpu(s.v->inode),
1325 le32_to_cpu(s.v->snapshot));
1327 if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, parent))
1328 prt_printf(out, " parent %u", le32_to_cpu(s.v->parent));
/*
 * Look up subvolume @subvol into @s.  When @inconsistent_if_not_found is
 * set, a missing subvolume is flagged as filesystem inconsistency (the
 * caller holds a reference that should have kept it alive).
 */
1331 static __always_inline int
1332 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
1333 bool inconsistent_if_not_found,
1335 struct bch_subvolume *s)
1337 int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
1338 iter_flags, subvolume, s);
1339 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
1340 inconsistent_if_not_found,
1341 trans->c, "missing subvolume %u", subvol);
/* Out-of-line wrapper around bch2_subvolume_get_inlined(). */
1345 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
1346 bool inconsistent_if_not_found,
1348 struct bch_subvolume *s)
1350 return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/* Resolve @snapshot's owning subvolume: snapshot -> subvol ID -> subvolume. */
1353 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
1354 struct bch_subvolume *subvol)
1356 struct bch_snapshot snap;
1358 return snapshot_lookup(trans, snapshot, &snap) ?:
1359 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Look up the snapshot ID a subvolume currently points at.  A missing
 * subvolume is reported as filesystem inconsistency.
 */
1362 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
1365 struct btree_iter iter;
1369 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
1371 BTREE_ITER_WITH_UPDATES);
1372 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -BCH_ERR_ENOENT_subvolume;
1375 *snapid = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
1376 else if (bch2_err_matches(ret, ENOENT))
1377 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
1378 bch2_trans_iter_exit(trans, &iter);
/*
 * Per-key step of reparenting: if this subvolume's parent is @old_parent,
 * rewrite it to @new_parent.  Keys too old to carry a parent field, and
 * keys with a different parent, are skipped.
 */
1382 static int bch2_subvolume_reparent(struct btree_trans *trans,
1383 struct btree_iter *iter,
1385 u32 old_parent, u32 new_parent)
1387 struct bkey_i_subvolume *s;
1390 if (k.k->type != KEY_TYPE_subvolume)
1393 if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, parent) &&
1394 le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
1397 s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
1398 ret = PTR_ERR_OR_ZERO(s);
1402 s->v.parent = cpu_to_le32(new_parent);
1407 * Scan for subvolumes with parent @subvolid_to_delete, reparent:
/* Children of the deleted subvolume are handed to its own parent. */
1409 static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
1411 struct btree_iter iter;
1413 struct bch_subvolume s;
1415 return lockrestart_do(trans,
1416 bch2_subvolume_get(trans, subvolid_to_delete, true,
1417 BTREE_ITER_CACHED, &s)) ?:
1418 for_each_btree_key_commit(trans, iter,
1419 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
1420 NULL, NULL, BTREE_INSERT_NOFAIL,
1421 bch2_subvolume_reparent(trans, &iter, k,
1422 subvolid_to_delete, le32_to_cpu(s.parent)));
1426 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
/* cleanup via the commit hook (runs bch2_delete_dead_snapshots_hook): */
1429 static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1431 struct btree_iter iter;
1432 struct bkey_s_c_subvolume subvol;
1433 struct btree_trans_commit_hook *h;
1437 subvol = bch2_bkey_get_iter_typed(trans, &iter,
1438 BTREE_ID_subvolumes, POS(0, subvolid),
1439 BTREE_ITER_CACHED|BTREE_ITER_INTENT,
1441 ret = bkey_err(subvol);
1442 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1443 "missing subvolume %u", subvolid);
1447 snapid = le32_to_cpu(subvol.v->snapshot);
1449 ret = bch2_btree_delete_at(trans, &iter, 0);
1453 ret = bch2_snapshot_node_set_deleted(trans, snapid);
	/* hook memory is transaction-owned; freed with the transaction: */
1457 h = bch2_trans_kmalloc(trans, sizeof(*h));
1458 ret = PTR_ERR_OR_ZERO(h);
1462 h->fn = bch2_delete_dead_snapshots_hook;
1463 bch2_trans_commit_hook(trans, h);
1465 bch2_trans_iter_exit(trans, &iter);
/*
 * Delete subvolume @subvolid: first reparent any child subvolumes, then
 * run __bch2_subvolume_delete() in a commit loop (retrying on transaction
 * restart) with BTREE_INSERT_NOFAIL.
 */
1469 static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1471 return bch2_subvolumes_reparent(trans, subvolid) ?:
1472 commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
1473 __bch2_subvolume_delete(trans, subvolid));
/*
 * Workqueue function: drain c->snapshots_unlinked, evict the corresponding
 * inodes/pagecache, then delete each unlinked subvolume. Queued by
 * bch2_subvolume_wait_for_pagecache_and_delete_hook() after an unlink
 * commits.
 */
1476 static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
1478 struct bch_fs *c = container_of(work, struct bch_fs,
1479 snapshot_wait_for_pagecache_and_delete_work);
/* Steal the current list under the lock and reset it, so new unlinks
 * accumulate in a fresh darray while we work on this batch: */
1485 mutex_lock(&c->snapshots_unlinked_lock);
1486 s = c->snapshots_unlinked;
1487 darray_init(&c->snapshots_unlinked);
1488 mutex_unlock(&c->snapshots_unlinked_lock);
/* Flush inodes (and their pagecache) belonging to these subvolumes
 * before deleting them: */
1493 bch2_evict_subvolume_inodes(c, &s);
1495 for (id = s.data; id < s.data + s.nr; id++) {
1496 ret = bch2_trans_run(c, bch2_subvolume_delete(&trans, *id));
/* Failure is logged per-subvolume; loop control after the error is
 * elided in this view. */
1498 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
/* Drop the write ref taken by the commit hook when the work was queued: */
1506 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Commit-hook payload for bch2_subvolume_unlink(): embeds the generic
 * btree_trans_commit_hook plus the id of the subvolume to add to
 * c->snapshots_unlinked once the unlink commits. (The `subvol` member and
 * closing brace are elided in this view.)
 */
1509 struct subvolume_unlink_hook {
1510 struct btree_trans_commit_hook h;
/*
 * Commit hook fired when a subvolume-unlink transaction commits: record the
 * subvolume id in c->snapshots_unlinked and queue the deferred deletion
 * work, pinned by a write ref so it can't race filesystem shutdown.
 */
1514 static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
1515 struct btree_trans_commit_hook *_h)
1517 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
1518 struct bch_fs *c = trans->c;
/* Add the id only once; the list is shared with the worker thread: */
1521 mutex_lock(&c->snapshots_unlinked_lock);
1522 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
1523 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
1524 mutex_unlock(&c->snapshots_unlinked_lock);
/* If the ref can't be taken the fs is going away — skip queueing: */
1529 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
/* queue_work() returning false means the work was already pending and
 * holds its own ref, so drop the one we just took: */
1532 if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
1533 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Mark subvolume @subvolid as unlinked (BCH_SUBVOLUME_UNLINKED) and arrange
 * for it to be deleted asynchronously: a commit hook adds it to
 * c->snapshots_unlinked and queues the pagecache-flush-and-delete worker.
 */
1537 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
1539 struct btree_iter iter;
1540 struct bkey_i_subvolume *n;
1541 struct subvolume_unlink_hook *h;
/* Hook memory is transaction-owned; registered before the update so it
 * runs iff the commit succeeds: */
1544 h = bch2_trans_kmalloc(trans, sizeof(*h));
1545 ret = PTR_ERR_OR_ZERO(h);
1549 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
1550 h->subvol = subvolid;
1551 bch2_trans_commit_hook(trans, &h->h);
/* Fetch a mutable copy of the subvolume via the key cache: */
1553 n = bch2_bkey_get_mut_typed(trans, &iter,
1554 BTREE_ID_subvolumes, POS(0, subvolid),
1555 BTREE_ITER_CACHED, subvolume);
1556 ret = PTR_ERR_OR_ZERO(n);
1557 if (unlikely(ret)) {
/* Unlinking a nonexistent subvolume indicates inconsistency: */
1558 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1559 "missing subvolume %u", subvolid);
1563 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
1564 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume rooted at @inode; when @src_subvolid is nonzero
 * the new subvolume is a snapshot of that source subvolume. The new
 * subvolume id and its snapshot id are returned through the out
 * parameters (*new_subvolid is elided from this view's parameter list).
 */
1568 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
1571 u32 *new_snapshotid,
1574 struct bch_fs *c = trans->c;
1575 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
1576 struct bkey_i_subvolume *new_subvol = NULL;
1577 struct bkey_i_subvolume *src_subvol = NULL;
1578 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
/* Find a free slot in the subvolumes btree for the new subvolume: */
1581 ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
1582 BTREE_ID_subvolumes, POS(0, U32_MAX));
/* Remap the generic "no free slot" error to a subvolume-specific one: */
1583 if (ret == -BCH_ERR_ENOSPC_btree_slot)
1584 ret = -BCH_ERR_ENOSPC_subvolume_create;
/* Subvolume ids to associate with the snapshot node(s) created below;
 * snapshot_subvols[1] is only used when snapshotting (src_subvolid != 0): */
1588 snapshot_subvols[0] = dst_iter.pos.offset;
1589 snapshot_subvols[1] = src_subvolid;
1592 /* Creating a snapshot: */
/* Load the source subvolume mutable — it must be repointed at a fresh
 * snapshot node of its own: */
1594 src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
1595 BTREE_ID_subvolumes, POS(0, src_subvolid),
1596 BTREE_ITER_CACHED, subvolume);
1597 ret = PTR_ERR_OR_ZERO(src_subvol);
1598 if (unlikely(ret)) {
1599 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
1600 "subvolume %u not found", src_subvolid);
/* New snapshot node(s) branch off the source's current snapshot: */
1604 parent = le32_to_cpu(src_subvol->v.snapshot);
/* Two nodes when snapshotting (one for the new subvol, one for the
 * source), one node for a plain create: */
1607 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1609 src_subvolid ? 2 : 1);
/* Repoint the source subvolume at its new snapshot node: */
1614 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1615 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
/* Allocate and fill the new subvolume key at the free slot: */
1620 new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
1621 ret = PTR_ERR_OR_ZERO(new_subvol);
1625 new_subvol->v.flags = 0;
1626 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
1627 new_subvol->v.inode = cpu_to_le64(inode);
1628 new_subvol->v.parent = cpu_to_le32(src_subvolid);
/* Creation time; hi word unused for current timestamps: */
1629 new_subvol->v.otime.lo = cpu_to_le64(bch2_current_time(c));
1630 new_subvol->v.otime.hi = 0;
1632 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
/* SNAP flag marks subvolumes created as snapshots of another: */
1633 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1635 *new_subvolid = new_subvol->k.p.offset;
1636 *new_snapshotid = new_nodes[0];
1638 bch2_trans_iter_exit(trans, &src_iter);
1639 bch2_trans_iter_exit(trans, &dst_iter);
1643 int bch2_fs_subvolumes_init(struct bch_fs *c)
1645 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1646 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1647 bch2_subvolume_wait_for_pagecache_and_delete);
1648 mutex_init(&c->snapshots_unlinked_lock);