1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
11 static void bch2_delete_dead_snapshots_work(struct work_struct *);
12 static void bch2_delete_dead_snapshots(struct bch_fs *);
/*
 * Debug pretty-printer for a KEY_TYPE_snapshot key: emits the SUBVOL and
 * DELETED flags, the parent node id, both child node ids and the owning
 * subvolume id into @out.
 * NOTE(review): this extract elides some source lines (braces etc. not
 * visible); code below is reproduced byte-identical.
 */
14 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
17 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
19 pr_buf(out, "is_subvol %llu deleted %llu parent %u children %u %u subvol %u",
20 BCH_SNAPSHOT_SUBVOL(s.v),
21 BCH_SNAPSHOT_DELETED(s.v),
22 le32_to_cpu(s.v->parent),
23 le32_to_cpu(s.v->children[0]),
24 le32_to_cpu(s.v->children[1]),
25 le32_to_cpu(s.v->subvol));
/*
 * Validate a snapshot key before it is accepted into the btree.
 * Returns NULL when valid, otherwise a static string describing the problem.
 *
 * Invariants checked here (visible in this extract):
 *  - key position must lie in [POS(0, 1), POS(0, U32_MAX)]
 *  - value size must be exactly sizeof(struct bch_snapshot)
 *  - a nonzero parent id must be strictly greater than this node's id
 *  - children are stored normalized, children[0] >= children[1]
 *  - the two children must not be duplicates
 *  - each nonzero child id must be strictly less than this node's id
 * NOTE(review): some lines (declarations of id/i, early returns) are elided
 * in this extract; code reproduced byte-identical.
 */
28 const char *bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k)
30 struct bkey_s_c_snapshot s;
33 if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
34 bkey_cmp(k.k->p, POS(0, 1)) < 0)
37 if (bkey_val_bytes(k.k) != sizeof(struct bch_snapshot))
38 return "bad val size";
40 s = bkey_s_c_to_snapshot(k);
/* Parent ids grow upward: a parent must have a higher id than its child. */
42 id = le32_to_cpu(s.v->parent);
43 if (id && id <= k.k->p.offset)
44 return "bad parent node";
46 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]))
47 return "children not normalized";
49 if (s.v->children[0] &&
50 s.v->children[0] == s.v->children[1])
51 return "duplicate child nodes";
/* Children always have lower ids than their parent. */
53 for (i = 0; i < 2; i++) {
54 id = le32_to_cpu(s.v->children[i]);
56 if (id >= k.k->p.offset)
57 return "bad child node";
/*
 * Trigger that mirrors a snapshot key update into the in-memory snapshot
 * table (c->snapshots, a genradix).  Entries are indexed by
 * U32_MAX - snapshot id — presumably so that higher (newer) snapshot ids
 * land at low radix offsets; TODO confirm against snapshot_t().
 * NOTE(review): allocation-failure handling and the non-snapshot-key branch
 * are elided in this extract; code reproduced byte-identical.
 */
63 int bch2_mark_snapshot(struct bch_fs *c,
64 struct bkey_s_c old, struct bkey_s_c new,
65 u64 journal_seq, unsigned flags)
69 t = genradix_ptr_alloc(&c->snapshots,
70 U32_MAX - new.k->p.offset,
75 if (new.k->type == KEY_TYPE_snapshot) {
76 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
/* Cache the on-disk topology fields in native endianness. */
78 t->parent = le32_to_cpu(s.v->parent);
79 t->children[0] = le32_to_cpu(s.v->children[0]);
80 t->children[1] = le32_to_cpu(s.v->children[1]);
/* subvol is only meaningful when the SUBVOL flag is set. */
81 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
/*
 * Look up snapshot node @id in the snapshots btree and copy its value into
 * *@s.  Returns 0 on success, -ENOENT if the key is missing or of the wrong
 * type, or a btree error from peek_slot.
 * BTREE_ITER_WITH_UPDATES makes pending (uncommitted) updates in this
 * transaction visible to the lookup.
 */
92 static int snapshot_lookup(struct btree_trans *trans, u32 id,
93 struct bch_snapshot *s)
95 struct btree_iter iter;
99 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
100 BTREE_ITER_WITH_UPDATES);
101 k = bch2_btree_iter_peek_slot(&iter);
102 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_snapshot ? 0 : -ENOENT;
105 *s = *bkey_s_c_to_snapshot(k).v;
107 bch2_trans_iter_exit(trans, &iter);
/*
 * Return whether snapshot node @id is live (not marked DELETED).
 * Visible behavior: looks the node up with transaction-restart handling and
 * returns the negated DELETED flag; logs if the node is missing.
 * NOTE(review): the early-return for id == 0 / error propagation lines are
 * elided in this extract — presumably errors are returned to the caller;
 * verify against the full source.
 */
111 static int snapshot_live(struct btree_trans *trans, u32 id)
113 struct bch_snapshot v;
119 ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
121 bch_err(trans->c, "snapshot node %u not found", id);
125 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Walk the whole snapshots btree and compute each node's equivalence class
 * (snapshot_t()->equiv).  From the visible code: a node with exactly one
 * live child inherits that child's equivalence class — i.e. interior nodes
 * with a single live descendant chain are collapsed together.
 * NOTE(review): the nr_live/live_idx accumulation inside the child loop and
 * the nr_live != 1 branch are elided in this extract.
 */
128 static int bch2_snapshots_set_equiv(struct btree_trans *trans)
130 struct bch_fs *c = trans->c;
131 struct btree_iter iter;
133 struct bkey_s_c_snapshot snap;
137 for_each_btree_key(trans, iter, BTREE_ID_snapshots,
138 POS_MIN, 0, k, ret) {
139 u32 id = k.k->p.offset, child[2];
140 unsigned nr_live = 0, live_idx;
/* Skip non-snapshot keys (e.g. whiteouts). */
142 if (k.k->type != KEY_TYPE_snapshot)
145 snap = bkey_s_c_to_snapshot(k);
146 child[0] = le32_to_cpu(snap.v->children[0]);
147 child[1] = le32_to_cpu(snap.v->children[1]);
/* Count live children to decide whether this node can be collapsed. */
149 for (i = 0; i < 2; i++) {
150 ret = snapshot_live(trans, child[i]);
/* Exactly one live child: share its equivalence class. */
159 snapshot_t(c, id)->equiv = nr_live == 1
160 ? snapshot_t(c, child[live_idx])->equiv
163 bch2_trans_iter_exit(trans, &iter);
166 bch_err(c, "error walking snapshots: %i", ret);
/*
 * Consistency-check a single snapshot node @s (fsck-style):
 *  - its subvolume must exist, and the SUBVOL flag must match whether that
 *    subvolume points back at this snapshot id
 *  - its parent (if any) must exist and list this node among its children
 *  - each child must exist and point back at this node as its parent
 * Errors are logged via bch_err; error/return plumbing between checks is
 * elided in this extract.
 */
172 static int bch2_snapshot_check(struct btree_trans *trans,
173 struct bkey_s_c_snapshot s)
175 struct bch_subvolume subvol;
176 struct bch_snapshot v;
/* 1) subvolume back-reference */
180 id = le32_to_cpu(s.v->subvol);
181 ret = lockrestart_do(trans, bch2_subvolume_get(trans, id, 0, false, &subvol));
183 bch_err(trans->c, "snapshot node %llu has nonexistent subvolume %u",
/* SUBVOL flag must agree with the subvolume's snapshot pointer. */
188 if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
189 bch_err(trans->c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
/* 2) parent must exist and reference us as a child */
194 id = le32_to_cpu(s.v->parent);
196 ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
198 bch_err(trans->c, "snapshot node %llu has nonexistent parent %u",
203 if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
204 le32_to_cpu(v.children[1]) != s.k->p.offset) {
205 bch_err(trans->c, "snapshot parent %u missing pointer to child %llu",
/* 3) each child must exist and reference us as its parent */
211 for (i = 0; i < 2 && s.v->children[i]; i++) {
212 id = le32_to_cpu(s.v->children[i]);
214 ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
216 bch_err(trans->c, "snapshot node %llu has nonexistent child %u",
221 if (le32_to_cpu(v.parent) != s.k->p.offset) {
222 bch_err(trans->c, "snapshot child %u has wrong parent (got %u should be %llu)",
223 id, le32_to_cpu(v.parent), s.k->p.offset);
/*
 * Filesystem-wide snapshot consistency check:
 *  pass 1: run bch2_snapshot_check() on every snapshot node
 *  pass 2: verify every subvolume points at an existing snapshot
 * NOTE(review): error-exit paths and loop-control lines between the visible
 * statements are elided in this extract; code reproduced byte-identical.
 */
231 int bch2_fs_snapshots_check(struct bch_fs *c)
233 struct btree_trans trans;
234 struct btree_iter iter;
236 struct bch_snapshot s;
240 bch2_trans_init(&trans, c, 0, 0);
242 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
243 POS_MIN, 0, k, ret) {
244 if (k.k->type != KEY_TYPE_snapshot)
247 ret = bch2_snapshot_check(&trans, bkey_s_c_to_snapshot(k));
251 bch2_trans_iter_exit(&trans, &iter);
254 bch_err(c, "error %i checking snapshots", ret);
/* Second pass: subvolumes -> snapshots direction. */
258 for_each_btree_key(&trans, iter, BTREE_ID_subvolumes,
259 POS_MIN, 0, k, ret) {
260 if (k.k->type != KEY_TYPE_subvolume)
263 id = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
264 ret = snapshot_lookup(&trans, id, &s);
/* Re-peek after the lookup — presumably to recover from a
 * transaction restart invalidating the iterator; TODO confirm. */
267 k = bch2_btree_iter_peek(&iter);
269 } else if (ret == -ENOENT)
270 bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
275 bch2_trans_iter_exit(&trans, &iter);
277 bch2_trans_exit(&trans);
/* Teardown: free the in-memory snapshot table built by bch2_mark_snapshot(). */
281 void bch2_fs_snapshots_exit(struct bch_fs *c)
283 genradix_free(&c->snapshots);
/*
 * Mount-time initialization: walk the snapshots btree, populate the
 * in-memory snapshot table via bch2_mark_snapshot(), note whether any nodes
 * are flagged DELETED, compute equivalence classes, and — if deleted nodes
 * were found — resume the interrupted dead-snapshot deletion.
 * NOTE(review): error-handling lines between the visible statements are
 * elided in this extract.
 */
286 int bch2_fs_snapshots_start(struct bch_fs *c)
288 struct btree_trans trans;
289 struct btree_iter iter;
291 bool have_deleted = false;
294 bch2_trans_init(&trans, c, 0, 0);
296 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
297 POS_MIN, 0, k, ret) {
/* Snapshot ids live in [1, U32_MAX]; stop past the end. */
298 if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
301 if (k.k->type != KEY_TYPE_snapshot) {
302 bch_err(c, "found wrong key type %u in snapshot node table",
307 if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
/* bkey_s_c_null as "old": first-time insert into the in-memory table. */
310 ret = bch2_mark_snapshot(c, bkey_s_c_null, k, 0, 0);
314 bch2_trans_iter_exit(&trans, &iter);
319 ret = bch2_snapshots_set_equiv(&trans);
323 bch2_trans_exit(&trans);
325 if (!ret && have_deleted) {
326 bch_info(c, "restarting deletion of dead snapshots");
/* Presumably one of these paths is chosen by context (direct call vs.
 * queued work) in the full source — TODO confirm against upstream. */
328 bch2_delete_dead_snapshots_work(&c->snapshot_delete_work);
330 bch2_delete_dead_snapshots(c);
338 * Mark a snapshot as deleted, for future cleanup:
/*
 * Sets BCH_SNAPSHOT_DELETED on node @id via a transactional update; a no-op
 * if the flag is already set.  The node itself is removed later by
 * bch2_snapshot_node_delete().
 */
340 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
342 struct btree_iter iter;
344 struct bkey_i_snapshot *s;
347 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
349 k = bch2_btree_iter_peek_slot(&iter);
354 if (k.k->type != KEY_TYPE_snapshot) {
355 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
360 /* already deleted? */
361 if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
/* Transaction-scoped allocation: freed automatically with the trans. */
364 s = bch2_trans_kmalloc(trans, sizeof(*s));
365 ret = PTR_ERR_OR_ZERO(s);
369 bkey_reassemble(&s->k_i, k);
371 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
372 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
376 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically delete snapshot node @id (which must already be flagged
 * DELETED) and unlink it from its parent: the matching child slot in the
 * parent is zeroed and the children pair is re-normalized (children[0] must
 * stay >= children[1], per bch2_snapshot_invalid()).
 * NOTE(review): goto labels / early-return lines are elided in this extract.
 */
380 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
382 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
384 struct bkey_s_c_snapshot s;
385 struct bkey_i_snapshot *parent;
390 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
392 k = bch2_btree_iter_peek_slot(&iter);
397 if (k.k->type != KEY_TYPE_snapshot) {
398 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
403 s = bkey_s_c_to_snapshot(k);
/* Caller contract: only nodes already marked deleted may be removed. */
405 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
406 parent_id = le32_to_cpu(s.v->parent);
409 bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
412 k = bch2_btree_iter_peek_slot(&p_iter);
417 if (k.k->type != KEY_TYPE_snapshot) {
418 bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
423 parent = bch2_trans_kmalloc(trans, sizeof(*parent));
424 ret = PTR_ERR_OR_ZERO(parent);
428 bkey_reassemble(&parent->k_i, k);
/* Find which child slot points at us. */
430 for (i = 0; i < 2; i++)
431 if (le32_to_cpu(parent->v.children[i]) == id)
435 bch_err(trans->c, "snapshot %u missing child pointer to %u",
438 parent->v.children[i] = 0;
/* Keep children normalized: children[0] >= children[1]. */
440 if (le32_to_cpu(parent->v.children[0]) <
441 le32_to_cpu(parent->v.children[1]))
442 swap(parent->v.children[0],
443 parent->v.children[1]);
445 ret = bch2_trans_update(trans, &p_iter, &parent->k_i, 0);
450 ret = bch2_btree_delete_at(trans, &iter, 0);
452 bch2_trans_iter_exit(trans, &p_iter);
453 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent.
 * New ids are taken from the high end of the table (walking backwards with
 * prev_slot from the last existing key — presumably so new ids are the
 * highest free ones; TODO confirm).  Each new node records its owning
 * subvolume from @snapshot_subvols and is marked SUBVOL; the parent then
 * gets its children pointers set and its own SUBVOL flag cleared (a node
 * with children no longer directly backs a subvolume).
 * Returns the new ids through @new_snapids.
 * NOTE(review): error paths and some brace/return lines are elided in this
 * extract.
 */
457 static int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
459 u32 *snapshot_subvols,
462 struct btree_iter iter;
463 struct bkey_i_snapshot *n;
468 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
469 POS_MIN, BTREE_ITER_INTENT);
470 k = bch2_btree_iter_peek(&iter);
475 for (i = 0; i < nr_snapids; i++) {
/* Step back one slot to find a free id below the current key. */
476 k = bch2_btree_iter_prev_slot(&iter);
/* Ran out of ids (hit offset 0): table exhausted. */
481 if (!k.k || !k.k->p.offset) {
486 n = bch2_trans_kmalloc(trans, sizeof(*n));
487 ret = PTR_ERR_OR_ZERO(n);
491 bkey_snapshot_init(&n->k_i);
494 n->v.parent = cpu_to_le32(parent);
495 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
497 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
499 bch2_trans_update(trans, &iter, &n->k_i, 0);
/* Mirror the new node into the in-memory table immediately. */
501 ret = bch2_mark_snapshot(trans->c, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0, 0);
505 new_snapids[i] = iter.pos.offset;
/* Now update the parent node to point at its new children. */
509 bch2_btree_iter_set_pos(&iter, POS(0, parent));
510 k = bch2_btree_iter_peek(&iter);
515 if (k.k->type != KEY_TYPE_snapshot) {
516 bch_err(trans->c, "snapshot %u not found", parent);
521 n = bch2_trans_kmalloc(trans, sizeof(*n));
522 ret = PTR_ERR_OR_ZERO(n);
526 bkey_reassemble(&n->k_i, k);
528 if (n->v.children[0] || n->v.children[1]) {
529 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
534 n->v.children[0] = cpu_to_le32(new_snapids[0]);
535 n->v.children[1] = cpu_to_le32(new_snapids[1]);
536 SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
537 bch2_trans_update(trans, &iter, &n->k_i, 0);
540 bch2_trans_iter_exit(trans, &iter);
544 /* List of snapshot IDs that are being deleted: */
/* Growable array of u32 ids; fields (nr/size/d) are elided in this extract. */
545 struct snapshot_id_list {
/* Linear membership test — fine for the small lists used here. */
551 static bool snapshot_list_has_id(struct snapshot_id_list *s, u32 id)
555 for (i = 0; i < s->nr; i++)
/*
 * Append @id to the list, growing the backing array geometrically (doubling,
 * minimum 8 entries) via krealloc.  Duplicate insertion is a bug (BUG_ON).
 * NOTE(review): the append itself and the GFP flags / error return are
 * elided in this extract.
 */
561 static int snapshot_id_add(struct snapshot_id_list *s, u32 id)
563 BUG_ON(snapshot_list_has_id(s, id));
565 if (s->nr == s->size) {
566 size_t new_size = max(8U, s->size * 2);
567 void *n = krealloc(s->d,
568 new_size * sizeof(s->d[0]),
571 pr_err("error allocating snapshot ID list");
/*
 * For one btree @btree_id, delete every key that belongs to a snapshot in
 * @deleted, or whose equivalence class was already seen at the same position
 * (a redundant duplicate within an equivalence class).  Iterates across all
 * snapshots (BTREE_ITER_ALL_SNAPSHOTS) with a fresh bch2_trans_begin() per
 * step so the loop survives transaction restarts.
 * NOTE(review): the last_pos reset of equiv_seen and the equiv_seen cleanup
 * are elided in this extract.
 */
583 static int bch2_snapshot_delete_keys_btree(struct btree_trans *trans,
584 struct snapshot_id_list *deleted,
585 enum btree_id btree_id)
587 struct bch_fs *c = trans->c;
588 struct btree_iter iter;
590 struct snapshot_id_list equiv_seen = { 0 };
591 struct bpos last_pos = POS_MIN;
595 * XXX: We should also delete whiteouts that no longer overwrite
599 bch2_trans_iter_init(trans, &iter, btree_id, POS_MIN,
602 BTREE_ITER_NOT_EXTENTS|
603 BTREE_ITER_ALL_SNAPSHOTS);
605 while ((bch2_trans_begin(trans),
606 (k = bch2_btree_iter_peek(&iter)).k) &&
607 !(ret = bkey_err(k))) {
608 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
/* New position: presumably equiv_seen is reset here — lines elided. */
610 if (bkey_cmp(k.k->p, last_pos))
614 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
615 snapshot_list_has_id(&equiv_seen, equiv)) {
/* Inode keys may be dirty in the key cache; flush before delete. */
616 if (btree_id == BTREE_ID_inodes &&
617 bch2_btree_key_cache_flush(trans, btree_id, iter.pos))
620 ret = __bch2_trans_do(trans, NULL, NULL,
622 bch2_btree_iter_traverse(&iter) ?:
623 bch2_btree_delete_at(trans, &iter,
624 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
/* Keeper: remember its equivalence class for this position. */
628 ret = snapshot_id_add(&equiv_seen, equiv);
633 bch2_btree_iter_advance(&iter);
635 bch2_trans_iter_exit(trans, &iter);
/*
 * Background worker that garbage-collects dead snapshots, in four phases:
 *  1) mark DELETED every snapshot node that is neither a subvolume leaf nor
 *     has any live children
 *  2) recompute equivalence classes
 *  3) collect all DELETED node ids into 'deleted'
 *  4) delete their keys from every snapshot-aware btree, then delete the
 *     nodes themselves
 * Drops the c->writes ref taken by bch2_delete_dead_snapshots() on exit.
 * NOTE(review): error-exit gotos and the 'deleted' list cleanup are elided
 * in this extract.
 */
642 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
644 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
645 struct btree_trans trans;
646 struct btree_iter iter;
648 struct bkey_s_c_snapshot snap;
649 struct snapshot_id_list deleted = { 0 };
650 u32 i, id, children[2];
653 bch2_trans_init(&trans, c, 0, 0);
656 * For every snapshot node: If we have no live children and it's not
657 * pointed to by a subvolume, delete it:
659 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
660 POS_MIN, 0, k, ret) {
661 if (k.k->type != KEY_TYPE_snapshot)
664 snap = bkey_s_c_to_snapshot(k);
665 if (BCH_SNAPSHOT_DELETED(snap.v) ||
666 BCH_SNAPSHOT_SUBVOL(snap.v))
669 children[0] = le32_to_cpu(snap.v->children[0]);
670 children[1] = le32_to_cpu(snap.v->children[1]);
/* Nonzero result from either means a live child exists — skip. */
672 ret = snapshot_live(&trans, children[0]) ?:
673 snapshot_live(&trans, children[1]);
679 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
680 bch2_snapshot_node_set_deleted(&trans, iter.pos.offset));
682 bch_err(c, "error deleting snapshot %llu: %i", iter.pos.offset, ret);
686 bch2_trans_iter_exit(&trans, &iter);
689 bch_err(c, "error walking snapshots: %i", ret);
693 ret = bch2_snapshots_set_equiv(&trans);
/* Phase 3: gather every node now flagged DELETED. */
697 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
698 POS_MIN, 0, k, ret) {
699 if (k.k->type != KEY_TYPE_snapshot)
702 snap = bkey_s_c_to_snapshot(k);
703 if (BCH_SNAPSHOT_DELETED(snap.v)) {
704 ret = snapshot_id_add(&deleted, k.k->p.offset);
709 bch2_trans_iter_exit(&trans, &iter);
712 bch_err(c, "error walking snapshots: %i", ret);
/* Phase 4a: purge dead keys from every btree that carries snapshots. */
716 for (id = 0; id < BTREE_ID_NR; id++) {
717 if (!btree_type_has_snapshots(id))
720 ret = bch2_snapshot_delete_keys_btree(&trans, &deleted, id);
722 bch_err(c, "error deleting snapshot keys: %i", ret);
/* Phase 4b: remove the snapshot nodes themselves. */
727 for (i = 0; i < deleted.nr; i++) {
728 ret = __bch2_trans_do(&trans, NULL, NULL, 0,
729 bch2_snapshot_node_delete(&trans, deleted.d[i]));
731 bch_err(c, "error deleting snapshot %u: %i",
738 bch2_trans_exit(&trans);
/* Balances percpu_ref_tryget() in bch2_delete_dead_snapshots(). */
739 percpu_ref_put(&c->writes);
/*
 * Kick off dead-snapshot deletion: take a c->writes ref (bail if the fs is
 * going read-only) and queue the worker; if already queued, drop the ref —
 * the pending worker's own ref covers the work.
 */
742 static void bch2_delete_dead_snapshots(struct bch_fs *c)
744 if (unlikely(!percpu_ref_tryget(&c->writes)))
747 if (!queue_work(system_long_wq, &c->snapshot_delete_work))
748 percpu_ref_put(&c->writes);
/*
 * Commit hook: schedule dead-snapshot deletion after the transaction that
 * marked a snapshot deleted has committed (see bch2_subvolume_delete()).
 */
751 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
752 struct btree_trans_commit_hook *h)
754 bch2_delete_dead_snapshots(trans->c);
/*
 * Validate a subvolume key: position must lie within
 * [SUBVOL_POS_MIN, SUBVOL_POS_MAX] and the value must be exactly
 * sizeof(struct bch_subvolume).  Returns NULL when valid, else an error
 * string.
 */
760 const char *bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k)
762 if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0)
763 return "invalid pos";
765 if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
766 return "invalid pos";
768 if (bkey_val_bytes(k.k) != sizeof(struct bch_subvolume))
769 return "bad val size";
/* Debug pretty-printer: emit a subvolume's root inode and snapshot id. */
774 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
777 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
779 pr_buf(out, "root %llu snapshot id %u",
780 le64_to_cpu(s.v->inode),
781 le32_to_cpu(s.v->snapshot));
/*
 * Look up subvolume @subvol and copy its value into *@s.
 * Returns 0, -ENOENT when missing, or a btree error; when
 * @inconsistent_if_not_found is set, a missing subvolume additionally
 * reports filesystem inconsistency.
 * NOTE(review): an iter-flags parameter appears to be elided between the
 * visible parameter lines in this extract (callers pass 4 args + s).
 */
784 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
785 bool inconsistent_if_not_found,
787 struct bch_subvolume *s)
789 struct btree_iter iter;
793 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
795 k = bch2_btree_iter_peek_slot(&iter);
796 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
798 if (ret == -ENOENT && inconsistent_if_not_found)
799 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
801 *s = *bkey_s_c_to_subvolume(k).v;
803 bch2_trans_iter_exit(trans, &iter);
/*
 * Convenience wrapper: resolve subvolume @subvol to its current snapshot id
 * in *@snapid.  A missing subvolume is treated as fs inconsistency
 * (inconsistent_if_not_found = true).
 */
807 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
810 struct bch_subvolume s;
813 ret = bch2_subvolume_get(trans, subvol, true,
815 BTREE_ITER_WITH_UPDATES,
818 *snapid = le32_to_cpu(s.snapshot);
822 /* XXX: mark snapshot id for deletion, walk btree and delete: */
/*
 * Delete subvolume @subvolid: remove its key, mark its snapshot node
 * DELETED, and register a commit hook that queues the dead-snapshot GC
 * worker once this transaction commits.
 *
 * @deleting_snapshot: tri-state — when >= 0 it must match the subvolume's
 * SNAP flag (i.e. whether it is a snapshot), otherwise the delete is
 * refused; the elided branch presumably returns an error.
 */
823 int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid,
824 int deleting_snapshot)
826 struct btree_iter iter;
828 struct bkey_s_c_subvolume subvol;
829 struct btree_trans_commit_hook *h;
830 struct bkey_i *delete;
834 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
838 k = bch2_btree_iter_peek_slot(&iter);
843 if (k.k->type != KEY_TYPE_subvolume) {
844 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
849 subvol = bkey_s_c_to_subvolume(k);
850 snapid = le32_to_cpu(subvol.v->snapshot);
852 if (deleting_snapshot >= 0 &&
853 deleting_snapshot != BCH_SUBVOLUME_SNAP(subvol.v)) {
/* Whiteout: an empty key at the subvolume's position deletes it. */
858 delete = bch2_trans_kmalloc(trans, sizeof(*delete));
859 ret = PTR_ERR_OR_ZERO(delete);
863 bkey_init(&delete->k);
864 delete->k.p = iter.pos;
865 ret = bch2_trans_update(trans, &iter, delete, 0);
869 ret = bch2_snapshot_node_set_deleted(trans, snapid);
/* The hook must live until commit, hence transaction-scoped alloc. */
871 h = bch2_trans_kmalloc(trans, sizeof(*h));
872 ret = PTR_ERR_OR_ZERO(h);
876 h->fn = bch2_delete_dead_snapshots_hook;
877 bch2_trans_commit_hook(trans, h);
879 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume rooted at @inode, optionally as a snapshot of
 * @src_subvolid (when nonzero):
 *  - find a free slot in the subvolumes btree (first deleted slot in
 *    [SUBVOL_POS_MIN, SUBVOL_POS_MAX])
 *  - create one snapshot node (plain create) or two (snapshot create: one
 *    for the new subvolume, one replacing the source's) as children of the
 *    source's current snapshot
 *  - when snapshotting, repoint the source subvolume at its new node
 *  - insert the new subvolume key and return its id and snapshot id via
 *    the out-parameters (elided from the visible signature).
 * NOTE(review): several parameter and error-path lines are elided in this
 * extract; code reproduced byte-identical.
 */
883 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
889 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
890 struct bkey_i_subvolume *new_subvol = NULL;
891 struct bkey_i_subvolume *src_subvol = NULL;
893 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
/* Scan slots for the first free (deleted) subvolume position. */
896 for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
897 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
898 if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
900 if (bkey_deleted(k.k))
908 snapshot_subvols[0] = dst_iter.pos.offset;
909 snapshot_subvols[1] = src_subvolid;
912 /* Creating a snapshot: */
913 src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
914 ret = PTR_ERR_OR_ZERO(src_subvol);
918 bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
919 POS(0, src_subvolid),
922 k = bch2_btree_iter_peek_slot(&src_iter);
927 if (k.k->type != KEY_TYPE_subvolume) {
928 bch_err(trans->c, "subvolume %u not found", src_subvolid);
933 bkey_reassemble(&src_subvol->k_i, k);
934 parent = le32_to_cpu(src_subvol->v.snapshot);
/* One node for a fresh subvolume, two when snapshotting a source. */
937 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
939 src_subvolid ? 2 : 1);
/* The source subvolume moves onto its own new snapshot node. */
944 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
945 bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
948 new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
949 ret = PTR_ERR_OR_ZERO(new_subvol);
953 bkey_subvolume_init(&new_subvol->k_i);
954 new_subvol->v.flags = 0;
955 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
956 new_subvol->v.inode = cpu_to_le64(inode);
957 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
/* SNAP flag distinguishes snapshots from ordinary subvolumes. */
958 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
959 new_subvol->k.p = dst_iter.pos;
960 bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
962 *new_subvolid = new_subvol->k.p.offset;
963 *new_snapshotid = new_nodes[0];
965 bch2_trans_iter_exit(trans, &src_iter);
966 bch2_trans_iter_exit(trans, &dst_iter);
970 int bch2_fs_subvolumes_init(struct bch_fs *c)
972 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);