1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
10 #include "subvolume.h"
12 #include <linux/random.h>
14 static int bch2_subvolume_delete(struct btree_trans *, u32);
/*
 * fsck pass over a single subvolume key: verify the snapshot it points to
 * exists, delete subvolumes flagged UNLINKED, and (for non-snapshot
 * subvolumes) repair the SNAP flag when this is not the tree's master
 * subvolume.
 *
 * NOTE(review): this excerpt appears truncated — several declarations and
 * closing braces are missing; verify against the complete source.
 */
16 static int check_subvol(struct btree_trans *trans,
17 struct btree_iter *iter,
20 struct bch_fs *c = trans->c;
21 struct bkey_s_c_subvolume subvol;
22 struct bch_snapshot snapshot;
26 if (k.k->type != KEY_TYPE_subvolume)
29 subvol = bkey_s_c_to_subvolume(k);
30 snapid = le32_to_cpu(subvol.v->snapshot);
31 ret = bch2_snapshot_lookup(trans, snapid, &snapshot);
33 if (bch2_err_matches(ret, ENOENT))
34 bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
35 k.k->p.offset, snapid);
/* Unlinked subvolume: delete it, then force a nested transaction restart */
39 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
42 ret = bch2_subvolume_delete(trans, iter->pos.offset);
44 bch_err(c, "error deleting subvolume %llu: %s",
45 iter->pos.offset, bch2_err_str(ret));
46 return ret ?: -BCH_ERR_transaction_restart_nested;
49 if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
50 u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
52 struct bch_snapshot_tree st;
55 snapshot_tree = snapshot_t(c, snapshot_root)->tree;
58 ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
60 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
61 "%s: snapshot tree %u not found", __func__, snapshot_tree);
/*
 * Not flagged as a snapshot but also not the tree's master subvolume:
 * repair by setting the SNAP flag on a mutable copy of the key.
 */
66 if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
67 "subvolume %llu is not set as snapshot but is not master subvolume",
69 struct bkey_i_subvolume *s =
70 bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
71 ret = PTR_ERR_OR_ZERO(s);
75 SET_BCH_SUBVOLUME_SNAP(&s->v, true);
/*
 * Run check_subvol() against every key in the subvolumes btree,
 * committing repairs as it goes (LAZY_RW|NOFAIL commit flags).
 */
83 int bch2_check_subvols(struct bch_fs *c)
85 struct btree_iter iter;
89 ret = bch2_trans_run(c,
90 for_each_btree_key_commit(&trans, iter,
91 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
92 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
93 check_subvol(&trans, &iter, k)));
100 * Mark a snapshot as deleted, for future cleanup:
/*
 * Sets DELETED and clears SUBVOL on the snapshot node @id; actual removal
 * happens later in the dead-snapshot cleanup path. Inconsistency is flagged
 * if the node does not exist. Returns 0 or an error.
 */
102 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
104 struct btree_iter iter;
105 struct bkey_i_snapshot *s;
108 s = bch2_bkey_get_mut_typed(trans, &iter,
109 BTREE_ID_snapshots, POS(0, id),
111 ret = PTR_ERR_OR_ZERO(s);
113 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
114 trans->c, "missing snapshot %u", id);
118 /* already deleted? */
119 if (BCH_SNAPSHOT_DELETED(&s->v))
122 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
123 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
126 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically remove snapshot node @id (which must already be flagged
 * DELETED): unlink it from its parent's children[], keep children[] sorted
 * descending, and — when deleting a tree root — repoint or delete the
 * snapshot_tree entry. All iterators are torn down on exit.
 *
 * NOTE(review): excerpt appears truncated (missing braces/returns) —
 * verify against the complete source.
 */
130 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
132 struct bch_fs *c = trans->c;
133 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
134 struct btree_iter tree_iter = (struct btree_iter) { NULL };
135 struct bkey_s_c_snapshot s;
140 s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
141 BTREE_ITER_INTENT, snapshot);
143 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
144 "missing snapshot %u", id);
149 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
150 parent_id = le32_to_cpu(s.v->parent);
/* Remove @id from the parent's children[] array */
153 struct bkey_i_snapshot *parent;
155 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
156 BTREE_ID_snapshots, POS(0, parent_id),
158 ret = PTR_ERR_OR_ZERO(parent);
160 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
161 "missing snapshot %u", parent_id);
165 for (i = 0; i < 2; i++)
166 if (le32_to_cpu(parent->v.children[i]) == id)
170 bch_err(c, "snapshot %u missing child pointer to %u",
173 parent->v.children[i] = 0;
/* Keep children[] ordered: children[0] >= children[1] */
175 if (le32_to_cpu(parent->v.children[0]) <
176 le32_to_cpu(parent->v.children[1]))
177 swap(parent->v.children[0],
178 parent->v.children[1]);
181 * We're deleting the root of a snapshot tree: update the
182 * snapshot_tree entry to point to the new root, or delete it if
183 * this is the last snapshot ID in this tree:
185 struct bkey_i_snapshot_tree *s_t;
187 BUG_ON(s.v->children[1]);
189 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
190 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
192 ret = PTR_ERR_OR_ZERO(s_t);
196 if (s.v->children[0]) {
197 s_t->v.root_snapshot = s.v->children[0];
/* No children left: whiteout the snapshot_tree entry */
199 s_t->k.type = KEY_TYPE_deleted;
200 set_bkey_val_u64s(&s_t->k, 0);
204 ret = bch2_btree_delete_at(trans, &iter, 0);
206 bch2_trans_iter_exit(trans, &tree_iter);
207 bch2_trans_iter_exit(trans, &p_iter);
208 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes under @parent in tree @tree,
 * scanning backwards from the end of the snapshots btree for free slots.
 * New IDs are returned through new_snapids[]; each node records its
 * subvolume, depth, and a sorted skiplist. Returns 0 or
 * -BCH_ERR_ENOSPC_snapshot_create when no IDs are free.
 */
212 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
214 u32 *snapshot_subvols,
217 struct bch_fs *c = trans->c;
218 struct btree_iter iter;
219 struct bkey_i_snapshot *n;
222 u32 depth = bch2_snapshot_depth(c, parent);
225 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
226 POS_MIN, BTREE_ITER_INTENT);
227 k = bch2_btree_iter_peek(&iter);
232 for (i = 0; i < nr_snapids; i++) {
/* Walk backwards looking for an unused snapshot ID slot */
233 k = bch2_btree_iter_prev_slot(&iter);
238 if (!k.k || !k.k->p.offset) {
239 ret = -BCH_ERR_ENOSPC_snapshot_create;
243 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
244 ret = PTR_ERR_OR_ZERO(n);
249 n->v.parent = cpu_to_le32(parent);
250 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
251 n->v.tree = cpu_to_le32(tree);
252 n->v.depth = cpu_to_le32(depth);
254 for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
255 n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
/* skiplist entries must be sorted */
257 bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
258 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
260 ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
261 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
265 new_snapids[i] = iter.pos.offset;
268 bch2_trans_iter_exit(trans, &iter);
273 * Create new snapshot IDs as children of an existing snapshot ID:
/*
 * @parent must currently be a leaf (no children); after the new IDs are
 * created, the parent's children[] is filled in and its SUBVOL flag is
 * cleared since it can no longer be pointed to by a subvolume directly.
 */
275 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
277 u32 *snapshot_subvols,
280 struct btree_iter iter;
281 struct bkey_i_snapshot *n_parent;
284 n_parent = bch2_bkey_get_mut_typed(trans, &iter,
285 BTREE_ID_snapshots, POS(0, parent),
287 ret = PTR_ERR_OR_ZERO(n_parent);
289 if (bch2_err_matches(ret, ENOENT))
290 bch_err(trans->c, "snapshot %u not found", parent);
294 if (n_parent->v.children[0] || n_parent->v.children[1]) {
295 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
300 ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
301 new_snapids, snapshot_subvols, nr_snapids);
305 n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
306 n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
307 n_parent->v.subvol = 0;
308 SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
310 bch2_trans_iter_exit(trans, &iter);
315 * Create a snapshot node that is the root of a new tree:
/*
 * Allocates a fresh snapshot_tree entry, creates the root snapshot node(s)
 * in it via create_snapids(), then records the tree's master subvolume and
 * root snapshot.
 */
317 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
319 u32 *snapshot_subvols,
322 struct bkey_i_snapshot_tree *n_tree;
325 n_tree = __bch2_snapshot_tree_create(trans);
326 ret = PTR_ERR_OR_ZERO(n_tree) ?:
327 create_snapids(trans, 0, n_tree->k.p.offset,
328 new_snapids, snapshot_subvols, nr_snapids);
332 n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
333 n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
/*
 * Entry point for snapshot-node creation. parent == 0 means "create a new
 * tree root" (exactly 1 ID); parent != 0 means "create children of parent"
 * (exactly 2 IDs) — the BUG_ONs enforce that invariant.
 */
337 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
339 u32 *snapshot_subvols,
342 BUG_ON((parent == 0) != (nr_snapids == 1));
343 BUG_ON((parent != 0) != (nr_snapids == 2));
346 ? bch2_snapshot_node_create_children(trans, parent,
347 new_snapids, snapshot_subvols, nr_snapids)
348 : bch2_snapshot_node_create_tree(trans,
349 new_snapids, snapshot_subvols, nr_snapids);
354 * If we have an unlinked inode in an internal snapshot node, and the inode
355 * really has been deleted in all child snapshots, how does this get cleaned up?
357 * first there is the problem of how keys that have been overwritten in all
358 * child snapshots get deleted (unimplemented?), but inodes may perhaps be
361 * also: unlinked inode in internal snapshot appears to not be getting deleted
362 * correctly if inode doesn't exist in leaf snapshots
/*
 * Per-key helper for the dead-snapshot sweep: delete @k if its snapshot is
 * on the @deleted list, or if an equivalent snapshot was already seen at
 * this position; otherwise record the equivalence class in @equiv_seen.
 * @last_pos tracks position changes (reset logic appears to be in lines
 * missing from this excerpt — verify against the full source).
 */
365 static int snapshot_delete_key(struct btree_trans *trans,
366 struct btree_iter *iter,
368 snapshot_id_list *deleted,
369 snapshot_id_list *equiv_seen,
370 struct bpos *last_pos)
372 struct bch_fs *c = trans->c;
373 u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
375 if (!bkey_eq(k.k->p, *last_pos))
379 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
380 snapshot_list_has_id(equiv_seen, equiv)) {
381 return bch2_btree_delete_at(trans, iter,
382 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
384 return snapshot_list_add(c, equiv_seen, equiv);
389 * For a given snapshot, if it doesn't have a subvolume that points to it, and
390 * it doesn't have child snapshot nodes - it's now redundant and we can mark it
/*
 * ... as deleted. Skips keys that are already DELETED or still referenced
 * by a subvolume; otherwise checks child liveness and marks the node
 * deleted via bch2_snapshot_node_set_deleted().
 */
393 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
396 struct bkey_s_c_snapshot snap;
400 if (k.k->type != KEY_TYPE_snapshot)
403 snap = bkey_s_c_to_snapshot(k);
404 if (BCH_SNAPSHOT_DELETED(snap.v) ||
405 BCH_SNAPSHOT_SUBVOL(snap.v))
408 children[0] = le32_to_cpu(snap.v->children[0]);
409 children[1] = le32_to_cpu(snap.v->children[1]);
411 ret = bch2_snapshot_live(trans, children[0]) ?:
412 bch2_snapshot_live(trans, children[1]);
417 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Main dead-snapshot cleanup pass:
 *  1) mark redundant snapshot nodes deleted,
 *  2) recompute snapshot equivalence classes,
 *  3) collect all DELETED node IDs,
 *  4) delete their keys from every snapshot-aware btree,
 *  5) physically remove the snapshot nodes.
 * Goes read-write early if the fs hasn't started yet.
 *
 * Fix: error message typo "deleleting" -> "deleting".
 * NOTE(review): excerpt appears truncated (missing braces/returns) —
 * verify against the complete source.
 */
421 int bch2_delete_dead_snapshots(struct bch_fs *c)
423 struct btree_trans trans;
424 struct btree_iter iter;
426 struct bkey_s_c_snapshot snap;
427 snapshot_id_list deleted = { 0 };
431 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
432 ret = bch2_fs_read_write_early(c);
434 bch_err(c, "error deleting dead snapshots: error going rw: %s", bch2_err_str(ret));
439 bch2_trans_init(&trans, c, 0, 0);
442 * For every snapshot node: If we have no live children and it's not
443 * pointed to by a subvolume, delete it:
445 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
448 bch2_delete_redundant_snapshot(&trans, &iter, k));
450 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
454 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
456 bch2_snapshot_set_equiv(&trans, k));
458 bch_err(c, "error in bch2_snapshots_set_equiv: %s", bch2_err_str(ret));
/* Collect every snapshot ID flagged DELETED */
462 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
463 POS_MIN, 0, k, ret) {
464 if (k.k->type != KEY_TYPE_snapshot)
467 snap = bkey_s_c_to_snapshot(k);
468 if (BCH_SNAPSHOT_DELETED(snap.v)) {
469 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
474 bch2_trans_iter_exit(&trans, &iter);
477 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
/* Delete keys belonging to deleted snapshots from every snapshot-aware btree */
481 for (id = 0; id < BTREE_ID_NR; id++) {
482 struct bpos last_pos = POS_MIN;
483 snapshot_id_list equiv_seen = { 0 };
485 if (!btree_type_has_snapshots(id))
488 ret = for_each_btree_key_commit(&trans, iter,
490 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
491 NULL, NULL, BTREE_INSERT_NOFAIL,
492 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
494 darray_exit(&equiv_seen);
497 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
/* Finally remove the snapshot nodes themselves, one commit each */
502 for (i = 0; i < deleted.nr; i++) {
503 ret = commit_do(&trans, NULL, NULL, 0,
504 bch2_snapshot_node_delete(&trans, deleted.data[i]));
506 bch_err(c, "error deleting snapshot %u: %s",
507 deleted.data[i], bch2_err_str(ret));
512 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
514 darray_exit(&deleted);
515 bch2_trans_exit(&trans);
/*
 * Workqueue entry point: runs bch2_delete_dead_snapshots() if the flag is
 * set, then drops the write ref taken when the work was queued.
 */
521 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
523 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
525 if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
526 bch2_delete_dead_snapshots(c);
527 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Queue the dead-snapshot cleanup work. Takes a write ref first; if the
 * work was already queued (queue_work() returns false) the ref is dropped
 * again so refs stay balanced.
 */
530 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
532 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
533 !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
534 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction-commit hook: flag that snapshots have been deleted and,
 * once recovery has passed the delete_dead_snapshots pass, kick off the
 * async cleanup.
 */
537 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
538 struct btree_trans_commit_hook *h)
540 struct bch_fs *c = trans->c;
542 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/* During early recovery the dedicated pass will handle cleanup instead */
544 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots)
547 bch2_delete_dead_snapshots_async(c);
/*
 * Bkey validation for KEY_TYPE_subvolume: the key position must lie within
 * [SUBVOL_POS_MIN, SUBVOL_POS_MAX]. Returns 0 if valid, else
 * -BCH_ERR_invalid_bkey with an explanation in @err.
 */
553 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
554 unsigned flags, struct printbuf *err)
556 if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
557 bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
558 prt_printf(err, "invalid pos");
559 return -BCH_ERR_invalid_bkey;
/*
 * Pretty-print a subvolume key: root inode and snapshot ID, plus the
 * parent subvolume when the on-disk value is new enough to contain it.
 */
565 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
568 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
570 prt_printf(out, "root %llu snapshot id %u",
571 le64_to_cpu(s.v->inode),
572 le32_to_cpu(s.v->snapshot));
/* 'parent' was appended to struct bch_subvolume later; check value size */
574 if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, parent))
575 prt_printf(out, " parent %u", le32_to_cpu(s.v->parent));
/*
 * Look up subvolume @subvol and copy its value into @s. When
 * @inconsistent_if_not_found is set, a missing subvolume is reported as a
 * filesystem inconsistency. Always-inlined core of bch2_subvolume_get().
 */
578 static __always_inline int
579 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
580 bool inconsistent_if_not_found,
582 struct bch_subvolume *s)
584 int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
585 iter_flags, subvolume, s);
586 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
587 inconsistent_if_not_found,
588 trans->c, "missing subvolume %u", subvol);
/* Out-of-line wrapper around bch2_subvolume_get_inlined(). */
592 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
593 bool inconsistent_if_not_found,
595 struct bch_subvolume *s)
597 return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/*
 * Resolve the subvolume that snapshot @snapshot points to: look up the
 * snapshot node, then fetch its subvol (missing subvol is treated as a
 * filesystem inconsistency).
 */
600 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
601 struct bch_subvolume *subvol)
603 struct bch_snapshot snap;
605 return bch2_snapshot_lookup(trans, snapshot, &snap) ?:
606 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Fetch the snapshot ID of subvolume @subvolid into *snapid, using a
 * cached iterator that also sees pending in-transaction updates. A missing
 * subvolume is a filesystem inconsistency.
 */
609 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
612 struct btree_iter iter;
613 struct bkey_s_c_subvolume subvol;
616 subvol = bch2_bkey_get_iter_typed(trans, &iter,
617 BTREE_ID_subvolumes, POS(0, subvolid),
618 BTREE_ITER_CACHED|BTREE_ITER_WITH_UPDATES,
620 ret = bkey_err(subvol);
621 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
622 "missing subvolume %u", subvolid);
625 *snapid = le32_to_cpu(subvol.v->snapshot);
626 bch2_trans_iter_exit(trans, &iter);
/*
 * Per-key helper: if @k is a subvolume whose parent field equals
 * @old_parent, rewrite it to point at @new_parent. Skips non-subvolume
 * keys and values too old to contain a parent field.
 */
630 static int bch2_subvolume_reparent(struct btree_trans *trans,
631 struct btree_iter *iter,
633 u32 old_parent, u32 new_parent)
635 struct bkey_i_subvolume *s;
638 if (k.k->type != KEY_TYPE_subvolume)
641 if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, parent) &&
642 le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
645 s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
646 ret = PTR_ERR_OR_ZERO(s);
650 s->v.parent = cpu_to_le32(new_parent);
655 * Separate from the snapshot tree in the snapshots btree, we record the tree
656 * structure of how snapshot subvolumes were created - the parent subvolume of
657 * each snapshot subvolume.
659 * When a subvolume is deleted, we scan for child subvolumes and reparent them,
660 * to avoid dangling references:
/*
 * Scan the whole subvolumes btree and repoint any child of
 * @subvolid_to_delete at its grandparent (the doomed subvolume's own
 * parent), committing as it goes.
 */
662 static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
664 struct btree_iter iter;
666 struct bch_subvolume s;
668 return lockrestart_do(trans,
669 bch2_subvolume_get(trans, subvolid_to_delete, true,
670 BTREE_ITER_CACHED, &s)) ?:
671 for_each_btree_key_commit(trans, iter,
672 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
673 NULL, NULL, BTREE_INSERT_NOFAIL,
674 bch2_subvolume_reparent(trans, &iter, k,
675 subvolid_to_delete, le32_to_cpu(s.parent)));
679 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
/*
 * ... cleanup: deletes the subvolume key, flags its snapshot node DELETED,
 * and installs a commit hook that triggers the async dead-snapshot sweep
 * once this transaction commits.
 */
682 static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
684 struct btree_iter iter;
685 struct bkey_s_c_subvolume subvol;
686 struct btree_trans_commit_hook *h;
690 subvol = bch2_bkey_get_iter_typed(trans, &iter,
691 BTREE_ID_subvolumes, POS(0, subvolid),
692 BTREE_ITER_CACHED|BTREE_ITER_INTENT,
694 ret = bkey_err(subvol);
695 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
696 "missing subvolume %u", subvolid);
700 snapid = le32_to_cpu(subvol.v->snapshot);
702 ret = bch2_btree_delete_at(trans, &iter, 0);
706 ret = bch2_snapshot_node_set_deleted(trans, snapid);
/* Hook fires at commit time; allocation lives in trans memory */
710 h = bch2_trans_kmalloc(trans, sizeof(*h));
711 ret = PTR_ERR_OR_ZERO(h);
715 h->fn = bch2_delete_dead_snapshots_hook;
716 bch2_trans_commit_hook(trans, h);
718 bch2_trans_iter_exit(trans, &iter);
/*
 * Public-path subvolume deletion: first reparent child subvolumes, then
 * run __bch2_subvolume_delete() in its own NOFAIL commit loop.
 */
722 static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
724 return bch2_subvolumes_reparent(trans, subvolid) ?:
725 commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
726 __bch2_subvolume_delete(trans, subvolid));
/*
 * Workqueue body for deferred subvolume deletion: atomically take the list
 * of unlinked subvolumes, evict their inodes from the pagecache, then
 * delete each subvolume. Drops the write ref taken when queued.
 */
729 static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
731 struct bch_fs *c = container_of(work, struct bch_fs,
732 snapshot_wait_for_pagecache_and_delete_work);
/* Steal the unlinked list under the lock so new entries can accumulate */
738 mutex_lock(&c->snapshots_unlinked_lock);
739 s = c->snapshots_unlinked;
740 darray_init(&c->snapshots_unlinked);
741 mutex_unlock(&c->snapshots_unlinked_lock);
746 bch2_evict_subvolume_inodes(c, &s);
748 for (id = s.data; id < s.data + s.nr; id++) {
749 ret = bch2_trans_run(c, bch2_subvolume_delete(&trans, *id));
751 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
759 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Commit-hook wrapper carrying the subvolume ID to unlink.
 * NOTE(review): remaining members/closing brace appear to be missing from
 * this excerpt — verify against the complete source.
 */
762 struct subvolume_unlink_hook {
763 struct btree_trans_commit_hook h;
/*
 * Commit hook for subvolume unlink: add the subvolume to the
 * snapshots_unlinked list (deduplicated, under the lock) and queue the
 * pagecache-wait-and-delete worker, balancing the write ref if the work
 * was already queued.
 */
767 static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
768 struct btree_trans_commit_hook *_h)
770 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
771 struct bch_fs *c = trans->c;
774 mutex_lock(&c->snapshots_unlinked_lock);
775 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
776 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
777 mutex_unlock(&c->snapshots_unlinked_lock);
782 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
785 if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
786 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Mark subvolume @subvolid UNLINKED and register a commit hook so the
 * actual deletion (pagecache eviction + key removal) happens
 * asynchronously after commit.
 */
790 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
792 struct btree_iter iter;
793 struct bkey_i_subvolume *n;
794 struct subvolume_unlink_hook *h;
797 h = bch2_trans_kmalloc(trans, sizeof(*h));
798 ret = PTR_ERR_OR_ZERO(h);
802 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
803 h->subvol = subvolid;
804 bch2_trans_commit_hook(trans, &h->h);
806 n = bch2_bkey_get_mut_typed(trans, &iter,
807 BTREE_ID_subvolumes, POS(0, subvolid),
808 BTREE_ITER_CACHED, subvolume);
809 ret = PTR_ERR_OR_ZERO(n);
811 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
812 "missing subvolume %u", subvolid);
816 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
817 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume (or a snapshot of @src_subvolid when nonzero):
 * find a free slot in the subvolumes btree, create the snapshot node(s) —
 * two when snapshotting (new + replacement for the source), one for a
 * fresh subvolume — then write the new subvolume key with its root inode,
 * parent, otime, and RO/SNAP flags. Returns the new subvolume and
 * snapshot IDs through the out parameters.
 *
 * NOTE(review): excerpt appears truncated (missing declarations/braces) —
 * verify against the complete source.
 */
821 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
827 struct bch_fs *c = trans->c;
828 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
829 struct bkey_i_subvolume *new_subvol = NULL;
830 struct bkey_i_subvolume *src_subvol = NULL;
831 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
834 ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
835 BTREE_ID_subvolumes, POS(0, U32_MAX));
836 if (ret == -BCH_ERR_ENOSPC_btree_slot)
837 ret = -BCH_ERR_ENOSPC_subvolume_create;
841 snapshot_subvols[0] = dst_iter.pos.offset;
842 snapshot_subvols[1] = src_subvolid;
845 /* Creating a snapshot: */
847 src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
848 BTREE_ID_subvolumes, POS(0, src_subvolid),
849 BTREE_ITER_CACHED, subvolume);
850 ret = PTR_ERR_OR_ZERO(src_subvol);
852 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
853 "subvolume %u not found", src_subvolid);
857 parent = le32_to_cpu(src_subvol->v.snapshot);
860 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
862 src_subvolid ? 2 : 1);
/* Source subvolume moves onto the second new snapshot node */
867 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
868 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
873 new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
874 ret = PTR_ERR_OR_ZERO(new_subvol);
878 new_subvol->v.flags = 0;
879 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
880 new_subvol->v.inode = cpu_to_le64(inode);
881 new_subvol->v.parent = cpu_to_le32(src_subvolid);
882 new_subvol->v.otime.lo = cpu_to_le64(bch2_current_time(c));
883 new_subvol->v.otime.hi = 0;
885 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
886 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
888 *new_subvolid = new_subvol->k.p.offset;
889 *new_snapshotid = new_nodes[0];
891 bch2_trans_iter_exit(trans, &src_iter);
892 bch2_trans_iter_exit(trans, &dst_iter);
/*
 * One-time init of subvolume machinery: both deferred-deletion work items
 * and the lock protecting the unlinked-subvolume list.
 */
896 int bch2_fs_subvolumes_init(struct bch_fs *c)
898 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
899 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
900 bch2_subvolume_wait_for_pagecache_and_delete);
901 mutex_init(&c->snapshots_unlinked_lock);