1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
11 static int bch2_subvolume_delete(struct btree_trans *, u32);
/*
 * Format a snapshot_tree key for display: its master subvolume ID and the
 * root snapshot ID it points at.
 */
15 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
18 	struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
20 	prt_printf(out, "subvol %u root snapshot %u",
21 		   le32_to_cpu(t.v->master_subvol),
22 		   le32_to_cpu(t.v->root_snapshot));
/*
 * Validate a snapshot_tree key: the key position must lie in
 * [POS(0, 1), POS(0, U32_MAX)] — snapshot tree IDs are nonzero u32s at
 * inode 0.  Returns -BCH_ERR_invalid_bkey on a bad position.
 */
25 int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
26 			       unsigned flags, struct printbuf *err)
28 	if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
29 	    bkey_lt(k.k->p, POS(0, 1))) {
30 		prt_printf(err, "bad pos");
31 		return -BCH_ERR_invalid_bkey;
/*
 * Look up the snapshot_tree entry @id and copy its value into @s.
 * A generic ENOENT from the btree lookup is translated into the more
 * specific -BCH_ERR_ENOENT_snapshot_tree for callers to match on.
 */
37 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
38 			      struct bch_snapshot_tree *s)
40 	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
41 					  BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
43 	if (bch2_err_matches(ret, ENOENT))
44 		ret = -BCH_ERR_ENOENT_snapshot_tree;
/*
 * Allocate a new snapshot_tree key in an empty slot, searching down from
 * U32_MAX.  Translates the generic "no free btree slot" error into
 * -BCH_ERR_ENOSPC_snapshot_tree.  On success returns the newly allocated
 * (uninitialized-value) key; on failure returns an ERR_PTR.
 */
48 static struct bkey_i_snapshot_tree *
49 __snapshot_tree_create(struct btree_trans *trans)
51 	struct btree_iter iter;
52 	int ret = bch2_bkey_get_empty_slot(trans, &iter,
53 			BTREE_ID_snapshot_trees, POS(0, U32_MAX));
54 	struct bkey_i_snapshot_tree *s_t;
56 	if (ret == -BCH_ERR_ENOSPC_btree_slot)
57 		ret = -BCH_ERR_ENOSPC_snapshot_tree;
	/* bch2_bkey_alloc() also queues the key as a pending update */
61 	s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
62 	ret = PTR_ERR_OR_ZERO(s_t);
63 	bch2_trans_iter_exit(trans, &iter);
64 	return ret ? ERR_PTR(ret) : s_t;
/*
 * Create a new snapshot_tree entry with root snapshot @root_id and master
 * subvolume @subvol_id; the allocated tree ID is returned via @tree_id.
 */
67 static int snapshot_tree_create(struct btree_trans *trans,
68 				u32 root_id, u32 subvol_id, u32 *tree_id)
70 	struct bkey_i_snapshot_tree *n_tree =
71 		__snapshot_tree_create(trans);
74 		return PTR_ERR(n_tree);
76 	n_tree->v.master_subvol	= cpu_to_le32(subvol_id);
77 	n_tree->v.root_snapshot	= cpu_to_le32(root_id);
78 	*tree_id = n_tree->k.p.offset;
/*
 * Format a snapshot key for display: subvol/deleted flags, parent node,
 * both child nodes, owning subvolume, and containing snapshot tree.
 */
84 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
87 	struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
89 	prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
90 	       BCH_SNAPSHOT_SUBVOL(s.v),
91 	       BCH_SNAPSHOT_DELETED(s.v),
92 	       le32_to_cpu(s.v->parent),
93 	       le32_to_cpu(s.v->children[0]),
94 	       le32_to_cpu(s.v->children[1]),
95 	       le32_to_cpu(s.v->subvol),
96 	       le32_to_cpu(s.v->tree));
/*
 * Validate a snapshot key:
 *  - position must be in [POS(0, 1), POS(0, U32_MAX)]
 *  - parent ID, if set, must be numerically greater than this node's ID
 *    (snapshot IDs are allocated downward from U32_MAX, so parents have
 *    higher IDs than children)
 *  - children must be normalized (children[0] >= children[1]) and distinct
 *  - each child ID, if set, must be numerically less than this node's ID
 */
99 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
100 			  unsigned flags, struct printbuf *err)
102 	struct bkey_s_c_snapshot s;
105 	if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
106 	    bkey_lt(k.k->p, POS(0, 1))) {
107 		prt_printf(err, "bad pos");
108 		return -BCH_ERR_invalid_bkey;
111 	s = bkey_s_c_to_snapshot(k);
113 	id = le32_to_cpu(s.v->parent);
114 	if (id && id <= k.k->p.offset) {
115 		prt_printf(err, "bad parent node (%u <= %llu)",
117 		return -BCH_ERR_invalid_bkey;
120 	if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
121 		prt_printf(err, "children not normalized");
122 		return -BCH_ERR_invalid_bkey;
125 	if (s.v->children[0] &&
126 	    s.v->children[0] == s.v->children[1]) {
127 		prt_printf(err, "duplicate child nodes");
128 		return -BCH_ERR_invalid_bkey;
131 	for (i = 0; i < 2; i++) {
132 		id = le32_to_cpu(s.v->children[i]);
134 		if (id >= k.k->p.offset) {
135 			prt_printf(err, "bad child node (%u >= %llu)",
137 			return -BCH_ERR_invalid_bkey;
/*
 * Trigger run when a snapshot key is created/updated: mirror the on-disk
 * snapshot node into the in-memory c->snapshots genradix so that
 * snapshot_t() lookups work without btree access.
 *
 * The radix is indexed by U32_MAX - id: IDs are allocated downward from
 * U32_MAX, so this keeps the commonly-used (high) IDs near index 0.
 */
144 int bch2_mark_snapshot(struct btree_trans *trans,
145 		       enum btree_id btree, unsigned level,
146 		       struct bkey_s_c old, struct bkey_s_c new,
149 	struct bch_fs *c = trans->c;
150 	struct snapshot_t *t;
152 	t = genradix_ptr_alloc(&c->snapshots,
153 			       U32_MAX - new.k->p.offset,
156 		return -BCH_ERR_ENOMEM_mark_snapshot;
158 	if (new.k->type == KEY_TYPE_snapshot) {
159 		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
161 		t->parent	= le32_to_cpu(s.v->parent);
162 		t->children[0]	= le32_to_cpu(s.v->children[0]);
163 		t->children[1]	= le32_to_cpu(s.v->children[1]);
		/* subvol is only meaningful when the SUBVOL flag is set */
164 		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
165 		t->tree		= le32_to_cpu(s.v->tree);
/*
 * Look up snapshot node @id and copy its value into @s; returns a bch2
 * error code (ENOENT-class if the node doesn't exist).
 */
177 static int snapshot_lookup(struct btree_trans *trans, u32 id,
178 			   struct bch_snapshot *s)
180 	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
181 				       BTREE_ITER_WITH_UPDATES, snapshot, s);
/*
 * Return whether snapshot node @id exists and is not marked deleted:
 * 1 if live, 0 if deleted, negative error code on lookup failure.
 * A missing node is reported as filesystem inconsistency.
 */
184 static int snapshot_live(struct btree_trans *trans, u32 id)
186 	struct bch_snapshot v;
192 	ret = snapshot_lookup(trans, id, &v);
193 	if (bch2_err_matches(ret, ENOENT))
194 		bch_err(trans->c, "snapshot node %u not found", id);
198 	return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot node @k and store it in the
 * in-memory snapshot table.  A node with exactly one live child is
 * equivalent to that child (interior nodes with a single live descendant
 * can be collapsed); otherwise the node is its own class representative.
 */
201 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
203 	struct bch_fs *c = trans->c;
204 	unsigned i, nr_live = 0, live_idx = 0;
205 	struct bkey_s_c_snapshot snap;
206 	u32 id = k.k->p.offset, child[2];
208 	if (k.k->type != KEY_TYPE_snapshot)
211 	snap = bkey_s_c_to_snapshot(k);
213 	child[0] = le32_to_cpu(snap.v->children[0]);
214 	child[1] = le32_to_cpu(snap.v->children[1]);
	/* count live children, remembering the index of the last live one */
216 	for (i = 0; i < 2; i++) {
217 		int ret = snapshot_live(trans, child[i]);
227 	snapshot_t(c, id)->equiv = nr_live == 1
228 		? snapshot_t(c, child[live_idx])->equiv
/* Return child @child (0 or 1) of snapshot @id from the in-memory table. */
235 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
237 	return snapshot_t(c, id)->children[child];
/* Left (index 0) child of snapshot @id. */
240 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
242 	return bch2_snapshot_child(c, id, 0);
/* Right (index 1) child of snapshot @id. */
245 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
247 	return bch2_snapshot_child(c, id, 1);
/*
 * In-order style traversal of a snapshot tree: descend to the left child
 * if there is one, otherwise walk up through parents looking for a right
 * sibling subtree to visit next.
 */
250 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
254 	n = bch2_snapshot_left_child(c, id);
258 	while ((parent = bch2_snapshot_parent(c, id))) {
259 		n = bch2_snapshot_right_child(c, parent);
/*
 * Walk the snapshot tree rooted at @snapshot_root and return the lowest
 * (i.e. oldest, since subvolume IDs are allocated upward) subvolume ID
 * referenced by any node in the tree.
 */
268 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
270 	u32 id = snapshot_root;
274 		s = snapshot_t(c, id)->subvol;
276 		if (s && (!subvol || s < subvol))
279 		id = bch2_snapshot_tree_next(c, id);
/*
 * Find the master (non-snapshot) subvolume whose snapshot lies in the tree
 * rooted at @snapshot_root, returning its ID via @subvol_id.
 *
 * If no non-snapshot subvolume exists in the tree, fall back to the oldest
 * subvolume in the tree and promote it by clearing its SNAP flag.
 */
285 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
286 					    u32 snapshot_root, u32 *subvol_id)
288 	struct bch_fs *c = trans->c;
289 	struct btree_iter iter;
291 	struct bkey_s_c_subvolume s;
295 	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
297 		if (k.k->type != KEY_TYPE_subvolume)
300 		s = bkey_s_c_to_subvolume(k);
		/* only consider subvolumes within this snapshot tree */
301 		if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
303 		if (!BCH_SUBVOLUME_SNAP(s.v)) {
304 			*subvol_id = s.k->p.offset;
310 	bch2_trans_iter_exit(trans, &iter);
	/* no master subvolume found: promote the oldest one in the tree */
312 	if (!ret && !found) {
313 		struct bkey_i_subvolume *s;
315 		*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
317 		s = bch2_bkey_get_mut_typed(trans, &iter,
318 					    BTREE_ID_subvolumes, POS(0, *subvol_id),
320 		ret = PTR_ERR_OR_ZERO(s);
324 		SET_BCH_SUBVOLUME_SNAP(&s->v, false);
/*
 * fsck: validate one snapshot_tree key.
 *
 * Deletes the entry if its root snapshot is missing, isn't actually a tree
 * root, or doesn't point back at this tree.  If the master subvolume is
 * missing, points outside this tree, or is a snapshot subvolume, repairs
 * the entry to point at the correct master subvolume.
 */
330 static int check_snapshot_tree(struct btree_trans *trans,
331 			       struct btree_iter *iter,
334 	struct bch_fs *c = trans->c;
335 	struct bkey_s_c_snapshot_tree st;
336 	struct bch_snapshot s;
337 	struct bch_subvolume subvol;
338 	struct printbuf buf = PRINTBUF;
342 	if (k.k->type != KEY_TYPE_snapshot_tree)
345 	st = bkey_s_c_to_snapshot_tree(k);
346 	root_id = le32_to_cpu(st.v->root_snapshot);
348 	ret = snapshot_lookup(trans, root_id, &s);
349 	if (ret && !bch2_err_matches(ret, ENOENT))
	/* root missing, not a root, or back-pointer mismatch: delete entry */
352 	if (fsck_err_on(ret ||
353 			root_id != bch2_snapshot_root(c, root_id) ||
354 			st.k->p.offset != le32_to_cpu(s.tree),
356 			"snapshot tree points to missing/incorrect snapshot:\n  %s",
357 			(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
358 		ret = bch2_btree_delete_at(trans, iter, 0);
362 	ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
364 	if (ret && !bch2_err_matches(ret, ENOENT))
	/* bad master subvol: look up the correct one and rewrite the key */
367 	if (fsck_err_on(ret, c,
368 			"snapshot tree points to missing subvolume:\n  %s",
369 			(printbuf_reset(&buf),
370 			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
371 	    fsck_err_on(!bch2_snapshot_is_ancestor(c,
372 						   le32_to_cpu(subvol.snapshot),
374 			"snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
375 			(printbuf_reset(&buf),
376 			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
377 	    fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
378 			"snapshot tree points to snapshot subvolume:\n  %s",
379 			(printbuf_reset(&buf),
380 			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
381 		struct bkey_i_snapshot_tree *u;
384 		ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
388 		u = bch2_bkey_make_mut_typed(trans, iter, k, 0, snapshot_tree);
389 		ret = PTR_ERR_OR_ZERO(u);
393 		u->v.master_subvol = cpu_to_le32(subvol_id);
394 		st = snapshot_tree_i_to_s_c(u);
403 * For each snapshot_tree, make sure it points to the root of a snapshot tree
404 * and that snapshot entry points back to it, or delete it.
406 * And, make sure it points to a subvolume within that snapshot tree, or correct
407 * it to point to the oldest subvolume within that snapshot tree.
/*
 * fsck entry point: run check_snapshot_tree() over every key in the
 * snapshot_trees btree, committing fixes as we go.
 */
409 int bch2_fs_check_snapshot_trees(struct bch_fs *c)
411 	struct btree_iter iter;
415 	ret = bch2_trans_run(c,
416 		for_each_btree_key_commit(&trans, iter,
417 			BTREE_ID_snapshot_trees, POS_MIN,
418 			BTREE_ITER_PREFETCH, k,
419 			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
420 		check_snapshot_tree(&trans, &iter, k)));
423 		bch_err(c, "error %i checking snapshot trees", ret);
428 * Look up snapshot tree for @tree_id and find root,
429  * make sure @snap_id is a descendant:
/*
 * Check that @snap_id's tree pointer is valid: the snapshot_tree entry
 * @tree_id must exist and its root must be an ancestor of @snap_id.
 * Returns 1 if good, 0 if the tree entry is missing or wrong, or a
 * negative error code.
 */
431 static int snapshot_tree_ptr_good(struct btree_trans *trans,
432 				  u32 snap_id, u32 tree_id)
434 	struct bch_snapshot_tree s_t;
435 	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
437 	if (bch2_err_matches(ret, ENOENT))
442 	return bch2_snapshot_is_ancestor(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
446 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
447 * its snapshot_tree pointer is correct (allocate new one if necessary), then
448 * update this node's pointer to root node's pointer:
/*
 * Repair a snapshot node whose tree pointer was wrong: find the root of
 * this node's snapshot tree, make sure the root's snapshot_tree entry is
 * correct (creating a fresh one if needed), then copy the root's tree
 * pointer into this node.  @s is updated in place to the rewritten key.
 */
450 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
451 				    struct btree_iter *iter,
452 				    struct bkey_s_c_snapshot *s)
454 	struct bch_fs *c = trans->c;
455 	struct btree_iter root_iter;
456 	struct bch_snapshot_tree s_t;
457 	struct bkey_s_c_snapshot root;
458 	struct bkey_i_snapshot *u;
459 	u32 root_id = bch2_snapshot_root(c, s->k->p.offset), tree_id;
462 	root = bch2_bkey_get_iter_typed(trans, &root_iter,
463 			       BTREE_ID_snapshots, POS(0, root_id),
464 			       BTREE_ITER_WITH_UPDATES, snapshot);
465 	ret = bkey_err(root);
469 	tree_id = le32_to_cpu(root.v->tree);
471 	ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
472 	if (ret && !bch2_err_matches(ret, ENOENT))
	/* root's own tree pointer is bad: allocate a new tree entry for it */
475 	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
476 		u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
477 		ret = PTR_ERR_OR_ZERO(u) ?:
478 			snapshot_tree_create(trans, root_id,
479 				bch2_snapshot_tree_oldest_subvol(c, root_id),
484 		u->v.tree = cpu_to_le32(tree_id);
485 		if (s->k->p.snapshot == root_id)
486 			*s = snapshot_i_to_s_c(u);
	/* non-root node: propagate the (now correct) tree id into it */
489 	if (s->k->p.snapshot != root_id) {
490 		u = bch2_bkey_make_mut_typed(trans, iter, s->s_c, 0, snapshot);
491 		ret = PTR_ERR_OR_ZERO(u);
495 		u->v.tree = cpu_to_le32(tree_id);
496 		*s = snapshot_i_to_s_c(u);
499 	bch2_trans_iter_exit(trans, &root_iter);
/*
 * fsck: validate one snapshot key.
 *
 * Checks, in order: the parent node exists and points back at this node
 * as a child; each child exists and points back at this node as parent;
 * the subvol field is consistent with the SUBVOL/DELETED flags; and the
 * tree pointer is valid (repairing it if not).  Finally, a deleted node
 * schedules dead-snapshot cleanup via the HAVE_DELETED_SNAPSHOTS bit.
 */
503 static int check_snapshot(struct btree_trans *trans,
504 			  struct btree_iter *iter,
507 	struct bch_fs *c = trans->c;
508 	struct bkey_s_c_snapshot s;
509 	struct bch_subvolume subvol;
510 	struct bch_snapshot v;
511 	struct printbuf buf = PRINTBUF;
512 	bool should_have_subvol;
516 	if (k.k->type != KEY_TYPE_snapshot)
519 	s = bkey_s_c_to_snapshot(k);
520 	id = le32_to_cpu(s.v->parent);
522 		ret = snapshot_lookup(trans, id, &v);
523 		if (bch2_err_matches(ret, ENOENT))
524 			bch_err(c, "snapshot with nonexistent parent:\n  %s",
525 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
		/* parent must list this node as one of its two children */
529 		if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
530 		    le32_to_cpu(v.children[1]) != s.k->p.offset) {
531 			bch_err(c, "snapshot parent %u missing pointer to child %llu",
	/* verify each child's back-pointer to us */
538 	for (i = 0; i < 2 && s.v->children[i]; i++) {
539 		id = le32_to_cpu(s.v->children[i]);
541 		ret = snapshot_lookup(trans, id, &v);
542 		if (bch2_err_matches(ret, ENOENT))
543 			bch_err(c, "snapshot node %llu has nonexistent child %u",
548 		if (le32_to_cpu(v.parent) != s.k->p.offset) {
549 			bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
550 				id, le32_to_cpu(v.parent), s.k->p.offset);
556 	should_have_subvol = BCH_SNAPSHOT_SUBVOL(s.v) &&
557 		!BCH_SNAPSHOT_DELETED(s.v);
559 	if (should_have_subvol) {
560 		id = le32_to_cpu(s.v->subvol);
561 		ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
562 		if (bch2_err_matches(ret, ENOENT))
563 			bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
564 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
568 		if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
569 			bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
		/* shouldn't have a subvol: clear the field via a rewritten key */
575 		if (fsck_err_on(s.v->subvol, c, "snapshot should not point to subvol:\n  %s",
576 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
577 			struct bkey_i_snapshot *u = bch2_trans_kmalloc(trans, sizeof(*u));
579 			ret = PTR_ERR_OR_ZERO(u);
583 			bkey_reassemble(&u->k_i, s.s_c);
585 			ret = bch2_trans_update(trans, iter, &u->k_i, 0);
589 			s = snapshot_i_to_s_c(u);
593 	ret = snapshot_tree_ptr_good(trans, s.k->p.offset, le32_to_cpu(s.v->tree));
597 	if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n  %s",
598 			(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
599 		ret = snapshot_tree_ptr_repair(trans, iter, &s);
	/* deleted node: make sure dead-snapshot cleanup will run */
605 	if (BCH_SNAPSHOT_DELETED(s.v))
606 		set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/*
 * fsck entry point: run check_snapshot() over every key in the snapshots
 * btree, committing fixes as we go.
 */
613 int bch2_fs_check_snapshots(struct bch_fs *c)
615 	struct btree_iter iter;
619 	ret = bch2_trans_run(c,
620 		for_each_btree_key_commit(&trans, iter,
621 			BTREE_ID_snapshots, POS_MIN,
622 			BTREE_ITER_PREFETCH, k,
623 			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
624 		check_snapshot(&trans, &iter, k)));
626 		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
/*
 * fsck: validate one subvolume key.
 *
 * Verifies the subvolume's snapshot exists; finishes deletion of
 * subvolumes left in the UNLINKED state; and for non-snapshot subvolumes,
 * checks that the owning snapshot tree names this subvolume as its master
 * (marking the subvolume as a snapshot if not).
 */
630 static int check_subvol(struct btree_trans *trans,
631 			struct btree_iter *iter,
634 	struct bch_fs *c = trans->c;
635 	struct bkey_s_c_subvolume subvol;
636 	struct bch_snapshot snapshot;
640 	if (k.k->type != KEY_TYPE_subvolume)
643 	subvol = bkey_s_c_to_subvolume(k);
644 	snapid = le32_to_cpu(subvol.v->snapshot);
645 	ret = snapshot_lookup(trans, snapid, &snapshot);
647 	if (bch2_err_matches(ret, ENOENT))
648 		bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
649 			k.k->p.offset, snapid);
	/* unlinked subvolume: resume its interrupted deletion */
653 	if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
656 		ret = bch2_subvolume_delete(trans, iter->pos.offset);
658 			bch_err(c, "error deleting subvolume %llu: %s",
659 				iter->pos.offset, bch2_err_str(ret));
		/* deletion commits internally, so force a transaction restart */
660 		return ret ?: -BCH_ERR_transaction_restart_nested;
663 	if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
664 		u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
665 		u32 snapshot_tree = snapshot_t(c, snapshot_root)->tree;
666 		struct bch_snapshot_tree st;
668 		ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
670 		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
671 				"%s: snapshot tree %u not found", __func__, snapshot_tree);
		/* not the tree's master subvolume: it must be a snapshot */
676 		if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
677 				"subvolume %llu is not set as snapshot but is not master subvolume",
679 			struct bkey_i_subvolume *s =
680 				bch2_bkey_make_mut_typed(trans, iter, subvol.s_c, 0, subvolume);
681 			ret = PTR_ERR_OR_ZERO(s);
685 			SET_BCH_SUBVOLUME_SNAP(&s->v, true);
/*
 * fsck entry point: run check_subvol() over every key in the subvolumes
 * btree, committing fixes as we go.
 */
693 int bch2_fs_check_subvols(struct bch_fs *c)
695 	struct btree_iter iter;
699 	ret = bch2_trans_run(c,
700 		for_each_btree_key_commit(&trans, iter,
701 			BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
702 			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
703 		check_subvol(&trans, &iter, k)));
705 		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
/* Free the in-memory snapshot table on filesystem shutdown. */
710 void bch2_fs_snapshots_exit(struct bch_fs *c)
712 	genradix_free(&c->snapshots);
/*
 * Startup: populate the in-memory snapshot table from the snapshots btree
 * (via bch2_mark_snapshot) and compute each node's equivalence class.
 */
715 int bch2_fs_snapshots_start(struct bch_fs *c)
717 	struct btree_iter iter;
721 	ret = bch2_trans_run(c,
722 		for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
724 			bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
725 			bch2_snapshot_set_equiv(&trans, k)));
727 		bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
732 * Mark a snapshot as deleted, for future cleanup:
/*
 * Mark snapshot node @id as deleted (and no longer owned by a subvolume)
 * so the dead-snapshot cleanup path will reap it later.  A missing node
 * is reported as filesystem inconsistency; an already-deleted node is a
 * no-op.
 */
734 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
736 	struct btree_iter iter;
737 	struct bkey_i_snapshot *s;
740 	s = bch2_bkey_get_mut_typed(trans, &iter,
741 				    BTREE_ID_snapshots, POS(0, id),
743 	ret = PTR_ERR_OR_ZERO(s);
745 	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
746 				trans->c, "missing snapshot %u", id);
750 	/* already deleted? */
751 	if (BCH_SNAPSHOT_DELETED(&s->v))
754 	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
755 	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
758 	bch2_trans_iter_exit(trans, &iter);
/*
 * Physically remove snapshot node @id (which must already be marked
 * deleted) from the snapshots btree, fixing up the tree structure:
 *  - drop the parent's child pointer to it and re-normalize the
 *    parent's children array (children[0] >= children[1])
 *  - if it was the root of its snapshot tree, repoint the snapshot_tree
 *    entry at the surviving child, or delete the tree entry entirely when
 *    this was the last node.
 */
762 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
764 	struct bch_fs *c = trans->c;
765 	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
766 	struct btree_iter tree_iter = (struct btree_iter) { NULL };
767 	struct bkey_s_c_snapshot s;
772 	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
773 				     BTREE_ITER_INTENT, snapshot);
775 	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
776 				"missing snapshot %u", id);
	/* caller must have marked the node deleted first */
781 	BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
782 	parent_id = le32_to_cpu(s.v->parent);
785 		struct bkey_i_snapshot *parent;
787 		parent = bch2_bkey_get_mut_typed(trans, &p_iter,
788 				     BTREE_ID_snapshots, POS(0, parent_id),
790 		ret = PTR_ERR_OR_ZERO(parent);
792 		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
793 				     "missing snapshot %u", parent_id);
		/* find which child slot points at us */
797 		for (i = 0; i < 2; i++)
798 			if (le32_to_cpu(parent->v.children[i]) == id)
802 			bch_err(c, "snapshot %u missing child pointer to %u",
805 		parent->v.children[i] = 0;
		/* keep children normalized: children[0] >= children[1] */
807 		if (le32_to_cpu(parent->v.children[0]) <
808 		    le32_to_cpu(parent->v.children[1]))
809 			swap(parent->v.children[0],
810 			     parent->v.children[1]);
813 		 * We're deleting the root of a snapshot tree: update the
814 		 * snapshot_tree entry to point to the new root, or delete it if
815 		 * this is the last snapshot ID in this tree:
817 		struct bkey_i_snapshot_tree *s_t;
819 		BUG_ON(s.v->children[1]);
821 		s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
822 				BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
824 		ret = PTR_ERR_OR_ZERO(s_t);
828 		if (s.v->children[0]) {
829 			s_t->v.root_snapshot = cpu_to_le32(s.v->children[0]);
			/* last node in tree: whiteout the tree entry */
831 			s_t->k.type = KEY_TYPE_deleted;
832 			set_bkey_val_u64s(&s_t->k, 0);
836 	ret = bch2_btree_delete_at(trans, &iter, 0);
838 	bch2_trans_iter_exit(trans, &tree_iter);
839 	bch2_trans_iter_exit(trans, &p_iter);
840 	bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes, all children of @parent and
 * members of tree @tree.  IDs are taken from the high end of the keyspace
 * by stepping backwards to empty slots; each new node is associated with
 * the corresponding entry of @snapshot_subvols and its ID returned in
 * @new_snapids.
 */
844 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
846 			  u32 *snapshot_subvols,
849 	struct btree_iter iter;
850 	struct bkey_i_snapshot *n;
855 	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
856 			     POS_MIN, BTREE_ITER_INTENT);
857 	k = bch2_btree_iter_peek(&iter);
862 	for (i = 0; i < nr_snapids; i++) {
		/* step back to the next free slot below the current position */
863 		k = bch2_btree_iter_prev_slot(&iter);
		/* ran off the front of the keyspace: no IDs left */
868 		if (!k.k || !k.k->p.offset) {
869 			ret = -BCH_ERR_ENOSPC_snapshot_create;
873 		n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
874 		ret = PTR_ERR_OR_ZERO(n);
879 		n->v.parent	= cpu_to_le32(parent);
880 		n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
881 		n->v.tree	= cpu_to_le32(tree);
882 		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
		/* keep the in-memory snapshot table in sync immediately */
884 		ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
885 					 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
889 		new_snapids[i]	= iter.pos.offset;
892 	bch2_trans_iter_exit(trans, &iter);
897 * Create new snapshot IDs as children of an existing snapshot ID:
/*
 * Create new snapshot nodes as children of existing node @parent, which
 * must currently be a leaf.  After creating the children, @parent becomes
 * an interior node: its child pointers are filled in and its subvol
 * ownership is cleared (interior nodes don't own subvolumes).
 */
899 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
901 					      u32 *snapshot_subvols,
904 	struct btree_iter iter;
905 	struct bkey_i_snapshot *n_parent;
908 	n_parent = bch2_bkey_get_mut_typed(trans, &iter,
909 			BTREE_ID_snapshots, POS(0, parent),
911 	ret = PTR_ERR_OR_ZERO(n_parent);
913 	if (bch2_err_matches(ret, ENOENT))
914 		bch_err(trans->c, "snapshot %u not found", parent);
918 	if (n_parent->v.children[0] || n_parent->v.children[1]) {
919 		bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
924 	ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
925 			     new_snapids, snapshot_subvols, nr_snapids);
	/* parent is now interior: record children, drop subvol ownership */
929 	n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
930 	n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
931 	n_parent->v.subvol = 0;
932 	SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
934 	bch2_trans_iter_exit(trans, &iter);
939 * Create a snapshot node that is the root of a new tree:
/*
 * Create a snapshot node that is the root of a brand new snapshot tree:
 * allocate the snapshot_tree entry, create the root node in it, then
 * record the new root and master subvolume in the tree entry.
 */
941 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
943 					  u32 *snapshot_subvols,
946 	struct bkey_i_snapshot_tree *n_tree;
949 	n_tree = __snapshot_tree_create(trans);
950 	ret =   PTR_ERR_OR_ZERO(n_tree) ?:
951 		create_snapids(trans, 0, n_tree->k.p.offset,
952 			     new_snapids, snapshot_subvols, nr_snapids);
956 	n_tree->v.master_subvol	= cpu_to_le32(snapshot_subvols[0]);
957 	n_tree->v.root_snapshot	= cpu_to_le32(new_snapids[0]);
/*
 * Create new snapshot node(s): with @parent == 0, one new node rooting a
 * new snapshot tree; otherwise exactly two children of @parent (the
 * BUG_ONs enforce this pairing of parent vs. nr_snapids).
 */
961 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
963 			      u32 *snapshot_subvols,
966 	BUG_ON((parent == 0) != (nr_snapids == 1));
967 	BUG_ON((parent != 0) != (nr_snapids == 2));
970 		? bch2_snapshot_node_create_children(trans, parent,
971 				new_snapids, snapshot_subvols, nr_snapids)
972 		: bch2_snapshot_node_create_tree(trans,
973 				new_snapids, snapshot_subvols, nr_snapids);
/*
 * Dead-snapshot cleanup, per-key step: for key @k in some snapshot-aware
 * btree, delete it if its snapshot ID is being deleted, or if we've
 * already kept a key at this position from the same equivalence class
 * (the kept key is the canonical copy).  Otherwise remember this key's
 * equivalence class as seen at this position.
 */
977 static int snapshot_delete_key(struct btree_trans *trans,
978 			       struct btree_iter *iter,
980 			       snapshot_id_list *deleted,
981 			       snapshot_id_list *equiv_seen,
982 			       struct bpos *last_pos)
984 	struct bch_fs *c = trans->c;
985 	u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
	/* new position: the equiv_seen set is per-position, reset tracking */
987 	if (!bkey_eq(k.k->p, *last_pos))
991 	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
992 	    snapshot_list_has_id(equiv_seen, equiv)) {
993 		return bch2_btree_delete_at(trans, iter,
994 					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
996 		return snapshot_list_add(c, equiv_seen, equiv);
/*
 * Mark snapshot node @k as deleted if it is redundant: not already
 * deleted, not owning a subvolume, and with no live children (the
 * combined snapshot_live() check below only passes when both children
 * are dead or absent).
 */
1000 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
1003 	struct bkey_s_c_snapshot snap;
1007 	if (k.k->type != KEY_TYPE_snapshot)
1010 	snap = bkey_s_c_to_snapshot(k);
1011 	if (BCH_SNAPSHOT_DELETED(snap.v) ||
1012 	    BCH_SNAPSHOT_SUBVOL(snap.v))
1015 	children[0] = le32_to_cpu(snap.v->children[0]);
1016 	children[1] = le32_to_cpu(snap.v->children[1]);
1018 	ret   = snapshot_live(trans, children[0]) ?:
1019 		snapshot_live(trans, children[1]);
1024 	return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Reap snapshots that have been marked deleted:
 *  1. mark redundant nodes (no live children, no subvolume) as deleted
 *  2. recompute equivalence classes
 *  3. collect all deleted snapshot IDs
 *  4. for every snapshot-aware btree, drop keys belonging to deleted or
 *     duplicated-equivalence snapshots
 *  5. remove the snapshot nodes themselves from the snapshots btree
 *
 * NOTE(review): the error string below contains a typo ("deleleting") —
 * it's a runtime message, so left untouched here; fix separately.
 */
1028 int bch2_delete_dead_snapshots(struct bch_fs *c)
1030 	struct btree_trans trans;
1031 	struct btree_iter iter;
1033 	struct bkey_s_c_snapshot snap;
1034 	snapshot_id_list deleted = { 0 };
1038 	if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
	/* deletion writes to the btree: go read-write if not started yet */
1041 	if (!test_bit(BCH_FS_STARTED, &c->flags)) {
1042 		ret = bch2_fs_read_write_early(c);
1044 			bch_err(c, "error deleleting dead snapshots: error going rw: %s", bch2_err_str(ret));
1049 	bch2_trans_init(&trans, c, 0, 0);
1052 	 * For every snapshot node: If we have no live children and it's not
1053 	 * pointed to by a subvolume, delete it:
1055 	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
1058 			bch2_delete_redundant_snapshot(&trans, &iter, k));
1060 		bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
1064 	for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
1066 		bch2_snapshot_set_equiv(&trans, k));
1068 		bch_err(c, "error in bch2_snapshots_set_equiv: %s", bch2_err_str(ret));
	/* gather the IDs of every node marked deleted */
1072 	for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
1073 			   POS_MIN, 0, k, ret) {
1074 		if (k.k->type != KEY_TYPE_snapshot)
1077 		snap = bkey_s_c_to_snapshot(k);
1078 		if (BCH_SNAPSHOT_DELETED(snap.v)) {
1079 			ret = snapshot_list_add(c, &deleted, k.k->p.offset);
1084 	bch2_trans_iter_exit(&trans, &iter);
1087 		bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
	/* purge keys owned by deleted snapshots from every snapshotted btree */
1091 	for (id = 0; id < BTREE_ID_NR; id++) {
1092 		struct bpos last_pos = POS_MIN;
1093 		snapshot_id_list equiv_seen = { 0 };
1095 		if (!btree_type_has_snapshots(id))
1098 		ret = for_each_btree_key_commit(&trans, iter,
1100 				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1101 				NULL, NULL, BTREE_INSERT_NOFAIL,
1102 			snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
1104 		darray_exit(&equiv_seen);
1107 			bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
	/* finally, remove the snapshot nodes themselves */
1112 	for (i = 0; i < deleted.nr; i++) {
1113 		ret = commit_do(&trans, NULL, NULL, 0,
1114 			bch2_snapshot_node_delete(&trans, deleted.data[i]));
1116 			bch_err(c, "error deleting snapshot %u: %s",
1117 				deleted.data[i], bch2_err_str(ret));
1122 	clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1124 	darray_exit(&deleted);
1125 	bch2_trans_exit(&trans);
/*
 * Workqueue callback: run dead-snapshot deletion, then drop the write ref
 * taken when the work was queued.
 */
1129 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
1131 	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1133 	bch2_delete_dead_snapshots(c);
1134 	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Queue dead-snapshot deletion on the write-ref workqueue; the write ref
 * pins the fs read-write until the work runs.  If the work was already
 * queued, drop the extra ref immediately.
 */
1137 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1139 	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1140 	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1141 		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction commit hook: note that deleted snapshots now exist, and
 * kick off async cleanup — but only once fsck has finished, since fsck
 * runs its own cleanup.
 */
1144 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
1145 					   struct btree_trans_commit_hook *h)
1147 	struct bch_fs *c = trans->c;
1149 	set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1151 	if (!test_bit(BCH_FS_FSCK_DONE, &c->flags))
1154 	bch2_delete_dead_snapshots_async(c);
/*
 * Validate a subvolume key: position must lie within
 * [SUBVOL_POS_MIN, SUBVOL_POS_MAX].
 */
1160 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
1161 			   unsigned flags, struct printbuf *err)
1163 	if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
1164 	    bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
1165 		prt_printf(err, "invalid pos");
1166 		return -BCH_ERR_invalid_bkey;
/*
 * Format a subvolume key for display: root inode, snapshot ID, and —
 * when the value is new enough to contain it — the parent subvolume.
 */
1172 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
1175 	struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1177 	prt_printf(out, "root %llu snapshot id %u",
1178 		   le64_to_cpu(s.v->inode),
1179 		   le32_to_cpu(s.v->snapshot));
	/* the parent field was appended later; older keys may lack it */
1181 	if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, parent))
1182 		prt_printf(out, " parent %u", le32_to_cpu(s.v->parent));
/*
 * Look up subvolume @subvol and copy its value into @s.  When
 * @inconsistent_if_not_found is set, a missing subvolume is reported as
 * filesystem inconsistency (callers use this when the subvolume is
 * referenced by other metadata and so must exist).
 */
1185 static __always_inline int
1186 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
1187 			   bool inconsistent_if_not_found,
1189 			   struct bch_subvolume *s)
1191 	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
1192 					  iter_flags, subvolume, s);
1193 	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
1194 				inconsistent_if_not_found,
1195 				trans->c, "missing subvolume %u", subvol);
/* Out-of-line wrapper around bch2_subvolume_get_inlined(). */
1199 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
1200 		       bool inconsistent_if_not_found,
1202 		       struct bch_subvolume *s)
1204 	return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/*
 * Given snapshot ID @snapshot, look up the subvolume that owns it
 * (snapshot node -> subvol field -> subvolume entry).
 */
1207 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
1208 			     struct bch_subvolume *subvol)
1210 	struct bch_snapshot snap;
1212 	return  snapshot_lookup(trans, snapshot, &snap) ?:
1213 		bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Look up subvolume @subvol and return its snapshot ID via @snapid.
 * A missing subvolume is reported as filesystem inconsistency and
 * returned as -BCH_ERR_ENOENT_subvolume.
 */
1216 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
1219 	struct btree_iter iter;
1223 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
1225 			       BTREE_ITER_WITH_UPDATES);
1226 	ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -BCH_ERR_ENOENT_subvolume;
1229 		*snapid = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
1230 	else if (bch2_err_matches(ret, ENOENT))
1231 		bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
1232 	bch2_trans_iter_exit(trans, &iter);
/*
 * If subvolume @k has parent @old_parent, rewrite it to point at
 * @new_parent.  Keys too old to have a parent field are skipped.
 */
1236 static int bch2_subvolume_reparent(struct btree_trans *trans,
1237 				   struct btree_iter *iter,
1239 				   u32 old_parent, u32 new_parent)
1241 	struct bkey_i_subvolume *s;
1244 	if (k.k->type != KEY_TYPE_subvolume)
	/* only touch keys that have a parent field and match old_parent */
1247 	if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, parent) &&
1248 	    le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
1251 	s = bch2_bkey_make_mut_typed(trans, iter, k, 0, subvolume);
1252 	ret = PTR_ERR_OR_ZERO(s);
1256 	s->v.parent = cpu_to_le32(new_parent);
1261 * Scan for subvolumes with parent @subvolid_to_delete, reparent:
/*
 * Before deleting subvolume @subvolid_to_delete, walk all subvolumes and
 * repoint any that list it as their parent to its own parent instead, so
 * no dangling parent references remain.
 */
1263 static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
1265 	struct btree_iter iter;
1267 	struct bch_subvolume s;
1269 	return lockrestart_do(trans,
1270 			bch2_subvolume_get(trans, subvolid_to_delete, true,
1271 				   BTREE_ITER_CACHED, &s)) ?:
1272 		for_each_btree_key_commit(trans, iter,
1273 				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
1274 				NULL, NULL, BTREE_INSERT_NOFAIL,
1275 			bch2_subvolume_reparent(trans, &iter, k,
1276 					subvolid_to_delete, le32_to_cpu(s.parent)));
1280 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
/*
 * Delete subvolume @subvolid: remove its key, mark its snapshot node as
 * deleted, and install a commit hook that schedules dead-snapshot cleanup
 * once the transaction commits.  A missing subvolume is reported as
 * filesystem inconsistency.
 */
1283 static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1285 	struct btree_iter iter;
1286 	struct bkey_s_c_subvolume subvol;
1287 	struct btree_trans_commit_hook *h;
1291 	subvol = bch2_bkey_get_iter_typed(trans, &iter,
1292 				BTREE_ID_subvolumes, POS(0, subvolid),
1293 				BTREE_ITER_CACHED|BTREE_ITER_INTENT,
1295 	ret = bkey_err(subvol);
1296 	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1297 				"missing subvolume %u", subvolid);
1301 	snapid = le32_to_cpu(subvol.v->snapshot);
1303 	ret = bch2_btree_delete_at(trans, &iter, 0);
1307 	ret = bch2_snapshot_node_set_deleted(trans, snapid);
	/* hook fires after commit: schedules async dead-snapshot cleanup */
1311 	h = bch2_trans_kmalloc(trans, sizeof(*h));
1312 	ret = PTR_ERR_OR_ZERO(h);
1316 	h->fn = bch2_delete_dead_snapshots_hook;
1317 	bch2_trans_commit_hook(trans, h);
1319 	bch2_trans_iter_exit(trans, &iter);
/*
 * Public deletion path: first reparent any children of @subvolid, then
 * commit the actual deletion in its own transaction.
 */
1323 static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1325 	return bch2_subvolumes_reparent(trans, subvolid) ?:
1326 		commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
1327 			  __bch2_subvolume_delete(trans, subvolid));
/*
 * Workqueue callback: take the list of unlinked subvolumes accumulated by
 * the unlink commit hook, evict their inodes from the pagecache, then
 * delete each one.  Drops the write ref taken when the work was queued.
 */
1330 void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
1332 	struct bch_fs *c = container_of(work, struct bch_fs,
1333 				snapshot_wait_for_pagecache_and_delete_work);
	/* atomically steal the pending list so new entries can accumulate */
1339 		mutex_lock(&c->snapshots_unlinked_lock);
1340 		s = c->snapshots_unlinked;
1341 		darray_init(&c->snapshots_unlinked);
1342 		mutex_unlock(&c->snapshots_unlinked_lock);
1347 		bch2_evict_subvolume_inodes(c, &s);
1349 		for (id = s.data; id < s.data + s.nr; id++) {
1350 			ret = bch2_trans_run(c, bch2_subvolume_delete(&trans, *id));
1352 				bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
1360 	bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Per-transaction state for the subvolume-unlink commit hook; embeds the
 * generic hook so container_of() can recover it in the callback.
 */
1363 struct subvolume_unlink_hook {
1364 	struct btree_trans_commit_hook	h;
/*
 * Commit hook fired when a subvolume-unlink transaction commits: add the
 * subvolume to the pending-deletion list (deduplicated) and queue the
 * pagecache-eviction/deletion worker, pinning the fs read-write with a
 * write ref until the worker runs.
 */
1368 int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
1369 						      struct btree_trans_commit_hook *_h)
1371 	struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
1372 	struct bch_fs *c = trans->c;
1375 	mutex_lock(&c->snapshots_unlinked_lock);
1376 	if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
1377 		ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
1378 	mutex_unlock(&c->snapshots_unlinked_lock);
1383 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
	/* already queued: drop the extra ref */
1386 	if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
1387 		bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Mark subvolume @subvolid as unlinked and register a commit hook so that
 * actual deletion (pagecache eviction + key removal) happens after the
 * transaction commits.  A missing subvolume is reported as filesystem
 * inconsistency.
 */
1391 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
1393 	struct btree_iter iter;
1394 	struct bkey_i_subvolume *n;
1395 	struct subvolume_unlink_hook *h;
1398 	h = bch2_trans_kmalloc(trans, sizeof(*h));
1399 	ret = PTR_ERR_OR_ZERO(h);
1403 	h->h.fn		= bch2_subvolume_wait_for_pagecache_and_delete_hook;
1404 	h->subvol	= subvolid;
1405 	bch2_trans_commit_hook(trans, &h->h);
1407 	n = bch2_bkey_get_mut_typed(trans, &iter,
1408 			BTREE_ID_subvolumes, POS(0, subvolid),
1409 			BTREE_ITER_CACHED, subvolume);
1410 	ret = PTR_ERR_OR_ZERO(n);
1411 	if (unlikely(ret)) {
1412 		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1413 					"missing subvolume %u", subvolid);
1417 	SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
1418 	bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume rooted at @inode.
 *
 * With src_subvolid == 0 this is a fresh subvolume: one new snapshot node
 * rooting a new snapshot tree.  With src_subvolid != 0 this is a snapshot
 * of an existing subvolume: two sibling snapshot nodes are created under
 * the source's snapshot, one for the new subvolume and one to move the
 * source onto.  Returns the new subvolume and snapshot IDs via
 * @new_subvolid / @new_snapshotid.
 */
1422 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
1425 			  u32 *new_snapshotid,
1428 	struct bch_fs *c = trans->c;
1429 	struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
1430 	struct bkey_i_subvolume *new_subvol = NULL;
1431 	struct bkey_i_subvolume *src_subvol = NULL;
1432 	u32 parent = 0, new_nodes[2], snapshot_subvols[2];
1435 	ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
1436 			BTREE_ID_subvolumes, POS(0, U32_MAX));
1437 	if (ret == -BCH_ERR_ENOSPC_btree_slot)
1438 		ret = -BCH_ERR_ENOSPC_subvolume_create;
1442 	snapshot_subvols[0] = dst_iter.pos.offset;
1443 	snapshot_subvols[1] = src_subvolid;
1446 		/* Creating a snapshot: */
1448 		src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
1449 				BTREE_ID_subvolumes, POS(0, src_subvolid),
1450 				BTREE_ITER_CACHED, subvolume);
1451 		ret = PTR_ERR_OR_ZERO(src_subvol);
1452 		if (unlikely(ret)) {
1453 			bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
1454 						"subvolume %u not found", src_subvolid);
1458 		parent = le32_to_cpu(src_subvol->v.snapshot);
	/* one node for a fresh subvolume, two when snapshotting */
1461 	ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1463 					src_subvolid ? 2 : 1);
	/* move the source subvolume onto its new snapshot node */
1468 		src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1469 		ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
1474 	new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
1475 	ret = PTR_ERR_OR_ZERO(new_subvol);
1479 	new_subvol->v.flags	= 0;
1480 	new_subvol->v.snapshot	= cpu_to_le32(new_nodes[0]);
1481 	new_subvol->v.inode	= cpu_to_le64(inode);
1482 	new_subvol->v.parent	= cpu_to_le32(src_subvolid);
1483 	new_subvol->v.otime.lo	= cpu_to_le64(bch2_current_time(c));
1484 	new_subvol->v.otime.hi	= 0;
1486 	SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
1487 	SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1489 	*new_subvolid	= new_subvol->k.p.offset;
1490 	*new_snapshotid	= new_nodes[0];
1492 	bch2_trans_iter_exit(trans, &src_iter);
1493 	bch2_trans_iter_exit(trans, &dst_iter);
/*
 * One-time init of subvolume machinery: the two deletion work items and
 * the lock protecting the unlinked-subvolumes list.
 */
1497 int bch2_fs_subvolumes_init(struct bch_fs *c)
1499 	INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1500 	INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1501 		  bch2_subvolume_wait_for_pagecache_and_delete);
1502 	mutex_init(&c->snapshots_unlinked_lock);