1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
11 static int bch2_subvolume_delete(struct btree_trans *, u32);
/* Print a snapshot_tree key: its master subvolume and root snapshot IDs. */
15 void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
18 struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
20 prt_printf(out, "subvol %u root snapshot %u",
21 le32_to_cpu(t.v->master_subvol),
22 le32_to_cpu(t.v->root_snapshot));
/*
 * Validate a snapshot_tree key: offsets must lie in [1, U32_MAX]
 * (inode field of the pos must be 0).
 */
25 int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
26 unsigned flags, struct printbuf *err)
28 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
29 bkey_lt(k.k->p, POS(0, 1))) {
30 prt_printf(err, "bad pos");
31 return -BCH_ERR_invalid_bkey;
/* Look up snapshot_tree @id and copy its value into *s. */
37 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
38 struct bch_snapshot_tree *s)
40 return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
41 BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
/*
 * Allocate a new snapshot_tree key in an empty slot at/below U32_MAX;
 * translates the generic "no btree slot" error into the
 * snapshot-tree-specific ENOSPC code.
 */
44 static struct bkey_i_snapshot_tree *
45 __snapshot_tree_create(struct btree_trans *trans)
47 struct btree_iter iter;
48 int ret = bch2_bkey_get_empty_slot(trans, &iter,
49 BTREE_ID_snapshot_trees, POS(0, U32_MAX));
50 struct bkey_i_snapshot_tree *s_t;
52 if (ret == -BCH_ERR_ENOSPC_btree_slot)
53 ret = -BCH_ERR_ENOSPC_snapshot_tree;
57 s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
58 ret = PTR_ERR_OR_ZERO(s_t);
59 bch2_trans_iter_exit(trans, &iter);
60 return ret ? ERR_PTR(ret) : s_t;
/*
 * Create a new snapshot tree pointing at @root_id/@subvol_id;
 * returns the new tree's ID via *tree_id.
 */
63 static int snapshot_tree_create(struct btree_trans *trans,
64 u32 root_id, u32 subvol_id, u32 *tree_id)
66 struct bkey_i_snapshot_tree *n_tree =
67 __snapshot_tree_create(trans);
70 return PTR_ERR(n_tree);
72 n_tree->v.master_subvol = cpu_to_le32(subvol_id);
73 n_tree->v.root_snapshot = cpu_to_le32(root_id);
74 *tree_id = n_tree->k.p.offset;
/* Print a snapshot key: flags, parent, both children, and subvol. */
80 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
83 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
85 prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u",
86 BCH_SNAPSHOT_SUBVOL(s.v),
87 BCH_SNAPSHOT_DELETED(s.v),
88 le32_to_cpu(s.v->parent),
89 le32_to_cpu(s.v->children[0]),
90 le32_to_cpu(s.v->children[1]),
91 le32_to_cpu(s.v->subvol));
/*
 * Validate a snapshot key: position in [1, U32_MAX], parent ID strictly
 * greater than our own (IDs are allocated high-to-low, parents are newer),
 * children normalized descending, distinct, and strictly less than our ID.
 */
94 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
95 unsigned flags, struct printbuf *err)
97 struct bkey_s_c_snapshot s;
100 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
101 bkey_lt(k.k->p, POS(0, 1))) {
102 prt_printf(err, "bad pos");
103 return -BCH_ERR_invalid_bkey;
106 s = bkey_s_c_to_snapshot(k);
108 id = le32_to_cpu(s.v->parent);
109 if (id && id <= k.k->p.offset) {
110 prt_printf(err, "bad parent node (%u <= %llu)",
112 return -BCH_ERR_invalid_bkey;
	/* children[] must be sorted descending: */
115 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
116 prt_printf(err, "children not normalized");
117 return -BCH_ERR_invalid_bkey;
120 if (s.v->children[0] &&
121 s.v->children[0] == s.v->children[1]) {
122 prt_printf(err, "duplicate child nodes");
123 return -BCH_ERR_invalid_bkey;
126 for (i = 0; i < 2; i++) {
127 id = le32_to_cpu(s.v->children[i]);
129 if (id >= k.k->p.offset) {
130 prt_printf(err, "bad child node (%u >= %llu)",
132 return -BCH_ERR_invalid_bkey;
/*
 * Trigger run on snapshot key updates: mirror the on-disk snapshot node
 * into the in-memory c->snapshots genradix (indexed U32_MAX - id, so new,
 * lower IDs append at the front).
 */
139 int bch2_mark_snapshot(struct btree_trans *trans,
140 enum btree_id btree, unsigned level,
141 struct bkey_s_c old, struct bkey_s_c new,
144 struct bch_fs *c = trans->c;
145 struct snapshot_t *t;
147 t = genradix_ptr_alloc(&c->snapshots,
148 U32_MAX - new.k->p.offset,
151 return -BCH_ERR_ENOMEM_mark_snapshot;
153 if (new.k->type == KEY_TYPE_snapshot) {
154 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
156 t->parent = le32_to_cpu(s.v->parent);
157 t->children[0] = le32_to_cpu(s.v->children[0]);
158 t->children[1] = le32_to_cpu(s.v->children[1]);
	/* subvol only counts if the SUBVOL flag is set: */
159 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
160 t->tree = le32_to_cpu(s.v->tree);
/* Look up snapshot node @id and copy its value into *s. */
172 static int snapshot_lookup(struct btree_trans *trans, u32 id,
173 struct bch_snapshot *s)
175 return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
176 BTREE_ITER_WITH_UPDATES, snapshot, s);
/*
 * Is snapshot @id live (exists and not marked deleted)?
 * Returns >0 if live, 0 if deleted, negative on error;
 * a missing node is reported as inconsistency.
 */
179 static int snapshot_live(struct btree_trans *trans, u32 id)
181 struct bch_snapshot v;
187 ret = snapshot_lookup(trans, id, &v);
188 if (bch2_err_matches(ret, ENOENT))
189 bch_err(trans->c, "snapshot node %u not found", id);
193 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot @k: interior nodes with
 * exactly one live child are equivalent to that child (their keys can be
 * collapsed); otherwise a node is its own class representative.
 */
196 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
198 struct bch_fs *c = trans->c;
199 unsigned i, nr_live = 0, live_idx = 0;
200 struct bkey_s_c_snapshot snap;
201 u32 id = k.k->p.offset, child[2];
203 if (k.k->type != KEY_TYPE_snapshot)
206 snap = bkey_s_c_to_snapshot(k);
208 child[0] = le32_to_cpu(snap.v->children[0]);
209 child[1] = le32_to_cpu(snap.v->children[1]);
211 for (i = 0; i < 2; i++) {
212 int ret = snapshot_live(trans, child[i]);
222 snapshot_t(c, id)->equiv = nr_live == 1
223 ? snapshot_t(c, child[live_idx])->equiv
/* Accessors for a snapshot node's children in the in-memory table: */
230 static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
232 return snapshot_t(c, id)->children[child];
235 static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
237 return bch2_snapshot_child(c, id, 0);
240 static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
242 return bch2_snapshot_child(c, id, 1);
/*
 * Pre-order traversal step over a snapshot tree: descend to the left
 * child if any, otherwise walk up until an ancestor has an unvisited
 * right child.
 */
245 static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
249 n = bch2_snapshot_left_child(c, id);
253 while ((parent = bch2_snapshot_parent(c, id))) {
254 n = bch2_snapshot_right_child(c, parent);
/*
 * Walk the snapshot tree rooted at @snapshot_root and return the lowest
 * (i.e. oldest) subvolume ID found in it.
 */
263 static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
265 u32 id = snapshot_root;
269 s = snapshot_t(c, id)->subvol;
271 if (s && (!subvol || s < subvol))
274 id = bch2_snapshot_tree_next(c, id);
/*
 * Find the master (non-snapshot) subvolume in the snapshot tree rooted at
 * @snapshot_root; if none exists, promote the oldest subvolume in the tree
 * to master by clearing its SNAP flag.
 */
280 static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
281 u32 snapshot_root, u32 *subvol_id)
283 struct bch_fs *c = trans->c;
284 struct btree_iter iter;
286 struct bkey_s_c_subvolume s;
289 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
291 if (k.k->type != KEY_TYPE_subvolume)
294 s = bkey_s_c_to_subvolume(k);
295 if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
297 if (!BCH_SUBVOLUME_SNAP(s.v)) {
298 *subvol_id = s.k->p.offset;
302 ret = ret ?: -ENOENT;
304 bch2_trans_iter_exit(trans, &iter);
	/* No master subvolume found - promote the oldest one: */
306 if (bch2_err_matches(ret, ENOENT)) {
307 struct bkey_i_subvolume *s;
309 *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
311 s = bch2_bkey_get_mut_typed(trans, &iter,
312 BTREE_ID_subvolumes, POS(0, *subvol_id),
314 ret = PTR_ERR_OR_ZERO(s);
318 SET_BCH_SUBVOLUME_SNAP(&s->v, false);
/*
 * fsck: verify one snapshot_tree key. Deletes it if it points at a
 * missing/non-root snapshot or one that doesn't point back; otherwise
 * repairs its master_subvol pointer if that subvolume is missing, not in
 * this tree, or is itself a snapshot subvolume.
 */
324 static int check_snapshot_tree(struct btree_trans *trans,
325 struct btree_iter *iter,
328 struct bch_fs *c = trans->c;
329 struct bkey_s_c_snapshot_tree st;
330 struct bch_snapshot s;
331 struct bch_subvolume subvol;
332 struct printbuf buf = PRINTBUF;
336 if (k.k->type != KEY_TYPE_snapshot_tree)
339 st = bkey_s_c_to_snapshot_tree(k);
340 root_id = le32_to_cpu(st.v->root_snapshot);
342 ret = snapshot_lookup(trans, root_id, &s);
343 if (ret && !bch2_err_matches(ret, ENOENT))
	/* root missing, not actually a root, or doesn't point back at us: */
346 if (fsck_err_on(ret ||
347 root_id != bch2_snapshot_root(c, root_id) ||
348 st.k->p.offset != le32_to_cpu(s.tree),
350 "snapshot tree points to missing/incorrect snapshot:\n %s",
351 (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
352 ret = bch2_btree_delete_at(trans, iter, 0);
356 ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
358 if (ret && !bch2_err_matches(ret, ENOENT))
361 if (fsck_err_on(ret, c,
362 "snapshot tree points to missing subvolume:\n %s",
363 (printbuf_reset(&buf),
364 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
365 fsck_err_on(!bch2_snapshot_is_ancestor(c,
366 le32_to_cpu(subvol.snapshot),
368 "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
369 (printbuf_reset(&buf),
370 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
371 fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
372 "snapshot tree points to snapshot subvolume:\n %s",
373 (printbuf_reset(&buf),
374 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
375 struct bkey_i_snapshot_tree *u;
378 ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
382 u = bch2_bkey_make_mut_typed(trans, iter, k, 0, snapshot_tree);
383 ret = PTR_ERR_OR_ZERO(u);
387 u->v.master_subvol = cpu_to_le32(subvol_id);
388 st = snapshot_tree_i_to_s_c(u);
397 * For each snapshot_tree, make sure it points to the root of a snapshot tree
398 * and that snapshot entry points back to it, or delete it.
400 * And, make sure it points to a subvolume within that snapshot tree, or correct
401 * it to point to the oldest subvolume within that snapshot tree.
403 int bch2_fs_check_snapshot_trees(struct bch_fs *c)
405 struct btree_iter iter;
	/* Walk every snapshot_tree key, committing fixes per key: */
409 ret = bch2_trans_run(c,
410 for_each_btree_key_commit(&trans, iter,
411 BTREE_ID_snapshot_trees, POS_MIN,
412 BTREE_ITER_PREFETCH, k,
413 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
414 check_snapshot_tree(&trans, &iter, k)));
417 bch_err(c, "error %i checking snapshot trees", ret);
422 * Look up snapshot tree for @tree_id and find root,
423 * make sure @snap_id is a descendant:
425 static int snapshot_tree_ptr_good(struct btree_trans *trans,
426 u32 snap_id, u32 tree_id)
428 struct bch_snapshot_tree s_t;
429 int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
431 if (bch2_err_matches(ret, ENOENT))
436 return bch2_snapshot_is_ancestor(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
440 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
441 * its snapshot_tree pointer is correct (allocate new one if necessary), then
442 * update this node's pointer to root node's pointer:
444 static int snapshot_tree_ptr_repair(struct btree_trans *trans,
445 struct btree_iter *iter,
446 struct bkey_s_c_snapshot *s)
448 struct bch_fs *c = trans->c;
449 struct btree_iter root_iter;
450 struct bch_snapshot_tree s_t;
451 struct bkey_s_c_snapshot root;
452 struct bkey_i_snapshot *u;
453 u32 root_id = bch2_snapshot_root(c, s->k->p.offset), tree_id;
456 root = bch2_bkey_get_iter_typed(trans, &root_iter,
457 BTREE_ID_snapshots, POS(0, root_id),
458 BTREE_ITER_WITH_UPDATES, snapshot);
459 ret = bkey_err(root);
463 tree_id = le32_to_cpu(root.v->tree);
465 ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
466 if (ret && !bch2_err_matches(ret, ENOENT))
	/* Root's tree pointer is also bad - create a fresh snapshot_tree: */
469 if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
470 u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
471 ret = PTR_ERR_OR_ZERO(u) ?:
472 snapshot_tree_create(trans, root_id,
473 bch2_snapshot_tree_oldest_subvol(c, root_id),
478 u->v.tree = cpu_to_le32(tree_id);
479 if (s->k->p.snapshot == root_id)
480 *s = snapshot_i_to_s_c(u);
	/* Point this (non-root) node at the root's tree: */
483 if (s->k->p.snapshot != root_id) {
484 u = bch2_bkey_make_mut_typed(trans, iter, s->s_c, 0, snapshot);
485 ret = PTR_ERR_OR_ZERO(u);
489 u->v.tree = cpu_to_le32(tree_id);
490 *s = snapshot_i_to_s_c(u);
493 bch2_trans_iter_exit(trans, &root_iter);
/*
 * fsck: verify one snapshot key - parent/child back-pointers, the
 * SUBVOL flag vs. the subvolume it references, and the snapshot_tree
 * pointer (repairing the latter if bad). Flags the fs for deferred
 * deletion work if the node is marked deleted.
 */
497 static int check_snapshot(struct btree_trans *trans,
498 struct btree_iter *iter,
501 struct bch_fs *c = trans->c;
502 struct bkey_s_c_snapshot s;
503 struct bch_subvolume subvol;
504 struct bch_snapshot v;
505 struct printbuf buf = PRINTBUF;
506 bool should_have_subvol;
510 if (k.k->type != KEY_TYPE_snapshot)
513 s = bkey_s_c_to_snapshot(k);
514 id = le32_to_cpu(s.v->parent);
	/* Check parent exists and points back at us: */
516 ret = snapshot_lookup(trans, id, &v);
517 if (bch2_err_matches(ret, ENOENT))
518 bch_err(c, "snapshot with nonexistent parent:\n %s",
519 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
523 if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
524 le32_to_cpu(v.children[1]) != s.k->p.offset) {
525 bch_err(c, "snapshot parent %u missing pointer to child %llu",
	/* Check children exist and point back at us: */
532 for (i = 0; i < 2 && s.v->children[i]; i++) {
533 id = le32_to_cpu(s.v->children[i]);
535 ret = snapshot_lookup(trans, id, &v);
536 if (bch2_err_matches(ret, ENOENT))
537 bch_err(c, "snapshot node %llu has nonexistent child %u",
542 if (le32_to_cpu(v.parent) != s.k->p.offset) {
543 bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
544 id, le32_to_cpu(v.parent), s.k->p.offset);
550 should_have_subvol = BCH_SNAPSHOT_SUBVOL(s.v) &&
551 !BCH_SNAPSHOT_DELETED(s.v);
553 if (should_have_subvol) {
554 id = le32_to_cpu(s.v->subvol);
555 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
556 if (bch2_err_matches(ret, ENOENT))
557 bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
558 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
562 if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
563 bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
	/* Shouldn't have a subvol pointer - clear it: */
569 if (fsck_err_on(s.v->subvol, c, "snapshot should not point to subvol:\n %s",
570 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
571 struct bkey_i_snapshot *u = bch2_trans_kmalloc(trans, sizeof(*u));
573 ret = PTR_ERR_OR_ZERO(u);
577 bkey_reassemble(&u->k_i, s.s_c);
579 ret = bch2_trans_update(trans, iter, &u->k_i, 0);
583 s = snapshot_i_to_s_c(u);
587 ret = snapshot_tree_ptr_good(trans, s.k->p.offset, le32_to_cpu(s.v->tree));
591 if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n %s",
592 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
593 ret = snapshot_tree_ptr_repair(trans, iter, &s);
	/* Remember there's deletion work to do: */
599 if (BCH_SNAPSHOT_DELETED(s.v))
600 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/* fsck entry point: run check_snapshot() on every snapshot key. */
607 int bch2_fs_check_snapshots(struct bch_fs *c)
609 struct btree_iter iter;
613 ret = bch2_trans_run(c,
614 for_each_btree_key_commit(&trans, iter,
615 BTREE_ID_snapshots, POS_MIN,
616 BTREE_ITER_PREFETCH, k,
617 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
618 check_snapshot(&trans, &iter, k)));
620 bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
/*
 * fsck: verify one subvolume key - its snapshot must exist; unlinked
 * subvolumes are deleted here; non-snapshot subvolumes that aren't their
 * tree's master get their SNAP flag set.
 */
624 static int check_subvol(struct btree_trans *trans,
625 struct btree_iter *iter,
628 struct bch_fs *c = trans->c;
629 struct bkey_s_c_subvolume subvol;
630 struct bch_snapshot snapshot;
634 if (k.k->type != KEY_TYPE_subvolume)
637 subvol = bkey_s_c_to_subvolume(k);
638 snapid = le32_to_cpu(subvol.v->snapshot);
639 ret = snapshot_lookup(trans, snapid, &snapshot);
641 if (bch2_err_matches(ret, ENOENT))
642 bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
643 k.k->p.offset, snapid);
	/* Finish deleting a subvolume whose unlink was interrupted: */
647 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
650 ret = bch2_subvolume_delete(trans, iter->pos.offset);
652 bch_err(c, "error deleting subvolume %llu: %s",
653 iter->pos.offset, bch2_err_str(ret));
654 return ret ?: -BCH_ERR_transaction_restart_nested;
657 if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
658 u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
659 u32 snapshot_tree = snapshot_t(c, snapshot_root)->tree;
660 struct bch_snapshot_tree st;
662 ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
664 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
665 "%s: snapshot tree %u not found", __func__, snapshot_tree);
670 if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
671 "subvolume %llu is not set as snapshot but is not master subvolume",
673 struct bkey_i_subvolume *s =
674 bch2_bkey_make_mut_typed(trans, iter, subvol.s_c, 0, subvolume);
675 ret = PTR_ERR_OR_ZERO(s);
679 SET_BCH_SUBVOLUME_SNAP(&s->v, true);
/* fsck entry point: run check_subvol() on every subvolume key. */
687 int bch2_fs_check_subvols(struct bch_fs *c)
689 struct btree_iter iter;
693 ret = bch2_trans_run(c,
694 for_each_btree_key_commit(&trans, iter,
695 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
696 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
697 check_subvol(&trans, &iter, k)));
699 bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
/* Free the in-memory snapshot table on shutdown. */
704 void bch2_fs_snapshots_exit(struct bch_fs *c)
706 genradix_free(&c->snapshots);
/*
 * Startup: rebuild the in-memory snapshot table and equivalence classes
 * from the snapshots btree.
 */
709 int bch2_fs_snapshots_start(struct bch_fs *c)
711 struct btree_iter iter;
715 ret = bch2_trans_run(c,
716 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
718 bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
719 bch2_snapshot_set_equiv(&trans, k)));
721 bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
726 * Mark a snapshot as deleted, for future cleanup:
728 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
730 struct btree_iter iter;
731 struct bkey_i_snapshot *s;
734 s = bch2_bkey_get_mut_typed(trans, &iter,
735 BTREE_ID_snapshots, POS(0, id),
737 ret = PTR_ERR_OR_ZERO(s);
739 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
740 trans->c, "missing snapshot %u", id);
744 /* already deleted? */
745 if (BCH_SNAPSHOT_DELETED(&s->v))
	/* Deleted nodes no longer belong to a subvolume: */
748 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
749 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
752 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically delete snapshot node @id (must already be marked deleted):
 * unlink it from its parent's children[] (keeping them normalized), or -
 * if it was a tree root - repoint or delete its snapshot_tree entry.
 */
756 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
758 struct bch_fs *c = trans->c;
759 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
760 struct btree_iter tree_iter = (struct btree_iter) { NULL };
761 struct bkey_s_c_snapshot s;
766 s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
767 BTREE_ITER_INTENT, snapshot);
769 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
770 "missing snapshot %u", id);
775 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
776 parent_id = le32_to_cpu(s.v->parent);
779 struct bkey_i_snapshot *parent;
781 parent = bch2_bkey_get_mut_typed(trans, &p_iter,
782 BTREE_ID_snapshots, POS(0, parent_id),
784 ret = PTR_ERR_OR_ZERO(parent);
786 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
787 "missing snapshot %u", parent_id);
	/* Find and clear the parent's pointer to us: */
791 for (i = 0; i < 2; i++)
792 if (le32_to_cpu(parent->v.children[i]) == id)
796 bch_err(c, "snapshot %u missing child pointer to %u",
799 parent->v.children[i] = 0;
	/* Keep children[] sorted descending: */
801 if (le32_to_cpu(parent->v.children[0]) <
802 le32_to_cpu(parent->v.children[1]))
803 swap(parent->v.children[0],
804 parent->v.children[1]);
807 * We're deleting the root of a snapshot tree: update the
808 * snapshot_tree entry to point to the new root, or delete it if
809 * this is the last snapshot ID in this tree:
811 struct bkey_i_snapshot_tree *s_t;
813 BUG_ON(s.v->children[1]);
815 s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
816 BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
818 ret = PTR_ERR_OR_ZERO(s_t);
822 if (s.v->children[0]) {
823 s_t->v.root_snapshot = cpu_to_le32(s.v->children[0]);
825 s_t->k.type = KEY_TYPE_deleted;
826 set_bkey_val_u64s(&s_t->k, 0);
830 ret = bch2_btree_delete_at(trans, &iter, 0);
832 bch2_trans_iter_exit(trans, &tree_iter);
833 bch2_trans_iter_exit(trans, &p_iter);
834 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes under @parent in tree @tree,
 * filling new_snapids[]. IDs are taken from empty slots walking downward
 * from the top of the snapshots btree (IDs allocate high-to-low).
 */
838 static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
840 u32 *snapshot_subvols,
843 struct btree_iter iter;
844 struct bkey_i_snapshot *n;
849 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
850 POS_MIN, BTREE_ITER_INTENT);
851 k = bch2_btree_iter_peek(&iter);
856 for (i = 0; i < nr_snapids; i++) {
857 k = bch2_btree_iter_prev_slot(&iter);
	/* Ran off the bottom - no free snapshot IDs left: */
862 if (!k.k || !k.k->p.offset) {
863 ret = -BCH_ERR_ENOSPC_snapshot_create;
867 n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
868 ret = PTR_ERR_OR_ZERO(n);
873 n->v.parent = cpu_to_le32(parent);
874 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
875 n->v.tree = cpu_to_le32(tree);
876 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
	/* Update the in-memory table to match: */
878 ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
879 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
883 new_snapids[i] = iter.pos.offset;
886 bch2_trans_iter_exit(trans, &iter);
891 * Create new snapshot IDs as children of an existing snapshot ID:
893 static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
895 u32 *snapshot_subvols,
898 struct btree_iter iter;
899 struct bkey_i_snapshot *n_parent;
902 n_parent = bch2_bkey_get_mut_typed(trans, &iter,
903 BTREE_ID_snapshots, POS(0, parent),
905 ret = PTR_ERR_OR_ZERO(n_parent);
907 if (bch2_err_matches(ret, ENOENT))
908 bch_err(trans->c, "snapshot %u not found", parent);
	/* Parent must be a leaf - it gains exactly two children here: */
912 if (n_parent->v.children[0] || n_parent->v.children[1]) {
913 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
918 ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
919 new_snapids, snapshot_subvols, nr_snapids);
	/* Parent becomes interior: drop its subvol association: */
923 n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
924 n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
925 n_parent->v.subvol = 0;
926 SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
928 bch2_trans_iter_exit(trans, &iter);
933 * Create a snapshot node that is the root of a new tree:
935 static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
937 u32 *snapshot_subvols,
940 struct bkey_i_snapshot_tree *n_tree;
943 n_tree = __snapshot_tree_create(trans);
944 ret = PTR_ERR_OR_ZERO(n_tree) ?:
945 create_snapids(trans, 0, n_tree->k.p.offset,
946 new_snapids, snapshot_subvols, nr_snapids);
950 n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
951 n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
/*
 * Create snapshot node(s): with a parent, two children of it; with
 * parent == 0, a single node rooting a brand-new tree.
 */
955 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
957 u32 *snapshot_subvols,
960 BUG_ON((parent == 0) != (nr_snapids == 1));
961 BUG_ON((parent != 0) != (nr_snapids == 2));
964 ? bch2_snapshot_node_create_children(trans, parent,
965 new_snapids, snapshot_subvols, nr_snapids)
966 : bch2_snapshot_node_create_tree(trans,
967 new_snapids, snapshot_subvols, nr_snapids);
/*
 * Per-key step of dead-snapshot cleanup: delete keys belonging to deleted
 * snapshots, and of keys with the same pos in the same equivalence class,
 * keep only the first seen.
 */
971 static int snapshot_delete_key(struct btree_trans *trans,
972 struct btree_iter *iter,
974 snapshot_id_list *deleted,
975 snapshot_id_list *equiv_seen,
976 struct bpos *last_pos)
978 struct bch_fs *c = trans->c;
979 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
	/* New position: reset the per-pos set of seen equivalence classes: */
981 if (!bkey_eq(k.k->p, *last_pos))
985 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
986 snapshot_list_has_id(equiv_seen, equiv)) {
987 return bch2_btree_delete_at(trans, iter,
988 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
990 return snapshot_list_add(c, equiv_seen, equiv);
/*
 * Mark a snapshot node deleted if it has no live children and is not
 * referenced by a subvolume.
 */
994 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
997 struct bkey_s_c_snapshot snap;
1001 if (k.k->type != KEY_TYPE_snapshot)
1004 snap = bkey_s_c_to_snapshot(k);
1005 if (BCH_SNAPSHOT_DELETED(snap.v) ||
1006 BCH_SNAPSHOT_SUBVOL(snap.v))
1009 children[0] = le32_to_cpu(snap.v->children[0]);
1010 children[1] = le32_to_cpu(snap.v->children[1]);
1012 ret = snapshot_live(trans, children[0]) ?:
1013 snapshot_live(trans, children[1]);
1018 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Garbage-collect deleted snapshots: mark redundant nodes deleted,
 * recompute equivalence classes, collect the deleted set, delete their
 * keys from every snapshot-aware btree, then delete the nodes themselves.
 */
1022 int bch2_delete_dead_snapshots(struct bch_fs *c)
1024 struct btree_trans trans;
1025 struct btree_iter iter;
1027 struct bkey_s_c_snapshot snap;
1028 snapshot_id_list deleted = { 0 };
1032 if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
	/* Need to be read-write before we can delete anything: */
1035 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
1036 ret = bch2_fs_read_write_early(c);
1038 bch_err(c, "error deleting dead snapshots: error going rw: %s", bch2_err_str(ret));
1043 bch2_trans_init(&trans, c, 0, 0);
1046 * For every snapshot node: If we have no live children and it's not
1047 * pointed to by a subvolume, delete it:
1049 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
1052 bch2_delete_redundant_snapshot(&trans, &iter, k));
1054 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
1058 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
1060 bch2_snapshot_set_equiv(&trans, k));
1062 bch_err(c, "error in bch2_snapshots_set_equiv: %s", bch2_err_str(ret));
	/* Collect the IDs of all nodes marked deleted: */
1066 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
1067 POS_MIN, 0, k, ret) {
1068 if (k.k->type != KEY_TYPE_snapshot)
1071 snap = bkey_s_c_to_snapshot(k);
1072 if (BCH_SNAPSHOT_DELETED(snap.v)) {
1073 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
1078 bch2_trans_iter_exit(&trans, &iter);
1081 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
	/* Drop keys belonging to deleted/equivalent snapshots in every btree: */
1085 for (id = 0; id < BTREE_ID_NR; id++) {
1086 struct bpos last_pos = POS_MIN;
1087 snapshot_id_list equiv_seen = { 0 };
1089 if (!btree_type_has_snapshots(id))
1092 ret = for_each_btree_key_commit(&trans, iter,
1094 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1095 NULL, NULL, BTREE_INSERT_NOFAIL,
1096 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
1098 darray_exit(&equiv_seen);
1101 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
	/* Finally, delete the snapshot nodes themselves: */
1106 for (i = 0; i < deleted.nr; i++) {
1107 ret = commit_do(&trans, NULL, NULL, 0,
1108 bch2_snapshot_node_delete(&trans, deleted.data[i]));
1110 bch_err(c, "error deleting snapshot %u: %s",
1111 deleted.data[i], bch2_err_str(ret));
1116 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1118 darray_exit(&deleted);
1119 bch2_trans_exit(&trans);
/* Workqueue wrapper: run dead-snapshot GC, then drop the write ref. */
1123 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
1125 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
1127 bch2_delete_dead_snapshots(c);
1128 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Queue dead-snapshot GC on the write-ref workqueue; the ref is dropped
 * immediately if the work was already queued.
 */
1131 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
1133 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
1134 !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
1135 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction commit hook: note that deleted snapshots exist and, once
 * fsck has finished, kick off async cleanup.
 */
1138 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
1139 struct btree_trans_commit_hook *h)
1141 struct bch_fs *c = trans->c;
1143 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
1145 if (!test_bit(BCH_FS_FSCK_DONE, &c->flags))
1148 bch2_delete_dead_snapshots_async(c);
/* Validate a subvolume key: position must lie within the subvolume range. */
1154 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
1155 unsigned flags, struct printbuf *err)
1157 if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
1158 bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
1159 prt_printf(err, "invalid pos");
1160 return -BCH_ERR_invalid_bkey;
/*
 * Print a subvolume key; the parent field is optional on disk (older
 * format), so only print it when the value is large enough to contain it.
 */
1166 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
1169 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1171 prt_printf(out, "root %llu snapshot id %u",
1172 le64_to_cpu(s.v->inode),
1173 le32_to_cpu(s.v->snapshot));
1175 if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, parent))
1176 prt_printf(out, " parent %u", le32_to_cpu(s.v->parent));
/*
 * Look up subvolume @subvol into *s; optionally flag the fs as
 * inconsistent if it's missing. The inlined variant exists so hot
 * callers avoid a call through the out-of-line wrapper.
 */
1179 static __always_inline int
1180 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
1181 bool inconsistent_if_not_found,
1183 struct bch_subvolume *s)
1185 int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
1186 iter_flags, subvolume, s);
1187 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
1188 inconsistent_if_not_found,
1189 trans->c, "missing subvolume %u", subvol);
1193 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
1194 bool inconsistent_if_not_found,
1196 struct bch_subvolume *s)
1198 return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/* Resolve snapshot @snapshot to the subvolume it belongs to. */
1201 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
1202 struct bch_subvolume *subvol)
1204 struct bch_snapshot snap;
1206 return snapshot_lookup(trans, snapshot, &snap) ?:
1207 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Look up the snapshot ID of subvolume @subvol; a missing subvolume is
 * reported as filesystem inconsistency.
 */
1210 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
1213 struct btree_iter iter;
1217 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
1219 BTREE_ITER_WITH_UPDATES);
1220 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
1223 *snapid = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
1224 else if (bch2_err_matches(ret, ENOENT))
1225 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
1226 bch2_trans_iter_exit(trans, &iter);
/*
 * If subvolume @k's parent is @old_parent, rewrite it to @new_parent.
 * Values too small to contain the parent field (old format) are treated
 * as matching and rewritten.
 */
1230 static int bch2_subvolume_reparent(struct btree_trans *trans,
1231 struct btree_iter *iter,
1233 u32 old_parent, u32 new_parent)
1235 struct bkey_i_subvolume *s;
1238 if (k.k->type != KEY_TYPE_subvolume)
1241 if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, parent) &&
1242 le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
1245 s = bch2_bkey_make_mut_typed(trans, iter, k, 0, subvolume);
1246 ret = PTR_ERR_OR_ZERO(s);
1250 s->v.parent = cpu_to_le32(new_parent);
1255 * Scan for subvolumes with parent @subvolid_to_delete, reparent:
1257 static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
1259 struct btree_iter iter;
1261 struct bch_subvolume s;
	/* Children of the doomed subvolume inherit its parent: */
1263 return lockrestart_do(trans,
1264 bch2_subvolume_get(trans, subvolid_to_delete, true,
1265 BTREE_ITER_CACHED, &s)) ?:
1266 for_each_btree_key_commit(trans, iter,
1267 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
1268 NULL, NULL, BTREE_INSERT_NOFAIL,
1269 bch2_subvolume_reparent(trans, &iter, k,
1270 subvolid_to_delete, le32_to_cpu(s.parent)));
1274 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
1277 static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1279 struct btree_iter iter;
1280 struct bkey_s_c_subvolume subvol;
1281 struct btree_trans_commit_hook *h;
1285 subvol = bch2_bkey_get_iter_typed(trans, &iter,
1286 BTREE_ID_subvolumes, POS(0, subvolid),
1287 BTREE_ITER_CACHED|BTREE_ITER_INTENT,
1289 ret = bkey_err(subvol);
1290 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1291 "missing subvolume %u", subvolid);
1295 snapid = le32_to_cpu(subvol.v->snapshot);
1297 ret = bch2_btree_delete_at(trans, &iter, 0);
1301 ret = bch2_snapshot_node_set_deleted(trans, snapid);
	/* Arrange for dead-snapshot cleanup once this commits: */
1305 h = bch2_trans_kmalloc(trans, sizeof(*h));
1306 ret = PTR_ERR_OR_ZERO(h);
1310 h->fn = bch2_delete_dead_snapshots_hook;
1311 bch2_trans_commit_hook(trans, h);
1313 bch2_trans_iter_exit(trans, &iter);
/* Reparent children of @subvolid, then delete it in its own commit. */
1317 static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
1319 return bch2_subvolumes_reparent(trans, subvolid) ?:
1320 commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
1321 __bch2_subvolume_delete(trans, subvolid));
/*
 * Deferred unlink work: grab the batch of unlinked subvolume IDs, evict
 * their inodes (flushing pagecache), then delete each subvolume.
 */
1324 void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
1326 struct bch_fs *c = container_of(work, struct bch_fs,
1327 snapshot_wait_for_pagecache_and_delete_work);
	/* Take ownership of the pending list under the lock: */
1333 mutex_lock(&c->snapshots_unlinked_lock);
1334 s = c->snapshots_unlinked;
1335 darray_init(&c->snapshots_unlinked);
1336 mutex_unlock(&c->snapshots_unlinked_lock);
1341 bch2_evict_subvolume_inodes(c, &s);
1343 for (id = s.data; id < s.data + s.nr; id++) {
1344 ret = bch2_trans_run(c, bch2_subvolume_delete(&trans, *id));
1346 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
1354 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/* Commit-hook context carrying the subvolume ID to unlink. */
1357 struct subvolume_unlink_hook {
1358 struct btree_trans_commit_hook h;
/*
 * Commit hook: add the subvolume to the unlinked list and queue the
 * pagecache-flush-and-delete worker.
 */
1362 int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
1363 struct btree_trans_commit_hook *_h)
1365 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
1366 struct bch_fs *c = trans->c;
1369 mutex_lock(&c->snapshots_unlinked_lock);
1370 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
1371 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
1372 mutex_unlock(&c->snapshots_unlinked_lock);
1377 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
1380 if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
1381 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Mark subvolume @subvolid unlinked and register a commit hook that
 * schedules its actual deletion after pagecache eviction.
 */
1385 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
1387 struct btree_iter iter;
1388 struct bkey_i_subvolume *n;
1389 struct subvolume_unlink_hook *h;
1392 h = bch2_trans_kmalloc(trans, sizeof(*h));
1393 ret = PTR_ERR_OR_ZERO(h);
1397 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
1398 h->subvol = subvolid;
1399 bch2_trans_commit_hook(trans, &h->h);
1401 n = bch2_bkey_get_mut_typed(trans, &iter,
1402 BTREE_ID_subvolumes, POS(0, subvolid),
1403 BTREE_ITER_CACHED, subvolume);
1404 ret = PTR_ERR_OR_ZERO(n);
1405 if (unlikely(ret)) {
1406 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
1407 "missing subvolume %u", subvolid);
1411 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
1412 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume rooted at @inode. With src_subvolid != 0 this is
 * a snapshot: the source subvolume is moved to a new child snapshot node
 * and the new subvolume gets the sibling node; otherwise a brand-new
 * snapshot tree is created. Returns the new subvolume and snapshot IDs.
 */
1416 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
1419 u32 *new_snapshotid,
1422 struct bch_fs *c = trans->c;
1423 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
1424 struct bkey_i_subvolume *new_subvol = NULL;
1425 struct bkey_i_subvolume *src_subvol = NULL;
1426 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
1429 ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
1430 BTREE_ID_subvolumes, POS(0, U32_MAX));
1431 if (ret == -BCH_ERR_ENOSPC_btree_slot)
1432 ret = -BCH_ERR_ENOSPC_subvolume_create;
1436 snapshot_subvols[0] = dst_iter.pos.offset;
1437 snapshot_subvols[1] = src_subvolid;
1440 /* Creating a snapshot: */
1442 src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
1443 BTREE_ID_subvolumes, POS(0, src_subvolid),
1444 BTREE_ITER_CACHED, subvolume);
1445 ret = PTR_ERR_OR_ZERO(src_subvol);
1446 if (unlikely(ret)) {
	/*
	 * Use bch2_err_matches(): bcachefs wraps/transforms error codes, so
	 * comparing directly against -ENOENT can miss a missing-key error
	 * (matches every other ENOENT check in this file).
	 */
1447 bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
1448 "subvolume %u not found", src_subvolid);
1452 parent = le32_to_cpu(src_subvol->v.snapshot);
1455 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1457 src_subvolid ? 2 : 1);
	/* Source subvolume moves onto its new snapshot node: */
1462 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1463 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
1468 new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
1469 ret = PTR_ERR_OR_ZERO(new_subvol);
1473 new_subvol->v.flags = 0;
1474 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
1475 new_subvol->v.inode = cpu_to_le64(inode);
1476 new_subvol->v.parent = cpu_to_le32(src_subvolid);
1477 new_subvol->v.otime.lo = cpu_to_le64(bch2_current_time(c));
1478 new_subvol->v.otime.hi = 0;
1480 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
1481 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1483 *new_subvolid = new_subvol->k.p.offset;
1484 *new_snapshotid = new_nodes[0];
1486 bch2_trans_iter_exit(trans, &src_iter);
1487 bch2_trans_iter_exit(trans, &dst_iter);
/* Filesystem init: set up subvolume/snapshot deletion work and locking. */
1491 int bch2_fs_subvolumes_init(struct bch_fs *c)
1493 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1494 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1495 bch2_subvolume_wait_for_pagecache_and_delete);
1496 mutex_init(&c->snapshots_unlinked_lock);