1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
/*
 * Format a KEY_TYPE_snapshot key for debug output: flags, parent id,
 * both child ids and the owning subvolume id.
 * (NOTE(review): dump is fragmentary — signature continuation and braces
 * are elided between the visible lines.)
 */
13 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
16 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
18 prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u",
19 BCH_SNAPSHOT_SUBVOL(s.v),
20 BCH_SNAPSHOT_DELETED(s.v),
21 le32_to_cpu(s.v->parent),
22 le32_to_cpu(s.v->children[0]),
23 le32_to_cpu(s.v->children[1]),
24 le32_to_cpu(s.v->subvol));
/*
 * Validity check for KEY_TYPE_snapshot keys; on failure, a description of
 * the problem is written to @err and -BCH_ERR_invalid_bkey is returned.
 */
27 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
28 unsigned flags, struct printbuf *err)
30 struct bkey_s_c_snapshot s;
/* Snapshot keys live at inode 0, offsets 1..U32_MAX */
33 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
34 bkey_lt(k.k->p, POS(0, 1))) {
35 prt_printf(err, "bad pos");
36 return -BCH_ERR_invalid_bkey;
39 if (bkey_val_bytes(k.k) != sizeof(struct bch_snapshot)) {
40 prt_printf(err, "bad val size (%zu != %zu)",
41 bkey_val_bytes(k.k), sizeof(struct bch_snapshot));
42 return -BCH_ERR_invalid_bkey;
45 s = bkey_s_c_to_snapshot(k);
/* A parent, if present, must have a strictly greater id than this node */
47 id = le32_to_cpu(s.v->parent);
48 if (id && id <= k.k->p.offset) {
49 prt_printf(err, "bad parent node (%u <= %llu)",
51 return -BCH_ERR_invalid_bkey;
/* children[] is kept normalized: sorted in descending order */
54 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
55 prt_printf(err, "children not normalized");
56 return -BCH_ERR_invalid_bkey;
/* Two nonzero children must be distinct */
59 if (s.v->children[0] &&
60 s.v->children[0] == s.v->children[1]) {
61 prt_printf(err, "duplicate child nodes");
62 return -BCH_ERR_invalid_bkey;
/* Children must have strictly smaller ids than this node */
65 for (i = 0; i < 2; i++) {
66 id = le32_to_cpu(s.v->children[i]);
68 if (id >= k.k->p.offset) {
69 prt_printf(err, "bad child node (%u >= %llu)",
71 return -BCH_ERR_invalid_bkey;
/*
 * Trigger for snapshot key updates: mirror the on-disk snapshot node into
 * the in-memory c->snapshots radix tree. Entries are indexed by
 * U32_MAX - id so that the tree stays dense (ids are allocated from the
 * top down — see bch2_snapshot_node_create()).
 */
78 int bch2_mark_snapshot(struct btree_trans *trans,
79 enum btree_id btree, unsigned level,
80 struct bkey_s_c old, struct bkey_s_c new,
83 struct bch_fs *c = trans->c;
86 t = genradix_ptr_alloc(&c->snapshots,
87 U32_MAX - new.k->p.offset,
92 if (new.k->type == KEY_TYPE_snapshot) {
93 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
95 t->parent = le32_to_cpu(s.v->parent);
96 t->children[0] = le32_to_cpu(s.v->children[0]);
97 t->children[1] = le32_to_cpu(s.v->children[1]);
/* subvol backpointer is only meaningful when the SUBVOL flag is set */
98 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
/*
 * Look up snapshot node @id in the snapshots btree and copy its value
 * into *@s. Returns 0 on success, -ENOENT if the key is missing or not a
 * snapshot key, or a btree error from the iterator.
 */
109 static int snapshot_lookup(struct btree_trans *trans, u32 id,
110 struct bch_snapshot *s)
112 struct btree_iter iter;
/* BTREE_ITER_WITH_UPDATES: see uncommitted updates in this transaction */
116 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
117 BTREE_ITER_WITH_UPDATES);
118 k = bch2_btree_iter_peek_slot(&iter);
119 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_snapshot ? 0 : -ENOENT;
122 *s = *bkey_s_c_to_snapshot(k).v;
124 bch2_trans_iter_exit(trans, &iter);
/*
 * Returns nonzero if snapshot @id exists and is not marked deleted,
 * 0 if it is deleted, negative on error. NOTE(review): handling of
 * id == 0 and of lookup errors is in elided lines — confirm against the
 * full source.
 */
128 static int snapshot_live(struct btree_trans *trans, u32 id)
130 struct bch_snapshot v;
136 ret = snapshot_lookup(trans, id, &v);
138 bch_err(trans->c, "snapshot node %u not found", id);
142 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot node @k: if the node has
 * exactly one live child it is redundant and inherits that child's equiv
 * class; otherwise (per the elided else-arm, presumably) it is its own
 * class. The result is stored in the in-memory snapshot table.
 */
145 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
147 struct bch_fs *c = trans->c;
148 unsigned i, nr_live = 0, live_idx = 0;
149 struct bkey_s_c_snapshot snap;
150 u32 id = k.k->p.offset, child[2];
152 if (k.k->type != KEY_TYPE_snapshot)
155 snap = bkey_s_c_to_snapshot(k);
157 child[0] = le32_to_cpu(snap.v->children[0]);
158 child[1] = le32_to_cpu(snap.v->children[1]);
/* Count live children; remember which one so we can inherit its equiv */
160 for (i = 0; i < 2; i++) {
161 int ret = snapshot_live(trans, child[i]);
171 snapshot_t(c, id)->equiv = nr_live == 1
172 ? snapshot_t(c, child[live_idx])->equiv
/*
 * fsck pass for a single snapshot key:
 *  - parent must exist and point back at us via one of its children[]
 *  - each child must exist and point back at us via its parent field
 *  - the subvol backpointer must exist and agree with the subvolume's
 *    snapshot field, unless the node is deleted
 * Also records in c->flags whether any deleted snapshots remain, so
 * cleanup can be scheduled later.
 */
178 static int check_snapshot(struct btree_trans *trans,
179 struct btree_iter *iter,
182 struct bch_fs *c = trans->c;
183 struct bkey_s_c_snapshot s;
184 struct bch_subvolume subvol;
185 struct bch_snapshot v;
186 struct printbuf buf = PRINTBUF;
187 bool should_have_subvol;
191 if (k.k->type != KEY_TYPE_snapshot)
194 s = bkey_s_c_to_snapshot(k);
/* Check the parent backlink */
195 id = le32_to_cpu(s.v->parent);
197 ret = snapshot_lookup(trans, id, &v);
199 bch_err(c, "snapshot with nonexistent parent:\n %s",
200 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
204 if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
205 le32_to_cpu(v.children[1]) != s.k->p.offset) {
206 bch_err(c, "snapshot parent %u missing pointer to child %llu",
/* Check each child's backlink to us */
213 for (i = 0; i < 2 && s.v->children[i]; i++) {
214 id = le32_to_cpu(s.v->children[i]);
216 ret = snapshot_lookup(trans, id, &v);
218 bch_err(c, "snapshot node %llu has nonexistent child %u",
223 if (le32_to_cpu(v.parent) != s.k->p.offset) {
224 bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
225 id, le32_to_cpu(v.parent), s.k->p.offset);
/* Only live, subvol-flagged nodes should carry a subvol backpointer */
231 should_have_subvol = BCH_SNAPSHOT_SUBVOL(s.v) &&
232 !BCH_SNAPSHOT_DELETED(s.v);
234 if (should_have_subvol) {
235 id = le32_to_cpu(s.v->subvol);
236 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
238 bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
239 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
243 if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
244 bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
/* Not should_have_subvol: clear a stale subvol pointer via fsck repair */
250 if (fsck_err_on(s.v->subvol, c, "snapshot should not point to subvol:\n %s",
251 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
252 struct bkey_i_snapshot *u = bch2_trans_kmalloc(trans, sizeof(*u));
254 ret = PTR_ERR_OR_ZERO(u);
258 bkey_reassemble(&u->k_i, s.s_c);
260 ret = bch2_trans_update(trans, iter, &u->k_i, 0);
/* Remember that dead snapshots exist so deletion work gets queued */
266 if (BCH_SNAPSHOT_DELETED(s.v))
267 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/*
 * fsck entry point: run check_snapshot() on every key in the snapshots
 * btree, committing as we go (LAZY_RW so repairs work during recovery).
 */
274 int bch2_fs_check_snapshots(struct bch_fs *c)
276 struct btree_trans trans;
277 struct btree_iter iter;
281 bch2_trans_init(&trans, c, 0, 0);
283 ret = for_each_btree_key_commit(&trans, iter,
284 BTREE_ID_snapshots, POS_MIN,
285 BTREE_ITER_PREFETCH, k,
286 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
287 check_snapshot(&trans, &iter, k));
290 bch_err(c, "error %i checking snapshots", ret);
292 bch2_trans_exit(&trans);
/*
 * fsck pass for a single subvolume key: its snapshot backpointer must
 * resolve, and subvolumes already flagged UNLINKED are deleted here
 * (transaction restarts are expected and not reported as errors).
 */
296 static int check_subvol(struct btree_trans *trans,
297 struct btree_iter *iter,
300 struct bkey_s_c_subvolume subvol;
301 struct bch_snapshot snapshot;
305 if (k.k->type != KEY_TYPE_subvolume)
308 subvol = bkey_s_c_to_subvolume(k);
309 snapid = le32_to_cpu(subvol.v->snapshot);
310 ret = snapshot_lookup(trans, snapid, &snapshot);
313 bch_err(trans->c, "subvolume %llu points to nonexistent snapshot %u",
314 k.k->p.offset, snapid);
/* Finish deleting subvolumes that were unlinked but not yet removed */
318 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
319 ret = bch2_subvolume_delete(trans, iter->pos.offset);
320 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
321 bch_err(trans->c, "error deleting subvolume %llu: %s",
322 iter->pos.offset, bch2_err_str(ret));
/*
 * fsck entry point: run check_subvol() on every key in the subvolumes
 * btree, committing each repair.
 */
330 int bch2_fs_check_subvols(struct bch_fs *c)
332 struct btree_trans trans;
333 struct btree_iter iter;
337 bch2_trans_init(&trans, c, 0, 0);
339 ret = for_each_btree_key_commit(&trans, iter,
340 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
341 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
342 check_subvol(&trans, &iter, k));
344 bch2_trans_exit(&trans);
/* Tear down the in-memory snapshot table built by bch2_mark_snapshot(). */
349 void bch2_fs_snapshots_exit(struct bch_fs *c)
351 genradix_free(&c->snapshots);
/*
 * At mount: walk the snapshots btree to (re)build the in-memory snapshot
 * table and compute each node's equivalence class.
 */
354 int bch2_fs_snapshots_start(struct bch_fs *c)
356 struct btree_trans trans;
357 struct btree_iter iter;
361 bch2_trans_init(&trans, c, 0, 0);
363 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
/* bkey_s_c_null as "old": this is an initial insert into the table */
365 bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
366 bch2_snapshot_set_equiv(&trans, k));
368 bch2_trans_exit(&trans);
371 bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
376 * Mark a snapshot as deleted, for future cleanup:
378 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
380 struct btree_iter iter;
381 struct bkey_i_snapshot *s;
384 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
386 s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
387 ret = PTR_ERR_OR_ZERO(s);
/* A missing node here means on-disk corruption, not a normal race */
389 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
393 /* already deleted? */
394 if (BCH_SNAPSHOT_DELETED(&s->v))
/* Deleted nodes no longer have an owning subvolume */
397 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
398 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
401 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
405 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically remove a snapshot node that was previously marked deleted:
 * clear the parent's child pointer to it (re-normalizing children[] to
 * descending order) and delete the key itself.
 */
409 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
411 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
413 struct bkey_s_c_snapshot s;
418 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
420 k = bch2_btree_iter_peek_slot(&iter);
425 if (k.k->type != KEY_TYPE_snapshot) {
426 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
431 s = bkey_s_c_to_snapshot(k);
/* Caller must have gone through bch2_snapshot_node_set_deleted() first */
433 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
434 parent_id = le32_to_cpu(s.v->parent);
437 struct bkey_i_snapshot *parent;
439 bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
442 parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
443 ret = PTR_ERR_OR_ZERO(parent);
445 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", parent_id);
/* Find which child slot points at us */
449 for (i = 0; i < 2; i++)
450 if (le32_to_cpu(parent->v.children[i]) == id)
454 bch_err(trans->c, "snapshot %u missing child pointer to %u",
457 parent->v.children[i] = 0;
/* Keep children[] sorted descending after zeroing one slot */
459 if (le32_to_cpu(parent->v.children[0]) <
460 le32_to_cpu(parent->v.children[1]))
461 swap(parent->v.children[0],
462 parent->v.children[1]);
464 ret = bch2_trans_update(trans, &p_iter, &parent->k_i, 0);
469 ret = bch2_btree_delete_at(trans, &iter, 0);
471 bch2_trans_iter_exit(trans, &p_iter);
472 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent,
 * returning the new ids in @new_snapids. New ids are taken from the top
 * of the key space downwards (walking backwards from the first existing
 * key), which is why the in-memory table indexes by U32_MAX - id.
 * NOTE(review): several error-path lines are elided in this dump.
 */
476 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
478 u32 *snapshot_subvols,
481 struct btree_iter iter;
482 struct bkey_i_snapshot *n;
487 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
488 POS_MIN, BTREE_ITER_INTENT);
489 k = bch2_btree_iter_peek(&iter);
494 for (i = 0; i < nr_snapids; i++) {
/* Step to the next free slot below the lowest allocated id */
495 k = bch2_btree_iter_prev_slot(&iter);
500 if (!k.k || !k.k->p.offset) {
501 ret = -BCH_ERR_ENOSPC_snapshot_create;
505 n = bch2_bkey_alloc(trans, &iter, snapshot);
506 ret = PTR_ERR_OR_ZERO(n);
511 n->v.parent = cpu_to_le32(parent);
512 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
514 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
516 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
520 new_snapids[i] = iter.pos.offset;
/* Now link the new nodes in as the parent's children */
524 bch2_btree_iter_set_pos(&iter, POS(0, parent));
525 n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
526 ret = PTR_ERR_OR_ZERO(n);
529 bch_err(trans->c, "snapshot %u not found", parent);
/* A node with children is interior; it can't take new children */
533 if (n->v.children[0] || n->v.children[1]) {
534 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
539 n->v.children[0] = cpu_to_le32(new_snapids[0]);
540 n->v.children[1] = cpu_to_le32(new_snapids[1]);
/* Parent becomes interior: it no longer directly backs a subvolume */
542 SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
543 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
548 bch2_trans_iter_exit(trans, &iter);
/*
 * Per-key helper for dead-snapshot cleanup: delete @k if its snapshot id
 * is in @deleted, or if another key at the same position in the same
 * equivalence class was already seen (it would be redundant). Otherwise
 * record this key's equiv class in @equiv_seen. @last_pos tracks when we
 * move to a new position so equiv_seen can be reset (reset lines elided).
 */
552 static int snapshot_delete_key(struct btree_trans *trans,
553 struct btree_iter *iter,
555 snapshot_id_list *deleted,
556 snapshot_id_list *equiv_seen,
557 struct bpos *last_pos)
559 struct bch_fs *c = trans->c;
560 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
562 if (!bkey_eq(k.k->p, *last_pos))
566 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
567 snapshot_list_has_id(equiv_seen, equiv)) {
568 return bch2_btree_delete_at(trans, iter,
569 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
571 return snapshot_list_add(c, equiv_seen, equiv);
/*
 * Mark snapshot @k deleted if it is redundant: not already deleted, not
 * backing a subvolume, and (per the elided check on ret, presumably) with
 * no live children.
 */
575 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
578 struct bkey_s_c_snapshot snap;
582 if (k.k->type != KEY_TYPE_snapshot)
585 snap = bkey_s_c_to_snapshot(k);
/* Nodes still owned by a subvolume, or already deleted, are kept as-is */
586 if (BCH_SNAPSHOT_DELETED(snap.v) ||
587 BCH_SNAPSHOT_SUBVOL(snap.v))
590 children[0] = le32_to_cpu(snap.v->children[0]);
591 children[1] = le32_to_cpu(snap.v->children[1]);
593 ret = snapshot_live(trans, children[0]) ?:
594 snapshot_live(trans, children[1]);
599 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Garbage-collect snapshots. Passes:
 *  1. mark redundant nodes deleted
 *  2. recompute equivalence classes
 *  3. collect the list of deleted snapshot ids
 *  4. for every snapshot-aware btree, drop keys belonging to deleted or
 *     redundant snapshots
 *  5. remove the snapshot nodes themselves
 * Gated on BCH_FS_HAVE_DELETED_SNAPSHOTS; may force the fs read-write
 * early during startup.
 */
603 int bch2_delete_dead_snapshots(struct bch_fs *c)
605 struct btree_trans trans;
606 struct btree_iter iter;
608 struct bkey_s_c_snapshot snap;
609 snapshot_id_list deleted = { 0 };
613 if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
/* Deletion writes to the btrees, so we need to be read-write */
616 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
617 ret = bch2_fs_read_write_early(c);
619 bch_err(c, "error deleleting dead snapshots: error going rw: %s", bch2_err_str(ret));
624 bch2_trans_init(&trans, c, 0, 0);
627 * For every snapshot node: If we have no live children and it's not
628 * pointed to by a subvolume, delete it:
630 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
633 bch2_delete_redundant_snapshot(&trans, &iter, k));
635 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
/* Refresh equiv classes now that more nodes are marked deleted */
639 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
641 bch2_snapshot_set_equiv(&trans, k));
643 bch_err(c, "error in bch2_snapshots_set_equiv: %s", bch2_err_str(ret));
/* Gather all deleted ids into a list we can scan against */
647 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
648 POS_MIN, 0, k, ret) {
649 if (k.k->type != KEY_TYPE_snapshot)
652 snap = bkey_s_c_to_snapshot(k);
653 if (BCH_SNAPSHOT_DELETED(snap.v)) {
654 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
659 bch2_trans_iter_exit(&trans, &iter);
662 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
/* Drop keys owned by deleted snapshots in every snapshot-aware btree */
666 for (id = 0; id < BTREE_ID_NR; id++) {
667 struct bpos last_pos = POS_MIN;
668 snapshot_id_list equiv_seen = { 0 };
670 if (!btree_type_has_snapshots(id))
673 ret = for_each_btree_key_commit(&trans, iter,
675 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
676 NULL, NULL, BTREE_INSERT_NOFAIL,
677 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
679 darray_exit(&equiv_seen);
682 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
/* Finally remove the snapshot nodes themselves */
687 for (i = 0; i < deleted.nr; i++) {
688 ret = commit_do(&trans, NULL, NULL, 0,
689 bch2_snapshot_node_delete(&trans, deleted.data[i]));
691 bch_err(c, "error deleting snapshot %u: %s",
692 deleted.data[i], bch2_err_str(ret));
697 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
699 darray_exit(&deleted);
700 bch2_trans_exit(&trans);
/*
 * Workqueue shim: run dead-snapshot deletion, then drop the write ref
 * taken by bch2_delete_dead_snapshots_async().
 */
704 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
706 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
708 bch2_delete_dead_snapshots(c);
709 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Queue dead-snapshot deletion on system_long_wq, holding a write ref
 * while the work is pending; the ref is dropped if the work was already
 * queued (queue_work() returned false), otherwise by the work item.
 */
712 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
714 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
715 !queue_work(system_long_wq, &c->snapshot_delete_work))
716 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction commit hook: note that deleted snapshots now exist, and
 * kick off async cleanup — but only once fsck has finished, since fsck
 * runs its own deletion pass.
 */
719 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
720 struct btree_trans_commit_hook *h)
722 struct bch_fs *c = trans->c;
724 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
726 if (!test_bit(BCH_FS_FSCK_DONE, &c->flags))
729 bch2_delete_dead_snapshots_async(c);
/*
 * Validity check for KEY_TYPE_subvolume keys: position must lie in the
 * subvolume range and the value must be exactly sized.
 */
735 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
736 unsigned flags, struct printbuf *err)
738 if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
739 bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
740 prt_printf(err, "invalid pos");
741 return -BCH_ERR_invalid_bkey;
744 if (bkey_val_bytes(k.k) != sizeof(struct bch_subvolume)) {
745 prt_printf(err, "incorrect value size (%zu != %zu)",
746 bkey_val_bytes(k.k), sizeof(struct bch_subvolume));
747 return -BCH_ERR_invalid_bkey;
/* Format a subvolume key (root inode and snapshot id) for debug output. */
753 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
756 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
758 prt_printf(out, "root %llu snapshot id %u",
759 le64_to_cpu(s.v->inode),
760 le32_to_cpu(s.v->snapshot));
/*
 * Look up subvolume @subvol and copy its value into *@s. Returns -ENOENT
 * if missing; optionally reports that as filesystem inconsistency when
 * @inconsistent_if_not_found is set. Always-inlined so callers with
 * constant flags get specialized code.
 */
763 static __always_inline int
764 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
765 bool inconsistent_if_not_found,
767 struct bch_subvolume *s)
769 struct btree_iter iter;
773 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
775 k = bch2_btree_iter_peek_slot(&iter);
776 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
778 if (ret == -ENOENT && inconsistent_if_not_found)
779 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
781 *s = *bkey_s_c_to_subvolume(k).v;
783 bch2_trans_iter_exit(trans, &iter);
/* Out-of-line wrapper around bch2_subvolume_get_inlined(). */
787 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
788 bool inconsistent_if_not_found,
790 struct bch_subvolume *s)
792 return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/*
 * Resolve @snapshot to the subvolume that owns it: look up the snapshot
 * node, then fetch the subvolume via its subvol backpointer (a missing
 * subvolume is reported as filesystem inconsistency).
 */
795 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
796 struct bch_subvolume *subvol)
798 struct bch_snapshot snap;
800 return snapshot_lookup(trans, snapshot, &snap) ?:
801 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Look up the snapshot id a subvolume currently points at; sees updates
 * pending in this transaction (BTREE_ITER_WITH_UPDATES).
 */
804 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
807 struct bch_subvolume s;
810 ret = bch2_subvolume_get_inlined(trans, subvol, true,
812 BTREE_ITER_WITH_UPDATES,
815 *snapid = le32_to_cpu(s.snapshot);
820 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
823 int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
825 struct btree_iter iter;
827 struct bkey_s_c_subvolume subvol;
828 struct btree_trans_commit_hook *h;
832 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
836 k = bch2_btree_iter_peek_slot(&iter);
841 if (k.k->type != KEY_TYPE_subvolume) {
842 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
847 subvol = bkey_s_c_to_subvolume(k);
848 snapid = le32_to_cpu(subvol.v->snapshot);
/* Remove the subvolume key, then retire its snapshot node */
850 ret = bch2_btree_delete_at(trans, &iter, 0);
854 ret = bch2_snapshot_node_set_deleted(trans, snapid);
/* On commit, schedule async cleanup of the now-dead snapshot */
858 h = bch2_trans_kmalloc(trans, sizeof(*h));
859 ret = PTR_ERR_OR_ZERO(h);
863 h->fn = bch2_delete_dead_snapshots_hook;
864 bch2_trans_commit_hook(trans, h);
866 bch2_trans_iter_exit(trans, &iter);
/*
 * Worker: take the current list of unlinked subvolumes (swapping in an
 * empty list under the lock), evict their cached inodes/pagecache, then
 * delete each subvolume in its own transaction. Drops the write ref
 * taken when the work was queued.
 */
870 void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
872 struct bch_fs *c = container_of(work, struct bch_fs,
873 snapshot_wait_for_pagecache_and_delete_work);
/* Claim the pending list; new unlinks accumulate in a fresh list */
879 mutex_lock(&c->snapshots_unlinked_lock);
880 s = c->snapshots_unlinked;
881 darray_init(&c->snapshots_unlinked);
882 mutex_unlock(&c->snapshots_unlinked_lock);
887 bch2_evict_subvolume_inodes(c, &s);
889 for (id = s.data; id < s.data + s.nr; id++) {
890 ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
891 bch2_subvolume_delete(&trans, *id));
893 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
901 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Commit-hook payload for subvolume unlink: embeds the generic hook plus
 * the subvolume id to process (the subvol member is in an elided line).
 */
904 struct subvolume_unlink_hook {
905 struct btree_trans_commit_hook h;
/*
 * Commit hook run after a subvolume is marked unlinked: add its id to
 * c->snapshots_unlinked (deduplicated under the lock) and queue the
 * pagecache-flush-and-delete worker, holding a write ref while queued.
 */
909 int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
910 struct btree_trans_commit_hook *_h)
912 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
913 struct bch_fs *c = trans->c;
916 mutex_lock(&c->snapshots_unlinked_lock);
917 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
918 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
919 mutex_unlock(&c->snapshots_unlinked_lock);
924 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
/* Drop the ref if the work item was already queued */
927 if (!queue_work(system_long_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
928 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Mark subvolume @subvolid as unlinked and register a commit hook that
 * will queue the actual deletion (after pagecache eviction) once the
 * transaction commits.
 */
932 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
934 struct btree_iter iter;
935 struct bkey_i_subvolume *n;
936 struct subvolume_unlink_hook *h;
939 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
943 n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
944 ret = PTR_ERR_OR_ZERO(n);
946 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
950 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
952 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
/* Hook memory is transaction-owned; freed with the transaction */
956 h = bch2_trans_kmalloc(trans, sizeof(*h));
957 ret = PTR_ERR_OR_ZERO(h);
961 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
962 h->subvol = subvolid;
963 bch2_trans_commit_hook(trans, &h->h);
965 bch2_trans_iter_exit(trans, &iter);
969 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
975 struct bch_fs *c = trans->c;
976 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
977 struct bkey_i_subvolume *new_subvol = NULL;
978 struct bkey_i_subvolume *src_subvol = NULL;
980 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
983 for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
984 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
985 if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
989 * bch2_subvolume_delete() doesn't flush the btree key cache -
990 * ideally it would but that's tricky
992 if (bkey_deleted(k.k) &&
993 !bch2_btree_key_cache_find(c, BTREE_ID_subvolumes, dst_iter.pos))
998 ret = -BCH_ERR_ENOSPC_subvolume_create;
1001 snapshot_subvols[0] = dst_iter.pos.offset;
1002 snapshot_subvols[1] = src_subvolid;
1005 /* Creating a snapshot: */
1007 bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
1008 POS(0, src_subvolid),
1011 src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
1012 ret = PTR_ERR_OR_ZERO(src_subvol);
1013 if (unlikely(ret)) {
1014 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
1015 "subvolume %u not found", src_subvolid);
1019 parent = le32_to_cpu(src_subvol->v.snapshot);
1022 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1024 src_subvolid ? 2 : 1);
1029 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1030 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
1035 new_subvol = bch2_bkey_alloc(trans, &dst_iter, subvolume);
1036 ret = PTR_ERR_OR_ZERO(new_subvol);
1040 new_subvol->v.flags = 0;
1041 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
1042 new_subvol->v.inode = cpu_to_le64(inode);
1043 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
1044 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1045 ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
1049 *new_subvolid = new_subvol->k.p.offset;
1050 *new_snapshotid = new_nodes[0];
1052 bch2_trans_iter_exit(trans, &src_iter);
1053 bch2_trans_iter_exit(trans, &dst_iter);
1057 int bch2_fs_subvolumes_init(struct bch_fs *c)
1059 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1060 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1061 bch2_subvolume_wait_for_pagecache_and_delete);
1062 mutex_init(&c->snapshots_unlinked_lock);