1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
/*
 * Debug/sysfs formatting for a KEY_TYPE_snapshot value: subvol/deleted
 * flags, parent id, both child ids and the owning subvolume id.
 * On-disk fields are little endian, hence the le32_to_cpu() conversions.
 */
13 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
16 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
18 prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u",
19 BCH_SNAPSHOT_SUBVOL(s.v),
20 BCH_SNAPSHOT_DELETED(s.v),
21 le32_to_cpu(s.v->parent),
22 le32_to_cpu(s.v->children[0]),
23 le32_to_cpu(s.v->children[1]),
24 le32_to_cpu(s.v->subvol));
/*
 * Validate a KEY_TYPE_snapshot key before it is accepted into the btree.
 * Returns 0 if valid, -BCH_ERR_invalid_bkey with an explanation written
 * to @err otherwise.
 */
27 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
28 unsigned flags, struct printbuf *err)
30 struct bkey_s_c_snapshot s;
/* Snapshot keys live at inode 0, offsets 1..U32_MAX: */
33 if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
34 bkey_lt(k.k->p, POS(0, 1))) {
35 prt_printf(err, "bad pos");
36 return -BCH_ERR_invalid_bkey;
39 if (bkey_val_bytes(k.k) != sizeof(struct bch_snapshot)) {
40 prt_printf(err, "bad val size (%zu != %zu)",
41 bkey_val_bytes(k.k), sizeof(struct bch_snapshot));
42 return -BCH_ERR_invalid_bkey;
45 s = bkey_s_c_to_snapshot(k);
/* A parent, if present, must have a strictly greater ID than this node: */
47 id = le32_to_cpu(s.v->parent);
48 if (id && id <= k.k->p.offset) {
49 prt_printf(err, "bad parent node (%u <= %llu)",
51 return -BCH_ERR_invalid_bkey;
/* children[] is kept sorted in descending order ("normalized"): */
54 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
55 prt_printf(err, "children not normalized");
56 return -BCH_ERR_invalid_bkey;
59 if (s.v->children[0] &&
60 s.v->children[0] == s.v->children[1]) {
61 prt_printf(err, "duplicate child nodes");
62 return -BCH_ERR_invalid_bkey;
/* Children must have strictly smaller IDs than this node: */
65 for (i = 0; i < 2; i++) {
66 id = le32_to_cpu(s.v->children[i]);
68 if (id >= k.k->p.offset) {
69 prt_printf(err, "bad child node (%u >= %llu)",
71 return -BCH_ERR_invalid_bkey;
/*
 * Btree trigger for snapshot keys: mirror the on-disk snapshot node into
 * the in-memory c->snapshots genradix.  NOTE(review): the table is indexed
 * by U32_MAX - id, i.e. reversed -- presumably so the most recently
 * allocated (highest) IDs land at low indices; confirm against snapshot_t().
 */
78 int bch2_mark_snapshot(struct btree_trans *trans,
79 struct bkey_s_c old, struct bkey_s_c new,
82 struct bch_fs *c = trans->c;
85 t = genradix_ptr_alloc(&c->snapshots,
86 U32_MAX - new.k->p.offset,
91 if (new.k->type == KEY_TYPE_snapshot) {
92 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
94 t->parent = le32_to_cpu(s.v->parent);
95 t->children[0] = le32_to_cpu(s.v->children[0]);
96 t->children[1] = le32_to_cpu(s.v->children[1]);
/* subvol is only meaningful when the SUBVOL flag is set: */
97 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
/*
 * Look up snapshot node @id and copy its value into @s.
 * Returns -ENOENT if no KEY_TYPE_snapshot key exists at that position.
 */
108 static int snapshot_lookup(struct btree_trans *trans, u32 id,
109 struct bch_snapshot *s)
111 struct btree_iter iter;
/* BTREE_ITER_WITH_UPDATES: also see keys updated in this transaction */
115 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
116 BTREE_ITER_WITH_UPDATES);
117 k = bch2_btree_iter_peek_slot(&iter);
118 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_snapshot ? 0 : -ENOENT;
121 *s = *bkey_s_c_to_snapshot(k).v;
123 bch2_trans_iter_exit(trans, &iter);
/*
 * Returns 1 if snapshot @id exists and is not marked deleted, 0 if it is
 * marked deleted, negative error code on lookup failure.
 */
127 static int snapshot_live(struct btree_trans *trans, u32 id)
129 struct bch_snapshot v;
135 ret = snapshot_lookup(trans, id, &v);
137 bch_err(trans->c, "snapshot node %u not found", id);
141 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot @k: when exactly one child is
 * live, this node is interchangeable with that child and inherits its
 * equiv class; otherwise it forms its own class (else-arm elided here --
 * NOTE(review): confirm it assigns `id`).
 */
144 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
146 struct bch_fs *c = trans->c;
147 unsigned i, nr_live = 0, live_idx = 0;
148 struct bkey_s_c_snapshot snap;
149 u32 id = k.k->p.offset, child[2];
151 if (k.k->type != KEY_TYPE_snapshot)
154 snap = bkey_s_c_to_snapshot(k);
156 child[0] = le32_to_cpu(snap.v->children[0]);
157 child[1] = le32_to_cpu(snap.v->children[1]);
159 for (i = 0; i < 2; i++) {
/* snapshot_live() returns 1 (live), 0 (deleted) or -errno: */
160 int ret = snapshot_live(trans, child[i]);
170 snapshot_t(c, id)->equiv = nr_live == 1
171 ? snapshot_t(c, child[live_idx])->equiv
/*
 * fsck: verify one snapshot node -- parent/child back-pointers must be
 * consistent, and the subvolume it claims to own must point back at it.
 * Repair (clearing a stale subvol pointer) goes through
 * bch2_trans_update() on @iter.
 */
177 static int check_snapshot(struct btree_trans *trans,
178 struct btree_iter *iter,
181 struct bch_fs *c = trans->c;
182 struct bkey_s_c_snapshot s;
183 struct bch_subvolume subvol;
184 struct bch_snapshot v;
185 struct printbuf buf = PRINTBUF;
186 bool should_have_subvol;
190 if (k.k->type != KEY_TYPE_snapshot)
193 s = bkey_s_c_to_snapshot(k);
/* Check that our parent, if any, has a pointer to us: */
194 id = le32_to_cpu(s.v->parent);
196 ret = snapshot_lookup(trans, id, &v);
198 bch_err(c, "snapshot with nonexistent parent:\n %s",
199 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
203 if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
204 le32_to_cpu(v.children[1]) != s.k->p.offset) {
205 bch_err(c, "snapshot parent %u missing pointer to child %llu",
/* Check that each child points back at us: */
212 for (i = 0; i < 2 && s.v->children[i]; i++) {
213 id = le32_to_cpu(s.v->children[i]);
215 ret = snapshot_lookup(trans, id, &v);
217 bch_err(c, "snapshot node %llu has nonexistent child %u",
222 if (le32_to_cpu(v.parent) != s.k->p.offset) {
223 bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
224 id, le32_to_cpu(v.parent), s.k->p.offset);
/* Only non-deleted nodes with the SUBVOL flag should own a subvolume: */
230 should_have_subvol = BCH_SNAPSHOT_SUBVOL(s.v) &&
231 !BCH_SNAPSHOT_DELETED(s.v);
233 if (should_have_subvol) {
234 id = le32_to_cpu(s.v->subvol);
235 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
237 bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
238 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
242 if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
243 bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
/* Repair: clear the stale subvol pointer via an in-transaction update: */
249 if (fsck_err_on(s.v->subvol, c, "snapshot should not point to subvol:\n %s",
250 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
251 struct bkey_i_snapshot *u = bch2_trans_kmalloc(trans, sizeof(*u));
253 ret = PTR_ERR_OR_ZERO(u);
257 bkey_reassemble(&u->k_i, s.s_c);
259 ret = bch2_trans_update(trans, iter, &u->k_i, 0);
/* Remember there is cleanup work for bch2_delete_dead_snapshots(): */
265 if (BCH_SNAPSHOT_DELETED(s.v))
266 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/*
 * fsck entry point: run check_snapshot() on every key in the snapshots
 * btree, committing after each key.
 */
273 int bch2_fs_check_snapshots(struct bch_fs *c)
275 struct btree_trans trans;
276 struct btree_iter iter;
280 bch2_trans_init(&trans, c, 0, 0);
282 ret = for_each_btree_key_commit(&trans, iter,
283 BTREE_ID_snapshots, POS_MIN,
284 BTREE_ITER_PREFETCH, k,
285 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
286 check_snapshot(&trans, &iter, k));
289 bch_err(c, "error %i checking snapshots", ret);
291 bch2_trans_exit(&trans);
/*
 * fsck: verify one subvolume -- its snapshot must exist; subvolumes left
 * in the UNLINKED state (deleted but deletion not completed) are deleted
 * here.
 */
295 static int check_subvol(struct btree_trans *trans,
296 struct btree_iter *iter,
299 struct bkey_s_c_subvolume subvol;
300 struct bch_snapshot snapshot;
304 if (k.k->type != KEY_TYPE_subvolume)
307 subvol = bkey_s_c_to_subvolume(k);
308 snapid = le32_to_cpu(subvol.v->snapshot);
309 ret = snapshot_lookup(trans, snapid, &snapshot);
312 bch_err(trans->c, "subvolume %llu points to nonexistent snapshot %u",
313 k.k->p.offset, snapid);
/* Finish deleting subvolumes that were unlinked before shutdown: */
317 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
318 ret = bch2_subvolume_delete(trans, iter->pos.offset);
/* Transaction restarts are expected and retried; don't log those: */
319 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
320 bch_err(trans->c, "error deleting subvolume %llu: %s",
321 iter->pos.offset, bch2_err_str(ret));
/*
 * fsck entry point: run check_subvol() on every key in the subvolumes
 * btree, committing after each key.
 */
329 int bch2_fs_check_subvols(struct bch_fs *c)
331 struct btree_trans trans;
332 struct btree_iter iter;
336 bch2_trans_init(&trans, c, 0, 0);
338 ret = for_each_btree_key_commit(&trans, iter,
339 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
340 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
341 check_subvol(&trans, &iter, k));
343 bch2_trans_exit(&trans);
/* Free the in-memory snapshot table on filesystem shutdown. */
348 void bch2_fs_snapshots_exit(struct bch_fs *c)
350 genradix_free(&c->snapshots);
/*
 * At mount: rebuild the in-memory snapshot table from the snapshots btree
 * and compute each node's equivalence class.
 */
353 int bch2_fs_snapshots_start(struct bch_fs *c)
355 struct btree_trans trans;
356 struct btree_iter iter;
360 bch2_trans_init(&trans, c, 0, 0);
362 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
364 bch2_mark_snapshot(&trans, bkey_s_c_null, k, 0) ?:
365 bch2_snapshot_set_equiv(&trans, k));
367 bch2_trans_exit(&trans);
370 bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
375 * Mark a snapshot as deleted, for future cleanup:
377 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
379 struct btree_iter iter;
380 struct bkey_i_snapshot *s;
383 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
385 s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
386 ret = PTR_ERR_OR_ZERO(s);
/* The node must exist; flag the filesystem inconsistent if it doesn't: */
388 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
/* already deleted? */
393 if (BCH_SNAPSHOT_DELETED(&s->v))
/* Clear SUBVOL too: a deleted node no longer directly owns a subvolume: */
396 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
397 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
400 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
404 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically remove a snapshot node that was previously marked deleted:
 * clear the parent's pointer to it (keeping children[] normalized in
 * descending order) and delete the key itself.
 */
408 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
410 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
412 struct bkey_s_c_snapshot s;
417 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
419 k = bch2_btree_iter_peek_slot(&iter);
424 if (k.k->type != KEY_TYPE_snapshot) {
425 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
430 s = bkey_s_c_to_snapshot(k);
/* Caller must have marked the node deleted first: */
432 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
433 parent_id = le32_to_cpu(s.v->parent);
436 struct bkey_i_snapshot *parent;
438 bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
441 parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
442 ret = PTR_ERR_OR_ZERO(parent);
444 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", parent_id);
/* Find and clear the parent's pointer to us: */
448 for (i = 0; i < 2; i++)
449 if (le32_to_cpu(parent->v.children[i]) == id)
453 bch_err(trans->c, "snapshot %u missing child pointer to %u",
456 parent->v.children[i] = 0;
/* Re-normalize: children[] is kept sorted in descending order: */
458 if (le32_to_cpu(parent->v.children[0]) <
459 le32_to_cpu(parent->v.children[1]))
460 swap(parent->v.children[0],
461 parent->v.children[1]);
463 ret = bch2_trans_update(trans, &p_iter, &parent->k_i, 0);
468 ret = bch2_btree_delete_at(trans, &iter, 0);
470 bch2_trans_iter_exit(trans, &p_iter);
471 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent,
 * returning the new IDs in @new_snapids and associating them with the
 * subvolumes in @snapshot_subvols.  IDs are allocated downward from the
 * first free slot (children must have smaller IDs than their parent).
 */
475 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
477 u32 *snapshot_subvols,
480 struct btree_iter iter;
481 struct bkey_i_snapshot *n;
486 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
487 POS_MIN, BTREE_ITER_INTENT);
488 k = bch2_btree_iter_peek(&iter);
493 for (i = 0; i < nr_snapids; i++) {
/* Walk backwards looking for an empty slot: */
494 k = bch2_btree_iter_prev_slot(&iter);
/* Ran out of IDs (reached offset 0): */
499 if (!k.k || !k.k->p.offset) {
500 ret = -BCH_ERR_ENOSPC_snapshot_create;
504 n = bch2_bkey_alloc(trans, &iter, snapshot);
505 ret = PTR_ERR_OR_ZERO(n);
510 n->v.parent = cpu_to_le32(parent);
511 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
513 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
/* Keep the in-memory table in sync within the same transaction: */
515 ret = bch2_trans_update(trans, &iter, &n->k_i, 0) ?:
516 bch2_mark_snapshot(trans, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
520 new_snapids[i] = iter.pos.offset;
/* Attach the new nodes as the parent's children: */
524 bch2_btree_iter_set_pos(&iter, POS(0, parent));
525 n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
526 ret = PTR_ERR_OR_ZERO(n);
529 bch_err(trans->c, "snapshot %u not found", parent);
533 if (n->v.children[0] || n->v.children[1]) {
534 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
539 n->v.children[0] = cpu_to_le32(new_snapids[0]);
540 n->v.children[1] = cpu_to_le32(new_snapids[1]);
/* The parent is now interior: it no longer directly owns a subvolume: */
542 SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
543 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
548 bch2_trans_iter_exit(trans, &iter);
/*
 * Per-key helper for dead snapshot deletion: delete a key if its snapshot
 * ID is on the @deleted list, or if we already saw a key at this position
 * in an equivalent snapshot (@equiv_seen); otherwise record its equiv
 * class as seen.
 */
552 static int snapshot_delete_key(struct btree_trans *trans,
553 struct btree_iter *iter,
555 snapshot_id_list *deleted,
556 snapshot_id_list *equiv_seen,
557 struct bpos *last_pos)
559 struct bch_fs *c = trans->c;
560 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
/* equiv_seen tracks a single key position; reset when the position moves: */
562 if (!bkey_eq(k.k->p, *last_pos))
566 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
567 snapshot_list_has_id(equiv_seen, equiv)) {
568 return bch2_btree_delete_at(trans, iter,
569 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
571 return snapshot_list_add(c, equiv_seen, equiv);
/*
 * A snapshot node is redundant when it is not pointed to by a subvolume
 * and has no live children; mark such nodes deleted so
 * bch2_delete_dead_snapshots() can remove them.
 */
575 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
578 struct bkey_s_c_snapshot snap;
582 if (k.k->type != KEY_TYPE_snapshot)
/* Already deleted, or pinned by a subvolume -- nothing to do: */
585 snap = bkey_s_c_to_snapshot(k);
586 if (BCH_SNAPSHOT_DELETED(snap.v) ||
587 BCH_SNAPSHOT_SUBVOL(snap.v))
590 children[0] = le32_to_cpu(snap.v->children[0]);
591 children[1] = le32_to_cpu(snap.v->children[1]);
/* snapshot_live() returns 1 if live, 0 if deleted, -errno on error: */
593 ret = snapshot_live(trans, children[0]) ?:
594 snapshot_live(trans, children[1]);
599 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Delete dead snapshots: mark redundant snapshot nodes deleted, recompute
 * equivalence classes, delete keys belonging to dead snapshot IDs from
 * every snapshot-aware btree, then remove the snapshot nodes themselves.
 *
 * Fixes vs. original: typo "deleleting" in the going-rw error message, and
 * the set_equiv error message named a nonexistent function
 * ("bch2_snapshots_set_equiv" -> bch2_snapshot_set_equiv, see its
 * definition earlier in this file).
 */
603 int bch2_delete_dead_snapshots(struct bch_fs *c)
605 struct btree_trans trans;
606 struct btree_iter iter;
608 struct bkey_s_c_snapshot snap;
609 snapshot_id_list deleted = { 0 };
/* Nothing to do unless check_snapshot()/the commit hook flagged work: */
613 if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
/* We need write access before the fs is fully started: */
616 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
617 ret = bch2_fs_read_write_early(c);
619 bch_err(c, "error deleting dead snapshots: error going rw: %s", bch2_err_str(ret));
624 bch2_trans_init(&trans, c, 0, 0);
627 * For every snapshot node: If we have no live children and it's not
628 * pointed to by a subvolume, delete it:
630 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
633 bch2_delete_redundant_snapshot(&trans, &iter, k));
635 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
/* Equivalence classes may have changed now that nodes are deleted: */
639 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
641 bch2_snapshot_set_equiv(&trans, k));
643 bch_err(c, "error in bch2_snapshot_set_equiv: %s", bch2_err_str(ret));
/* Collect the IDs of all nodes marked deleted: */
647 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
648 POS_MIN, 0, k, ret) {
649 if (k.k->type != KEY_TYPE_snapshot)
652 snap = bkey_s_c_to_snapshot(k);
653 if (BCH_SNAPSHOT_DELETED(snap.v)) {
654 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
659 bch2_trans_iter_exit(&trans, &iter);
662 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
/* Delete keys in dead snapshots from every snapshotted btree: */
666 for (id = 0; id < BTREE_ID_NR; id++) {
667 struct bpos last_pos = POS_MIN;
668 snapshot_id_list equiv_seen = { 0 };
670 if (!btree_type_has_snapshots(id))
673 ret = for_each_btree_key_commit(&trans, iter,
675 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
676 NULL, NULL, BTREE_INSERT_NOFAIL,
677 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
679 darray_exit(&equiv_seen);
682 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
/* Finally, remove the snapshot nodes themselves: */
687 for (i = 0; i < deleted.nr; i++) {
688 ret = commit_do(&trans, NULL, NULL, 0,
689 bch2_snapshot_node_delete(&trans, deleted.data[i]));
691 bch_err(c, "error deleting snapshot %u: %s",
692 deleted.data[i], bch2_err_str(ret));
697 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
699 darray_exit(&deleted);
700 bch2_trans_exit(&trans);
/* Workqueue shim: runs dead snapshot deletion from snapshot_delete_work. */
704 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
706 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
708 bch2_delete_dead_snapshots(c);
/* Drop the write ref taken by bch2_delete_dead_snapshots_async(): */
709 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Kick off dead snapshot deletion in the background; the write ref keeps
 * the filesystem writeable until the work runs, and is dropped here if the
 * work was already queued.
 */
712 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
714 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
715 !queue_work(system_long_wq, &c->snapshot_delete_work))
716 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
/*
 * Transaction commit hook: record that deleted snapshots exist and, once
 * fsck has finished, schedule the background cleanup.
 */
719 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
720 struct btree_trans_commit_hook *h)
722 struct bch_fs *c = trans->c;
724 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/* During fsck, cleanup is driven explicitly rather than asynchronously: */
726 if (!test_bit(BCH_FS_FSCK_DONE, &c->flags))
729 bch2_delete_dead_snapshots_async(c);
/*
 * Validate a KEY_TYPE_subvolume key: position must be within
 * [SUBVOL_POS_MIN, SUBVOL_POS_MAX] and the value exactly
 * sizeof(struct bch_subvolume).
 */
735 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
736 unsigned flags, struct printbuf *err)
738 if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
739 bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
740 prt_printf(err, "invalid pos");
741 return -BCH_ERR_invalid_bkey;
744 if (bkey_val_bytes(k.k) != sizeof(struct bch_subvolume)) {
745 prt_printf(err, "incorrect value size (%zu != %zu)",
746 bkey_val_bytes(k.k), sizeof(struct bch_subvolume));
747 return -BCH_ERR_invalid_bkey;
/* Debug formatting for a subvolume value: root inode and snapshot ID. */
753 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
756 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
758 prt_printf(out, "root %llu snapshot id %u",
759 le64_to_cpu(s.v->inode),
760 le32_to_cpu(s.v->snapshot));
/*
 * Look up subvolume @subvol and copy its value into @s; returns -ENOENT if
 * missing.  If @inconsistent_if_not_found, a missing subvolume is also
 * reported as a filesystem inconsistency.
 */
763 static __always_inline int
764 bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
765 bool inconsistent_if_not_found,
767 struct bch_subvolume *s)
769 struct btree_iter iter;
773 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
775 k = bch2_btree_iter_peek_slot(&iter);
776 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
778 if (ret == -ENOENT && inconsistent_if_not_found)
779 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
781 *s = *bkey_s_c_to_subvolume(k).v;
783 bch2_trans_iter_exit(trans, &iter);
/* Out-of-line wrapper around bch2_subvolume_get_inlined(). */
787 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
788 bool inconsistent_if_not_found,
790 struct bch_subvolume *s)
792 return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
/* Resolve snapshot @snapshot to the subvolume that owns it. */
795 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
796 struct bch_subvolume *subvol)
798 struct bch_snapshot snap;
800 return snapshot_lookup(trans, snapshot, &snap) ?:
801 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Fetch the snapshot ID subvolume @subvol currently points at; a missing
 * subvolume is treated as a filesystem inconsistency.
 */
804 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
807 struct bch_subvolume s;
810 ret = bch2_subvolume_get_inlined(trans, subvol, true,
812 BTREE_ITER_WITH_UPDATES,
815 *snapid = le32_to_cpu(s.snapshot);
820 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
823 int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
825 struct btree_iter iter;
827 struct bkey_s_c_subvolume subvol;
828 struct btree_trans_commit_hook *h;
832 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
836 k = bch2_btree_iter_peek_slot(&iter);
841 if (k.k->type != KEY_TYPE_subvolume) {
842 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
/* Delete the subvolume key, then mark its snapshot node deleted: */
847 subvol = bkey_s_c_to_subvolume(k);
848 snapid = le32_to_cpu(subvol.v->snapshot);
850 ret = bch2_btree_delete_at(trans, &iter, 0);
854 ret = bch2_snapshot_node_set_deleted(trans, snapid);
/* Commit hook schedules the actual snapshot cleanup after commit: */
858 h = bch2_trans_kmalloc(trans, sizeof(*h));
859 ret = PTR_ERR_OR_ZERO(h);
863 h->fn = bch2_delete_dead_snapshots_hook;
864 bch2_trans_commit_hook(trans, h);
866 bch2_trans_iter_exit(trans, &iter);
/*
 * Workqueue handler: evict remaining inodes/pagecache for unlinked
 * subvolumes, then delete them.  The unlinked list is detached under the
 * lock so new entries can keep accumulating while we work.
 */
870 void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
872 struct bch_fs *c = container_of(work, struct bch_fs,
873 snapshot_wait_for_pagecache_and_delete_work);
879 mutex_lock(&c->snapshots_unlinked_lock);
880 s = c->snapshots_unlinked;
881 darray_init(&c->snapshots_unlinked);
882 mutex_unlock(&c->snapshots_unlinked_lock);
887 bch2_evict_subvolume_inodes(c, &s);
889 for (id = s.data; id < s.data + s.nr; id++) {
890 ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
891 bch2_subvolume_delete(&trans, *id));
893 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
/* Drop the write ref taken when this work was queued: */
901 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/* Commit-hook wrapper carrying the ID of the subvolume being unlinked. */
904 struct subvolume_unlink_hook {
905 struct btree_trans_commit_hook h;
/*
 * Commit hook for subvolume unlink: add the subvolume to
 * c->snapshots_unlinked and queue the pagecache-eviction/delete work.
 */
909 int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
910 struct btree_trans_commit_hook *_h)
912 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
913 struct bch_fs *c = trans->c;
916 mutex_lock(&c->snapshots_unlinked_lock);
917 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
918 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
919 mutex_unlock(&c->snapshots_unlinked_lock);
/* Write ref pins the fs writeable until the work runs; drop if not queued: */
924 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
927 if (!queue_work(system_long_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
928 bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
/*
 * Unlink a subvolume: set BCH_SUBVOLUME_UNLINKED and register a commit
 * hook that defers actual deletion until its pagecache has been evicted.
 */
932 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
934 struct btree_iter iter;
935 struct bkey_i_subvolume *n;
936 struct subvolume_unlink_hook *h;
939 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
943 n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
944 ret = PTR_ERR_OR_ZERO(n);
946 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
950 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
952 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
/* Hook memory is transaction-owned; freed with the transaction: */
956 h = bch2_trans_kmalloc(trans, sizeof(*h));
957 ret = PTR_ERR_OR_ZERO(h);
961 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
962 h->subvol = subvolid;
963 bch2_trans_commit_hook(trans, &h->h);
965 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume (or, when @src_subvolid is nonzero, a snapshot of
 * that subvolume) rooted at inode @inode: find a free slot in the
 * subvolumes btree, create the snapshot node(s), and when snapshotting
 * also repoint the source subvolume at its new snapshot ID.
 */
969 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
975 struct bch_fs *c = trans->c;
976 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
977 struct bkey_i_subvolume *new_subvol = NULL;
978 struct bkey_i_subvolume *src_subvol = NULL;
980 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
/* Find an empty slot for the new subvolume: */
983 for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
984 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
985 if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
989 * bch2_subvolume_delete() doesn't flush the btree key cache -
990 * ideally it would but that's tricky
992 if (bkey_deleted(k.k) &&
993 !bch2_btree_key_cache_find(c, BTREE_ID_subvolumes, dst_iter.pos))
998 ret = -BCH_ERR_ENOSPC_subvolume_create;
1001 snapshot_subvols[0] = dst_iter.pos.offset;
1002 snapshot_subvols[1] = src_subvolid;
1005 /* Creating a snapshot: */
1007 bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
1008 POS(0, src_subvolid),
1011 src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
1012 ret = PTR_ERR_OR_ZERO(src_subvol);
1013 if (unlikely(ret)) {
1014 bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
1015 "subvolume %u not found", src_subvolid);
1019 parent = le32_to_cpu(src_subvol->v.snapshot);
/* One new snapshot node for a plain create, two when snapshotting: */
1022 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1024 src_subvolid ? 2 : 1);
/* The source subvolume moves to the second new snapshot node: */
1029 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1030 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
1035 new_subvol = bch2_bkey_alloc(trans, &dst_iter, subvolume);
1036 ret = PTR_ERR_OR_ZERO(new_subvol);
1040 new_subvol->v.flags = 0;
1041 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
1042 new_subvol->v.inode = cpu_to_le64(inode);
1043 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
1044 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1045 ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
/* Report the new subvolume and snapshot IDs back to the caller: */
1049 *new_subvolid = new_subvol->k.p.offset;
1050 *new_snapshotid = new_nodes[0];
1052 bch2_trans_iter_exit(trans, &src_iter);
1053 bch2_trans_iter_exit(trans, &dst_iter);
1057 int bch2_fs_subvolumes_init(struct bch_fs *c)
1059 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1060 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1061 bch2_subvolume_wait_for_pagecache_and_delete);
1062 mutex_init(&c->snapshots_unlinked_lock);