1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_key_cache.h"
5 #include "btree_update.h"
/*
 * Pretty-print a KEY_TYPE_snapshot value: flag bits, parent node id, the
 * two child node ids and the owning subvolume id.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
13 void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
16 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
/* On-disk fields are little-endian; convert before printing. */
18 prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u",
19 BCH_SNAPSHOT_SUBVOL(s.v),
20 BCH_SNAPSHOT_DELETED(s.v),
21 le32_to_cpu(s.v->parent),
22 le32_to_cpu(s.v->children[0]),
23 le32_to_cpu(s.v->children[1]),
24 le32_to_cpu(s.v->subvol));
/*
 * Validate a snapshot key before it is accepted into the btree; on failure
 * an explanation is written to @err.  Checks (visible here): key position
 * within [POS(0, 1), POS(0, U32_MAX)], exact value size, parent id strictly
 * greater than the node's own id, children stored in normalized (descending)
 * order, children distinct, and each child id less than the node's own id.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
27 int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
28 int rw, struct printbuf *err)
30 struct bkey_s_c_snapshot s;
/* Snapshot keys live at inode 0, offsets 1..U32_MAX. */
33 if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
34 bkey_cmp(k.k->p, POS(0, 1)) < 0) {
35 prt_printf(err, "bad pos");
39 if (bkey_val_bytes(k.k) != sizeof(struct bch_snapshot)) {
40 prt_printf(err, "bad val size (%zu != %zu)",
41 bkey_val_bytes(k.k), sizeof(struct bch_snapshot));
45 s = bkey_s_c_to_snapshot(k);
/* Parent ids are always numerically greater than their children. */
47 id = le32_to_cpu(s.v->parent);
48 if (id && id <= k.k->p.offset) {
49 prt_printf(err, "bad parent node (%u <= %llu)",
/* children[0] must be >= children[1] (normalized ordering). */
54 if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
55 prt_printf(err, "children not normalized");
59 if (s.v->children[0] &&
60 s.v->children[0] == s.v->children[1]) {
61 prt_printf(err, "duplicate child nodes");
65 for (i = 0; i < 2; i++) {
66 id = le32_to_cpu(s.v->children[i]);
68 if (id >= k.k->p.offset) {
69 prt_printf(err, "bad child node (%u >= %llu)",
/*
 * Trigger run when a snapshot key is updated: mirror the new key into the
 * in-memory c->snapshots radix tree so lookups don't need a btree walk.
 * The radix index is U32_MAX - offset, so newer (higher-id) snapshots sit
 * near the start of the tree.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
78 int bch2_mark_snapshot(struct btree_trans *trans,
79 struct bkey_s_c old, struct bkey_s_c new,
82 struct bch_fs *c = trans->c;
85 t = genradix_ptr_alloc(&c->snapshots,
86 U32_MAX - new.k->p.offset,
91 if (new.k->type == KEY_TYPE_snapshot) {
92 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
94 t->parent = le32_to_cpu(s.v->parent);
95 t->children[0] = le32_to_cpu(s.v->children[0]);
96 t->children[1] = le32_to_cpu(s.v->children[1]);
/* subvol is only meaningful when the SUBVOL flag is set. */
97 t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
/*
 * Read the snapshot node @id from the snapshots btree into *@s.
 * Returns 0 on success, -ENOENT if no snapshot key exists at that id,
 * or a btree error.  Sees uncommitted updates in this transaction
 * (BTREE_ITER_WITH_UPDATES).
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
108 static int snapshot_lookup(struct btree_trans *trans, u32 id,
109 struct bch_snapshot *s)
111 struct btree_iter iter;
115 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
116 BTREE_ITER_WITH_UPDATES);
117 k = bch2_btree_iter_peek_slot(&iter);
118 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_snapshot ? 0 : -ENOENT;
121 *s = *bkey_s_c_to_snapshot(k).v;
123 bch2_trans_iter_exit(trans, &iter);
/*
 * Is snapshot @id still live (not flagged deleted)?
 * Returns >0 if live, 0 if deleted, negative on error; a missing node
 * is reported as filesystem inconsistency.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
127 static int snapshot_live(struct btree_trans *trans, u32 id)
129 struct bch_snapshot v;
135 ret = snapshot_lookup(trans, id, &v);
137 bch_err(trans->c, "snapshot node %u not found", id);
141 return !BCH_SNAPSHOT_DELETED(&v);
/*
 * Compute the equivalence class for snapshot node @k: if exactly one child
 * is live, this node is redundant and inherits that child's equivalence
 * class; presumably it is otherwise its own class (the else arm of the
 * ternary at line 169/170 is elided here — confirm against full source).
 */
144 static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
146 struct bch_fs *c = trans->c;
147 unsigned i, nr_live = 0, live_idx = 0;
148 struct bkey_s_c_snapshot snap;
149 u32 id = k.k->p.offset, child[2];
151 if (k.k->type != KEY_TYPE_snapshot)
154 snap = bkey_s_c_to_snapshot(k);
156 child[0] = le32_to_cpu(snap.v->children[0]);
157 child[1] = le32_to_cpu(snap.v->children[1]);
/* Count live children; remember the index of the (last) live one. */
159 for (i = 0; i < 2; i++) {
160 int ret = snapshot_live(trans, child[i]);
169 snapshot_t(c, id)->equiv = nr_live == 1
170 ? snapshot_t(c, child[live_idx])->equiv
/*
 * fsck pass for a single snapshot key. Visible checks:
 *  - parent node exists and points back to this node as a child;
 *  - each child node exists and points back to this node as parent;
 *  - the SUBVOL flag agrees with whether the referenced subvolume points
 *    back at this snapshot; a snapshot that should not reference a
 *    subvolume gets its subvol field cleared via a transactional update;
 *  - nodes flagged deleted set BCH_FS_HAVE_DELETED_SNAPSHOTS for later GC.
 * NOTE(review): many error-handling/cleanup lines are elided in this
 * excerpt; the repair actions between the checks are not all visible.
 */
176 static int check_snapshot(struct btree_trans *trans,
177 struct btree_iter *iter,
180 struct bch_fs *c = trans->c;
181 struct bkey_s_c_snapshot s;
182 struct bch_subvolume subvol;
183 struct bch_snapshot v;
184 struct printbuf buf = PRINTBUF;
185 bool should_have_subvol;
189 if (k.k->type != KEY_TYPE_snapshot)
192 s = bkey_s_c_to_snapshot(k);
/* Check the parent link is bidirectional. */
193 id = le32_to_cpu(s.v->parent);
195 ret = snapshot_lookup(trans, id, &v);
197 bch_err(c, "snapshot with nonexistent parent:\n %s",
198 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
202 if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
203 le32_to_cpu(v.children[1]) != s.k->p.offset) {
204 bch_err(c, "snapshot parent %u missing pointer to child %llu",
/* Check each child link is bidirectional. */
211 for (i = 0; i < 2 && s.v->children[i]; i++) {
212 id = le32_to_cpu(s.v->children[i]);
214 ret = snapshot_lookup(trans, id, &v);
216 bch_err(c, "snapshot node %llu has nonexistent child %u",
221 if (le32_to_cpu(v.parent) != s.k->p.offset) {
222 bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
223 id, le32_to_cpu(v.parent), s.k->p.offset);
/* Only live, subvol-flagged snapshots should reference a subvolume. */
229 should_have_subvol = BCH_SNAPSHOT_SUBVOL(s.v) &&
230 !BCH_SNAPSHOT_DELETED(s.v);
232 if (should_have_subvol) {
233 id = le32_to_cpu(s.v->subvol);
234 ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
236 bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
237 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
241 if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
242 bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
248 if (fsck_err_on(s.v->subvol, c, "snapshot should not point to subvol:\n %s",
249 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
250 struct bkey_i_snapshot *u = bch2_trans_kmalloc(trans, sizeof(*u));
252 ret = PTR_ERR_OR_ZERO(u);
256 bkey_reassemble(&u->k_i, s.s_c);
258 ret = bch2_trans_update(trans, iter, &u->k_i, 0);
/* Remember there is GC work to do for deleted snapshots. */
264 if (BCH_SNAPSHOT_DELETED(s.v))
265 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/*
 * fsck entry point: run check_snapshot() over every key in the snapshots
 * btree, committing repairs as it goes (LAZY_RW|NOFAIL).
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
272 int bch2_fs_check_snapshots(struct bch_fs *c)
274 struct btree_trans trans;
275 struct btree_iter iter;
279 bch2_trans_init(&trans, c, 0, 0);
281 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
282 POS(BCACHEFS_ROOT_INO, 0),
283 BTREE_ITER_PREFETCH, k,
284 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
285 check_snapshot(&trans, &iter, k));
288 bch_err(c, "error %i checking snapshots", ret);
290 bch2_trans_exit(&trans);
/*
 * fsck pass for a single subvolume key: verify its snapshot exists, and
 * finish deleting subvolumes that were left in the UNLINKED state.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
294 static int check_subvol(struct btree_trans *trans,
295 struct btree_iter *iter,
298 struct bkey_s_c_subvolume subvol;
299 struct bch_snapshot snapshot;
303 if (k.k->type != KEY_TYPE_subvolume)
306 subvol = bkey_s_c_to_subvolume(k);
307 snapid = le32_to_cpu(subvol.v->snapshot);
308 ret = snapshot_lookup(trans, snapid, &snapshot);
311 bch_err(trans->c, "subvolume %llu points to nonexistent snapshot %u",
312 k.k->p.offset, snapid);
/* Resume an interrupted unlink; restarts are expected, don't log them. */
316 if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
317 ret = bch2_subvolume_delete(trans, iter->pos.offset);
318 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
319 bch_err(trans->c, "error deleting subvolume %llu: %s",
320 iter->pos.offset, bch2_err_str(ret));
/*
 * fsck entry point: run check_subvol() over every key in the subvolumes
 * btree, committing as it goes.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
328 int bch2_fs_check_subvols(struct bch_fs *c)
330 struct btree_trans trans;
331 struct btree_iter iter;
335 bch2_trans_init(&trans, c, 0, 0);
337 ret = for_each_btree_key_commit(&trans, iter,
338 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
339 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
340 check_subvol(&trans, &iter, k));
342 bch2_trans_exit(&trans);
/* Free the in-memory snapshot table on filesystem shutdown. */
347 void bch2_fs_snapshots_exit(struct bch_fs *c)
349 genradix_free(&c->snapshots);
/*
 * On startup, populate the in-memory snapshot table from the snapshots
 * btree and compute each node's equivalence class.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
352 int bch2_fs_snapshots_start(struct bch_fs *c)
354 struct btree_trans trans;
355 struct btree_iter iter;
359 bch2_trans_init(&trans, c, 0, 0);
361 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
363 bch2_mark_snapshot(&trans, bkey_s_c_null, k, 0) ?:
364 bch2_snapshot_set_equiv(&trans, k));
366 bch2_trans_exit(&trans);
369 bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
374 * Mark a snapshot as deleted, for future cleanup:
376 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
378 struct btree_iter iter;
380 struct bkey_i_snapshot *s;
383 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
385 k = bch2_btree_iter_peek_slot(&iter);
390 if (k.k->type != KEY_TYPE_snapshot) {
391 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
396 /* already deleted? */
397 if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
/* Rewrite the key with DELETED set; clearing SUBVOL since a deleted
 * snapshot no longer owns a subvolume. Actual removal happens later in
 * bch2_snapshot_node_delete(). */
400 s = bch2_trans_kmalloc(trans, sizeof(*s));
401 ret = PTR_ERR_OR_ZERO(s);
405 bkey_reassemble(&s->k_i, k);
406 SET_BCH_SNAPSHOT_DELETED(&s->v, true);
407 SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
410 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
414 bch2_trans_iter_exit(trans, &iter);
/*
 * Physically remove a snapshot node that was previously marked deleted:
 * drop the parent's child pointer to @id (re-normalizing the children so
 * children[0] >= children[1]) and delete the node's key.
 * NOTE(review): some error-path lines are elided in this excerpt.
 */
418 static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
420 struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
422 struct bkey_s_c_snapshot s;
423 struct bkey_i_snapshot *parent;
428 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
430 k = bch2_btree_iter_peek_slot(&iter);
435 if (k.k->type != KEY_TYPE_snapshot) {
436 bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
441 s = bkey_s_c_to_snapshot(k);
/* Caller must have run bch2_snapshot_node_set_deleted() first. */
443 BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
444 parent_id = le32_to_cpu(s.v->parent);
447 bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
450 k = bch2_btree_iter_peek_slot(&p_iter);
455 if (k.k->type != KEY_TYPE_snapshot) {
456 bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
461 parent = bch2_trans_kmalloc(trans, sizeof(*parent));
462 ret = PTR_ERR_OR_ZERO(parent);
466 bkey_reassemble(&parent->k_i, k);
/* Find which child slot points at us; inconsistency if neither does. */
468 for (i = 0; i < 2; i++)
469 if (le32_to_cpu(parent->v.children[i]) == id)
473 bch_err(trans->c, "snapshot %u missing child pointer to %u",
476 parent->v.children[i] = 0;
/* Keep the children array normalized (children[0] >= children[1]). */
478 if (le32_to_cpu(parent->v.children[0]) <
479 le32_to_cpu(parent->v.children[1]))
480 swap(parent->v.children[0],
481 parent->v.children[1]);
483 ret = bch2_trans_update(trans, &p_iter, &parent->k_i, 0);
488 ret = bch2_btree_delete_at(trans, &iter, 0);
490 bch2_trans_iter_exit(trans, &p_iter);
491 bch2_trans_iter_exit(trans, &iter);
/*
 * Allocate @nr_snapids new snapshot nodes under @parent, storing the new
 * ids in @new_snapids and linking each to the corresponding subvolume in
 * @snapshot_subvols.  New ids are taken by walking backwards from the end
 * of the snapshots btree; the parent (if any) is then updated to point at
 * its new children and loses its own SUBVOL flag.
 * NOTE(review): several error-check lines are elided in this excerpt.
 */
495 int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
497 u32 *snapshot_subvols,
500 struct btree_iter iter;
501 struct bkey_i_snapshot *n;
506 bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
507 POS_MIN, BTREE_ITER_INTENT);
508 k = bch2_btree_iter_peek(&iter);
513 for (i = 0; i < nr_snapids; i++) {
/* Step to the previous empty slot to pick an unused id. */
514 k = bch2_btree_iter_prev_slot(&iter);
519 if (!k.k || !k.k->p.offset) {
524 n = bch2_trans_kmalloc(trans, sizeof(*n));
525 ret = PTR_ERR_OR_ZERO(n);
529 bkey_snapshot_init(&n->k_i);
532 n->v.parent = cpu_to_le32(parent);
533 n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
535 SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
/* Update the btree and the in-memory table in one go. */
537 ret = bch2_trans_update(trans, &iter, &n->k_i, 0) ?:
538 bch2_mark_snapshot(trans, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
542 new_snapids[i] = iter.pos.offset;
/* Now hook the new nodes up as children of @parent. */
546 bch2_btree_iter_set_pos(&iter, POS(0, parent));
547 k = bch2_btree_iter_peek(&iter);
552 if (k.k->type != KEY_TYPE_snapshot) {
553 bch_err(trans->c, "snapshot %u not found", parent);
558 n = bch2_trans_kmalloc(trans, sizeof(*n));
559 ret = PTR_ERR_OR_ZERO(n);
563 bkey_reassemble(&n->k_i, k);
565 if (n->v.children[0] || n->v.children[1]) {
566 bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
571 n->v.children[0] = cpu_to_le32(new_snapids[0]);
572 n->v.children[1] = cpu_to_le32(new_snapids[1]);
/* An interior node no longer directly owns a subvolume. */
574 SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
575 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
580 bch2_trans_iter_exit(trans, &iter);
/*
 * Per-key helper for dead-snapshot GC: delete @k if its snapshot id is in
 * @deleted, or if another key at the same position in the same equivalence
 * class was already seen (tracked via @equiv_seen, reset when *last_pos
 * moves to a new position).
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
584 static int snapshot_delete_key(struct btree_trans *trans,
585 struct btree_iter *iter,
587 snapshot_id_list *deleted,
588 snapshot_id_list *equiv_seen,
589 struct bpos *last_pos)
591 struct bch_fs *c = trans->c;
592 u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
594 if (bkey_cmp(k.k->p, *last_pos))
598 if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
599 snapshot_list_has_id(equiv_seen, equiv)) {
600 return bch2_btree_delete_at(trans, iter,
601 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
/* First key seen for this equivalence class at this position: keep it. */
603 return snapshot_list_add(c, equiv_seen, equiv);
/*
 * Mark snapshot node @k deleted if it is redundant: not already deleted,
 * not owning a subvolume, and (per the elided check after the
 * snapshot_live() calls) lacking the live children that would make it
 * necessary — confirm the exact liveness condition against full source.
 */
607 static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
610 struct bkey_s_c_snapshot snap;
614 if (k.k->type != KEY_TYPE_snapshot)
617 snap = bkey_s_c_to_snapshot(k);
618 if (BCH_SNAPSHOT_DELETED(snap.v) ||
619 BCH_SNAPSHOT_SUBVOL(snap.v))
622 children[0] = le32_to_cpu(snap.v->children[0]);
623 children[1] = le32_to_cpu(snap.v->children[1]);
625 ret = snapshot_live(trans, children[0]) ?:
626 snapshot_live(trans, children[1]);
631 return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
/*
 * Garbage-collect dead snapshots.  Runs only when the
 * BCH_FS_HAVE_DELETED_SNAPSHOTS flag is set; goes read-write early if the
 * filesystem hasn't started yet.  Passes, in order:
 *  1. mark redundant snapshot nodes deleted;
 *  2. recompute equivalence classes;
 *  3. collect the ids of all deleted nodes;
 *  4. for every snapshot-aware btree, delete keys belonging to deleted
 *     snapshots or duplicated within an equivalence class;
 *  5. physically delete the snapshot nodes themselves.
 * NOTE(review): several error-check/goto lines are elided in this excerpt.
 */
635 int bch2_delete_dead_snapshots(struct bch_fs *c)
637 struct btree_trans trans;
638 struct btree_iter iter;
640 struct bkey_s_c_snapshot snap;
641 snapshot_id_list deleted = { 0 };
645 if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
648 if (!test_bit(BCH_FS_STARTED, &c->flags)) {
649 ret = bch2_fs_read_write_early(c);
/* Fixed typo in error message: "deleleting" -> "deleting". */
651 bch_err(c, "error deleting dead snapshots: error going rw: %s", bch2_err_str(ret));
656 bch2_trans_init(&trans, c, 0, 0);
659 * For every snapshot node: If we have no live children and it's not
660 * pointed to by a subvolume, delete it:
662 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
665 bch2_delete_redundant_snapshot(&trans, &iter, k));
667 bch_err(c, "error deleting redundant snapshots: %s", bch2_err_str(ret));
671 for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
673 bch2_snapshot_set_equiv(&trans, k));
675 bch_err(c, "error in bch2_snapshots_set_equiv: %s", bch2_err_str(ret));
679 for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
680 POS_MIN, 0, k, ret) {
681 if (k.k->type != KEY_TYPE_snapshot)
684 snap = bkey_s_c_to_snapshot(k);
685 if (BCH_SNAPSHOT_DELETED(snap.v)) {
686 ret = snapshot_list_add(c, &deleted, k.k->p.offset);
691 bch2_trans_iter_exit(&trans, &iter);
694 bch_err(c, "error walking snapshots: %s", bch2_err_str(ret));
/* Pass 4: scrub keys from every btree that is snapshot-aware. */
698 for (id = 0; id < BTREE_ID_NR; id++) {
699 struct bpos last_pos = POS_MIN;
700 snapshot_id_list equiv_seen = { 0 };
702 if (!btree_type_has_snapshots(id))
705 ret = for_each_btree_key_commit(&trans, iter,
707 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
708 NULL, NULL, BTREE_INSERT_NOFAIL,
709 snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos));
711 darray_exit(&equiv_seen);
714 bch_err(c, "error deleting snapshot keys: %s", bch2_err_str(ret));
/* Pass 5: remove the snapshot nodes themselves. */
719 for (i = 0; i < deleted.nr; i++) {
720 ret = commit_do(&trans, NULL, NULL, 0,
721 bch2_snapshot_node_delete(&trans, deleted.data[i]));
723 bch_err(c, "error deleting snapshot %u: %s",
724 deleted.data[i], bch2_err_str(ret));
729 clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
731 darray_exit(&deleted);
732 bch2_trans_exit(&trans);
/*
 * Workqueue shim: run dead-snapshot GC, then drop the c->writes ref taken
 * by bch2_delete_dead_snapshots_async().
 */
736 static void bch2_delete_dead_snapshots_work(struct work_struct *work)
738 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
740 bch2_delete_dead_snapshots(c);
741 percpu_ref_put(&c->writes);
/*
 * Queue dead-snapshot GC on system_long_wq, pinning c->writes while the
 * work is pending; the ref is dropped here only if the work was already
 * queued (the work function drops it otherwise).
 */
744 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
746 if (!percpu_ref_tryget_live(&c->writes))
749 if (!queue_work(system_long_wq, &c->snapshot_delete_work))
750 percpu_ref_put(&c->writes);
/*
 * Transaction commit hook: record that deleted snapshots exist and, once
 * fsck is done, kick off async GC.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
753 static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
754 struct btree_trans_commit_hook *h)
756 struct bch_fs *c = trans->c;
758 set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
/* During fsck, GC is deferred; fsck handles cleanup itself. */
760 if (!test_bit(BCH_FS_FSCK_DONE, &c->flags))
763 bch2_delete_dead_snapshots_async(c);
/*
 * Validate a subvolume key: position within [SUBVOL_POS_MIN,
 * SUBVOL_POS_MAX] and exact value size.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
769 int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
770 int rw, struct printbuf *err)
772 if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
773 bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
774 prt_printf(err, "invalid pos");
778 if (bkey_val_bytes(k.k) != sizeof(struct bch_subvolume)) {
779 prt_printf(err, "incorrect value size (%zu != %zu)",
780 bkey_val_bytes(k.k), sizeof(struct bch_subvolume));
/* Pretty-print a subvolume value: root inode number and snapshot id. */
787 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
790 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
792 prt_printf(out, "root %llu snapshot id %u",
793 le64_to_cpu(s.v->inode),
794 le32_to_cpu(s.v->snapshot));
/*
 * Read subvolume @subvol into *@s.  Returns 0 on success, -ENOENT if
 * missing (also flagging filesystem inconsistency when
 * @inconsistent_if_not_found is set), or a btree error.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
797 int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
798 bool inconsistent_if_not_found,
800 struct bch_subvolume *s)
802 struct btree_iter iter;
806 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
808 k = bch2_btree_iter_peek_slot(&iter);
809 ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
811 if (ret == -ENOENT && inconsistent_if_not_found)
812 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
814 *s = *bkey_s_c_to_subvolume(k).v;
816 bch2_trans_iter_exit(trans, &iter);
/*
 * Look up the subvolume owning snapshot @snapshot: read the snapshot node,
 * then fetch the subvolume it references (inconsistent if missing).
 */
820 int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
821 struct bch_subvolume *subvol)
823 struct bch_snapshot snap;
825 return snapshot_lookup(trans, snapshot, &snap) ?:
826 bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
/*
 * Resolve subvolume @subvol to its current snapshot id in *@snapid.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
829 int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
832 struct bch_subvolume s;
835 ret = bch2_subvolume_get(trans, subvol, true,
837 BTREE_ITER_WITH_UPDATES,
840 *snapid = le32_to_cpu(s.snapshot);
845 * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
848 int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
850 struct btree_iter iter;
852 struct bkey_s_c_subvolume subvol;
853 struct btree_trans_commit_hook *h;
857 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
861 k = bch2_btree_iter_peek_slot(&iter);
866 if (k.k->type != KEY_TYPE_subvolume) {
867 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
872 subvol = bkey_s_c_to_subvolume(k);
873 snapid = le32_to_cpu(subvol.v->snapshot);
/* Remove the subvolume key, then flag its snapshot for GC. */
875 ret = bch2_btree_delete_at(trans, &iter, 0);
879 ret = bch2_snapshot_node_set_deleted(trans, snapid);
/* Commit hook schedules async snapshot cleanup after this commit. */
883 h = bch2_trans_kmalloc(trans, sizeof(*h));
884 ret = PTR_ERR_OR_ZERO(h);
888 h->fn = bch2_delete_dead_snapshots_hook;
889 bch2_trans_commit_hook(trans, h);
891 bch2_trans_iter_exit(trans, &iter);
/*
 * Workqueue function for deferred subvolume deletion: grab the current
 * list of unlinked subvolumes (swapping in an empty list under the lock),
 * evict their inodes from the pagecache, then delete each subvolume in its
 * own transaction.  Drops the c->writes ref taken when the work was queued.
 * NOTE(review): some lines (outer loop, error paths) are elided here.
 */
895 void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
897 struct bch_fs *c = container_of(work, struct bch_fs,
898 snapshot_wait_for_pagecache_and_delete_work);
/* Detach the pending list so new unlinks can accumulate meanwhile. */
904 mutex_lock(&c->snapshots_unlinked_lock);
905 s = c->snapshots_unlinked;
906 darray_init(&c->snapshots_unlinked);
907 mutex_unlock(&c->snapshots_unlinked_lock);
912 bch2_evict_subvolume_inodes(c, &s);
914 for (id = s.data; id < s.data + s.nr; id++) {
915 ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
916 bch2_subvolume_delete(&trans, *id));
918 bch_err(c, "error deleting subvolume %u: %s", *id, bch2_err_str(ret));
926 percpu_ref_put(&c->writes);
/*
 * Commit-hook wrapper carrying the id of the subvolume being unlinked
 * (the member declaration is elided in this excerpt).
 */
929 struct subvolume_unlink_hook {
930 struct btree_trans_commit_hook h;
/*
 * Commit hook run after a subvolume is marked unlinked: add it to
 * c->snapshots_unlinked (if not already present) and queue the
 * pagecache-flush-and-delete worker, pinning c->writes for the duration.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
934 int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
935 struct btree_trans_commit_hook *_h)
937 struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
938 struct bch_fs *c = trans->c;
941 mutex_lock(&c->snapshots_unlinked_lock);
942 if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
943 ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
944 mutex_unlock(&c->snapshots_unlinked_lock);
949 if (unlikely(!percpu_ref_tryget_live(&c->writes)))
/* If already queued, the existing work owns the ref; drop ours. */
952 if (!queue_work(system_long_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
953 percpu_ref_put(&c->writes);
/*
 * Mark subvolume @subvolid unlinked (BCH_SUBVOLUME_UNLINKED) and register
 * a commit hook so the actual deletion happens asynchronously after the
 * pagecache has been flushed.
 * NOTE(review): some error-path lines are elided in this excerpt.
 */
957 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
959 struct btree_iter iter;
961 struct bkey_i_subvolume *n;
962 struct subvolume_unlink_hook *h;
965 bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
969 k = bch2_btree_iter_peek_slot(&iter);
974 if (k.k->type != KEY_TYPE_subvolume) {
975 bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
980 n = bch2_trans_kmalloc(trans, sizeof(*n));
981 ret = PTR_ERR_OR_ZERO(n);
985 bkey_reassemble(&n->k_i, k);
986 SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
988 ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
/* Hook fires on successful commit; see ..._delete_hook above. */
992 h = bch2_trans_kmalloc(trans, sizeof(*h));
993 ret = PTR_ERR_OR_ZERO(h);
997 h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
998 h->subvol = subvolid;
999 bch2_trans_commit_hook(trans, &h->h);
1001 bch2_trans_iter_exit(trans, &iter);
/*
 * Create a new subvolume (and, when @src_subvolid is nonzero, snapshot an
 * existing one).  Finds a free slot in the subvolumes btree, allocates one
 * or two snapshot nodes via bch2_snapshot_node_create(), repoints the
 * source subvolume (if any) at its new snapshot, and writes the new
 * subvolume key.  Outputs the new subvolume id and snapshot id.
 * NOTE(review): several error-check lines are elided in this excerpt.
 */
1005 int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
1008 u32 *new_snapshotid,
1011 struct bch_fs *c = trans->c;
1012 struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
1013 struct bkey_i_subvolume *new_subvol = NULL;
1014 struct bkey_i_subvolume *src_subvol = NULL;
1016 u32 parent = 0, new_nodes[2], snapshot_subvols[2];
/* Scan the subvolume range for an empty slot to allocate. */
1019 for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
1020 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
1021 if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
1025 * bch2_subvolume_delete() doesn't flush the btree key cache -
1026 * ideally it would but that's tricky
1028 if (bkey_deleted(k.k) &&
1029 !bch2_btree_key_cache_find(c, BTREE_ID_subvolumes, dst_iter.pos))
1037 snapshot_subvols[0] = dst_iter.pos.offset;
1038 snapshot_subvols[1] = src_subvolid;
1041 /* Creating a snapshot: */
1042 src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
1043 ret = PTR_ERR_OR_ZERO(src_subvol);
1047 bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
1048 POS(0, src_subvolid),
1051 k = bch2_btree_iter_peek_slot(&src_iter);
1056 if (k.k->type != KEY_TYPE_subvolume) {
1057 bch_err(c, "subvolume %u not found", src_subvolid);
1062 bkey_reassemble(&src_subvol->k_i, k);
/* The source's current snapshot becomes parent of the new nodes. */
1063 parent = le32_to_cpu(src_subvol->v.snapshot);
1066 ret = bch2_snapshot_node_create(trans, parent, new_nodes,
1068 src_subvolid ? 2 : 1);
/* Source subvolume moves onto its own fresh snapshot node. */
1073 src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
1074 ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
1079 new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
1080 ret = PTR_ERR_OR_ZERO(new_subvol);
1084 bkey_subvolume_init(&new_subvol->k_i);
1085 new_subvol->v.flags = 0;
1086 new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
1087 new_subvol->v.inode = cpu_to_le64(inode);
1088 SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
1089 SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
1090 new_subvol->k.p = dst_iter.pos;
1091 ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
1095 *new_subvolid = new_subvol->k.p.offset;
1096 *new_snapshotid = new_nodes[0];
1098 bch2_trans_iter_exit(trans, &src_iter);
1099 bch2_trans_iter_exit(trans, &dst_iter);
1103 int bch2_fs_subvolumes_init(struct bch_fs *c)
1105 INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
1106 INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
1107 bch2_subvolume_wait_for_pagecache_and_delete);
1108 mutex_init(&c->snapshots_unlinked_lock);