Update bcachefs sources to 386f00b639 bcachefs: Snapshot creation, deletion
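The hunks below wire the new snapshot/subvolume code into the filesystem lifecycle (bch2_fs_subvolumes_init on startup, bch2_fs_snapshots_exit on teardown), move journal reclaim start/stop out of the read-write path, rename c->btree_iters_bufs to c->btree_paths_bufs and c->btree_error_wq to c->btree_io_complete_wq, and drop the old btree_write_error work item. As rough orientation for the teardown hunk in __bch2_fs_free below, this is the usual per-cpu buffer teardown idiom that hunk renames; the wrapper type and field names here are hypothetical, a sketch rather than the upstream code:

	#include <linux/percpu.h>
	#include <linux/slab.h>

	/* Illustrative only: each per-cpu slot may hold one lazily allocated
	 * buffer; free the buffers first, then the per-cpu allocation itself. */
	struct path_buf_sketch {
		void		*buf;
	};

	static void path_bufs_free(struct path_buf_sketch __percpu *bufs)
	{
		int cpu;

		if (bufs)
			for_each_possible_cpu(cpu)
				kfree(per_cpu_ptr(bufs, cpu)->buf);
		free_percpu(bufs);
	}
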
diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 2a570eb0763ee32385acba222ae0c6ee7ac93443..1feb7dee2e0c1ca744683cb9158f8596fdcb4f32 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
@@ -39,6 +39,7 @@
 #include "rebalance.h"
 #include "recovery.h"
 #include "replicas.h"
+#include "subvolume.h"
 #include "super.h"
 #include "super-io.h"
 #include "sysfs.h"
@@ -269,7 +270,7 @@ static void bch2_writes_disabled(struct percpu_ref *writes)
 void bch2_fs_read_only(struct bch_fs *c)
 {
        if (!test_bit(BCH_FS_RW, &c->flags)) {
-               BUG_ON(c->journal.reclaim_thread);
+               bch2_journal_reclaim_stop(&c->journal);
                return;
        }
 
@@ -431,12 +432,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        for_each_rw_member(ca, c, i)
                bch2_wake_allocator(ca);
 
-       ret = bch2_journal_reclaim_start(&c->journal);
-       if (ret) {
-               bch_err(c, "error starting journal reclaim: %i", ret);
-               return ret;
-       }
-
        if (!early) {
                ret = bch2_fs_read_write_late(c);
                if (ret)
@@ -474,6 +469,7 @@ static void __bch2_fs_free(struct bch_fs *c)
        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_exit(&c->times[i]);
 
+       bch2_fs_snapshots_exit(c);
        bch2_fs_quota_exit(c);
        bch2_fs_fsio_exit(c);
        bch2_fs_ec_exit(c);
@@ -492,12 +488,12 @@ static void __bch2_fs_free(struct bch_fs *c)
        bch2_journal_entries_free(&c->journal_entries);
        percpu_free_rwsem(&c->mark_lock);
 
-       if (c->btree_iters_bufs)
+       if (c->btree_paths_bufs)
                for_each_possible_cpu(cpu)
-                       kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
+                       kfree(per_cpu_ptr(c->btree_paths_bufs, cpu)->path);
 
        free_percpu(c->online_reserved);
-       free_percpu(c->btree_iters_bufs);
+       free_percpu(c->btree_paths_bufs);
        free_percpu(c->pcpu);
        mempool_exit(&c->large_bkey_pool);
        mempool_exit(&c->btree_bounce_pool);
@@ -513,8 +509,8 @@ static void __bch2_fs_free(struct bch_fs *c)
                destroy_workqueue(c->io_complete_wq);
        if (c->copygc_wq)
                destroy_workqueue(c->copygc_wq);
-       if (c->btree_error_wq)
-               destroy_workqueue(c->btree_error_wq);
+       if (c->btree_io_complete_wq)
+               destroy_workqueue(c->btree_io_complete_wq);
        if (c->btree_update_wq)
                destroy_workqueue(c->btree_update_wq);
 
@@ -566,7 +562,6 @@ void __bch2_fs_stop(struct bch_fs *c)
        for_each_member_device(ca, c, i)
                cancel_work_sync(&ca->io_error_work);
 
-       cancel_work_sync(&c->btree_write_error_work);
        cancel_work_sync(&c->read_only_work);
 
        for (i = 0; i < c->sb.nr_devices; i++)
@@ -693,10 +688,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        mutex_init(&c->usage_scratch_lock);
 
        mutex_init(&c->bio_bounce_pages_lock);
+       mutex_init(&c->snapshot_table_lock);
 
-       bio_list_init(&c->btree_write_error_list);
        spin_lock_init(&c->btree_write_error_lock);
-       INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);
 
        INIT_WORK(&c->journal_seq_blacklist_gc_work,
                  bch2_blacklist_entries_gc);
@@ -713,6 +707,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        INIT_LIST_HEAD(&c->ec_stripe_new_list);
        mutex_init(&c->ec_stripe_new_lock);
 
+       INIT_LIST_HEAD(&c->data_progress_list);
+       mutex_init(&c->data_progress_lock);
+
        spin_lock_init(&c->ec_stripes_heap_lock);
 
        seqcount_init(&c->gc_pos_lock);
@@ -766,7 +763,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
        if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
-           !(c->btree_error_wq = alloc_workqueue("bcachefs_error",
+           !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
@@ -780,7 +777,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                            offsetof(struct btree_write_bio, wbio.bio)),
                        BIOSET_NEED_BVECS) ||
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
-           !(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
+           !(c->btree_paths_bufs = alloc_percpu(struct btree_path_buf)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
            mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
                                        btree_bytes(c)) ||
@@ -795,6 +792,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
            bch2_fs_btree_key_cache_init(&c->btree_key_cache) ||
            bch2_fs_btree_iter_init(c) ||
            bch2_fs_btree_interior_update_init(c) ||
+           bch2_fs_subvolumes_init(c) ||
            bch2_fs_io_init(c) ||
            bch2_fs_encryption_init(c) ||
            bch2_fs_compress_init(c) ||
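
For context on the last two hunks: bch2_fs_alloc chains its allocations and init calls in a single if condition so that the first failure drops to a shared error path (not shown in this diff). A minimal sketch of that idiom, with hypothetical names and assuming the usual goto-based cleanup whose helpers tolerate partially initialized state:

	#include <linux/percpu.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct alloc_sketch {
		struct workqueue_struct	*wq;
		u64 __percpu		*counters;
	};

	/* Illustrative only: any failed step in the chain falls through to
	 * one cleanup path instead of unwinding step by step. */
	static struct alloc_sketch *alloc_sketch_new(void)
	{
		struct alloc_sketch *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return NULL;

		if (!(c->wq = alloc_workqueue("sketch_wq",
					WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
		    !(c->counters = alloc_percpu(u64)))
			goto err;

		return c;
	err:
		if (c->wq)
			destroy_workqueue(c->wq);
		free_percpu(c->counters);
		kfree(c);
		return NULL;
	}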