Update bcachefs sources to c9b4a210f9 fixup! bcachefs: Fixes for going RO
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index 09afbed9511f09c8aff91663abf1fa978e70352e..72d8ef77907b0af8696415e23f74e49a319949d2 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_H
 #define _BCACHEFS_H
 
@@ -282,9 +283,7 @@ do {                                                                        \
                "Force reads to use the reconstruct path, when reading" \
                "from erasure coded extents")                           \
        BCH_DEBUG_PARAM(test_restart_gc,                                \
-               "Test restarting mark and sweep gc when bucket gens change")\
-       BCH_DEBUG_PARAM(test_reconstruct_alloc,                         \
-               "Test reconstructing the alloc btree")
+               "Test restarting mark and sweep gc when bucket gens change")
 
 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
 
@@ -300,7 +299,6 @@ do {                                                                        \
        x(btree_node_sort)                      \
        x(btree_node_read)                      \
        x(btree_gc)                             \
-       x(btree_update)                         \
        x(btree_lock_contended_read)            \
        x(btree_lock_contended_intent)          \
        x(btree_lock_contended_write)           \
@@ -358,6 +356,7 @@ enum gc_phase {
        GC_PHASE_BTREE_XATTRS,
        GC_PHASE_BTREE_ALLOC,
        GC_PHASE_BTREE_QUOTAS,
+       GC_PHASE_BTREE_REFLINK,
 
        GC_PHASE_PENDING_DELETE,
        GC_PHASE_ALLOC,
@@ -408,7 +407,6 @@ struct bch_dev {
         */
        struct bucket_array __rcu *buckets[2];
        unsigned long           *buckets_nouse;
-       unsigned long           *buckets_written;
        struct rw_semaphore     bucket_lock;
 
        struct bch_dev_usage __percpu *usage[2];
@@ -427,7 +425,6 @@ struct bch_dev {
         */
        alloc_fifo              free[RESERVE_NR];
        alloc_fifo              free_inc;
-       spinlock_t              freelist_lock;
 
        u8                      open_buckets_partial[OPEN_BUCKETS_COUNT];
        unsigned                open_buckets_partial_nr;
@@ -480,8 +477,10 @@ struct bch_dev {
 enum {
        /* startup: */
        BCH_FS_ALLOC_READ_DONE,
+       BCH_FS_ALLOC_CLEAN,
        BCH_FS_ALLOCATOR_STARTED,
        BCH_FS_ALLOCATOR_RUNNING,
+       BCH_FS_ALLOCATOR_STOPPING,
        BCH_FS_INITIAL_GC_DONE,
        BCH_FS_FSCK_DONE,
        BCH_FS_STARTED,
@@ -499,6 +498,7 @@ enum {
        /* misc: */
        BCH_FS_BDEV_MOUNTED,
        BCH_FS_FIXED_GENS,
+       BCH_FS_ALLOC_WRITTEN,
        BCH_FS_REBUILD_REPLICAS,
        BCH_FS_HOLD_BTREE_WRITES,
 };
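
The BCH_FS_* values above are bit positions in the filesystem's flags word, so the new BCH_FS_ALLOC_CLEAN, BCH_FS_ALLOCATOR_STOPPING and BCH_FS_ALLOC_WRITTEN entries each cost a single bit. A minimal, self-contained sketch of that pattern follows; in the kernel these bits would be manipulated with set_bit()/test_bit() on c->flags, and the type and helper names below are hypothetical stand-ins, not bcachefs code:

#include <stdbool.h>

/* hypothetical stand-in for the BCH_FS_* bit indices above */
enum {
	FS_ALLOC_READ_DONE,
	FS_ALLOC_CLEAN,
	FS_ALLOCATOR_STARTED,
	FS_ALLOCATOR_RUNNING,
	FS_ALLOCATOR_STOPPING,
	FS_ALLOC_WRITTEN,
};

struct fs { unsigned long flags; };

static inline void fs_set_flag(struct fs *c, int bit)
{
	c->flags |= 1UL << bit;		/* kernel code: set_bit(bit, &c->flags) */
}

static inline bool fs_test_flag(struct fs *c, int bit)
{
	return c->flags & (1UL << bit);	/* kernel code: test_bit(bit, &c->flags) */
}
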
@@ -523,6 +523,18 @@ struct journal_seq_blacklist_table {
        }                       entries[0];
 };
 
+struct journal_keys {
+       struct journal_key {
+               enum btree_id   btree_id:8;
+               unsigned        level:8;
+               struct bkey_i   *k;
+               u32             journal_seq;
+               u32             journal_offset;
+       }                       *d;
+       size_t                  nr;
+       u64                     journal_seq_base;
+};
+
 struct bch_fs {
        struct closure          cl;
 
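
The new struct journal_keys gives recovery a flat, in-memory index over the keys found in the journal: d and nr form an array of (btree_id, level, key) entries, with journal_seq and journal_offset recording where each key came from relative to journal_seq_base. An array like this is typically kept sorted so that entries for the same btree and position sit next to each other during replay. A simplified, self-contained sketch of that ordering follows; the types and comparator are illustrative only, not the real bcachefs code:

#include <stdint.h>
#include <stdlib.h>

/* illustrative stand-ins for enum btree_id / struct bkey_i */
struct jkey {
	uint8_t		btree_id;
	uint8_t		level;
	uint64_t	pos;		/* stand-in for the bkey's position */
	uint32_t	journal_seq;
};

/*
 * Order by btree, then level, then position, then journal sequence,
 * so later journal entries for the same key sort last:
 */
static int jkey_cmp(const void *_l, const void *_r)
{
	const struct jkey *l = _l, *r = _r;

	if (l->btree_id != r->btree_id)
		return l->btree_id < r->btree_id ? -1 : 1;
	if (l->level != r->level)
		return l->level < r->level ? -1 : 1;
	if (l->pos != r->pos)
		return l->pos < r->pos ? -1 : 1;
	if (l->journal_seq != r->journal_seq)
		return l->journal_seq < r->journal_seq ? -1 : 1;
	return 0;
}

/* usage: qsort(keys.d, keys.nr, sizeof(keys.d[0]), jkey_cmp); */
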
@@ -590,13 +602,10 @@ struct bch_fs {
        struct bio_set          btree_bio;
 
        struct btree_root       btree_roots[BTREE_ID_NR];
-       bool                    btree_roots_dirty;
        struct mutex            btree_root_lock;
 
        struct btree_cache      btree_cache;
 
-       mempool_t               btree_reserve_pool;
-
        /*
         * Cache of allocated btree nodes - if we allocate a btree node and
         * don't use it, if we free it that space can't be reused until going
@@ -610,9 +619,16 @@ struct bch_fs {
 
        mempool_t               btree_interior_update_pool;
        struct list_head        btree_interior_update_list;
+       struct list_head        btree_interior_updates_unwritten;
        struct mutex            btree_interior_update_lock;
        struct closure_waitlist btree_interior_update_wait;
 
+       struct workqueue_struct *btree_interior_update_worker;
+       struct work_struct      btree_interior_update_work;
+
+       /* btree_iter.c: */
+       struct mutex            btree_trans_lock;
+       struct list_head        btree_trans_list;
        mempool_t               btree_iters_pool;
 
        struct workqueue_struct *wq;
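
The new btree_trans_lock/btree_trans_list pair a mutex with a list of live btree transactions, presumably so in-flight transactions can be walked, e.g. for debugging. The usual shape is: link the transaction when it is set up, unlink when it is torn down, both under the lock. A self-contained userspace sketch of that pattern follows; the names are hypothetical, and kernel code would use list_add()/list_del() on a struct list_head instead of the hand-rolled list here:

#include <pthread.h>

/* illustrative intrusive list node, standing in for struct list_head */
struct list_node { struct list_node *next, *prev; };

struct trans {
	struct list_node	list;
	/* ... transaction state ... */
};

static pthread_mutex_t	trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_node	trans_list = { &trans_list, &trans_list };

/* link a new transaction onto the global list under the lock */
static void trans_register(struct trans *t)
{
	pthread_mutex_lock(&trans_lock);
	t->list.next = trans_list.next;
	t->list.prev = &trans_list;
	trans_list.next->prev = &t->list;
	trans_list.next = &t->list;
	pthread_mutex_unlock(&trans_lock);
}

/* unlink it again when the transaction exits */
static void trans_unregister(struct trans *t)
{
	pthread_mutex_lock(&trans_lock);
	t->list.prev->next = t->list.next;
	t->list.next->prev = t->list.prev;
	pthread_mutex_unlock(&trans_lock);
}
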
@@ -716,7 +732,7 @@ struct bch_fs {
        struct rhashtable       promote_table;
 
        mempool_t               compression_bounce[2];
-       mempool_t               compress_workspace[BCH_COMPRESSION_NR];
+       mempool_t               compress_workspace[BCH_COMPRESSION_TYPE_NR];
        mempool_t               decompress_workspace;
        ZSTD_parameters         zstd_params;
 
@@ -726,6 +742,8 @@ struct bch_fs {
 
        atomic64_t              key_version;
 
+       mempool_t               large_bkey_pool;
+
        /* REBALANCE */
        struct bch_fs_rebalance rebalance;
 
@@ -739,12 +757,16 @@ struct bch_fs {
        /* ERASURE CODING */
        struct list_head        ec_new_stripe_list;
        struct mutex            ec_new_stripe_lock;
+       u64                     ec_stripe_hint;
 
        struct bio_set          ec_bioset;
 
        struct work_struct      ec_stripe_delete_work;
        struct llist_head       ec_stripe_delete_list;
 
+       /* REFLINK */
+       u64                     reflink_hint;
+
        /* VFS IO PATH - fs-io.c */
        struct bio_set          writepage_bioset;
        struct bio_set          dio_write_bioset;
@@ -782,6 +804,8 @@ struct bch_fs {
        mempool_t               btree_bounce_pool;
 
        struct journal          journal;
+       struct list_head        journal_entries;
+       struct journal_keys     journal_keys;
 
        u64                     last_bucket_seq_cleanup;