*/
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
- spinlock_t freelist_lock;
u8 open_buckets_partial[OPEN_BUCKETS_COUNT];
unsigned open_buckets_partial_nr;
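/* filesystem state bits, used with test_bit()/set_bit() on bch_fs->flags: */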
enum {
/* startup: */
BCH_FS_ALLOC_READ_DONE,
+ BCH_FS_ALLOC_CLEAN,
BCH_FS_ALLOCATOR_STARTED,
BCH_FS_ALLOCATOR_RUNNING,
+ BCH_FS_ALLOCATOR_STOPPING,
BCH_FS_INITIAL_GC_DONE,
BCH_FS_FSCK_DONE,
BCH_FS_STARTED,
};
};
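+/*
+ * Keys read from the journal at startup, kept in memory and sorted so
+ * recovery can replay them into the btree in order:
+ */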
+struct journal_keys {
+ struct journal_key {
+ enum btree_id btree_id:8;
+ unsigned level:8;
+ struct bkey_i *k;
+ u32 journal_seq;
+ u32 journal_offset;
+ } *d;
+ size_t nr;
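+ /* journal_seq above is stored relative to this base, so it fits in 32 bits: */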
+ u64 journal_seq_base;
+};
+
struct bch_fs {
struct closure cl;
struct bio_set btree_bio;
struct btree_root btree_roots[BTREE_ID_NR];
- bool btree_roots_dirty;
struct mutex btree_root_lock;
struct btree_cache btree_cache;
- mempool_t btree_reserve_pool;
-
/*
 * Cache of allocated btree nodes - if we allocate a btree node and
 * don't use it, if we free it that space can't be reused until going
 * _all_ the way through the allocator (which exposes us to a livelock
 * when allocating btree reserves fail halfway through) - instead, we
 * can stick them here:
 */
struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
unsigned btree_reserve_cache_nr;
struct mutex btree_reserve_cache_lock;
mempool_t btree_interior_update_pool;
struct list_head btree_interior_update_list;
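+ /* interior updates whose new nodes haven't yet been written: */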
+ struct list_head btree_interior_updates_unwritten;
struct mutex btree_interior_update_lock;
struct closure_waitlist btree_interior_update_wait;
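+ /* for finishing interior updates once their new nodes hit disk: */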
+ struct workqueue_struct *btree_interior_update_worker;
+ struct work_struct btree_interior_update_work;
+
+ /* btree_iter.c: */
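+ /* all active btree_trans, tracked for debugging: */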
+ struct mutex btree_trans_lock;
+ struct list_head btree_trans_list;
mempool_t btree_iters_pool;
struct workqueue_struct *wq;
struct rhashtable promote_table;
mempool_t compression_bounce[2];
- mempool_t compress_workspace[BCH_COMPRESSION_NR];
+ mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
mempool_t decompress_workspace;
ZSTD_parameters zstd_params;
mempool_t btree_bounce_pool;
struct journal journal;
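+ /* journal contents read at startup, kept around until replay is complete: */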
+ struct list_head journal_entries;
+ struct journal_keys journal_keys;
u64 last_bucket_seq_cleanup;