+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_H
#define _BCACHEFS_H
#undef pr_fmt
#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
+#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
#include <linux/closure.h>
#include <linux/kobject.h>
-#include <linux/lglock.h>
#include <linux/list.h>
+#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>
-#include <linux/radix-tree.h>
-#include <linux/rbtree.h>
+#include <linux/percpu-rwsem.h>
#include <linux/rhashtable.h>
#include <linux/rwsem.h>
+#include <linux/semaphore.h>
#include <linux/seqlock.h>
#include <linux/shrinker.h>
+#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/zstd.h>
#include "bcachefs_format.h"
-#include "bset.h"
+#include "errcode.h"
#include "fifo.h"
#include "opts.h"
#include "util.h"
-#include <linux/dynamic_fault.h>
+#define dynamic_fault(...) 0
+#define race_fault(...) 0
-#define bch2_fs_init_fault(name) \
+#define bch2_fs_init_fault(name) \
dynamic_fault("bcachefs:bch_fs_init:" name)
#define bch2_meta_read_fault(name) \
dynamic_fault("bcachefs:meta:read:" name)
#define bch2_meta_write_fault(name) \
	dynamic_fault("bcachefs:meta:write:" name)
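+
+/*
+ * Illustrative only: a caller typically gates an error path on one of
+ * these fault injection points, e.g. (hypothetical call site):
+ *
+ *	if (bch2_meta_read_fault("btree"))
+ *		return -EIO;
+ *
+ * With dynamic_fault() stubbed out to 0 above, the branch is discarded
+ * by the compiler in this build.
+ */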
#ifdef __KERNEL__
-#define bch2_fmt(_c, fmt) "bcachefs (%s): " fmt "\n", ((_c)->name)
+#define bch2_fmt(_c, fmt) "bcachefs (%s): " fmt "\n", ((_c)->name)
+#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
#else
-#define bch2_fmt(_c, fmt) fmt "\n"
+#define bch2_fmt(_c, fmt) "%s: " fmt "\n", ((_c)->name)
+#define bch2_fmt_inum(_c, _inum, fmt) "%s inum %llu: " fmt "\n", ((_c)->name), (_inum)
#endif
#define bch_info(c, fmt, ...) \
printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_warn_ratelimited(c, fmt, ...) \
+ printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err(c, fmt, ...) \
printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_ratelimited(c, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+
#define bch_verbose(c, fmt, ...) \
do { \
- if ((c)->opts.verbose_recovery) \
+ if ((c)->opts.verbose) \
bch_info(c, fmt, ##__VA_ARGS__); \
} while (0)
#define pr_verbose_init(opts, fmt, ...) \
do { \
- if (opt_get(opts, verbose_init)) \
+ if (opt_get(opts, verbose)) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
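+
+/*
+ * Sketch of how the logging wrappers above are used (hypothetical call
+ * sites; the format strings are just examples):
+ *
+ *	bch_err(c, "error reading btree node: %i", ret);
+ *	bch_err_inum_ratelimited(c, inum, "extent past end of inode");
+ *
+ * Each expands to printk()/printk_ratelimited() with the filesystem name
+ * (and, for the _inum variants, the inode number) prepended via
+ * bch2_fmt*().
+ */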
BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
"Disables rewriting of btree nodes during mark and sweep")\
BCH_DEBUG_PARAM(btree_shrinker_disabled, \
- "Disables the shrinker callback for the btree node cache")
+ "Disables the shrinker callback for the btree node cache")\
+ BCH_DEBUG_PARAM(verify_btree_ondisk, \
+ "Reread btree nodes at various points to verify the " \
+ "mergesort in the read path against modifications " \
+ "done in memory") \
+ BCH_DEBUG_PARAM(verify_all_btree_replicas, \
+ "When reading btree nodes, read all replicas and " \
+ "compare them")
/* Parameters that are only compiled in when CONFIG_BCACHEFS_DEBUG is enabled: */
#define BCH_DEBUG_PARAMS_DEBUG() \
BCH_DEBUG_PARAM(expensive_debug_checks, \
"Enables various runtime debugging checks that " \
"significantly affect performance") \
+ BCH_DEBUG_PARAM(debug_check_iterators, \
+ "Enables extra verification for btree iterators") \
BCH_DEBUG_PARAM(debug_check_bkeys, \
"Run bkey_debugcheck (primarily checking GC/allocation "\
"information) when iterating over keys") \
- BCH_DEBUG_PARAM(verify_btree_ondisk, \
- "Reread btree nodes at various points to verify the " \
- "mergesort in the read path against modifications " \
- "done in memory") \
+ BCH_DEBUG_PARAM(debug_check_btree_accounting, \
+ "Verify btree accounting for keys within a node") \
+ BCH_DEBUG_PARAM(journal_seq_verify, \
+ "Store the journal sequence number in the version " \
+ "number of every btree key, and verify that btree " \
+ "update ordering is preserved during recovery") \
+	BCH_DEBUG_PARAM(inject_invalid_keys, \
+		"Inject invalid keys into the btree (by storing a bogus " \
+		"version number), to exercise the code paths that detect " \
+		"and handle invalid keys") \
+	BCH_DEBUG_PARAM(test_alloc_startup, \
+		"Force allocator startup to use the slowpath where it " \
+		"can't find enough free buckets without invalidating " \
+		"cached data") \
+	BCH_DEBUG_PARAM(force_reconstruct_read, \
+		"Force reads to use the reconstruct path, when reading " \
+		"from erasure coded extents") \
+ BCH_DEBUG_PARAM(test_restart_gc, \
+ "Test restarting mark and sweep gc when bucket gens change")
#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
#endif
-/* name, frequency_units, duration_units */
-#define BCH_TIME_STATS() \
- BCH_TIME_STAT(btree_node_mem_alloc, sec, us) \
- BCH_TIME_STAT(btree_gc, sec, ms) \
- BCH_TIME_STAT(btree_split, sec, us) \
- BCH_TIME_STAT(btree_sort, ms, us) \
- BCH_TIME_STAT(btree_read, ms, us) \
- BCH_TIME_STAT(journal_write, us, us) \
- BCH_TIME_STAT(journal_delay, ms, us) \
- BCH_TIME_STAT(journal_blocked, sec, ms) \
- BCH_TIME_STAT(journal_flush_seq, us, us)
+#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
+BCH_DEBUG_PARAMS()
+#undef BCH_DEBUG_PARAM
+
+#ifndef CONFIG_BCACHEFS_DEBUG
+#define BCH_DEBUG_PARAM(name, description) static const bool bch2_##name;
+BCH_DEBUG_PARAMS_DEBUG()
+#undef BCH_DEBUG_PARAM
+#endif
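+
+/*
+ * The two expansions above are a standard x-macro trick: with
+ * CONFIG_BCACHEFS_DEBUG every parameter is a real 'bool bch2_<name>'
+ * that can be toggled at runtime; otherwise the debug-only parameters
+ * become 'static const bool' (false), so code like
+ * 'if (bch2_journal_seq_verify)' compiles away entirely. Sketch of the
+ * expansion for one parameter:
+ *
+ *	extern bool bch2_journal_seq_verify;		// debug build
+ *	static const bool bch2_journal_seq_verify;	// non-debug: false
+ */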
+
+#define BCH_TIME_STATS() \
+ x(btree_node_mem_alloc) \
+ x(btree_node_split) \
+ x(btree_node_compact) \
+ x(btree_node_merge) \
+ x(btree_node_sort) \
+ x(btree_node_read) \
+ x(btree_interior_update_foreground) \
+ x(btree_interior_update_total) \
+ x(btree_gc) \
+ x(btree_lock_contended_read) \
+ x(btree_lock_contended_intent) \
+ x(btree_lock_contended_write) \
+ x(data_write) \
+ x(data_read) \
+ x(data_promote) \
+ x(journal_flush_write) \
+ x(journal_noflush_write) \
+ x(journal_flush_seq) \
+ x(blocked_journal) \
+ x(blocked_allocate) \
+ x(blocked_allocate_open_bucket)
+
+enum bch_time_stats {
+#define x(name) BCH_TIME_##name,
+ BCH_TIME_STATS()
+#undef x
+ BCH_TIME_STAT_NR
+};
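+
+/*
+ * The x-macro above expands to BCH_TIME_btree_node_mem_alloc,
+ * BCH_TIME_btree_node_split, ..., which index the times[] array in
+ * struct bch_fs. Sketch of recording a latency (assuming the
+ * time_stats helpers from util.h):
+ *
+ *	u64 start = local_clock();
+ *	... do the work ...
+ *	bch2_time_stats_update(&c->times[BCH_TIME_data_read], start);
+ */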
#include "alloc_types.h"
+#include "btree_types.h"
#include "buckets_types.h"
#include "clock_types.h"
+#include "ec_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
+#include "rebalance_types.h"
+#include "replicas_types.h"
+#include "subvolume_types.h"
#include "super_types.h"
-/*
- * Number of nodes we might have to allocate in a worst case btree split
- * operation - we split all the way up to the root, then allocate a new root.
- */
-#define btree_reserve_required_nodes(depth) (((depth) + 1) * 2 + 1)
-
/* Number of nodes btree coalesce will try to coalesce at once */
#define GC_MERGE_NODES 4U
/* Maximum number of nodes we might need to allocate atomically: */
-#define BTREE_RESERVE_MAX \
- (btree_reserve_required_nodes(BTREE_MAX_DEPTH) + GC_MERGE_NODES)
+#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
/* Size of the freelist we allocate btree nodes from: */
-#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
+#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
+
+#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
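+
+/*
+ * Worked numbers, assuming the usual BTREE_MAX_DEPTH == 4 and
+ * BCH_REPLICAS_MAX == 4: BTREE_RESERVE_MAX = 4 + 3 = 7 nodes,
+ * BTREE_NODE_RESERVE = 7 * 4 = 28 nodes, and
+ * BTREE_NODE_OPEN_BUCKET_RESERVE = 7 * 4 = 28 open buckets.
+ */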
struct btree;
-struct crypto_blkcipher;
-struct crypto_ahash;
enum gc_phase {
- GC_PHASE_SB = BTREE_ID_NR + 1,
+ GC_PHASE_NOT_RUNNING,
+ GC_PHASE_START,
+ GC_PHASE_SB,
+
+ GC_PHASE_BTREE_stripes,
+ GC_PHASE_BTREE_extents,
+ GC_PHASE_BTREE_inodes,
+ GC_PHASE_BTREE_dirents,
+ GC_PHASE_BTREE_xattrs,
+ GC_PHASE_BTREE_alloc,
+ GC_PHASE_BTREE_quotas,
+ GC_PHASE_BTREE_reflink,
+ GC_PHASE_BTREE_subvolumes,
+ GC_PHASE_BTREE_snapshots,
+
GC_PHASE_PENDING_DELETE,
- GC_PHASE_ALLOC,
- GC_PHASE_DONE
};
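+
+/*
+ * Note that gc phases are ordered: comparing two gc positions tells us
+ * whether gc has already processed a given position, so the
+ * GC_PHASE_BTREE_* entries must be listed in the order gc walks the
+ * btrees.
+ */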
struct gc_pos {
unsigned level;
};
+struct reflink_gc {
+ u64 offset;
+ u32 size;
+ u32 refcount;
+};
+
+typedef GENRADIX(struct reflink_gc) reflink_gc_table;
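+
+/*
+ * reflink_gc_table is a generic radix tree of struct reflink_gc,
+ * indexed densely from 0. Typical genradix usage (hypothetical call
+ * site; see linux/generic-radix-tree.h):
+ *
+ *	struct reflink_gc *r =
+ *		genradix_ptr_alloc(&c->reflink_gc_table, idx, GFP_KERNEL);
+ *	if (!r)
+ *		return -ENOMEM;
+ *	r->refcount++;
+ */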
+
struct io_count {
u64 sectors[2][BCH_DATA_NR];
};
char name[BDEVNAME_SIZE];
struct bch_sb_handle disk_sb;
+ struct bch_sb *sb_read_scratch;
int sb_write_error;
struct bch_devs_mask self;
/*
* Buckets:
- * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
+ * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
* gc_lock, for device resize - holding any is sufficient for access:
* Or rcu_read_lock(), but only for ptr_stale():
*/
- struct bucket_array __rcu *buckets;
- unsigned long *buckets_dirty;
- /* most out of date gen in the btree */
- u8 *oldest_gens;
+ struct bucket_array __rcu *buckets[2];
+ struct bucket_gens *bucket_gens;
+ unsigned long *buckets_nouse;
struct rw_semaphore bucket_lock;
- struct bch_dev_usage __percpu *usage_percpu;
- struct bch_dev_usage usage_cached;
+ struct bch_dev_usage *usage_base;
+ struct bch_dev_usage __percpu *usage[JOURNAL_BUF_NR];
+ struct bch_dev_usage __percpu *usage_gc;
/* Allocator: */
- struct task_struct *alloc_thread;
+ u64 new_fs_bucket_idx;
+ struct task_struct __rcu *alloc_thread;
/*
* free: Buckets that are ready to be used
*/
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
- spinlock_t freelist_lock;
- size_t nr_invalidated;
+ unsigned nr_open_buckets;
- u8 open_buckets_partial[OPEN_BUCKETS_COUNT];
- unsigned open_buckets_partial_nr;
+ open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
+ open_bucket_idx_t open_buckets_partial_nr;
size_t fifo_last_bucket;
- /* last calculated minimum prio */
- u16 max_last_bucket_io[2];
-
- atomic_long_t saturated_count;
size_t inc_gen_needs_gc;
size_t inc_gen_really_needs_gc;
- u64 allocator_journal_seq_flush;
- bool allocator_invalidating_data;
- bool allocator_blocked;
- alloc_heap alloc_heap;
+ enum allocator_states allocator_state;
- /* Copying GC: */
- struct task_struct *copygc_thread;
- copygc_heap copygc_heap;
- struct bch_pd_controller copygc_pd;
- struct write_point copygc_write_point;
+ alloc_heap alloc_heap;
atomic64_t rebalance_work;
struct journal_device journal;
+ u64 prev_journal_sector;
struct work_struct io_error_work;
/* The rest of this all shows up in sysfs */
- atomic_t latency[2];
+ atomic64_t cur_latency[2];
+ struct time_stats io_latency[2];
+
+#define CONGESTED_MAX 1024
+ atomic_t congested;
+ u64 congested_last;
struct io_count __percpu *io_done;
};
-/*
- * Flag bits for what phase of startup/shutdown the cache set is at, how we're
- * shutting down, etc.:
- *
- * BCH_FS_UNREGISTERING means we're not just shutting down, we're detaching
- * all the backing devices first (their cached data gets invalidated, and they
- * won't automatically reattach).
- */
enum {
/* startup: */
+ BCH_FS_INITIALIZED,
BCH_FS_ALLOC_READ_DONE,
- BCH_FS_ALLOCATOR_STARTED,
+ BCH_FS_ALLOC_CLEAN,
+ BCH_FS_ALLOCATOR_RUNNING,
+ BCH_FS_ALLOCATOR_STOPPING,
BCH_FS_INITIAL_GC_DONE,
+ BCH_FS_INITIAL_GC_UNFIXED,
+ BCH_FS_TOPOLOGY_REPAIR_DONE,
BCH_FS_FSCK_DONE,
BCH_FS_STARTED,
+ BCH_FS_RW,
+ BCH_FS_WAS_RW,
/* shutdown: */
+ BCH_FS_STOPPING,
BCH_FS_EMERGENCY_RO,
BCH_FS_WRITE_DISABLE_COMPLETE,
- BCH_FS_GC_STOPPING,
/* errors: */
BCH_FS_ERROR,
- BCH_FS_GC_FAILURE,
+ BCH_FS_TOPOLOGY_ERROR,
+ BCH_FS_ERRORS_FIXED,
+ BCH_FS_ERRORS_NOT_FIXED,
/* misc: */
- BCH_FS_BDEV_MOUNTED,
- BCH_FS_FSCK_FIXED_ERRORS,
- BCH_FS_FIXED_GENS,
+ BCH_FS_NEED_ANOTHER_GC,
+ BCH_FS_DELETED_NODES,
BCH_FS_REBUILD_REPLICAS,
BCH_FS_HOLD_BTREE_WRITES,
};
struct dentry *failed;
};
-enum bch_fs_state {
- BCH_FS_STARTING = 0,
- BCH_FS_STOPPING,
- BCH_FS_RO,
- BCH_FS_RW,
+struct bch_fs_pcpu {
+ u64 sectors_available;
+};
+
+struct journal_seq_blacklist_table {
+ size_t nr;
+ struct journal_seq_blacklist_table_entry {
+ u64 start;
+ u64 end;
+ bool dirty;
+	} entries[];
+};
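+
+/*
+ * entries[] is a flexible array member: the table is allocated in one
+ * shot with room for nr entries, e.g. (hypothetical sketch, using the
+ * struct_size() helper from linux/overflow.h):
+ *
+ *	t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
+ */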
+
+struct journal_keys {
+ struct journal_key {
+ enum btree_id btree_id:8;
+ unsigned level:8;
+ bool allocated;
+ bool overwritten;
+ struct bkey_i *k;
+ u32 journal_seq;
+ u32 journal_offset;
+ } *d;
+ size_t nr;
+ size_t size;
+ u64 journal_seq_base;
+};
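+
+/*
+ * journal_keys holds the keys found in the journal at mount time,
+ * sorted so that recovery can iterate them in order and overlay them
+ * on top of the btree until replay completes.
+ */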
+
+struct btree_path_buf {
+ struct btree_path *path;
};
+#define REPLICAS_DELTA_LIST_MAX (1U << 16)
+
+struct snapshot_t {
+ u32 parent;
+ u32 children[2];
+ u32 subvol; /* Nonzero only if a subvolume points to this node: */
+ u32 equiv;
+};
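+
+/*
+ * In-memory node of the snapshot tree: parent/children give the tree
+ * topology, subvol back-references the owning subvolume (if any), and
+ * equiv names a representative of this snapshot's equivalence class
+ * (snapshots that see an identical set of keys).
+ */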
+
+typedef struct {
+ u32 subvol;
+ u64 inum;
+} subvol_inum;
+
+#define BCACHEFS_ROOT_SUBVOL_INUM \
+ ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
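+
+/*
+ * With snapshots, an inode number alone no longer identifies a file:
+ * lookups take a (subvolume, inode) pair, e.g. (hypothetical):
+ *
+ *	subvol_inum inum = { .subvol = BCACHEFS_ROOT_SUBVOL,
+ *			     .inum   = BCACHEFS_ROOT_INO };
+ */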
+
struct bch_fs {
struct closure cl;
int minor;
struct device *chardev;
struct super_block *vfs_sb;
+ dev_t dev;
char name[40];
- /* ro/rw, add/remove devices: */
- struct mutex state_lock;
- enum bch_fs_state state;
+ /* ro/rw, add/remove/resize devices: */
+ struct rw_semaphore state_lock;
/* Counts outstanding writes, for clean transition to read-only */
struct percpu_ref writes;
struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
- struct bch_replicas_cpu __rcu *replicas;
- struct bch_replicas_cpu __rcu *replicas_gc;
+ struct bch_replicas_cpu replicas;
+ struct bch_replicas_cpu replicas_gc;
struct mutex replicas_gc_lock;
+ mempool_t replicas_delta_pool;
+
+ struct journal_entry_res btree_root_journal_res;
+ struct journal_entry_res replicas_journal_res;
+ struct journal_entry_res clock_journal_res;
+ struct journal_entry_res dev_usage_journal_res;
struct bch_disk_groups_cpu __rcu *disk_groups;
uuid_le uuid;
uuid_le user_uuid;
- u16 encoded_extent_max;
+ u16 version;
+ u16 version_min;
u8 nr_devices;
u8 clean;
u64 time_base_lo;
u32 time_base_hi;
- u32 time_precision;
+ unsigned time_units_per_sec;
+ unsigned nsec_per_time_unit;
u64 features;
+ u64 compat;
} sb;
+
struct bch_sb_handle disk_sb;
unsigned short block_bits; /* ilog2(block_size) */
struct closure sb_write;
struct mutex sb_lock;
+ /* snapshot.c: */
+ GENRADIX(struct snapshot_t) snapshots;
+ struct bch_snapshot_table __rcu *snapshot_table;
+ struct mutex snapshot_table_lock;
+ struct work_struct snapshot_delete_work;
+ struct work_struct snapshot_wait_for_pagecache_and_delete_work;
+ struct snapshot_id_list snapshots_unlinked;
+ struct mutex snapshots_unlinked_lock;
+
/* BTREE CACHE */
struct bio_set btree_bio;
+ struct workqueue_struct *io_complete_wq;
struct btree_root btree_roots[BTREE_ID_NR];
- bool btree_roots_dirty;
struct mutex btree_root_lock;
struct btree_cache btree_cache;
- mempool_t btree_reserve_pool;
-
/*
* Cache of allocated btree nodes - if we allocate a btree node and
* don't use it, if we free it that space can't be reused until going
mempool_t btree_interior_update_pool;
struct list_head btree_interior_update_list;
+ struct list_head btree_interior_updates_unwritten;
struct mutex btree_interior_update_lock;
struct closure_waitlist btree_interior_update_wait;
- struct workqueue_struct *wq;
- /* copygc needs its own workqueue for index updates.. */
- struct workqueue_struct *copygc_wq;
+ struct workqueue_struct *btree_interior_update_worker;
+ struct work_struct btree_interior_update_work;
- /* ALLOCATION */
- struct delayed_work pd_controllers_update;
- unsigned pd_controllers_update_seconds;
+ /* btree_iter.c: */
+ struct mutex btree_trans_lock;
+ struct list_head btree_trans_list;
+ mempool_t btree_paths_pool;
+ mempool_t btree_trans_mem_pool;
+ struct btree_path_buf __percpu *btree_paths_bufs;
- /* REBALANCE */
- struct task_struct *rebalance_thread;
- struct bch_pd_controller rebalance_pd;
+ struct srcu_struct btree_trans_barrier;
+ bool btree_trans_barrier_initialized;
+
+ struct btree_key_cache btree_key_cache;
- atomic64_t rebalance_work_unknown_dev;
+ struct workqueue_struct *btree_update_wq;
+ struct workqueue_struct *btree_io_complete_wq;
+ /* copygc needs its own workqueue for index updates.. */
+ struct workqueue_struct *copygc_wq;
+ /* ALLOCATION */
struct bch_devs_mask rw_devs[BCH_DATA_NR];
u64 capacity; /* sectors */
* and forces them to be revalidated
*/
u32 capacity_gen;
+ unsigned bucket_size_max;
atomic64_t sectors_available;
+ struct mutex sectors_available_lock;
- struct bch_fs_usage __percpu *usage_percpu;
- struct bch_fs_usage usage_cached;
- struct lglock usage_lock;
+ struct bch_fs_pcpu __percpu *pcpu;
- struct closure_waitlist freelist_wait;
+ struct percpu_rw_semaphore mark_lock;
- /*
- * When we invalidate buckets, we use both the priority and the amount
- * of good data to determine which buckets to reuse first - to weight
- * those together consistently we keep track of the smallest nonzero
- * priority of any bucket.
- */
- struct bucket_clock bucket_clock[2];
+ seqcount_t usage_lock;
+ struct bch_fs_usage *usage_base;
+ struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
+ struct bch_fs_usage __percpu *usage_gc;
+ u64 __percpu *online_reserved;
+
+ /* single element mempool: */
+ struct mutex usage_scratch_lock;
+ struct bch_fs_usage_online *usage_scratch;
struct io_clock io_clock[2];
+ /* JOURNAL SEQ BLACKLIST */
+ struct journal_seq_blacklist_table *
+ journal_seq_blacklist_table;
+
/* ALLOCATOR */
spinlock_t freelist_lock;
- u8 open_buckets_freelist;
- u8 open_buckets_nr_free;
+ struct closure_waitlist freelist_wait;
+ u64 blocked_allocate;
+ u64 blocked_allocate_open_bucket;
+
+ open_bucket_idx_t open_buckets_freelist;
+ open_bucket_idx_t open_buckets_nr_free;
struct closure_waitlist open_buckets_wait;
struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
+ open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];
struct write_point btree_write_point;
struct write_point rebalance_write_point;
- struct write_point write_points[WRITE_POINT_COUNT];
- struct hlist_head write_points_hash[WRITE_POINT_COUNT];
+ struct write_point write_points[WRITE_POINT_MAX];
+ struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
struct mutex write_points_hash_lock;
+ unsigned write_points_nr;
/* GARBAGE COLLECTION */
struct task_struct *gc_thread;
atomic_t kick_gc;
unsigned long gc_count;
+ enum btree_id gc_gens_btree;
+ struct bpos gc_gens_pos;
+
/*
* Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
* has been marked by GC.
*
- * gc_cur_phase is a superset of btree_ids (BTREE_ID_EXTENTS etc.)
- *
- * gc_cur_phase == GC_PHASE_DONE indicates that gc is finished/not
- * currently running, and gc marks are currently valid
+ * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
*
* Protected by gc_pos_lock. Only written to by GC thread, so GC thread
* can read without a lock.
struct rw_semaphore gc_lock;
/* IO PATH */
+ struct semaphore io_in_flight;
struct bio_set bio_read;
struct bio_set bio_read_split;
struct bio_set bio_write;
struct mutex bio_bounce_pages_lock;
mempool_t bio_bounce_pages;
+ struct rhashtable promote_table;
mempool_t compression_bounce[2];
- mempool_t compress_workspace[BCH_COMPRESSION_NR];
+ mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
mempool_t decompress_workspace;
ZSTD_parameters zstd_params;
struct crypto_shash *sha256;
- struct crypto_skcipher *chacha20;
+ struct crypto_sync_skcipher *chacha20;
struct crypto_shash *poly1305;
atomic64_t key_version;
+ mempool_t large_bkey_pool;
+
+ /* REBALANCE */
+ struct bch_fs_rebalance rebalance;
+
+ /* COPYGC */
+ struct task_struct *copygc_thread;
+ copygc_heap copygc_heap;
+ struct write_point copygc_write_point;
+ s64 copygc_wait;
+
+ /* DATA PROGRESS STATS */
+ struct list_head data_progress_list;
+ struct mutex data_progress_lock;
+
+ /* STRIPES: */
+ GENRADIX(struct stripe) stripes;
+ GENRADIX(struct gc_stripe) gc_stripes;
+
+ ec_stripes_heap ec_stripes_heap;
+ spinlock_t ec_stripes_heap_lock;
+
+ /* ERASURE CODING */
+ struct list_head ec_stripe_head_list;
+ struct mutex ec_stripe_head_lock;
+
+ struct list_head ec_stripe_new_list;
+ struct mutex ec_stripe_new_lock;
+
+ struct work_struct ec_stripe_create_work;
+ u64 ec_stripe_hint;
+
+ struct bio_set ec_bioset;
+
+ struct work_struct ec_stripe_delete_work;
+ struct llist_head ec_stripe_delete_list;
+
+ /* REFLINK */
+ u64 reflink_hint;
+ reflink_gc_table reflink_gc_table;
+ size_t reflink_gc_nr;
+
/* VFS IO PATH - fs-io.c */
struct bio_set writepage_bioset;
struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
- struct bio_list btree_write_error_list;
- struct work_struct btree_write_error_work;
+
+ atomic64_t btree_writes_nr;
+ atomic64_t btree_writes_sectors;
spinlock_t btree_write_error_lock;
/* ERRORS */
struct mutex fsck_error_lock;
bool fsck_alloc_err;
- /* FILESYSTEM */
- atomic_long_t nr_inodes;
-
/* QUOTAS */
struct bch_memquota_type quotas[QTYP_NR];
/* DEBUG JUNK */
struct dentry *debug;
struct btree_debug btree_debug[BTREE_ID_NR];
-#ifdef CONFIG_BCACHEFS_DEBUG
struct btree *verify_data;
struct btree_node *verify_ondisk;
struct mutex verify_lock;
-#endif
- u64 unused_inode_hint;
+ u64 *unused_inode_hints;
+ unsigned inode_shard_bits;
/*
* A btree node on disk could have too many bsets for an iterator to fit
mempool_t btree_bounce_pool;
struct journal journal;
+ struct list_head journal_entries;
+ struct journal_keys journal_keys;
+ struct list_head journal_iters;
- unsigned bucket_journal_seq;
+ u64 last_bucket_seq_cleanup;
/* The rest of this all shows up in sysfs */
atomic_long_t read_realloc_races;
unsigned btree_gc_periodic:1;
unsigned copy_gc_enabled:1;
- unsigned rebalance_enabled:1;
- unsigned rebalance_percent;
-
-#define BCH_DEBUG_PARAM(name, description) bool name;
- BCH_DEBUG_PARAMS_ALL()
-#undef BCH_DEBUG_PARAM
+ bool promote_whole_extents;
-#define BCH_TIME_STAT(name, frequency_units, duration_units) \
- struct time_stats name##_time;
- BCH_TIME_STATS()
-#undef BCH_TIME_STAT
+ struct time_stats times[BCH_TIME_STAT_NR];
};
static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
{
#ifndef NO_BCACHEFS_FS
	if (c->vfs_sb)
		c->vfs_sb->s_bdi->ra_pages = ra_pages;
#endif
}
-static inline bool bch2_fs_running(struct bch_fs *c)
-{
- return c->state == BCH_FS_RO || c->state == BCH_FS_RW;
-}
-
static inline unsigned bucket_bytes(const struct bch_dev *ca)
{
return ca->mi.bucket_size << 9;
static inline unsigned block_bytes(const struct bch_fs *c)
{
- return c->opts.block_size << 9;
+ return c->opts.block_size;
+}
+
+static inline unsigned block_sectors(const struct bch_fs *c)
+{
+ return c->opts.block_size >> 9;
+}
+
+static inline size_t btree_sectors(const struct bch_fs *c)
+{
+ return c->opts.btree_node_size >> 9;
+}
+
+static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
+{
+ struct timespec64 t;
+ s32 rem;
+
+ time += c->sb.time_base_lo;
+
+ t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
+ t.tv_nsec = rem * c->sb.nsec_per_time_unit;
+ return t;
+}
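+
+/*
+ * Worked example: with time_base_lo == 0, time_units_per_sec ==
+ * 1000000000 and nsec_per_time_unit == 1 (nanosecond granularity),
+ * time == 1500000000 yields tv_sec == 1, tv_nsec == 500000000.
+ */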
+
+static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
+{
+ return (ts.tv_sec * c->sb.time_units_per_sec +
+ (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
+}
+
+static inline s64 bch2_current_time(const struct bch_fs *c)
+{
+ struct timespec64 now;
+
+ ktime_get_coarse_real_ts64(&now);
+ return timespec_to_bch2_time(c, now);
+}
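+
+/*
+ * Typical use (hypothetical call site): stamping an inode update, e.g.
+ *
+ *	inode->bi_mtime = bch2_current_time(c);
+ */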
+
+static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
+{
+ return dev < c->sb.nr_devices && c->devs[dev];
}
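+
+/*
+ * Sketch of a guarded device index check (hypothetical call site;
+ * c->devs is __rcu, so dereferencing an entry also requires
+ * rcu_read_lock() or a reference):
+ *
+ *	if (!bch2_dev_exists2(c, ptr->dev))
+ *		return -EIO;
+ */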
#endif /* _BCACHEFS_H */