1 /* SPDX-License-Identifier: GPL-2.0 */
6 * SOME HIGH LEVEL CODE DOCUMENTATION:
8 * Bcache mostly works with cache sets, cache devices, and backing devices.
10 * Support for multiple cache devices hasn't quite been finished off yet, but
11 * it's about 95% plumbed through. A cache set and its cache devices are sort of
12 * like an md raid array and its component devices. Most of the code doesn't care
13 * about individual cache devices; the main abstraction is the cache set.
15 * Multiple cache devices are intended to give us the ability to mirror dirty
16 * cached data and metadata, without mirroring clean cached data.
18 * Backing devices are different, in that they have a lifetime independent of a
19 * cache set. When you register a newly formatted backing device it'll come up
20 * in passthrough mode; you can then attach a backing device to, and detach it from,
21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
22 * invalidates any cached data for that backing device.
24 * A cache set can have multiple (many) backing devices attached to it.
26 * There are also flash only volumes - this is the reason for the distinction
27 * between struct cached_dev and struct bcache_device. A flash only volume
28 * works much like a bcache device that has a backing device, except the
29 * "cached" data is always dirty. The end result is that we get thin
30 * provisioning with very little additional code.
32 * Flash only volumes work but they're not production ready because the moving
33 * garbage collector needs more work. More on that later.
37 * Bcache is primarily designed for caching, which means that in normal
38 * operation all of our available space will be allocated. Thus, we need an
39 * efficient way of deleting things from the cache so we can write new things to
42 * To do this, we first divide the cache device up into buckets. A bucket is the
43 * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
47 * it. The gens and priorities for all the buckets are stored contiguously and
48 * packed on disk (in a linked list of buckets - aside from the superblock, all
49 * of bcache's metadata is stored in buckets).
51 * The priority is used to implement an LRU. We reset a bucket's priority when
52 * we allocate it or on a cache hit, and every so often we decrement the priority
53 * of each bucket. It could be used to implement something more sophisticated,
54 * if anyone ever gets around to it.
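 *
 * As a rough sketch of the idea (illustrative only - the field and constant
 * names here are made up, not the real implementation):
 *
 *	struct bucket { u16 prio; u8 gen; };
 *
 *	b->prio = INITIAL_PRIO;		// on allocation or on a cache hit
 *
 *	// every so often, for every bucket:
 *	if (b->prio)
 *		b->prio--;
 *
 *	// buckets with the lowest prio are the best candidates to reuse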
56 * The generation is used for invalidating buckets. Each pointer also has an 8
57 * bit generation embedded in it; for a pointer to be considered valid, its gen
58 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
59 * we have to do is increment its gen (and write its new gen to disk; we batch
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
63 * contain metadata (including btree nodes).
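 *
 * As a sketch of how generations invalidate pointers (illustrative, not the
 * real helper - the actual check also has to cope with gen wraparound):
 *
 *	bool ptr_stale(u8 bucket_gen, u8 ptr_gen)
 *	{
 *		return bucket_gen != ptr_gen;
 *	}
 *
 * Incrementing a bucket's gen (and persisting the new gen) instantly makes
 * every pointer still carrying the old gen invalid, without touching the
 * index itself.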
67 * Bcache is in large part designed around the btree.
69 * At a high level, the btree is just an index of key -> ptr tuples.
71 * Keys represent extents, and thus have a size field. Keys also have a variable
72 * number of pointers attached to them (potentially zero, which is handy for
73 * invalidating the cache).
75 * The key itself is an inode:offset pair. The inode number corresponds to a
76 * backing device or a flash only volume. The offset is the ending offset of the
77 * extent within the inode - not the starting offset; this makes lookups
78 * slightly more convenient.
80 * Pointers contain the cache device id, the offset on that device, and an 8 bit
81 * generation number. More on the gen later.
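 *
 * For example, an extent of 8 sectors starting at offset 16 of inode 5 might
 * be indexed (a sketch only, not the on disk encoding) as
 *
 *	key:  inode = 5, offset = 24 (the end of the extent), size = 8
 *	ptr:  dev = 0, offset = <sector on that cache device>, gen = 3
 *
 * so a lookup for inode 5, sector 20 just searches for the first key whose
 * (inode, offset) sorts after (5, 20) - which is why storing the ending
 * offset is convenient.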
83 * Index lookups are not fully abstracted - cache lookups in particular are
84 * still somewhat mixed in with the btree code, but things are headed in that
87 * Updates are fairly well abstracted, though. There are two different ways of
88 * updating the btree: insert and replace.
90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
91 * overwriting (possibly only partially) any extents they overlap with. This is
92 * used to update the index after a write.
94 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
95 * overwriting a key that matches another given key. This is used for inserting
96 * data into the cache after a cache miss, and for background writeback, and for
97 * the moving garbage collector.
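 *
 * In pseudocode, the difference between the two (a sketch of the semantics,
 * not the real function signatures):
 *
 *	btree_insert(keys);		// unconditional; overwrites whatever
 *					// the new extents overlap
 *
 *	btree_replace(old, new);	// only succeeds if the index still
 *					// contains old - i.e. nothing raced
 *					// with us since we read it
 *
 * The replace variant is what lets a cache miss fill or background writeback
 * lose gracefully to a foreground write that landed first.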
99 * There is no "delete" operation; deleting things from the index is
100 * accomplished either by invalidating pointers (by incrementing a bucket's
101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
102 * previously present at that location in the index.
104 * This means that there are always stale/invalid keys in the btree. They're
105 * filtered out by the code that iterates through a btree node, and removed when
106 * a btree node is rewritten.
110 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111 * free anything smaller than a bucket - so, that's how big our btree nodes are.
113 * (If buckets are really big we'll only use part of the bucket for a btree node
114 * - no less than 1/4th - but a bucket still contains no more than a single
115 * btree node. I'd actually like to change this, but for now we rely on the
116 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
118 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
119 * btree implementation.
121 * The way this is solved is that btree nodes are internally log structured; we
122 * can append new keys to an existing btree node without rewriting it. This
123 * means each set of keys we write is sorted, but the node is not.
125 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
126 * be expensive, and we have to distinguish between the keys we have written and
127 * the keys we haven't. So to do a lookup in a btree node, we have to search
128 * each sorted set. But we do merge written sets together lazily, so the cost of
129 * these extra searches is quite low (normally most of the keys in a btree node
130 * will be in one big set, and then there'll be one or two sets that are much
133 * This log structure makes bcache's btree more of a hybrid between a
134 * conventional btree and a compacting data structure, with some of the
135 * advantages of both.
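 *
 * A lookup within a node is then roughly (a sketch only - the real code
 * layers further tricks, e.g. per set lookup tables, on top of this):
 *
 *	for (each sorted set s in the node, newest first)
 *		remember the best match from binary_search(s, search_key);
 *	return the best match found across all the sets;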
137 * GARBAGE COLLECTION:
139 * We can't just invalidate any bucket - it might contain dirty data or
140 * metadata. If it once contained dirty data, other writes might overwrite it
141 * later, leaving no valid pointers into that bucket in the index.
143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
144 * It also counts how much valid data each bucket currently contains, so that
145 * allocation can reuse buckets sooner when they've been mostly overwritten.
147 * It also does some things that are really internal to the btree
148 * implementation. If a btree node contains pointers that are stale by more than
149 * some threshold, it rewrites the btree node to avoid the bucket's generation
150 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
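 *
 * Very roughly, the GC pass is (a sketch of the idea, not the actual code):
 *
 *	for (each key in each btree node) {
 *		for (each pointer in the key)
 *			if (the ptr's gen matches its bucket's gen)
 *				that bucket's live sector count += key size;
 *		if (any pointer is stale by more than the threshold)
 *			queue the node for rewrite;	// so gens can't wrap
 *	}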
154 * Bcache's journal is not necessary for consistency; we always strictly
155 * order metadata writes so that the btree and everything else is consistent on
156 * disk in the event of an unclean shutdown, and in fact bcache had writeback
157 * caching (with recovery from unclean shutdown) before journalling was
160 * Rather, the journal is purely a performance optimization; we can't complete a
161 * write until we've updated the index on disk, otherwise the cache would be
162 * inconsistent in the event of an unclean shutdown. This means that without the
163 * journal, on random write workloads we constantly have to update all the leaf
164 * nodes in the btree, and those writes will be mostly empty (appending at most
165 * a few keys each) - highly inefficient in terms of the amount of metadata written,
166 * and it puts more strain on the various btree resorting/compacting code.
168 * The journal is just a log of keys we've inserted; on startup we just reinsert
169 * all the keys in the open journal entries. That means that when we're updating
170 * a node in the btree, we can wait until a 4k block of keys fills up before
173 * For simplicity, we only journal updates to leaf nodes; updates to parent
174 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
175 * the complexity to deal with journalling them (in particular, journal replay)
176 * - updates to non leaf nodes just happen synchronously (see btree_split()).
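 *
 * Conceptually, journal replay on startup is then just (a sketch; the real
 * code batches these up and sorts them first):
 *
 *	for (each open journal entry, oldest to newest)
 *		for (each key in the entry)
 *			reinsert the key into the appropriate leaf node;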
181 #define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
183 #define pr_fmt(fmt) "%s() " fmt "\n", __func__
186 #include <linux/backing-dev-defs.h>
187 #include <linux/bug.h>
188 #include <linux/bio.h>
189 #include <linux/closure.h>
190 #include <linux/kobject.h>
191 #include <linux/list.h>
192 #include <linux/math64.h>
193 #include <linux/mutex.h>
194 #include <linux/percpu-refcount.h>
195 #include <linux/percpu-rwsem.h>
196 #include <linux/rhashtable.h>
197 #include <linux/rwsem.h>
198 #include <linux/semaphore.h>
199 #include <linux/seqlock.h>
200 #include <linux/shrinker.h>
201 #include <linux/srcu.h>
202 #include <linux/types.h>
203 #include <linux/workqueue.h>
204 #include <linux/zstd.h>
206 #include "bcachefs_format.h"
209 #include "nocow_locking_types.h"
213 #ifdef CONFIG_BCACHEFS_DEBUG
214 #define BCH_WRITE_REF_DEBUG
217 #define dynamic_fault(...) 0
218 #define race_fault(...) 0
220 #define trace_and_count(_c, _name, ...) \
222 this_cpu_inc((_c)->counters[BCH_COUNTER_##_name]); \
223 trace_##_name(__VA_ARGS__); \
226 #define bch2_fs_init_fault(name) \
227 dynamic_fault("bcachefs:bch_fs_init:" name)
228 #define bch2_meta_read_fault(name) \
229 dynamic_fault("bcachefs:meta:read:" name)
230 #define bch2_meta_write_fault(name) \
231 dynamic_fault("bcachefs:meta:write:" name)
234 #define BCACHEFS_LOG_PREFIX
237 #ifdef BCACHEFS_LOG_PREFIX
239 #define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
240 #define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
241 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
242 #define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
243 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
244 "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
248 #define bch2_log_msg(_c, fmt) fmt
249 #define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
250 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
251 #define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
252 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
253 "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
257 #define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
259 #define bch_info(c, fmt, ...) \
260 printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
261 #define bch_notice(c, fmt, ...) \
262 printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
263 #define bch_warn(c, fmt, ...) \
264 printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
265 #define bch_warn_ratelimited(c, fmt, ...) \
266 printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
268 #define bch_err(c, fmt, ...) \
269 printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
270 #define bch_err_dev(ca, fmt, ...) \
271 printk(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
272 #define bch_err_dev_offset(ca, _offset, fmt, ...) \
273 printk(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
274 #define bch_err_inum(c, _inum, fmt, ...) \
275 printk(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
276 #define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
277 printk(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
279 #define bch_err_ratelimited(c, fmt, ...) \
280 printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
281 #define bch_err_dev_ratelimited(ca, fmt, ...) \
282 printk_ratelimited(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
283 #define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
284 printk_ratelimited(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
285 #define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
286 printk_ratelimited(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
287 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
288 printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
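/*
 * Example of how these are used (a hypothetical call site, for illustration
 * only):
 *
 *	bch_err_inum_offset_ratelimited(c, inum, offset,
 *					"read error: %s", err_msg);
 *
 * expands to a printk_ratelimited() with the filesystem name, inode number
 * and offset prepended, so a misbehaving device can't flood the log.
 */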
290 #define bch_verbose(c, fmt, ...) \
292 if ((c)->opts.verbose) \
293 bch_info(c, fmt, ##__VA_ARGS__); \
296 #define pr_verbose_init(opts, fmt, ...) \
298 if (opt_get(opts, verbose)) \
299 pr_info(fmt, ##__VA_ARGS__); \
302 /* Parameters that are useful for debugging, but should always be compiled in: */
303 #define BCH_DEBUG_PARAMS_ALWAYS() \
304 BCH_DEBUG_PARAM(key_merging_disabled, \
305 "Disables merging of extents") \
306 BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
307 "Causes mark and sweep to compact and rewrite every " \
308 "btree node it traverses") \
309 BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
310 "Disables rewriting of btree nodes during mark and sweep")\
311 BCH_DEBUG_PARAM(btree_shrinker_disabled, \
312 "Disables the shrinker callback for the btree node cache")\
313 BCH_DEBUG_PARAM(verify_btree_ondisk, \
314 "Reread btree nodes at various points to verify the " \
315 "mergesort in the read path against modifications " \
317 BCH_DEBUG_PARAM(verify_all_btree_replicas, \
318 "When reading btree nodes, read all replicas and " \
320 BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
321 "Don't use the write buffer for backpointers, enabling "\
322 "extra runtime checks")
324 /* Parameters that should only be compiled in debug mode: */
325 #define BCH_DEBUG_PARAMS_DEBUG() \
326 BCH_DEBUG_PARAM(expensive_debug_checks, \
327 "Enables various runtime debugging checks that " \
328 "significantly affect performance") \
329 BCH_DEBUG_PARAM(debug_check_iterators, \
330 "Enables extra verification for btree iterators") \
331 BCH_DEBUG_PARAM(debug_check_btree_accounting, \
332 "Verify btree accounting for keys within a node") \
333 BCH_DEBUG_PARAM(journal_seq_verify, \
334 "Store the journal sequence number in the version " \
335 "number of every btree key, and verify that btree " \
336 "update ordering is preserved during recovery") \
337 BCH_DEBUG_PARAM(inject_invalid_keys, \
338 "Store the journal sequence number in the version " \
339 "number of every btree key, and verify that btree " \
340 "update ordering is preserved during recovery") \
341 BCH_DEBUG_PARAM(test_alloc_startup, \
342 "Force allocator startup to use the slowpath where it" \
343 "can't find enough free buckets without invalidating" \
345 BCH_DEBUG_PARAM(force_reconstruct_read, \
346 "Force reads to use the reconstruct path, when reading" \
347 "from erasure coded extents") \
348 BCH_DEBUG_PARAM(test_restart_gc, \
349 "Test restarting mark and sweep gc when bucket gens change")
351 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
353 #ifdef CONFIG_BCACHEFS_DEBUG
354 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
356 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
359 #define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
361 #undef BCH_DEBUG_PARAM
363 #ifndef CONFIG_BCACHEFS_DEBUG
364 #define BCH_DEBUG_PARAM(name, description) static const bool bch2_##name;
365 BCH_DEBUG_PARAMS_DEBUG()
366 #undef BCH_DEBUG_PARAM
369 #define BCH_TIME_STATS() \
370 x(btree_node_mem_alloc) \
371 x(btree_node_split) \
372 x(btree_node_compact) \
373 x(btree_node_merge) \
376 x(btree_interior_update_foreground) \
377 x(btree_interior_update_total) \
382 x(journal_flush_write) \
383 x(journal_noflush_write) \
384 x(journal_flush_seq) \
386 x(blocked_allocate) \
387 x(blocked_allocate_open_bucket) \
388 x(nocow_lock_contended)
390 enum bch_time_stats {
391 #define x(name) BCH_TIME_##name,
397 #include "alloc_types.h"
398 #include "btree_types.h"
399 #include "btree_write_buffer_types.h"
400 #include "buckets_types.h"
401 #include "buckets_waiting_for_journal_types.h"
402 #include "clock_types.h"
403 #include "ec_types.h"
404 #include "journal_types.h"
405 #include "keylist_types.h"
406 #include "quota_types.h"
407 #include "rebalance_types.h"
408 #include "replicas_types.h"
409 #include "subvolume_types.h"
410 #include "super_types.h"
412 /* Number of nodes btree coalesce will try to coalesce at once */
413 #define GC_MERGE_NODES 4U
415 /* Maximum number of nodes we might need to allocate atomically: */
416 #define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
418 /* Size of the freelist we allocate btree nodes from: */
419 #define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
421 #define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
426 GC_PHASE_NOT_RUNNING,
430 GC_PHASE_BTREE_stripes,
431 GC_PHASE_BTREE_extents,
432 GC_PHASE_BTREE_inodes,
433 GC_PHASE_BTREE_dirents,
434 GC_PHASE_BTREE_xattrs,
435 GC_PHASE_BTREE_alloc,
436 GC_PHASE_BTREE_quotas,
437 GC_PHASE_BTREE_reflink,
438 GC_PHASE_BTREE_subvolumes,
439 GC_PHASE_BTREE_snapshots,
441 GC_PHASE_BTREE_freespace,
442 GC_PHASE_BTREE_need_discard,
443 GC_PHASE_BTREE_backpointers,
444 GC_PHASE_BTREE_bucket_gens,
446 GC_PHASE_PENDING_DELETE,
461 typedef GENRADIX(struct reflink_gc) reflink_gc_table;
464 u64 sectors[2][BCH_DATA_NR];
469 struct percpu_ref ref;
470 struct completion ref_completion;
471 struct percpu_ref io_ref;
472 struct completion io_ref_completion;
478 * Cached version of this device's member info from superblock
479 * Committed by bch2_write_super() -> bch_fs_mi_update()
481 struct bch_member_cpu mi;
483 char name[BDEVNAME_SIZE];
485 struct bch_sb_handle disk_sb;
486 struct bch_sb *sb_read_scratch;
491 struct bch_devs_mask self;
493 /* biosets used in cloned bios for writing multiple replicas */
494 struct bio_set replica_set;
498 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
499 * gc_lock, for device resize - holding any one of them is sufficient for
500 * access; or rcu_read_lock(), but only for ptr_stale():
502 struct bucket_array __rcu *buckets_gc;
503 struct bucket_gens __rcu *bucket_gens;
505 unsigned long *buckets_nouse;
506 struct rw_semaphore bucket_lock;
508 struct bch_dev_usage *usage_base;
509 struct bch_dev_usage __percpu *usage[JOURNAL_BUF_NR];
510 struct bch_dev_usage __percpu *usage_gc;
513 u64 new_fs_bucket_idx;
516 unsigned nr_open_buckets;
517 unsigned nr_btree_reserve;
519 open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
520 open_bucket_idx_t open_buckets_partial_nr;
522 size_t inc_gen_needs_gc;
523 size_t inc_gen_really_needs_gc;
524 size_t buckets_waiting_on_journal;
526 atomic64_t rebalance_work;
528 struct journal_device journal;
529 u64 prev_journal_sector;
531 struct work_struct io_error_work;
533 /* The rest of this all shows up in sysfs */
534 atomic64_t cur_latency[2];
535 struct bch2_time_stats io_latency[2];
537 #define CONGESTED_MAX 1024
541 struct io_count __percpu *io_done;
555 BCH_FS_WRITE_DISABLE_COMPLETE,
556 BCH_FS_CLEAN_SHUTDOWN,
559 BCH_FS_TOPOLOGY_REPAIR_DONE,
560 BCH_FS_INITIAL_GC_DONE, /* kill when we enumerate fsck passes */
561 BCH_FS_CHECK_ALLOC_DONE,
562 BCH_FS_CHECK_LRUS_DONE,
563 BCH_FS_CHECK_BACKPOINTERS_DONE,
564 BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE,
566 BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */
567 BCH_FS_NEED_ANOTHER_GC,
569 BCH_FS_HAVE_DELETED_SNAPSHOTS,
573 BCH_FS_TOPOLOGY_ERROR,
575 BCH_FS_ERRORS_NOT_FIXED,
582 #define BCH_TRANSACTIONS_NR 128
584 struct btree_transaction_stats {
585 struct bch2_time_stats lock_hold_times;
587 unsigned nr_max_paths;
588 unsigned wb_updates_size;
590 char *max_paths_text;
594 u64 sectors_available;
597 struct journal_seq_blacklist_table {
599 struct journal_seq_blacklist_table_entry {
606 struct journal_keys {
610 enum btree_id btree_id:8;
617 * Gap buffer: instead of all the empty space in the array being at the
618 * end of the buffer - from @nr to @size - the empty space is at @gap.
619 * This means that sequential insertions are O(n) instead of O(n^2).
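 *
 * A sketch of an insert at position i (illustrative pseudocode, not the
 * actual helpers or field names):
 *
 *	move the entries between @gap and i so the free slots sit at i;
 *	keys[gap++] = new_key;		// O(1) once the gap is in place
 *	nr++;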
626 struct btree_path_buf {
627 struct btree_path *path;
630 #define REPLICAS_DELTA_LIST_MAX (1U << 16)
635 u32 subvol; /* Nonzero only if a subvolume points to this node: */
644 #define BCACHEFS_ROOT_SUBVOL_INUM \
645 ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
647 #define BCH_WRITE_REFS() \
659 x(delete_dead_snapshots) \
660 x(snapshot_delete_pagecache) \
664 #define x(n) BCH_WRITE_REF_##n,
673 struct list_head list;
675 struct kobject counters_kobj;
676 struct kobject internal;
677 struct kobject opts_dir;
678 struct kobject time_stats;
682 struct device *chardev;
683 struct super_block *vfs_sb;
687 /* ro/rw, add/remove/resize devices: */
688 struct rw_semaphore state_lock;
690 /* Counts outstanding writes, for clean transition to read-only */
691 #ifdef BCH_WRITE_REF_DEBUG
692 atomic_long_t writes[BCH_WRITE_REF_NR];
694 struct percpu_ref writes;
696 struct work_struct read_only_work;
698 struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
700 struct bch_replicas_cpu replicas;
701 struct bch_replicas_cpu replicas_gc;
702 struct mutex replicas_gc_lock;
703 mempool_t replicas_delta_pool;
705 struct journal_entry_res btree_root_journal_res;
706 struct journal_entry_res replicas_journal_res;
707 struct journal_entry_res clock_journal_res;
708 struct journal_entry_res dev_usage_journal_res;
710 struct bch_disk_groups_cpu __rcu *disk_groups;
712 struct bch_opts opts;
714 /* Updated by bch2_sb_update(): */
729 unsigned time_units_per_sec;
730 unsigned nsec_per_time_unit;
736 struct bch_sb_handle disk_sb;
738 unsigned short block_bits; /* ilog2(block_size) */
740 u16 btree_foreground_merge_threshold;
742 struct closure sb_write;
743 struct mutex sb_lock;
746 GENRADIX(struct snapshot_t) snapshots;
747 struct bch_snapshot_table __rcu *snapshot_table;
748 struct mutex snapshot_table_lock;
749 struct work_struct snapshot_delete_work;
750 struct work_struct snapshot_wait_for_pagecache_and_delete_work;
751 snapshot_id_list snapshots_unlinked;
752 struct mutex snapshots_unlinked_lock;
755 struct bio_set btree_bio;
756 struct workqueue_struct *io_complete_wq;
758 struct btree_root btree_roots[BTREE_ID_NR];
759 struct mutex btree_root_lock;
761 struct btree_cache btree_cache;
764 * Cache of allocated btree nodes - if we allocate a btree node and
765 * don't use it, then if we free it that space can't be reused until going
766 * _all_ the way through the allocator (which exposes us to a livelock
767 * when allocating btree reserves fails halfway through) - instead, we
768 * can stick them here:
770 struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
771 unsigned btree_reserve_cache_nr;
772 struct mutex btree_reserve_cache_lock;
774 mempool_t btree_interior_update_pool;
775 struct list_head btree_interior_update_list;
776 struct list_head btree_interior_updates_unwritten;
777 struct mutex btree_interior_update_lock;
778 struct closure_waitlist btree_interior_update_wait;
780 struct workqueue_struct *btree_interior_update_worker;
781 struct work_struct btree_interior_update_work;
783 struct list_head pending_node_rewrites;
784 struct mutex pending_node_rewrites_lock;
787 spinlock_t btree_write_error_lock;
788 struct btree_write_stats {
791 } btree_write_stats[BTREE_WRITE_TYPE_NR];
794 struct mutex btree_trans_lock;
795 struct list_head btree_trans_list;
796 mempool_t btree_paths_pool;
797 mempool_t btree_trans_mem_pool;
798 struct btree_path_buf __percpu *btree_paths_bufs;
800 struct srcu_struct btree_trans_barrier;
801 bool btree_trans_barrier_initialized;
803 struct btree_key_cache btree_key_cache;
804 unsigned btree_key_cache_btrees;
806 struct btree_write_buffer btree_write_buffer;
808 struct workqueue_struct *btree_update_wq;
809 struct workqueue_struct *btree_io_complete_wq;
810 /* copygc needs its own workqueue for index updates */
811 struct workqueue_struct *copygc_wq;
814 struct bch_devs_mask rw_devs[BCH_DATA_NR];
816 u64 capacity; /* sectors */
819 * When capacity _decreases_ (due to a disk being removed), we
820 * increment capacity_gen - this invalidates outstanding reservations
821 * and forces them to be revalidated
824 unsigned bucket_size_max;
826 atomic64_t sectors_available;
827 struct mutex sectors_available_lock;
829 struct bch_fs_pcpu __percpu *pcpu;
831 struct percpu_rw_semaphore mark_lock;
833 seqcount_t usage_lock;
834 struct bch_fs_usage *usage_base;
835 struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
836 struct bch_fs_usage __percpu *usage_gc;
837 u64 __percpu *online_reserved;
839 /* single element mempool: */
840 struct mutex usage_scratch_lock;
841 struct bch_fs_usage_online *usage_scratch;
843 struct io_clock io_clock[2];
845 /* JOURNAL SEQ BLACKLIST */
846 struct journal_seq_blacklist_table *
847 journal_seq_blacklist_table;
848 struct work_struct journal_seq_blacklist_gc_work;
851 spinlock_t freelist_lock;
852 struct closure_waitlist freelist_wait;
853 u64 blocked_allocate;
854 u64 blocked_allocate_open_bucket;
856 open_bucket_idx_t open_buckets_freelist;
857 open_bucket_idx_t open_buckets_nr_free;
858 struct closure_waitlist open_buckets_wait;
859 struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
860 open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];
862 struct write_point btree_write_point;
863 struct write_point rebalance_write_point;
865 struct write_point write_points[WRITE_POINT_MAX];
866 struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
867 struct mutex write_points_hash_lock;
868 unsigned write_points_nr;
870 struct buckets_waiting_for_journal buckets_waiting_for_journal;
871 struct work_struct discard_work;
872 struct work_struct invalidate_work;
874 /* GARBAGE COLLECTION */
875 struct task_struct *gc_thread;
877 unsigned long gc_count;
879 enum btree_id gc_gens_btree;
880 struct bpos gc_gens_pos;
883 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
884 * has been marked by GC.
886 * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
888 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
889 * can read without a lock.
891 seqcount_t gc_pos_lock;
892 struct gc_pos gc_pos;
895 * The allocation code needs gc_mark in struct bucket to be correct, but
896 * it's not while a gc is in progress.
898 struct rw_semaphore gc_lock;
899 struct mutex gc_gens_lock;
902 struct semaphore io_in_flight;
903 struct bio_set bio_read;
904 struct bio_set bio_read_split;
905 struct bio_set bio_write;
906 struct mutex bio_bounce_pages_lock;
907 mempool_t bio_bounce_pages;
908 struct bucket_nocow_lock_table
910 struct rhashtable promote_table;
912 mempool_t compression_bounce[2];
913 mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
914 mempool_t decompress_workspace;
915 ZSTD_parameters zstd_params;
917 struct crypto_shash *sha256;
918 struct crypto_sync_skcipher *chacha20;
919 struct crypto_shash *poly1305;
921 atomic64_t key_version;
923 mempool_t large_bkey_pool;
926 struct bch_fs_rebalance rebalance;
929 struct task_struct *copygc_thread;
930 copygc_heap copygc_heap;
931 struct write_point copygc_write_point;
934 wait_queue_head_t copygc_running_wq;
936 /* DATA PROGRESS STATS */
937 struct list_head data_progress_list;
938 struct mutex data_progress_lock;
941 GENRADIX(struct stripe) stripes;
942 GENRADIX(struct gc_stripe) gc_stripes;
944 ec_stripes_heap ec_stripes_heap;
945 spinlock_t ec_stripes_heap_lock;
948 struct list_head ec_stripe_head_list;
949 struct mutex ec_stripe_head_lock;
951 struct list_head ec_stripe_new_list;
952 struct mutex ec_stripe_new_lock;
954 struct work_struct ec_stripe_create_work;
957 struct bio_set ec_bioset;
959 struct work_struct ec_stripe_delete_work;
960 struct llist_head ec_stripe_delete_list;
964 reflink_gc_table reflink_gc_table;
965 size_t reflink_gc_nr;
967 /* VFS IO PATH - fs-io.c */
968 struct bio_set writepage_bioset;
969 struct bio_set dio_write_bioset;
970 struct bio_set dio_read_bioset;
971 struct bio_set nocow_flush_bioset;
974 struct list_head fsck_errors;
975 struct mutex fsck_error_lock;
979 struct bch_memquota_type quotas[QTYP_NR];
982 struct dentry *fs_debug_dir;
983 struct dentry *btree_debug_dir;
984 struct btree_debug btree_debug[BTREE_ID_NR];
985 struct btree *verify_data;
986 struct btree_node *verify_ondisk;
987 struct mutex verify_lock;
989 u64 *unused_inode_hints;
990 unsigned inode_shard_bits;
993 * A btree node on disk could have too many bsets for an iterator to fit
994 * on the stack - have to dynamically allocate them
998 mempool_t btree_bounce_pool;
1000 struct journal journal;
1001 GENRADIX(struct journal_replay *) journal_entries;
1002 u64 journal_entries_base_seq;
1003 struct journal_keys journal_keys;
1004 struct list_head journal_iters;
1006 u64 last_bucket_seq_cleanup;
1008 u64 counters_on_mount[BCH_COUNTER_NR];
1009 u64 __percpu *counters;
1011 unsigned btree_gc_periodic:1;
1012 unsigned copy_gc_enabled:1;
1013 bool promote_whole_extents;
1015 struct bch2_time_stats times[BCH_TIME_STAT_NR];
1017 struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
1020 extern struct wait_queue_head bch2_read_only_wait;
1022 static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
1024 #ifdef BCH_WRITE_REF_DEBUG
1025 atomic_long_inc(&c->writes[ref]);
1027 percpu_ref_get(&c->writes);
1031 static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
1033 #ifdef BCH_WRITE_REF_DEBUG
1034 return !test_bit(BCH_FS_GOING_RO, &c->flags) &&
1035 atomic_long_inc_not_zero(&c->writes[ref]);
1037 return percpu_ref_tryget_live(&c->writes);
1041 static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
1043 #ifdef BCH_WRITE_REF_DEBUG
1044 long v = atomic_long_dec_return(&c->writes[ref]);
1049 for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
1050 if (atomic_long_read(&c->writes[i]))
1053 set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
1054 wake_up(&bch2_read_only_wait);
1056 percpu_ref_put(&c->writes);
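/*
 * Typical usage (a sketch; the ref name and write helper here are
 * illustrative, not a specific call site):
 *
 *	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))
 *		return -EROFS;
 *	do_the_write(c);
 *	bch2_write_ref_put(c, BCH_WRITE_REF_trans);
 *
 * With BCH_WRITE_REF_DEBUG each ref type gets its own counter, so a stuck
 * transition to read only can report which subsystem still holds a write ref.
 */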
1060 static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
1062 #ifndef NO_BCACHEFS_FS
1064 c->vfs_sb->s_bdi->ra_pages = ra_pages;
1068 static inline unsigned bucket_bytes(const struct bch_dev *ca)
1070 return ca->mi.bucket_size << 9;
1073 static inline unsigned block_bytes(const struct bch_fs *c)
1075 return c->opts.block_size;
1078 static inline unsigned block_sectors(const struct bch_fs *c)
1080 return c->opts.block_size >> 9;
1083 static inline size_t btree_sectors(const struct bch_fs *c)
1085 return c->opts.btree_node_size >> 9;
1088 static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
1090 return c->btree_key_cache_btrees & (1U << btree);
1093 static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
1095 struct timespec64 t;
1098 time += c->sb.time_base_lo;
1100 t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
1101 t.tv_nsec = rem * c->sb.nsec_per_time_unit;
1105 static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
1107 return (ts.tv_sec * c->sb.time_units_per_sec +
1108 (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
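/*
 * Worked example (assuming, for illustration, a superblock time granularity
 * of 1us: time_units_per_sec == 1000000, nsec_per_time_unit == 1000,
 * time_base_lo == 0): a bch2 time of 1234567 converts to
 * { .tv_sec = 1, .tv_nsec = 234567000 }, and converting that timespec back
 * yields 1234567 again.
 */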
1111 static inline s64 bch2_current_time(const struct bch_fs *c)
1113 struct timespec64 now;
1115 ktime_get_coarse_real_ts64(&now);
1116 return timespec_to_bch2_time(c, now);
1119 static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
1121 return dev < c->sb.nr_devices && c->devs[dev];
1124 #endif /* _BCACHEFS_H */