/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"

struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];
};

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
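
/*
 * Illustrative sketch (not part of this header): with the tree rooted at
 * array index 1, the children of node j live at 2*j and 2*j + 1, so a
 * lookup is pure index arithmetic and nearby nodes share cachelines.
 * tree_key() below is a hypothetical accessor standing in for the real
 * unpacking done in bset.c:
 *
 *	unsigned j = 1;
 *	while (j < t->size)
 *		j = j * 2 + (bpos_cmp(search, tree_key(b, t, j)) > 0);
 */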

struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
	bool			cached;
};

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;

	unsigned long		flags;
	u8			nsets;

	struct bkey_format	format;

	struct btree_node	*data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;

	struct btree_write	writes[2];

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	/* lru list */
	struct list_head	list;
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable is effectively a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;

	/* Number of elements in live + freeable lists */
	unsigned		used;

	unsigned		not_freed_lock_intent;
	unsigned		not_freed_lock_write;
	unsigned		not_freed_dirty;
	unsigned		not_freed_read_in_flight;
	unsigned		not_freed_write_in_flight;
	unsigned		not_freed_noevict;
	unsigned		not_freed_write_blocked;
	unsigned		not_freed_will_make_reachable;
	unsigned		not_freed_access_bit;

	struct shrinker		shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};
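
/*
 * Sketch of the cannibalize-lock convention above (illustrative and
 * simplified, error handling omitted): a thread claims the lock by
 * publishing its task_struct in alloc_lock, and other threads wait on
 * alloc_wait until it is cleared:
 *
 *	if (!cmpxchg(&bc->alloc_lock, NULL, current)) {
 *		... may now cannibalize a cached node ...
 *		bc->alloc_lock = NULL;
 *		closure_wake_up(&bc->alloc_wait);
 *	}
 */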

struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS		(1 << 0)
#define BTREE_ITER_ALL_LEVELS		(1 << 1)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT		(1 << 2)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH		(1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 4)
#define BTREE_ITER_NOT_EXTENTS		(1 << 5)
#define BTREE_ITER_CACHED		(1 << 6)
#define BTREE_ITER_WITH_KEY_CACHE	(1 << 7)
#define BTREE_ITER_WITH_UPDATES		(1 << 8)
#define BTREE_ITER_WITH_JOURNAL		(1 << 9)
#define __BTREE_ITER_ALL_SNAPSHOTS	(1 << 10)
#define BTREE_ITER_ALL_SNAPSHOTS	(1 << 11)
#define BTREE_ITER_FILTER_SNAPSHOTS	(1 << 12)
#define BTREE_ITER_NOPRESERVE		(1 << 13)
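
/*
 * Illustrative only: these flags are ORed together when an iterator is
 * initialized; e.g. a hypothetical update loop over extents might pass
 * BTREE_ITER_INTENT|BTREE_ITER_IS_EXTENTS, and a scan across snapshots
 * would add BTREE_ITER_ALL_SNAPSHOTS.
 */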

enum btree_path_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_RELOCK		= 1,
	BTREE_ITER_NEED_TRAVERSE	= 2,
};

struct btree_path {
	/* btree_iter_copy starts here: */
	struct bpos		pos;

	enum btree_id		btree_id:5;
	bool			cached:1;
	bool			preserve:1;
	enum btree_path_uptodate uptodate:2;
	/*
	 * When true, failing to relock this path will cause the transaction to
	 * restart:
	 */
	bool			should_be_locked:1;
	unsigned		level:3,
				locks_want:3;
	u8			nodes_locked;

	struct btree_path_level {
		struct btree	*b;
		struct btree_node_iter iter;
		u32		lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		u64		lock_taken_time;
#endif
	}			l[BTREE_MAX_DEPTH];
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned long		ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
	return path->l + path->level;
}

/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct btree_trans	*trans;
	struct btree_path	*path;
	struct btree_path	*update_path;
	struct btree_path	*key_cache_path;

	enum btree_id		btree_id:8;
	unsigned		min_depth:3;

	/* btree_iter_copy starts here: */
	u16			flags;

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	u32			snapshot;

	struct bpos		pos;
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/* BTREE_ITER_WITH_JOURNAL: */
	size_t			journal_idx;
	struct bpos		journal_pos;
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned long		ip_allocated;
#endif
};

struct btree_key_cache_freelist {
	struct bkey_cached	*objs[16];
	unsigned		nr;
};

struct btree_key_cache {
	struct mutex		lock;
	struct rhashtable	table;
	bool			table_init_done;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;
	struct shrinker		shrink;
	unsigned		shrink_iter;
	struct btree_key_cache_freelist __percpu *pcpu_freed;

	atomic_long_t		nr_freed;
	atomic_long_t		nr_keys;
	atomic_long_t		nr_dirty;
};

struct bkey_cached_key {
	u32			btree_id;
	struct bpos		pos;
} __packed __aligned(4);

#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1

struct bkey_cached {
	struct btree_bkey_cached_common c;

	unsigned long		flags;
	u16			u64s;
	bool			valid;
	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_preres	res;
	struct journal_entry_pin journal;

	struct bkey_i		*k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
	return !b->cached
		? container_of(b, struct btree, c)->key.k.p
		: container_of(b, struct bkey_cached, c)->key.pos;
}
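
/*
 * Illustrative usage: both struct btree and struct bkey_cached embed
 * struct btree_bkey_cached_common as member c, so the position of either
 * can be taken through the common header:
 *
 *	struct bpos pos = btree_node_pos(&b->c);
 */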

struct btree_insert_entry {
	unsigned		flags;
	u8			bkey_type;
	enum btree_id		btree_id:8;
	u8			level:4;
	bool			cached:1;
	bool			insert_trigger_run:1;
	bool			overwrite_trigger_run:1;
	bool			key_cache_already_flushed:1;
	/*
	 * @old_k may be a key from the journal; @old_btree_u64s always refers
	 * to the size of the key being overwritten in the btree:
	 */
	u8			old_btree_u64s;
	struct bkey_i		*k;
	struct btree_path	*path;
	/* key being overwritten: */
	struct bkey		old_k;
	const struct bch_val	*old_v;
	unsigned long		ip_allocated;
};

#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX		64
#else
#define BTREE_ITER_MAX		32
#endif

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};

#define BTREE_TRANS_MEM_MAX	(1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS	10000

struct btree_trans {
	struct bch_fs		*c;

	struct list_head	list;

	u8			lock_may_not_fail;
	struct btree_bkey_cached_common *locking;
	struct six_lock_waiter	locking_wait;

	bool			in_traverse_all:1;
	bool			memory_allocation_failure:1;
	bool			is_initial_gc:1;
	bool			journal_replay_not_finished:1;
	enum bch_errcode	restarted:16;

	unsigned long		last_restarted_ip;
	unsigned long		srcu_lock_time;

	/*
	 * For when bch2_trans_update notices we'll be splitting a compressed
	 * extent:
	 */
	unsigned		extra_journal_res;
	unsigned		nr_max_paths;

	u8			sorted[BTREE_ITER_MAX];
	struct btree_path	*paths;
	struct btree_insert_entry *updates;

	/* update path: */
	struct btree_trans_commit_hook *hooks;
	darray_u64		extra_journal_entries;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	struct journal_preres	journal_preres;
	struct disk_reservation *disk_res;
	unsigned		journal_u64s;
	unsigned		journal_preres_u64s;
	struct replicas_delta_list *fs_usage_deltas;
};

#define BCH_BTREE_WRITE_TYPES()						\
	x(initial,		0)					\
	x(init_next_bset,	1)					\
	x(cache_reclaim,	2)					\
	x(journal_reclaim,	3)					\
	x(interior,		4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
	BCH_BTREE_WRITE_TYPES()
#undef x
	BTREE_WRITE_TYPE_NR
};

#define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS	ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
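
/*
 * Illustrative: with the x() definition above, the list expands to
 * BTREE_WRITE_initial, BTREE_WRITE_init_next_bset, ... followed by
 * BTREE_WRITE_TYPE_NR, so the enum, BTREE_WRITE_TYPE_MASK and
 * BTREE_WRITE_TYPE_BITS all stay in sync with the one table.
 */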

#define BTREE_FLAGS()							\
	x(read_in_flight)						\
	x(read_error)							\
	x(dirty)							\
	x(need_write)							\
	x(write_blocked)						\
	x(will_make_reachable)						\
	x(noevict)							\
	x(write_idx)							\
	x(accessed)							\
	x(write_in_flight)						\
	x(write_in_flight_inner)					\
	x(just_written)							\
	x(dying)							\
	x(fake)								\
	x(need_rewrite)							\
	x(never_write)

enum btree_flags {
	/* First bits for btree node write type */
	BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag)	BTREE_NODE_##flag,
	BTREE_FLAGS()
#undef x
};

#define x(flag)								\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
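
/*
 * Illustrative: for every flag in BTREE_FLAGS() the macro above emits a
 * trio of accessors, e.g. btree_node_dirty(), set_btree_node_dirty() and
 * clear_btree_node_dirty(), used like:
 *
 *	if (!btree_node_dirty(b))
 *		set_btree_node_dirty(b);
 */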

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}
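
/*
 * Illustrative: offsets count u64s relative to b->data (biased by one
 * u64), and the two helpers invert each other:
 *
 *	void *p = __btree_node_offset_to_ptr(b, offset);
 *	EBUG_ON(__btree_node_ptr_to_offset(b, p) != offset);
 */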

static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}
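
/*
 * Illustrative: the usual pattern for visiting each bset of a node with
 * the helpers above (process() is hypothetical):
 *
 *	struct bset_tree *t;
 *
 *	for (t = b->set; t < b->set + b->nsets; t++)
 *		process(bset(b, t));
 */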

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val) BKEY_TYPE_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_btree,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}
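
/*
 * Illustrative: interior nodes always hold btree pointers, so only leaf
 * nodes take the per-btree key type, e.g.
 *
 *	__btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents
 *	__btree_node_type(1, BTREE_ID_extents) == BKEY_TYPE_btree
 */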

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	((1U << BKEY_TYPE_extents)|			\
	 (1U << BKEY_TYPE_alloc)|			\
	 (1U << BKEY_TYPE_inodes)|			\
	 (1U << BKEY_TYPE_stripes)|			\
	 (1U << BKEY_TYPE_reflink)|			\
	 (1U << BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS		\
	((1U << BKEY_TYPE_alloc)|			\
	 (1U << BKEY_TYPE_inodes)|			\
	 (1U << BKEY_TYPE_stripes)|			\
	 (1U << BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

#define BTREE_ID_IS_EXTENTS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_reflink)|			\
	 (1U << BTREE_ID_freespace))

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	return (1U << type) & BTREE_ID_IS_EXTENTS;
}

#define BTREE_ID_HAS_SNAPSHOTS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_inodes)|			\
	 (1U << BTREE_ID_dirents)|			\
	 (1U << BTREE_ID_xattrs))

#define BTREE_ID_HAS_PTRS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_reflink))

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	return (1U << id) & BTREE_ID_HAS_SNAPSHOTS;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
	return (1U << id) & BTREE_ID_HAS_PTRS;
}

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
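
/*
 * Illustrative: a typical caller gates trigger/GC work on these masks
 * (run_triggers() is hypothetical):
 *
 *	if (btree_node_type_needs_gc(btree_node_type(b)))
 *		run_triggers(b);
 */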

struct btree_root {
	struct btree		*b;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
	s8			error;
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */