/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"
struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction),
	 * in units of u64s:
	 */
	u16		live_u64s;
	u16		bset_u64s[MAX_BSETS];
};
struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16		size;

	/* function of size - precalculated for to_inorder() */
	u16		extra;

	u16		data_offset;
	u16		aux_data_offset;
	u16		end_offset;
};
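/*
 * Illustrative sketch (these helpers are hypothetical, not part of this
 * header): with the array treated as starting at index 1, a node's children
 * and parent are found with shifts, so traversal is cheap and the first few
 * levels of the tree pack into the same cachelines:
 */
static inline unsigned inorder_tree_child_sketch(unsigned j, unsigned right)
{
	return (j << 1) | right;	/* 2*j for left, 2*j + 1 for right */
}

static inline unsigned inorder_tree_parent_sketch(unsigned j)
{
	return j >> 1;
}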
struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;

	struct bkey_format	format;

	struct btree_node	*data;
	/*
	 * Sets of sorted keys - the real btree node - plus a binary search
	 * tree.
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;

	struct btree_write	writes[2];
	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */
	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being written:
	 */
	struct list_head	write_blocked;
	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;
	struct open_buckets	ob;

	struct list_head	list;
};
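/*
 * Illustrative sketch (helper name hypothetical, not part of this header):
 * the sorted sets live in b->set[0..nsets); walking them and summing the
 * live u64s tracked in b->nr, in the style of bset.h's for_each_bset():
 */
static inline unsigned btree_live_u64s_sketch(struct btree *b)
{
	struct bset_tree *t;
	unsigned u64s = 0;

	for (t = b->set; t < b->set + b->nsets; t++)
		u64s += b->nr.bset_u64s[t - b->set];

	return u64s;
}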
struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;

	/* Number of elements in live + freeable lists */
	unsigned		used;

	struct shrinker		shrink;
	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};
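/*
 * Minimal sketch of the alloc_lock protocol (helper name hypothetical; the
 * real helpers live in btree_cache.c): since only one task may cannibalize
 * at a time, taking the lock is just a cmpxchg on the task pointer, and
 * losers park on alloc_wait:
 */
static inline bool btree_cache_cannibalize_trylock_sketch(struct btree_cache *bc)
{
	return !cmpxchg(&bc->alloc_lock, NULL, current);
}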
struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};
/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS		(1 << 0)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT		(1 << 1)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH		(1 << 2)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency):
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos:
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 4)
#define BTREE_ITER_NOT_EXTENTS		(1 << 5)
#define BTREE_ITER_CACHED		(1 << 6)
#define BTREE_ITER_CACHED_NOFILL	(1 << 7)
#define BTREE_ITER_CACHED_NOCREATE	(1 << 8)
#define BTREE_ITER_WITH_KEY_CACHE	(1 << 9)
#define BTREE_ITER_WITH_UPDATES		(1 << 10)
#define BTREE_ITER_WITH_JOURNAL		(1 << 11)
#define __BTREE_ITER_ALL_SNAPSHOTS	(1 << 12)
#define BTREE_ITER_ALL_SNAPSHOTS	(1 << 13)
#define BTREE_ITER_FILTER_SNAPSHOTS	(1 << 14)
#define BTREE_ITER_NOPRESERVE		(1 << 15)
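/*
 * Illustrative usage sketch (the helpers are from btree_iter.h, not this
 * file; the flag combination shown is an assumption, not a rule): flags are
 * ORed together at init time, e.g. an update path that wants to see empty
 * slots and take intent locks:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos,
 *			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 */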
enum btree_path_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_RELOCK		= 1,
	BTREE_ITER_NEED_TRAVERSE	= 2,
};
#define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP		((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT	((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP		((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN		((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT		((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR	((struct btree *) 7)
#define BTREE_ITER_NO_NODE_CACHED	((struct btree *) 8)
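/*
 * Illustrative sketch (helper name hypothetical): these sentinels are small
 * integer values stored in place of a real node pointer, recording why a
 * level isn't pointing at a locked node, so telling them apart from a real
 * node is a range check:
 */
static inline bool btree_node_is_sentinel_sketch(struct btree *b)
{
	return (unsigned long) b <= (unsigned long) BTREE_ITER_NO_NODE_CACHED;
}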
struct btree_path {
	/* btree_iter_copy starts here: */
	struct bpos		pos;

	enum btree_id		btree_id:4;
	enum btree_path_uptodate uptodate:2;
	/*
	 * When true, failing to relock this path will cause the transaction to
	 * restart:
	 */
	bool			should_be_locked:1;
	unsigned		level:3,
				locks_want:4,
				nodes_locked:4,
				nodes_intent_locked:4;

	struct btree_path_level {
		struct btree	*b;
		struct btree_node_iter iter;
	} l[BTREE_MAX_DEPTH];
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned long		ip_allocated;
#endif
};
static inline struct btree_path_level *path_l(struct btree_path *path)
{
	return path->l + path->level;
}
/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct btree_trans	*trans;
	struct btree_path	*path;
	struct btree_path	*update_path;
	struct btree_path	*key_cache_path;

	enum btree_id		btree_id:4;
	unsigned		min_depth:4;

	/* btree_iter_copy starts here: */

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	u32			snapshot;

	struct bpos		pos_after_commit;
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos:
	 */
	struct bkey		k;
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned long		ip_allocated;
#endif
};
struct btree_key_cache {
	struct rhashtable	table;
	bool			table_init_done;
	struct list_head	freed;
	struct shrinker		shrink;
	unsigned		shrink_iter;

	atomic_long_t		nr_keys;
	atomic_long_t		nr_dirty;
};
struct bkey_cached_key {
	u32			btree_id;
	struct bpos		pos;
} __attribute__((packed, aligned(4)));
#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1
struct bkey_cached {
	struct btree_bkey_cached_common c;

	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_preres	res;
	struct journal_entry_pin journal;
};
struct btree_insert_entry {
	enum btree_id		btree_id:8;
	bool			insert_trigger_run:1;
	bool			overwrite_trigger_run:1;
	/*
	 * @old_k may be a key from the journal; @old_btree_u64s always refers
	 * to the size of the key being overwritten in the btree:
	 */
	u8			old_btree_u64s;
	struct btree_path	*path;
	/* key being overwritten: */
	struct bkey		old_k;
	const struct bch_val	*old_v;
	unsigned long		ip_allocated;
};
#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX		64
#else
#define BTREE_ITER_MAX		32
#endif
struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *,
					 struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};
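/*
 * Illustrative sketch (hook_fn_sketch() is hypothetical): a commit hook is
 * a callback plus an intrusive next pointer, chained onto the transaction
 * and run at commit time; the caller typically embeds the hook in a larger
 * object and recovers it with container_of():
 */
static int hook_fn_sketch(struct btree_trans *trans,
			  struct btree_trans_commit_hook *h)
{
	/* container_of(h, ...) recovers the caller's enclosing object */
	return 0;
}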
#define BTREE_TRANS_MEM_MAX	(1U << 14)
struct btree_trans {
	struct list_head	list;
	struct btree		*locking;
	unsigned		locking_path_idx;
	struct bpos		locking_pos;
	u8			locking_lock_type;

	bool			in_traverse_all:1;
	bool			memory_allocation_failure:1;
	bool			journal_transaction_names:1;
	bool			is_initial_gc:1;
	/*
	 * For when bch2_trans_update notices we'll be splitting a compressed
	 * extent:
	 */
	unsigned		extra_journal_res;
	u8			sorted[BTREE_ITER_MAX];
	struct btree_path	*paths;
	struct btree_insert_entry *updates;

	struct btree_trans_commit_hook *hooks;
	DARRAY(u64)		extra_journal_entries;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	struct journal_preres	journal_preres;
	struct disk_reservation	*disk_res;
	unsigned		journal_u64s;
	unsigned		journal_preres_u64s;
	struct replicas_delta_list *fs_usage_deltas;
};
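/*
 * Illustrative lifecycle sketch (helpers are from btree_iter.h and
 * btree_update.h; do_stuff() is hypothetical): a transaction is initialized
 * once, restarted from the top on -EINTR, and torn down when done:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_stuff(&trans) ?:
 *		      bch2_trans_commit(&trans, NULL, NULL, 0);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 */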
#define BTREE_FLAGS()			\
	x(will_make_reachable)		\
	x(write_in_flight_inner)

enum btree_flags {
#define x(flag)	BTREE_NODE_##flag,
	BTREE_FLAGS()
#undef x
};
#define x(flag)							\
static inline bool btree_node_ ## flag(struct btree *b)	\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }	\
								\
static inline void set_btree_node_ ## flag(struct btree *b)	\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }		\
								\
static inline void clear_btree_node_ ## flag(struct btree *b)	\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
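/*
 * For example, x(will_make_reachable) expands to
 * btree_node_will_make_reachable(), set_btree_node_will_make_reachable() and
 * clear_btree_node_will_make_reachable(), each a thin wrapper around the
 * corresponding bit operation on b->flags.
 */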
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}
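/*
 * Illustrative note: b->writes[] is double buffered. btree_node_write_idx()
 * (generated from a write_idx flag, assuming it appears in BTREE_FLAGS())
 * selects the slot for the write being set up, and XOR-ing the index with 1
 * yields the slot tracking the write already in flight, so e.g.:
 *
 *	bch2_journal_pin_drop(&c->journal, &btree_prev_write(b)->journal);
 *
 * releases the journal pin belonging to the completed write.
 */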
static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}
static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}
static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}
static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}
static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}
static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}
static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}
static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}
#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})
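/*
 * Illustrative sketch (helper name hypothetical; assumes bkey.h's
 * bkey_next()): walking every key in one bset, from its first key up to its
 * end offset:
 */
static inline unsigned bset_count_keys_sketch(struct btree *b,
					      struct bset_tree *t)
{
	struct bkey_packed *k;
	unsigned n = 0;

	for (k = btree_bkey_first(b, t);
	     k != btree_bkey_last(b, t);
	     k = bkey_next(k))
		n++;

	return n;
}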
static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}
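/*
 * Worked example: bset_u64s() is the bset's total key space - the region
 * between data_offset and end_offset, minus the struct bset header - while
 * b->nr.bset_u64s[] counts only live keys; the difference, bset_dead_u64s(),
 * is the space a compaction of this bset would reclaim.
 */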
static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}
enum btree_node_type {
#define x(kwd, val) BKEY_TYPE_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_btree,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}
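/*
 * Example: a leaf (level 0) of the extents btree holds keys of type
 * BKEY_TYPE_extents, while every interior node, in any btree, holds btree
 * pointers and thus has type BKEY_TYPE_btree.
 */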
#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	((1U << BKEY_TYPE_extents)|			\
	 (1U << BKEY_TYPE_alloc)|			\
	 (1U << BKEY_TYPE_inodes)|			\
	 (1U << BKEY_TYPE_stripes)|			\
	 (1U << BKEY_TYPE_reflink)|			\
	 (1U << BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS		\
	((1U << BKEY_TYPE_alloc)|			\
	 (1U << BKEY_TYPE_inodes)|			\
	 (1U << BKEY_TYPE_stripes)|			\
	 (1U << BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
#define BTREE_ID_IS_EXTENTS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_reflink)|			\
	 (1U << BTREE_ID_freespace))

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	return (1U << type) & BTREE_ID_IS_EXTENTS;
}
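/*
 * Note: this relies on enum btree_node_type values for leaf nodes coinciding
 * with the corresponding enum btree_id values (see __btree_node_type());
 * BKEY_TYPE_btree falls outside the BTREE_ID_IS_EXTENTS mask, so interior
 * nodes are never treated as extents.
 */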
#define BTREE_ID_HAS_SNAPSHOTS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_inodes)|			\
	 (1U << BTREE_ID_dirents)|			\
	 (1U << BTREE_ID_xattrs))

#define BTREE_ID_HAS_PTRS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_reflink))

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
}
enum btree_update_flags {
	__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
	__BTREE_UPDATE_KEY_CACHE_RECLAIM,

	__BTREE_TRIGGER_NORUN,		/* Don't run triggers at all */

	__BTREE_TRIGGER_INSERT,
	__BTREE_TRIGGER_OVERWRITE,

	__BTREE_TRIGGER_GC,
	__BTREE_TRIGGER_BUCKET_INVALIDATE,
	__BTREE_TRIGGER_NOATOMIC,
};
#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
#define BTREE_UPDATE_KEY_CACHE_RECLAIM	(1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)

#define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)

#define BTREE_TRIGGER_INSERT		(1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE		(1U << __BTREE_TRIGGER_OVERWRITE)

#define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)
#define BTREE_TRIGGER_WANTS_OLD_AND_NEW		\
	((1U << KEY_TYPE_alloc)|		\
	 (1U << KEY_TYPE_alloc_v2)|		\
	 (1U << KEY_TYPE_alloc_v3)|		\
	 (1U << KEY_TYPE_alloc_v4)|		\
	 (1U << KEY_TYPE_stripe)|		\
	 (1U << KEY_TYPE_inode)|		\
	 (1U << KEY_TYPE_inode_v2)|		\
	 (1U << KEY_TYPE_snapshot))
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
struct btree_root {
	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
enum btree_insert_ret {
	BTREE_INSERT_OK,
	/* leaf node needs to be split */
	BTREE_INSERT_BTREE_NODE_FULL,
	BTREE_INSERT_NEED_MARK_REPLICAS,
	BTREE_INSERT_NEED_JOURNAL_RES,
	BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};
enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};
enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */