#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "journal_types.h"
#include "six.h"

#define MAX_BSETS		3U

struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16		live_u64s;
	u16		bset_u64s[MAX_BSETS];
};

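/*
 * Illustrative invariant (there is no helper for this in this header):
 * live_u64s is the total across all bsets, i.e.
 *
 *	nr.live_u64s == nr.bset_u64s[0] + ... + nr.bset_u64s[MAX_BSETS - 1]
 */
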
struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16		size;

	/* function of size - precalculated for to_inorder() */
	u16		extra;

	u16		data_offset;
	u16		end_offset;
};

struct btree_write {
	struct journal_entry_pin	journal;
	struct closure_waitlist		wait;
};

struct btree_alloc {
	struct open_buckets	ob;
	BKEY_PADDED(k);
};

struct btree {
	/* Hottest entries first */
	struct rhash_head	hash;

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	struct six_lock		lock;

	unsigned long		flags;
	u8			level;
	u8			btree_id;
	u8			nsets;

	struct bkey_format	format;

	struct btree_node	*data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;
	u16			uncompacted_whiteout_u64s;

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being
	 * written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	struct list_head	list;

	struct btree_write	writes[2];

#ifdef CONFIG_BCACHEFS_DEBUG
	bool			*expensive_debug_checks;
#endif
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed;

	/* Number of elements in live + freeable lists */
	unsigned		used;

	struct shrinker		shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};

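/*
 * Sketch of the cannibalize convention (illustrative - the real helpers are
 * bch2_btree_cache_cannibalize_lock()/_unlock() in btree_cache.c): a thread
 * claims the right to evict a cached node by publishing itself in alloc_lock:
 *
 *	if (!cmpxchg(&bc->alloc_lock, NULL, current))
 *		... this thread may now free another node's memory ...
 *
 * while other threads wait on alloc_wait for it to be released.
 */
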
struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

enum btree_iter_type {
	BTREE_ITER_KEYS,
	BTREE_ITER_SLOTS,
	BTREE_ITER_NODES,
};

#define BTREE_ITER_TYPE			((1 << 2) - 1)

#define BTREE_ITER_INTENT		(1 << 2)
#define BTREE_ITER_PREFETCH		(1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 4)
#define BTREE_ITER_ERROR		(1 << 5)
#define BTREE_ITER_NOUNLOCK		(1 << 6)

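/*
 * An iterator's low flag bits hold its type and the remaining bits are
 * modifiers, so callers or them together - an illustrative sketch (the exact
 * init helper and its signature are assumptions; the real entry points live
 * in btree_iter.h):
 *
 *	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, pos,
 *			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 *
 * Code dispatching on the type masks with BTREE_ITER_TYPE:
 *
 *	switch (iter->flags & BTREE_ITER_TYPE) { ... }
 */
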
enum btree_iter_uptodate {
	BTREE_ITER_UPTODATE	= 0,
	BTREE_ITER_NEED_PEEK	= 1,
	BTREE_ITER_NEED_RELOCK	= 2,
	BTREE_ITER_NEED_TRAVERSE = 3,
};

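/*
 * These values are ordered by how much work is needed to make the iterator
 * usable again, so callers can compare them numerically - a sketch of the
 * pattern:
 *
 *	if (iter->uptodate >= BTREE_ITER_NEED_RELOCK)
 *		bch2_btree_iter_traverse(iter);
 */
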
/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct bch_fs		*c;
	struct bpos		pos;

	u8			flags;
	enum btree_iter_uptodate uptodate:4;
	enum btree_id		btree_id:4;
	unsigned		level:4,
				locks_want:4,
				nodes_locked:4,
				nodes_intent_locked:4;

	struct btree_iter_level {
		struct btree	*b;
		struct btree_node_iter iter;
	} l[BTREE_MAX_DEPTH];

	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/*
	 * Circular linked list of linked iterators: linked iterators share
	 * locks (e.g. two linked iterators may have the same node intent
	 * locked, or read and write locked, at the same time), and insertions
	 * through one iterator won't invalidate the other linked iterators.
	 */

	/* Must come last: */
	struct btree_iter	*next;
};

#define BTREE_ITER_MAX		8

struct deferred_update {
	struct journal_entry_pin journal;

	enum btree_id		btree_id;

	/* must be last: */
	struct bkey_i		k;
};

struct btree_insert_entry {
	struct bkey_i		*k;

	union {
		struct btree_iter	*iter;
		struct deferred_update	*d;
	};

	bool			deferred;
};

struct btree_trans {
	struct bch_fs		*c;

	struct btree_iter	*iters;
	u64			iter_ids[BTREE_ITER_MAX];

	struct btree_insert_entry updates[BTREE_ITER_MAX];

	struct btree_iter	iters_onstack[2];
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

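/*
 * For example, BTREE_FLAG(dirty) below expands to three helpers -
 * btree_node_dirty(), set_btree_node_dirty() and clear_btree_node_dirty() -
 * all operating on the BTREE_NODE_dirty bit in b->flags.
 */
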
enum btree_flags {
	BTREE_NODE_read_in_flight,
	BTREE_NODE_read_error,
	BTREE_NODE_dirty,
	BTREE_NODE_need_write,
	BTREE_NODE_noevict,
	BTREE_NODE_write_idx,
	BTREE_NODE_accessed,
	BTREE_NODE_write_in_flight,
	BTREE_NODE_just_written,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(dirty);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(just_written);

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

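/*
 * b->writes[] is double buffered: btree_current_write() is the one new dirty
 * keys are accounted against, while btree_prev_write() may still have a
 * journal pin in flight. A hedged sketch, assuming the write path flips the
 * BTREE_NODE_write_idx bit when it kicks off a write:
 *
 *	struct btree_write *w = btree_current_write(b);
 *
 *	bch2_journal_pin_add(&c->journal, ..., &w->journal, ...);
 */
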
static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}

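/*
 * Offsets are in units of u64s, relative to b->data and biased by one u64, so
 * the two helpers above are exact inverses - illustrative invariant:
 *
 *	__btree_node_ptr_to_offset(b, __btree_node_offset_to_ptr(b, o)) == o
 *
 * The EBUG_ON() above checks the same round trip in the other direction on
 * debug builds.
 */
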
static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})

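/*
 * Typical iteration over the keys of one bset, as a sketch (bkey_next() is
 * the usual helper from bkey.h for stepping over a packed key):
 *
 *	struct bkey_packed *k;
 *
 *	for (k = btree_bkey_first(b, t);
 *	     k != btree_bkey_last(b, t);
 *	     k = bkey_next(k))
 *		...
 */
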
static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_BTREE,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
}

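/*
 * Leaf nodes hold keys of the btree's own type, while interior nodes always
 * hold btree pointers; illustrative values:
 *
 *	__btree_node_type(0, BTREE_ID_EXTENTS) == BKEY_TYPE_EXTENTS
 *	__btree_node_type(1, BTREE_ID_EXTENTS) == BKEY_TYPE_BTREE
 */
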
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->level, b->btree_id);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	return type == BKEY_TYPE_EXTENTS;
}

static inline bool btree_node_is_extents(struct btree *b)
{
	return btree_node_type_is_extents(btree_node_type(b));
}

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	switch (type) {
	case BKEY_TYPE_BTREE:
	case BKEY_TYPE_EXTENTS:
	case BKEY_TYPE_INODES:
		return true;
	default:
		return false;
	}
}

struct btree_root {
	struct btree		*b;

	struct btree_update	*as;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */

enum btree_insert_ret {
	BTREE_INSERT_OK,
	/* extent spanned multiple leaf nodes: have to traverse to next node: */
	BTREE_INSERT_NEED_TRAVERSE,
	/* leaf node needs to be split */
	BTREE_INSERT_BTREE_NODE_FULL,
	BTREE_INSERT_ENOSPC,
	BTREE_INSERT_NEED_GC_LOCK,
	BTREE_INSERT_NEED_MARK_REPLICAS,
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
							struct btree *,
							struct btree_node_iter *);

#endif /* _BCACHEFS_BTREE_TYPES_H */