/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS		3U
struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s:
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];
};
struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details (and the illustrative index helpers just after this
	 * struct):
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
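/*
 * Illustrative sketch only (hypothetical helpers, not part of the real
 * interface): with the 1-based array layout described above, tree navigation
 * is pure index arithmetic, with node 1 as the root:
 */
static inline unsigned __bset_tree_left_child_sketch(unsigned j)
{
	return j * 2;		/* left child of node j */
}

static inline unsigned __bset_tree_right_child_sketch(unsigned j)
{
	return j * 2 + 1;	/* right child of node j */
}

static inline unsigned __bset_tree_parent_sketch(unsigned j)
{
	return j / 2;		/* parent of node j */
}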
struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	BKEY_PADDED(k);
};
struct btree {
	/* Hottest entries first */
	struct rhash_head	hash;

	struct six_lock		lock;

	unsigned long		flags;
	u16			written;
	u8			level;
	u8			btree_id;
	u8			nsets;

	struct bkey_format	format;

	struct btree_node	*data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 * (See the usage sketch just after this struct.)
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;
	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */
	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being written:
	 */
	struct list_head	write_blocked;
	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;
	struct open_buckets	ob;

	/* lru list */
	struct list_head	list;

	struct btree_write	writes[2];

#ifdef CONFIG_BCACHEFS_DEBUG
	bool			*expensive_debug_checks;
#endif
	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
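/*
 * Usage sketch (illustrative only): the bsets of a node live in
 * set[0..nsets) and are typically walked oldest to newest:
 *
 *	struct bset_tree *t;
 *
 *	for (t = b->set; t < b->set + b->nsets; t++)
 *		process(bset(b, t));
 *
 * where process() is a hypothetical per-bset operation and bset() is defined
 * further down in this header.
 */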
struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed;

	/* Number of elements in live + freeable lists */
	unsigned		used;
	unsigned		reserve;
	struct shrinker		shrink;
	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time (see the sketch just after this struct):
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};
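/*
 * Sketch of the cannibalize-lock convention described above (hypothetical
 * helper, not the real API; assumes alloc_lock is NULL when unheld):
 */
static inline bool __btree_cache_cannibalize_trylock_sketch(struct btree_cache *bc)
{
	/* Only one task may hold the cannibalize lock at a time: */
	return cmpxchg(&bc->alloc_lock, NULL, current) == NULL;
}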
struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};
enum btree_iter_type {
	BTREE_ITER_KEYS,
	BTREE_ITER_NODES,
};

#define BTREE_ITER_TYPE			((1 << 2) - 1)
/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS		(1 << 2)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT		(1 << 3)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH		(1 << 4)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency):
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 5)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos:
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 6)
#define BTREE_ITER_ERROR		(1 << 7)
#define BTREE_ITER_SET_POS_AFTER_COMMIT	(1 << 8)
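/*
 * Illustrative flag composition (hypothetical call site): an update path that
 * wants holes synthesized as deleted keys and intent locks taken up front
 * would pass BTREE_ITER_SLOTS|BTREE_ITER_INTENT. The iterator type lives in
 * the low bits (BTREE_ITER_TYPE masks them), so type and flags pack into a
 * single flags word.
 */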
enum btree_iter_uptodate {
	BTREE_ITER_UPTODATE	= 0,
	BTREE_ITER_NEED_PEEK	= 1,
	BTREE_ITER_NEED_RELOCK	= 2,
	BTREE_ITER_NEED_TRAVERSE = 3,
};
/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct btree_trans	*trans;
	struct bpos		pos;
	struct bpos		pos_after_commit;

	u16			flags;
	u8			idx;

	enum btree_id		btree_id:4;
	enum btree_iter_uptodate uptodate:4;
	unsigned		level:4,
				locks_want:4,
				nodes_locked:4,
				nodes_intent_locked:4;
	struct btree_iter_level {
		struct btree	*b;
		struct btree_node_iter iter;
		u32		lock_seq;
	} l[BTREE_MAX_DEPTH];
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;
	unsigned long		ip_allocated;
};
static inline enum btree_iter_type btree_iter_type(struct btree_iter *iter)
{
	return iter->flags & BTREE_ITER_TYPE;
}
static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
{
	return iter->l + iter->level;
}
struct btree_insert_entry {
	unsigned		trigger_flags;
	unsigned		trans_triggers_run:1;
	struct bkey_i		*k;
	struct btree_iter	*iter;
};
#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX		64
#else
#define BTREE_ITER_MAX		32
#endif
struct btree_trans {
	struct bch_fs		*c;

	u8			nr_iters;
	u8			nr_updates;
	u8			nr_updates2;
	u8			size;
	unsigned		used_mempool:1;
	unsigned		error:1;
	unsigned		need_reset:1;

	struct btree_iter	*iters;
	struct btree_insert_entry *updates;
	struct btree_insert_entry *updates2;

	/* update path: */
	struct journal_res	journal_res;
	struct journal_preres	journal_preres;
	u64			*journal_seq;
	struct disk_reservation *disk_res;
	unsigned		flags;
	unsigned		journal_u64s;
	unsigned		journal_preres_u64s;
	struct replicas_delta_list *fs_usage_deltas;

	struct btree_iter	iters_onstack[2];
	struct btree_insert_entry updates_onstack[2];
	struct btree_insert_entry updates2_onstack[2];
};
#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }
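/*
 * For example, BTREE_FLAG(read_error) below generates btree_node_read_error(),
 * set_btree_node_read_error() and clear_btree_node_read_error(), all operating
 * on the BTREE_NODE_read_error bit in b->flags.
 */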
enum btree_flags {
	BTREE_NODE_read_in_flight,
	BTREE_NODE_read_error,
	BTREE_NODE_dirty,
	BTREE_NODE_need_write,
	BTREE_NODE_noevict,
	BTREE_NODE_write_idx,
	BTREE_NODE_accessed,
	BTREE_NODE_write_in_flight,
	BTREE_NODE_just_written,
	BTREE_NODE_dying,
	BTREE_NODE_fake,
	BTREE_NODE_old_extent_overwrite,
};
BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(dirty);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(just_written);
BTREE_FLAG(dying);
BTREE_FLAG(fake);
BTREE_FLAG(old_extent_overwrite);
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}
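/*
 * b->writes[] is double buffered: btree_current_write() is the write
 * currently being built up, btree_prev_write() the previous one, with the
 * BTREE_NODE_write_idx flag bit selecting between the two.
 */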
static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}
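/*
 * Worked example of the offset convention above: offsets are in u64s,
 * relative to one u64 past the start of the node - so offset 0 maps to
 * (u64 *) b->data + 1, offset 2 to (u64 *) b->data + 3, and
 * __btree_node_ptr_to_offset() inverts the mapping exactly (the EBUG_ON
 * checks the round trip).
 */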
static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}
static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}
#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})
static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}
enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_BTREE,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
}
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->level, b->btree_id);
}
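/*
 * Example: a leaf (level 0) node in the extents btree has node type
 * BKEY_TYPE_EXTENTS, while any interior node - in any btree - holds btree
 * pointers and so has node type BKEY_TYPE_BTREE.
 */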
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	switch (type) {
	case BKEY_TYPE_EXTENTS:
	case BKEY_TYPE_REFLINK:
		return true;
	default:
		return false;
	}
}

static inline bool btree_node_is_extents(struct btree *b)
{
	return btree_node_type_is_extents(btree_node_type(b));
}
#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	((1U << BKEY_TYPE_EXTENTS)|			\
	 (1U << BKEY_TYPE_ALLOC)|			\
	 (1U << BKEY_TYPE_INODES)|			\
	 (1U << BKEY_TYPE_REFLINK)|			\
	 (1U << BKEY_TYPE_EC)|				\
	 (1U << BKEY_TYPE_BTREE))

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	((1U << BKEY_TYPE_EXTENTS)|			\
	 (1U << BKEY_TYPE_INODES)|			\
	 (1U << BKEY_TYPE_REFLINK))
enum btree_trigger_flags {
	__BTREE_TRIGGER_NORUN,		/* Don't run triggers at all */
	__BTREE_TRIGGER_NOOVERWRITES,	/* Don't run triggers on overwrites */

	__BTREE_TRIGGER_INSERT,
	__BTREE_TRIGGER_OVERWRITE,
	__BTREE_TRIGGER_OVERWRITE_SPLIT,

	__BTREE_TRIGGER_GC,
	__BTREE_TRIGGER_BUCKET_INVALIDATE,
	__BTREE_TRIGGER_ALLOC_READ,
	__BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)
#define BTREE_TRIGGER_NOOVERWRITES	(1U << __BTREE_TRIGGER_NOOVERWRITES)
#define BTREE_TRIGGER_INSERT		(1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE		(1U << __BTREE_TRIGGER_OVERWRITE)
#define BTREE_TRIGGER_OVERWRITE_SPLIT	(1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)
#define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_ALLOC_READ	(1U << __BTREE_TRIGGER_ALLOC_READ)
#define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)
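/*
 * Illustrative composition (hypothetical call site): marking the overwritten
 * half of an update during GC might pass
 * BTREE_TRIGGER_OVERWRITE|BTREE_TRIGGER_GC, while BTREE_TRIGGER_NORUN
 * suppresses triggers entirely.
 */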
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
struct btree_root {
	struct btree		*b;

	struct btree_update	*as;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
	s8			error;
};
/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */
enum btree_insert_ret {
	BTREE_INSERT_OK,
	/* leaf node needs to be split */
	BTREE_INSERT_BTREE_NODE_FULL,
	BTREE_INSERT_ENOSPC,
	BTREE_INSERT_NEED_MARK_REPLICAS,
	BTREE_INSERT_NEED_JOURNAL_RES,
};
enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};
typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
							struct btree *,
							struct btree_node_iter *);
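/*
 * A function matching this typedef would be declared as (hypothetical name):
 *
 *	struct btree_nr_keys my_sort_fix_overlapping(struct bset *,
 *						     struct btree *,
 *						     struct btree_node_iter *);
 *
 * and can then be passed wherever a sort_fix_overlapping_fn is expected.
 */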
#endif /* _BCACHEFS_BTREE_TYPES_H */