/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"
#include "six.h"

struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];
};

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
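
/*
 * Illustrative sketch only (the real lookup lives in bset.c, see
 * cacheline_to_bkey()): in a binary tree laid out in an array indexed
 * from 1, the children of node j sit at 2*j and 2*j + 1, so descending
 * the tree is pure index arithmetic; cmp() and tree_key() stand in for
 * the real comparison/lookup helpers:
 *
 *	unsigned j = 1;					// root
 *	while (j < t->size)
 *		j = j * 2 + (cmp(search, tree_key(t, j)) > 0);
 *
 * Nodes near the root are packed together at the front of the array,
 * which is why the first few levels of a lookup share cachelines.
 */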

struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets		ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;

	unsigned long		flags;
	u8			nsets;

	struct bkey_format	format;

	struct btree_node	*data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;

	struct btree_write	writes[2];

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being
	 * written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	struct list_head	list;
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed;

	/* Number of elements in live + freeable lists */
	unsigned		used;

	struct shrinker		shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};
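
/*
 * Sketch of the cannibalize protocol implied by the two fields above
 * (the real helpers live in btree_cache.c; the names and exact calls
 * here are illustrative only):
 *
 *	if (cmpxchg(&bc->alloc_lock, NULL, current))	// already held
 *		closure_wait(&bc->alloc_wait, cl);	// sleep until released
 *	... evict and reuse a cached node ...
 *	bc->alloc_lock = NULL;
 *	closure_wake_up(&bc->alloc_wait);
 */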

struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

enum btree_iter_type {
	BTREE_ITER_KEYS,
	BTREE_ITER_NODES,
	BTREE_ITER_CACHED,
};

#define BTREE_ITER_TYPE			((1 << 2) - 1)

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS		(1 << 2)

/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT		(1 << 3)

/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH		(1 << 4)

/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency):
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 5)

/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos:
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 6)
#define BTREE_ITER_NOT_EXTENTS		(1 << 7)
#define BTREE_ITER_ERROR		(1 << 8)
#define BTREE_ITER_SET_POS_AFTER_COMMIT	(1 << 9)
#define BTREE_ITER_CACHED_NOFILL	(1 << 10)
#define BTREE_ITER_CACHED_NOCREATE	(1 << 11)
#define BTREE_ITER_WITH_UPDATES		(1 << 12)
#define BTREE_ITER_ALL_SNAPSHOTS	(1 << 13)
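
/*
 * Iterator flags are or'd together when an iterator is created; e.g. an
 * update path iterating extent slots might ask for (illustrative sketch -
 * the constructor lives in the iterator code, not this header):
 *
 *	iter = bch2_trans_get_iter(trans, BTREE_ID_extents, pos,
 *				   BTREE_ITER_SLOTS|
 *				   BTREE_ITER_INTENT);
 */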

enum btree_iter_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_PEEK		= 1,
	BTREE_ITER_NEED_RELOCK		= 2,
	BTREE_ITER_NEED_TRAVERSE	= 3,
};

#define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP		((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT	((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP		((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN		((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT		((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR	((struct btree *) 7)
#define BTREE_ITER_NO_NODE_CACHED	((struct btree *) 8)

/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct btree_trans	*trans;
	unsigned long		ip_allocated;

	/* btree_iter_copy starts here: */
	u16			flags;

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	u32			snapshot;

	struct bpos		pos;
	struct bpos		real_pos;
	struct bpos		pos_after_commit;

	enum btree_id		btree_id:4;
	enum btree_iter_uptodate uptodate:3;
	/*
	 * True if we've returned a key (and thus are expected to keep it
	 * locked), false after set_pos - for avoiding spurious transaction
	 * restarts in bch2_trans_relock():
	 */
	bool			should_be_locked:1;
	unsigned		level:4,
				locks_want:4,
				nodes_locked:4,
				nodes_intent_locked:4;

	struct btree_iter_level {
		struct btree	*b;
		struct btree_node_iter iter;
	} l[BTREE_MAX_DEPTH];

	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;
};

static inline enum btree_iter_type
btree_iter_type(const struct btree_iter *iter)
{
	return iter->flags & BTREE_ITER_TYPE;
}

static inline bool btree_iter_is_cached(const struct btree_iter *iter)
{
	return btree_iter_type(iter) == BTREE_ITER_CACHED;
}

static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
{
	return iter->l + iter->level;
}
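
/*
 * e.g. (sketch) getting at the btree node a non-cached iterator is
 * currently positioned on:
 *
 *	if (!btree_iter_is_cached(iter)) {
 *		struct btree *b = iter_l(iter)->b;
 *		...
 *	}
 */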

struct btree_key_cache {
	struct rhashtable	table;
	bool			table_init_done;
	struct list_head	freed;
	struct shrinker		shrink;
	unsigned		shrink_iter;

	atomic_long_t		nr_keys;
	atomic_long_t		nr_dirty;
};

struct bkey_cached_key {
	u32			btree_id;
	struct bpos		pos;
} __attribute__((packed, aligned(4)));

#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1

struct bkey_cached {
	struct btree_bkey_cached_common c;

	unsigned long		flags;
	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_preres	res;
	struct journal_entry_pin journal;

	struct bkey_i		*k;
};

struct btree_insert_entry {
	enum btree_id		btree_id:8;
	unsigned		trans_triggers_run:1;
	struct bkey_i		*k;
	struct btree_iter	*iter;
};

#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX		64
#else
#define BTREE_ITER_MAX		32
#endif

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};
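
/*
 * Sketch of defining a commit hook; struct my_state and my_hook_fn are
 * hypothetical, and registration happens in the transaction commit path
 * rather than in this header:
 *
 *	static int my_hook_fn(struct btree_trans *trans,
 *			      struct btree_trans_commit_hook *h)
 *	{
 *		struct my_state *s = container_of(h, struct my_state, hook);
 *
 *		return 0;	// int return, presumably nonzero on error
 *	}
 *
 * Hooks are chained through @next and run as part of transaction commit.
 */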

#define BTREE_TRANS_MEM_MAX	(1U << 14)

struct btree_trans {
	struct bch_fs		*c;
#ifdef CONFIG_BCACHEFS_DEBUG
	struct list_head	list;
	struct btree		*locking;
	unsigned		locking_iter_idx;
	struct bpos		locking_pos;
#endif

	bool			in_traverse_all:1;

	/*
	 * For when bch2_trans_update notices we'll be splitting a compressed
	 * extent:
	 */
	unsigned		extra_journal_res;

	struct btree_iter	*iters;
	struct btree_insert_entry *updates;

	struct btree_trans_commit_hook *hooks;
	struct jset_entry	*extra_journal_entries;
	unsigned		extra_journal_entry_u64s;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	struct journal_preres	journal_preres;

	struct disk_reservation *disk_res;

	unsigned		journal_u64s;
	unsigned		journal_preres_u64s;
	struct replicas_delta_list *fs_usage_deltas;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }
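
/*
 * Each BTREE_FLAG(foo) invocation below expands to three accessors;
 * e.g. BTREE_FLAG(need_write) generates:
 *
 *	btree_node_need_write(b)	- test the bit
 *	set_btree_node_need_write(b)	- set it
 *	clear_btree_node_need_write(b)	- clear it
 */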

enum btree_flags {
	BTREE_NODE_read_in_flight,
	BTREE_NODE_read_error,
	BTREE_NODE_need_write,
	BTREE_NODE_write_idx,
	BTREE_NODE_accessed,
	BTREE_NODE_write_in_flight,
	BTREE_NODE_write_in_flight_inner,
	BTREE_NODE_just_written,
	BTREE_NODE_need_rewrite,
	BTREE_NODE_never_write,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(need_write);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(write_in_flight_inner);
BTREE_FLAG(just_written);
BTREE_FLAG(need_rewrite);
BTREE_FLAG(never_write);

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}
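
/*
 * b->writes[] double-buffers per-write state: the write_idx flag selects
 * which slot is "current", and xor'ing it gives the other, previously
 * issued, write. E.g. (sketch):
 *
 *	struct btree_write *w = btree_current_write(b);
 *	// w->journal pins the journal until this write completes
 */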

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}
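
/*
 * Offsets are counted in u64s; the +1/-1 skip the first u64 of b->data.
 * The two helpers above are exact inverses, i.e.:
 *
 *	void *p = __btree_node_offset_to_ptr(b, off);
 *	BUG_ON(__btree_node_ptr_to_offset(b, p) != off);	// always holds
 */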

static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline unsigned
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})
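
/*
 * Together these bound a half-open range of packed keys; e.g. walking
 * every key in one bset (sketch - bkey_next() comes from the bkey code):
 *
 *	struct bkey_packed *k;
 *
 *	for (k = btree_bkey_first(b, t);
 *	     k != btree_bkey_last(b, t);
 *	     k = bkey_next(k))
 *		...
 */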

static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val) BKEY_TYPE_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_btree,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}
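
/*
 * e.g.: leaves (level 0) of the extents btree hold extent keys, while
 * interior nodes of every btree hold btree pointer keys:
 *
 *	__btree_node_type(0, BTREE_ID_extents)	== BKEY_TYPE_extents
 *	__btree_node_type(1, BTREE_ID_extents)	== BKEY_TYPE_btree
 */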

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	switch (type) {
	case BKEY_TYPE_extents:
	case BKEY_TYPE_reflink:
		return true;
	default:
		return false;
	}
}

static inline bool btree_node_is_extents(struct btree *b)
{
	return btree_node_type_is_extents(btree_node_type(b));
}

static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
{
	return __btree_node_type(iter->level, iter->btree_id);
}

static inline bool btree_iter_is_extents(struct btree_iter *iter)
{
	return btree_node_type_is_extents(btree_iter_key_type(iter));
}

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	((1U << BKEY_TYPE_extents)|			\
	 (1U << BKEY_TYPE_inodes)|			\
	 (1U << BKEY_TYPE_stripes)|			\
	 (1U << BKEY_TYPE_reflink)|			\
	 (1U << BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS		\
	((1U << BKEY_TYPE_alloc)|			\
	 (1U << BKEY_TYPE_stripes))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

#define BTREE_ID_HAS_SNAPSHOTS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_inodes)|			\
	 (1U << BTREE_ID_dirents)|			\
	 (1U << BTREE_ID_xattrs))

#define BTREE_ID_HAS_PTRS				\
	((1U << BTREE_ID_extents)|			\
	 (1U << BTREE_ID_reflink))

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
}
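
/*
 * e.g. (sketch) guarding snapshot-aware logic in an update path:
 *
 *	if (btree_type_has_snapshots(iter->btree_id))
 *		// positions in this btree are further qualified by
 *		// bpos.snapshot
 *		...
 */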

enum btree_update_flags {
	__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,

	__BTREE_TRIGGER_NORUN,		/* Don't run triggers at all */

	__BTREE_TRIGGER_INSERT,
	__BTREE_TRIGGER_OVERWRITE,

	__BTREE_TRIGGER_GC,
	__BTREE_TRIGGER_BUCKET_INVALIDATE,
	__BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE	(1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)

#define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)

#define BTREE_TRIGGER_INSERT		(1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE		(1U << __BTREE_TRIGGER_OVERWRITE)

#define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)

#define BTREE_TRIGGER_WANTS_OLD_AND_NEW		\
	((1U << KEY_TYPE_stripe)|		\
	 (1U << KEY_TYPE_inode))

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}

struct btree_root {
	struct btree		*b;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */

enum btree_insert_ret {
	BTREE_INSERT_OK,
	/* leaf node needs to be split */
	BTREE_INSERT_BTREE_NODE_FULL,
	BTREE_INSERT_NEED_MARK_REPLICAS,
	BTREE_INSERT_NEED_JOURNAL_RES,
	BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
							struct btree *,
							struct btree_node_iter *);

#endif /* _BCACHEFS_BTREE_TYPES_H */