/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "darray.h"
#include "errcode.h"
#include "journal_types.h"
#include "replicas_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {
        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};
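
/*
 * Illustrative: these counts are in units of u64s, so live_u64s == 512
 * corresponds to 512 * sizeof(u64) == 4KiB of live key data.
 */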

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;
};
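
/*
 * Illustrative sketch (not part of the upstream header): "as if the array
 * started at 1" is an Eytzinger-style layout - with 1-based indices, tree
 * navigation is plain index arithmetic. The helper names below are
 * hypothetical; see eytzinger.h and bset.c for the real versions.
 */
static inline unsigned bset_tree_left_child_example(unsigned j)
{
        return j * 2;
}

static inline unsigned bset_tree_right_child_example(unsigned j)
{
        return j * 2 + 1;
}

static inline unsigned bset_tree_parent_example(unsigned j)
{
        return j / 2;
}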

struct btree_write {
        struct journal_entry_pin        journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
        bool                    cached;
};

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;
        u64                     hash_val;

        unsigned long           flags;
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;
        u16                     version_ondisk;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      byte_order;
        u8                      unpack_fn_len;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;
};
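
/*
 * Illustrative sketch (hypothetical macro, not defined in this header; cf.
 * for_each_bset() in bset.h): code walks a node's sorted sets via b->nsets
 * and b->set[]:
 */
#define for_each_bset_example(_b, _t)                                   \
        for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)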

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed_pcpu;
        struct list_head        freed_nonpcpu;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        atomic_t                dirty;
        struct shrinker         *shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;
};
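
/*
 * Illustrative sketch (hypothetical helper, not upstream): the cannibalize
 * "lock" is just an owner pointer; taking it with cmpxchg() guarantees a
 * single cannibalizing thread at a time:
 */
static inline bool btree_cache_cannibalize_trylock_example(struct btree_cache *bc)
{
        return cmpxchg(&bc->alloc_lock, NULL, current) == NULL;
}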

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
static const __maybe_unused u16 BTREE_ITER_SLOTS                = 1 << 0;
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
static const __maybe_unused u16 BTREE_ITER_INTENT               = 1 << 1;
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
static const __maybe_unused u16 BTREE_ITER_PREFETCH             = 1 << 2;
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS           = 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS          = 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED               = 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE       = 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES         = 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL         = 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS      = 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS        = 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS     = 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE           = 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL        = 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL       = 1 << 14;
#define __BTREE_ITER_FLAGS_END                                         15
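
/*
 * Illustrative: the iterator flags above combine bitwise, e.g. an iterator
 * intended for updates that also sees keys in not-yet-replayed journal
 * entries (hypothetical usage, not from this file):
 *
 *      u16 flags = BTREE_ITER_INTENT|BTREE_ITER_WITH_JOURNAL;
 */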

enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_RELOCK          = 1,
        BTREE_ITER_NEED_TRAVERSE        = 2,
};

#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif

typedef u16 btree_path_idx_t;

struct btree_path {
        btree_path_idx_t        sorted_idx;
        u8                      ref;
        u8                      intent_ref;

        /* btree_iter_copy starts here: */
        struct bpos             pos;

        enum btree_id           btree_id:5;
        bool                    cached:1;
        bool                    preserve:1;
        enum btree_path_uptodate uptodate:2;
        /*
         * When true, failing to relock this path will cause the transaction to
         * restart:
         */
        bool                    should_be_locked:1;
        unsigned                level:3,
                                locks_want:3;
        u8                      nodes_locked;

        struct btree_path_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                u64             lock_taken_time;
#endif
        }                       l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
        return path->l + path->level;
}

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
        return path->ip_allocated;
#else
        return _THIS_IP_;
#endif
}

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 *
 * (@level, @locks_want and @nodes_locked now live in struct btree_path; this
 * comment predates the btree_iter/btree_path split.)
 */
struct btree_iter {
        struct btree_trans      *trans;
        btree_path_idx_t        path;
        btree_path_idx_t        update_path;
        btree_path_idx_t        key_cache_path;

        enum btree_id           btree_id:8;
        u8                      min_depth;

        /* btree_iter_copy starts here: */
        u16                     flags;

        /* When we're filtering by snapshot, the snapshot ID we're looking for: */
        unsigned                snapshot;

        struct bpos             pos;
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;

        /* BTREE_ITER_WITH_JOURNAL: */
        size_t                  journal_idx;
#ifdef TRACK_PATH_ALLOCATED
        unsigned long           ip_allocated;
#endif
};

#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        u16                     u64s;
        bool                    valid;
        u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_entry_pin journal;
        u64                     seq;

        struct bkey_i           *k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
        return !b->cached
                ? container_of(b, struct btree, c)->key.k.p
                : container_of(b, struct bkey_cached, c)->key.pos;
}

struct btree_insert_entry {
        unsigned                flags;
        u8                      bkey_type;
        enum btree_id           btree_id:8;
        u8                      level:4;
        bool                    cached:1;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
        bool                    key_cache_already_flushed:1;
        /*
         * @old_k may be a key from the journal; @old_btree_u64s always refers
         * to the size of the key being overwritten in the btree:
         */
        u8                      old_btree_u64s;
        btree_path_idx_t        path;
        struct bkey_i           *k;
        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;
        unsigned long           ip_allocated;
};

#define BTREE_ITER_INITIAL              64
#define BTREE_ITER_MAX                  (1U << 10)

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
        btree_trans_commit_hook_fn      *fn;
        struct btree_trans_commit_hook  *next;
};
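
/*
 * Illustrative (hypothetical usage; cf. bch2_trans_commit_hook()): hooks
 * form a singly linked list, pushed in LIFO order and run at commit time:
 *
 *      hook->fn     = my_hook_fn;
 *      hook->next   = trans->hooks;
 *      trans->hooks = hook;
 */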

#define BTREE_TRANS_MEM_MAX     (1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS       10000

struct btree_trans_paths {
        unsigned long           nr_paths;
        struct btree_path       paths[];
};

struct btree_trans {
        struct bch_fs           *c;

        unsigned long           *paths_allocated;
        struct btree_path       *paths;
        btree_path_idx_t        *sorted;
        struct btree_insert_entry *updates;

        void                    *mem;
        unsigned                mem_top;
        unsigned                mem_bytes;

        btree_path_idx_t        nr_sorted;
        btree_path_idx_t        nr_paths;
        btree_path_idx_t        nr_paths_max;
        u8                      fn_idx;
        u8                      nr_updates;
        u8                      lock_must_abort;
        bool                    lock_may_not_fail:1;
        bool                    srcu_held:1;
        bool                    used_mempool:1;
        bool                    in_traverse_all:1;
        bool                    paths_sorted:1;
        bool                    memory_allocation_failure:1;
        bool                    journal_transaction_names:1;
        bool                    journal_replay_not_finished:1;
        bool                    notrace_relock_fail:1;
        bool                    write_locked:1;
        enum bch_errcode        restarted:16;
        u32                     restart_count;

        u64                     last_begin_time;
        unsigned long           last_begin_ip;
        unsigned long           last_restarted_ip;
        unsigned long           srcu_lock_time;

        const char              *fn;
        struct btree_bkey_cached_common *locking;
        struct six_lock_waiter  locking_wait;
        int                     srcu_idx;

        /* update path: */
        u16                     journal_entries_u64s;
        u16                     journal_entries_size;
        struct jset_entry       *journal_entries;

        struct btree_trans_commit_hook *hooks;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
        unsigned                journal_u64s;
        unsigned                extra_disk_res; /* XXX kill */
        struct replicas_delta_list *fs_usage_deltas;

        /* Entries before this are zeroed out on every bch2_trans_get() call */

        struct list_head        list;
        struct closure          ref;

        unsigned long           _paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
        struct btree_trans_paths trans_paths;
        struct btree_path       _paths[BTREE_ITER_INITIAL];
        btree_path_idx_t        _sorted[BTREE_ITER_INITIAL + 4];
        struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
};

static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
        return iter->key_cache_path
                ? trans->paths + iter->key_cache_path
                : NULL;
}

#define BCH_BTREE_WRITE_TYPES()                                         \
        x(initial,              0)                                      \
        x(init_next_bset,       1)                                      \
        x(cache_reclaim,        2)                                      \
        x(journal_reclaim,      3)                                      \
        x(interior,             4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
        BCH_BTREE_WRITE_TYPES()
#undef x
        BTREE_WRITE_TYPE_NR,
};
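
/*
 * Illustrative: the x-macro above expands the enum to
 *
 *      BTREE_WRITE_initial,
 *      BTREE_WRITE_init_next_bset,
 *      BTREE_WRITE_cache_reclaim,
 *      BTREE_WRITE_journal_reclaim,
 *      BTREE_WRITE_interior,
 *      BTREE_WRITE_TYPE_NR,
 */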

#define BTREE_WRITE_TYPE_MASK   (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS   ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
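
/*
 * Worked example: BTREE_WRITE_TYPE_NR == 5, roundup_pow_of_two(5) == 8, so
 * BTREE_WRITE_TYPE_BITS == 3 and BTREE_WRITE_TYPE_MASK == 0x7.
 */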

#define BTREE_FLAGS()                                                   \
        x(read_in_flight)                                               \
        x(read_error)                                                   \
        x(dirty)                                                        \
        x(need_write)                                                   \
        x(write_blocked)                                                \
        x(will_make_reachable)                                          \
        x(noevict)                                                      \
        x(write_idx)                                                    \
        x(accessed)                                                     \
        x(write_in_flight)                                              \
        x(write_in_flight_inner)                                        \
        x(just_written)                                                 \
        x(dying)                                                        \
        x(fake)                                                         \
        x(need_rewrite)                                                 \
        x(never_write)

enum btree_flags {
        /* First bits for btree node write type */
        BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag) BTREE_NODE_##flag,
        BTREE_FLAGS()
#undef x
};

#define x(flag)                                                         \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
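
/*
 * Illustrative: the x-macro above stamps out a test/set/clear triple per
 * flag, e.g. btree_node_dirty(), set_btree_node_dirty() and
 * clear_btree_node_dirty() for the "dirty" bit.
 */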

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
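
/*
 * Worked example: offsets are in units of u64s, biased by one u64 past
 * b->data, so offset 4 maps to (u64 *) b->data + 5, i.e. 40 bytes into the
 * node's memory.
 */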

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})
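
/*
 * Illustrative sketch (hypothetical helper, not upstream): walking the
 * packed keys of one bset, advancing by each key's size in u64s:
 */
static inline unsigned bset_count_keys_example(struct btree *b, struct bset_tree *t)
{
        struct bkey_packed *k = btree_bkey_first(b, t);
        unsigned nr = 0;

        while (k != btree_bkey_last(b, t)) {
                k = (void *) ((u64 *) k + k->u64s);
                nr++;
        }
        return nr;
}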

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
        BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_NR
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
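
/*
 * E.g. __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents, while
 * any interior node (level > 0) yields BKEY_TYPE_btree.
 */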

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}

const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        (BIT_ULL(BKEY_TYPE_extents)|                    \
         BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_reflink)|                    \
         BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS                \
        (BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
         BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
         BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << type) & mask;
}
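
/*
 * Note the "nr + 1" shift above: enum btree_node_type reserves value 0 for
 * BKEY_TYPE_btree, so the per-btree-ID bits start at bit 1, matching
 * __btree_node_type().
 */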

static inline bool btree_id_is_extents(enum btree_id btree)
{
        return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
        const unsigned mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
        BCH_BTREE_IDS()
#undef x
        ;

        return (1U << id) & mask;
}

struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s8                      error;
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */