/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "darray.h"
#include "errcode.h"
#include "journal_types.h"
#include "replicas_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS		3U

struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];

	/* live keys only: */
	u16			packed_keys;
	u16			unpacked_keys;
};

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
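/*
 * Illustrative sketch (hypothetical helpers, not part of this header): with
 * 1-based indexing the implicit tree needs no child/parent pointers - for a
 * node at index j, the children sit at 2*j and 2*j + 1 and the parent at
 * j/2, so whole subtrees pack densely onto cachelines:
 *
 *	static inline size_t tree1_child(size_t j, unsigned side)
 *	{
 *		return (j << 1) + side;		// side 0 = left, 1 = right
 *	}
 *
 *	static inline size_t tree1_parent(size_t j)
 *	{
 *		return j >> 1;
 *	}
 */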
struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
	bool			cached;
};

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;
	u64			hash_val;

	unsigned long		flags;
	u16			written;
	u8			nsets;
	u8			nr_key_bits;
	u16			version_ondisk;

	struct bkey_format	format;

	struct btree_node	*data;
	void			*aux_data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;
	u16			sib_u64s[2];
	u16			whiteout_u64s;
	u8			byte_order;
	u8			unpack_fn_len;

	struct btree_write	writes[2];

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	/* lru list */
	struct list_head	list;
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;

	/* Number of elements in live + freeable lists */
	unsigned		used;
	unsigned		reserve;
	atomic_t		dirty;
	struct shrinker		*shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};

struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
static const __maybe_unused u16 BTREE_ITER_SLOTS		= 1 << 0;
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
static const __maybe_unused u16 BTREE_ITER_INTENT		= 1 << 1;
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
static const __maybe_unused u16 BTREE_ITER_PREFETCH		= 1 << 2;
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS		= 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS		= 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED		= 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE	= 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES	= 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL	= 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS	= 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS	= 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS	= 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE		= 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL	= 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL	= 1 << 14;
#define __BTREE_ITER_FLAGS_END						15
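/*
 * Illustrative usage sketch (a hypothetical caller, not part of this header):
 * the flags combine with bitwise or - e.g. iterating every slot of a btree
 * with intent locks taken in anticipation of updates:
 *
 *	struct btree_iter iter;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, POS_MIN,
 *			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 */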
enum btree_path_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_RELOCK		= 1,
	BTREE_ITER_NEED_TRAVERSE	= 2,
};

#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif

typedef u16 btree_path_idx_t;

struct btree_path {
	btree_path_idx_t	sorted_idx;
	u8			ref;
	u8			intent_ref;

	/* btree_iter_copy starts here: */
	struct bpos		pos;

	enum btree_id		btree_id:5;
	bool			cached:1;
	bool			preserve:1;
	enum btree_path_uptodate uptodate:2;
	/*
	 * When true, failing to relock this path will cause the transaction to
	 * restart:
	 */
	bool			should_be_locked:1;
	unsigned		level:3,
				locks_want:3;
	u8			nodes_locked;

	struct btree_path_level {
		struct btree	*b;
		struct btree_node_iter iter;
		u32		lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		u64		lock_taken_time;
#endif
	}			l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
	return path->l + path->level;
}

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
	return path->ip_allocated;
#else
	return _THIS_IP_;
#endif
}

/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct btree_trans	*trans;
	btree_path_idx_t	path;
	btree_path_idx_t	update_path;
	btree_path_idx_t	key_cache_path;

	enum btree_id		btree_id:8;
	u8			min_depth;

	/* btree_iter_copy starts here: */
	u16			flags;

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	unsigned		snapshot;

	struct bpos		pos;
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/* BTREE_ITER_WITH_JOURNAL: */
	size_t			journal_idx;
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};

#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1

struct bkey_cached {
	struct btree_bkey_cached_common c;

	unsigned long		flags;
	u16			u64s;
	bool			valid;
	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_entry_pin journal;
	u64			seq;

	struct bkey_i		*k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
	return !b->cached
		? container_of(b, struct btree, c)->key.k.p
		: container_of(b, struct bkey_cached, c)->key.pos;
}

struct btree_insert_entry {
	unsigned		flags;
	u8			bkey_type;
	enum btree_id		btree_id:8;
	u8			level:4;
	bool			cached:1;
	bool			insert_trigger_run:1;
	bool			overwrite_trigger_run:1;
	bool			key_cache_already_flushed:1;
	/*
	 * @old_k may be a key from the journal; @old_btree_u64s always refers
	 * to the size of the key being overwritten in the btree:
	 */
	u8			old_btree_u64s;
	btree_path_idx_t	path;
	struct bkey_i		*k;
	/* key being overwritten: */
	struct bkey		old_k;
	const struct bch_val	*old_v;
	unsigned long		ip_allocated;
};

#define BTREE_ITER_MAX		64

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};

#define BTREE_TRANS_MEM_MAX	(1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS	10000

struct btree_trans_paths {
	unsigned long		nr_paths;
	struct btree_path	paths[];
};

struct btree_trans {
	struct bch_fs		*c;

	unsigned long		*paths_allocated;
	struct btree_path	*paths;
	u8			*sorted;
	struct btree_insert_entry *updates;

	void			*mem;
	unsigned		mem_top;
	unsigned		mem_bytes;

	btree_path_idx_t	nr_sorted;
	btree_path_idx_t	nr_paths;
	btree_path_idx_t	nr_paths_max;
	u8			fn_idx;
	u8			nr_updates;
	u8			lock_must_abort;
	bool			lock_may_not_fail:1;
	bool			srcu_held:1;
	bool			used_mempool:1;
	bool			in_traverse_all:1;
	bool			paths_sorted:1;
	bool			memory_allocation_failure:1;
	bool			journal_transaction_names:1;
	bool			journal_replay_not_finished:1;
	bool			notrace_relock_fail:1;
	bool			write_locked:1;
	enum bch_errcode	restarted:16;
	u32			restart_count;

	u64			last_begin_time;
	unsigned long		last_begin_ip;
	unsigned long		last_restarted_ip;
	unsigned long		srcu_lock_time;

	const char		*fn;
	struct btree_bkey_cached_common *locking;
	struct six_lock_waiter	locking_wait;
	int			srcu_idx;

	/* update path: */
	u16			journal_entries_u64s;
	u16			journal_entries_size;
	struct jset_entry	*journal_entries;

	struct btree_trans_commit_hook *hooks;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	u64			*journal_seq;
	struct disk_reservation *disk_res;
	unsigned		journal_u64s;
	unsigned		extra_disk_res; /* XXX kill */
	struct replicas_delta_list *fs_usage_deltas;

	/* Entries before this are zeroed out on every bch2_trans_get() call */

	struct list_head	list;
	struct closure		ref;

	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_MAX)];
	struct btree_trans_paths trans_paths;
	struct btree_path	_paths[BTREE_ITER_MAX];
	u8			_sorted[BTREE_ITER_MAX + 8];
	struct btree_insert_entry _updates[BTREE_ITER_MAX];
};

static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return iter->key_cache_path
		? trans->paths + iter->key_cache_path
		: NULL;
}

#define BCH_BTREE_WRITE_TYPES()						\
	x(initial,		0)					\
	x(init_next_bset,	1)					\
	x(cache_reclaim,	2)					\
	x(journal_reclaim,	3)					\
	x(interior,		4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
	BCH_BTREE_WRITE_TYPES()
#undef x
	BTREE_WRITE_TYPE_NR,
};
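/*
 * For reference, the x-macro above expands to:
 *
 *	enum btree_write_type {
 *		BTREE_WRITE_initial,
 *		BTREE_WRITE_init_next_bset,
 *		BTREE_WRITE_cache_reclaim,
 *		BTREE_WRITE_journal_reclaim,
 *		BTREE_WRITE_interior,
 *		BTREE_WRITE_TYPE_NR,
 *	};
 *
 * With BTREE_WRITE_TYPE_NR == 5, BTREE_WRITE_TYPE_BITS below works out to
 * ilog2(roundup_pow_of_two(5)) == 3, and BTREE_WRITE_TYPE_MASK to 0x7.
 */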
#define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS	ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))

#define BTREE_FLAGS()							\
	x(read_in_flight)						\
	x(read_error)							\
	x(dirty)							\
	x(need_write)							\
	x(write_blocked)						\
	x(will_make_reachable)						\
	x(noevict)							\
	x(write_idx)							\
	x(accessed)							\
	x(write_in_flight)						\
	x(write_in_flight_inner)					\
	x(just_written)							\
	x(dying)							\
	x(fake)								\
	x(need_rewrite)							\
	x(never_write)

enum btree_flags {
	/* First bits for btree node write type */
	BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag)	BTREE_NODE_##flag,
	BTREE_FLAGS()
#undef x
};

#define x(flag)								\
static inline bool btree_node_ ## flag(struct btree *b)			\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
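/*
 * For each flag, the x-macro above generates a test/set/clear triple; e.g.
 * for x(dirty) it expands to:
 *
 *	static inline bool btree_node_dirty(struct btree *b)
 *	{	return test_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void set_btree_node_dirty(struct btree *b)
 *	{	set_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void clear_btree_node_dirty(struct btree *b)
 *	{	clear_bit(BTREE_NODE_dirty, &b->flags); }
 */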
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}
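/*
 * Offsets within a btree node are stored in units of u64s, biased by one u64
 * past b->data - which is why fields like data_offset and end_offset above
 * fit in a u16. The two helpers are exact inverses; a hypothetical sanity
 * check, assuming some valid pointer p into the node:
 *
 *	u16 off = __btree_node_ptr_to_offset(b, p);
 *	BUG_ON(__btree_node_offset_to_ptr(b, off) != p);
 */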
static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
	BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_NR
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
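/*
 * i.e. keys in interior nodes are always btree pointers, so only level 0
 * distinguishes per-btree key types - e.g.:
 *
 *	__btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents
 *	__btree_node_type(1, BTREE_ID_extents) == BKEY_TYPE_btree
 */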
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}

const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_extents)|			\
	 BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_reflink)|			\
	 BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << type) & mask;
}
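/*
 * Note the nr + 1 shift above: the mask is indexed by btree_node_type, which
 * is the btree id plus one (BKEY_TYPE_btree occupies bit 0). The btree_id
 * indexed masks below don't need the shift.
 */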
static inline bool btree_id_is_extents(enum btree_id btree)
{
	return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_DATA)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

struct btree_root {
	struct btree		*b;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
	s8			error;
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */