/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/six.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {
        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;
};
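
/*
 * Layout sketch (illustrative, not part of the original header): with the
 * tree rooted at array index 1, the children of node j sit at 2*j and
 * 2*j + 1, so the hot nodes near the root share cachelines.  A hypothetical
 * helper, named here purely for illustration:
 *
 *	static inline unsigned tree1_child(unsigned j, unsigned right)
 *	{
 *		return (j << 1) + right;	// left child 2*j, right 2*j + 1
 *	}
 *
 * The real traversal helpers (to_inorder(), cacheline_to_bkey()) live in
 * bset.c and additionally use @size and @extra.
 */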

struct btree_write {
        struct journal_entry_pin        journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
};

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;
        u64                     hash_val;

        unsigned long           flags;
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;
        u16                     version_ondisk;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      byte_order;
        u8                      unpack_fn_len;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;
};

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        atomic_t                dirty;
        struct shrinker         shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;
};

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};
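
/*
 * Note (illustrative, not from the original header): a btree node iterator
 * is a small merge over the node's sorted bsets.  Each data[] entry tracks
 * one bset as a pair of key offsets, and advancing the iterator picks the
 * smallest current key among the (up to MAX_BSETS) sets, so keys come back
 * in overall sorted order, e.g. (helper name assumed from bset.h):
 *
 *	struct btree_node_iter iter;
 *	struct bkey_packed *k;
 *
 *	for_each_btree_node_key(b, k, &iter)
 *		;	// visits every key in the node, in order
 */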

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS                (1 << 0)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT               (1 << 1)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH             (1 << 2)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency)
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT    (1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS           (1 << 4)
#define BTREE_ITER_NOT_EXTENTS          (1 << 5)
#define BTREE_ITER_ERROR                (1 << 6)
#define BTREE_ITER_CACHED               (1 << 7)
#define BTREE_ITER_CACHED_NOFILL        (1 << 8)
#define BTREE_ITER_CACHED_NOCREATE      (1 << 9)
#define BTREE_ITER_WITH_UPDATES         (1 << 10)
#define __BTREE_ITER_ALL_SNAPSHOTS      (1 << 11)
#define BTREE_ITER_ALL_SNAPSHOTS        (1 << 12)
#define BTREE_ITER_FILTER_SNAPSHOTS     (1 << 13)
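
/*
 * Usage sketch (illustrative): these flags are OR'd together when an
 * iterator is initialized, e.g. to walk extents with intent locks while
 * prefetching ahead:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, pos,
 *			     BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
 *
 * (Assuming the bch2_trans_iter_init() calling convention of this source
 * revision; see btree_iter.h for the authoritative API.)
 */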

enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_RELOCK          = 1,
        BTREE_ITER_NEED_TRAVERSE        = 2,
};

#define BTREE_ITER_NO_NODE_GET_LOCKS    ((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP         ((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT    ((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP           ((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN         ((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT         ((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR        ((struct btree *) 7)
#define BTREE_ITER_NO_NODE_CACHED       ((struct btree *) 8)

struct btree_path {
        u8                      idx;
        u8                      sorted_idx;
        u8                      ref;
        u8                      intent_ref;

        /* btree_iter_copy starts here: */
        struct bpos             pos;

        enum btree_id           btree_id:4;
        bool                    cached:1;
        bool                    preserve:1;
        enum btree_path_uptodate uptodate:2;
        /*
         * When true, failing to relock this path will cause the transaction to
         * restart:
         */
        bool                    should_be_locked:1;
        unsigned                level:3,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;

        struct btree_path_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
        }                       l[BTREE_MAX_DEPTH];
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned long           ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
        return path->l + path->level;
}
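
/*
 * Illustrative usage (not from the original header): path_l() picks out the
 * per-level state for the level the path currently points at, e.g.
 *
 *	struct btree *b = path_l(path)->b;	// node at path->level
 *	u32 seq = path_l(path)->lock_seq;	// saved for later relock checks
 */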

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @l are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 *
 * Note: @level, @locks_want, @nodes_locked and @nodes_intent_locked now live
 * in struct btree_path above; an iterator is a handle onto one of the
 * transaction's paths.
 */
struct btree_iter {
        struct btree_trans      *trans;
        struct btree_path       *path;

        enum btree_id           btree_id:4;
        unsigned                min_depth:4;

        /* btree_iter_copy starts here: */
        u16                     flags;

        /* When we're filtering by snapshot, the snapshot ID we're looking for: */
        unsigned                snapshot;

        struct bpos             pos;
        struct bpos             pos_after_commit;
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned long           ip_allocated;
#endif
};

struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
        bool                    table_init_done;
        struct list_head        freed;
        struct shrinker         shrink;
        unsigned                shrink_iter;

        size_t                  nr_freed;
        atomic_long_t           nr_keys;
        atomic_long_t           nr_dirty;
};

struct bkey_cached_key {
        u32                     btree_id;
        struct bpos             pos;
} __attribute__((packed, aligned(4)));

#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        u8                      u64s;
        bool                    valid;
        u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_preres   res;
        struct journal_entry_pin journal;

        struct bkey_i           *k;
};

struct btree_insert_entry {
        unsigned                flags;
        u8                      bkey_type;
        enum btree_id           btree_id:8;
        u8                      level;
        bool                    cached:1;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
        struct bkey_i           *k;
        struct btree_path       *path;
        unsigned long           ip_allocated;
};

#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX          64
#else
#define BTREE_ITER_MAX          32
#endif

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
        btree_trans_commit_hook_fn      *fn;
        struct btree_trans_commit_hook  *next;
};

#define BTREE_TRANS_MEM_MAX     (1U << 14)

struct btree_trans {
        struct bch_fs           *c;
        struct list_head        list;
        struct btree            *locking;
        unsigned                locking_path_idx;
        struct bpos             locking_pos;
        u8                      locking_btree_id;
        u8                      locking_level;
        pid_t                   pid;
        unsigned long           ip;
        int                     srcu_idx;

        u8                      nr_sorted;
        u8                      nr_updates;
        bool                    used_mempool:1;
        bool                    in_traverse_all:1;
        bool                    restarted:1;
        /*
         * For when bch2_trans_update notices we'll be splitting a compressed
         * extent:
         */
        unsigned                extra_journal_res;

        u64                     paths_allocated;

        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;

        u8                      sorted[BTREE_ITER_MAX];
        struct btree_path       *paths;
        struct btree_insert_entry *updates;

        /* update path: */
        struct btree_trans_commit_hook *hooks;
        struct jset_entry       *extra_journal_entries;
        unsigned                extra_journal_entry_u64s;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        struct journal_preres   journal_preres;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
        unsigned                flags;
        unsigned                journal_u64s;
        unsigned                journal_preres_u64s;
        struct replicas_delta_list *fs_usage_deltas;
};
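
/*
 * Lifecycle sketch (illustrative, not from the original header): a
 * btree_trans is stack allocated, used for one or more lookups/updates,
 * then torn down, e.g.:
 *
 *	struct btree_trans trans;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	// ... iterators, updates, commit ...
 *	bch2_trans_exit(&trans);
 *
 * (Function names assume this source revision's API in btree_iter.h.
 * @paths and @updates point into preallocated arrays of BTREE_ITER_MAX
 * entries; @mem/@mem_top/@mem_bytes act as a bump allocator for
 * transaction-lifetime allocations, capped at BTREE_TRANS_MEM_MAX.)
 */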

#define BTREE_FLAG(flag)                                                \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }
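
/*
 * Expansion example (illustrative): BTREE_FLAG(need_write) below generates
 * btree_node_need_write(), set_btree_node_need_write() and
 * clear_btree_node_need_write(), each an atomic bit operation on the
 * BTREE_NODE_need_write bit of b->flags:
 *
 *	if (!btree_node_need_write(b))
 *		set_btree_node_need_write(b);
 */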

enum btree_flags {
        BTREE_NODE_read_in_flight,
        BTREE_NODE_read_error,
        BTREE_NODE_dirty,
        BTREE_NODE_need_write,
        BTREE_NODE_noevict,
        BTREE_NODE_write_idx,
        BTREE_NODE_accessed,
        BTREE_NODE_write_in_flight,
        BTREE_NODE_write_in_flight_inner,
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
        BTREE_NODE_need_rewrite,
        BTREE_NODE_never_write,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(write_in_flight_inner);
BTREE_FLAG(just_written);
BTREE_FLAG(dying);
BTREE_FLAG(fake);
BTREE_FLAG(need_rewrite);
BTREE_FLAG(never_write);

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}
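
/*
 * Note (illustrative, not from the original header): writes[2] double
 * buffers the journal pin for node writes.  BTREE_NODE_write_idx selects
 * the entry the next write will use; XOR-ing it with 1 yields the entry
 * belonging to the previous write, so a new write can be set up while the
 * previous one is still in flight.
 */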

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
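
/*
 * Offset arithmetic sketch (illustrative): offsets are in units of u64s,
 * measured from one u64 past the start of the btree node - offset 0 maps
 * to 8 bytes past b->data - and the two helpers above are exact inverses:
 *
 *	void *p = __btree_node_offset_to_ptr(b, off);
 *	BUG_ON(__btree_node_ptr_to_offset(b, p) != off);
 */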

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}
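
/*
 * Usage sketch (illustrative): iterating over every bset in a node, oldest
 * to newest, by walking the bset_tree array:
 *
 *	struct bset_tree *t;
 *
 *	for (t = b->set; t < b->set + b->nsets; t++)
 *		pr_info("bset with %u u64s\n", le16_to_cpu(bset(b, t)->u64s));
 *
 * (bset.h provides a for_each_bset() macro for exactly this loop.)
 */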

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}
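
/*
 * Accounting sketch (illustrative): bset_u64s() is the total key space in
 * a bset (end_offset - data_offset, minus the bset header), while
 * b->nr.bset_u64s[] tracks live keys only; the difference is space a
 * compaction would reclaim.  Summing it over all bsets estimates how much
 * the node would shrink:
 *
 *	unsigned dead = 0;
 *	struct bset_tree *t;
 *
 *	for (t = b->set; t < b->set + b->nsets; t++)
 *		dead += bset_dead_u64s(b, t);
 */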

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val) BKEY_TYPE_##kwd = val,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_btree,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        switch (type) {
        case BKEY_TYPE_extents:
        case BKEY_TYPE_reflink:
                return true;
        default:
                return false;
        }
}

static inline bool btree_node_is_extents(struct btree *b)
{
        return btree_node_type_is_extents(btree_node_type(b));
}

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        ((1U << BKEY_TYPE_extents)|                     \
         (1U << BKEY_TYPE_inodes)|                      \
         (1U << BKEY_TYPE_stripes)|                     \
         (1U << BKEY_TYPE_reflink)|                     \
         (1U << BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS                \
        ((1U << BKEY_TYPE_alloc)|                       \
         (1U << BKEY_TYPE_inodes)|                      \
         (1U << BKEY_TYPE_stripes)|                     \
         (1U << BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
         BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

#define BTREE_ID_HAS_SNAPSHOTS                          \
        ((1U << BTREE_ID_extents)|                      \
         (1U << BTREE_ID_inodes)|                       \
         (1U << BTREE_ID_dirents)|                      \
         (1U << BTREE_ID_xattrs))

#define BTREE_ID_HAS_PTRS                               \
        ((1U << BTREE_ID_extents)|                      \
         (1U << BTREE_ID_reflink))

static inline bool btree_type_has_snapshots(enum btree_id id)
{
        return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
}

enum btree_update_flags {
        __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,

        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */

        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,

        __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
        __BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)

#define BTREE_TRIGGER_NORUN             (1U << __BTREE_TRIGGER_NORUN)

#define BTREE_TRIGGER_INSERT            (1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE         (1U << __BTREE_TRIGGER_OVERWRITE)

#define BTREE_TRIGGER_GC                (1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC          (1U << __BTREE_TRIGGER_NOATOMIC)

#define BTREE_TRIGGER_WANTS_OLD_AND_NEW         \
        ((1U << KEY_TYPE_alloc)|                \
         (1U << KEY_TYPE_alloc_v2)|             \
         (1U << KEY_TYPE_alloc_v3)|             \
         (1U << KEY_TYPE_stripe)|               \
         (1U << KEY_TYPE_inode)|                \
         (1U << KEY_TYPE_inode_v2)|             \
         (1U << KEY_TYPE_snapshot))

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}

struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s8                      error;
};

enum btree_insert_ret {
        BTREE_INSERT_OK,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_NEED_MARK_REPLICAS,
        BTREE_INSERT_NEED_JOURNAL_RES,
        BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */