/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/six.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {

        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};
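
/*
 * A sketch of the intended invariants (illustrative, not asserted here):
 * bset_u64s[i] counts the live u64s in set i, so live_u64s should equal
 * the sum of bset_u64s[0..nsets), and packed_keys + unpacked_keys is the
 * total number of live keys. bset_dead_u64s() below relies on bset_u64s[]
 * tracking live u64s per bset.
 */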

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;
};

struct btree_write {
        struct journal_entry_pin        journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
};
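
/*
 * btree_bkey_cached_common is the shared prefix of struct btree and
 * struct bkey_cached (both embed it as their first member, c), so locking
 * code can take the six lock and look at level/btree_id without caring
 * which of the two it was handed.
 */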

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;
        u64                     hash_val;

        unsigned long           flags;
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;
        u16                     version_ondisk;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      byte_order;
        u8                      unpack_fn_len;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;
};

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        atomic_t                dirty;
        struct shrinker         shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;
};

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};
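
/*
 * Each data[] entry walks one bset: k and end are key offsets within the
 * node, as produced by __btree_node_key_to_offset() below. A sketch of
 * stepping one set forward (illustrative only):
 *
 *      struct btree_node_iter_set *set = iter->data;
 *      struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
 *
 *      set->k += k->u64s;      advance past the current key
 *
 * The real iterator additionally keeps data[] ordered so the set with the
 * smallest current key comes first, giving a merged, sorted traversal of
 * up to MAX_BSETS sorted sets.
 */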

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS                (1 << 0)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT               (1 << 1)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH             (1 << 2)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency)
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT    (1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS           (1 << 4)
#define BTREE_ITER_NOT_EXTENTS          (1 << 5)
#define BTREE_ITER_ERROR                (1 << 6)
#define BTREE_ITER_CACHED               (1 << 7)
#define BTREE_ITER_CACHED_NOFILL        (1 << 8)
#define BTREE_ITER_CACHED_NOCREATE      (1 << 9)
#define BTREE_ITER_WITH_UPDATES         (1 << 10)
#define BTREE_ITER_WITH_JOURNAL         (1 << 11)
#define __BTREE_ITER_ALL_SNAPSHOTS      (1 << 12)
#define BTREE_ITER_ALL_SNAPSHOTS        (1 << 13)
#define BTREE_ITER_FILTER_SNAPSHOTS     (1 << 14)
#define BTREE_ITER_NOPRESERVE           (1 << 15)
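
/*
 * The BTREE_ITER_* flags above combine; as an illustrative (not
 * prescriptive) example, an update path might pass
 *
 *      BTREE_ITER_SLOTS|BTREE_ITER_INTENT
 *
 * to take intent locks on leaf nodes and have the iterator synthesize
 * deleted keys for empty slots, so every position yields a key.
 */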

enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_RELOCK          = 1,
        BTREE_ITER_NEED_TRAVERSE        = 2,
};

#define BTREE_ITER_NO_NODE_GET_LOCKS    ((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP         ((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT    ((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP           ((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN         ((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT         ((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR        ((struct btree *) 7)
#define BTREE_ITER_NO_NODE_CACHED       ((struct btree *) 8)
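
/*
 * These are small non-NULL sentinels stored where a struct btree * would
 * normally live (btree_path_level.b, below), recording why traversal
 * stopped at a given level; values this small can never be valid node
 * pointers.
 */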

struct btree_path {
        u8                      idx;
        u8                      sorted_idx;
        u8                      ref;
        u8                      intent_ref;

        /* btree_iter_copy starts here: */
        struct bpos             pos;

        enum btree_id           btree_id:4;
        bool                    cached:1;
        bool                    preserve:1;
        enum btree_path_uptodate uptodate:2;
        /*
         * When true, failing to relock this path will cause the transaction to
         * restart:
         */
        bool                    should_be_locked:1;
        unsigned                level:3,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;

        struct btree_path_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
        }                       l[BTREE_MAX_DEPTH];
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned long           ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
        return path->l + path->level;
}

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 *
 * (all but @pos now live in struct btree_path, above)
 */
struct btree_iter {
        struct btree_trans      *trans;
        struct btree_path       *path;

        enum btree_id           btree_id:4;
        unsigned                min_depth:4;

        /* btree_iter_copy starts here: */
        u16                     flags;

        /* When we're filtering by snapshot, the snapshot ID we're looking for: */
        unsigned                snapshot;

        struct bpos             pos;
        struct bpos             pos_after_commit;
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned long           ip_allocated;
#endif
};
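
/*
 * A btree_iter is the caller-facing handle; locking state lives in the
 * btree_path it points at. Paths are refcounted (ref/intent_ref above)
 * and may be shared by several iterators within one btree_trans.
 */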

struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
        bool                    table_init_done;
        struct list_head        freed;
        struct shrinker         shrink;
        unsigned                shrink_iter;

        size_t                  nr_freed;
        atomic_long_t           nr_keys;
        atomic_long_t           nr_dirty;
};

struct bkey_cached_key {
        u32                     btree_id;
        struct bpos             pos;
} __attribute__((packed, aligned(4)));

#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        u8                      u64s;
        bool                    valid;
        u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_preres   res;
        struct journal_entry_pin journal;

        struct bkey_i           *k;
};
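
/*
 * A bkey_cached shadows a single key, identified by (btree_id, pos) in
 * key above; k points at the current value. While BKEY_CACHED_DIRTY is
 * set, the journal pin keeps the backing journal entries from being
 * reclaimed until the key has been flushed to the btree proper.
 */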

struct btree_insert_entry {
        unsigned                flags;
        u8                      bkey_type;
        enum btree_id           btree_id:8;
        u8                      level;
        bool                    cached:1;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
        struct bkey_i           *k;
        struct btree_path       *path;
        unsigned long           ip_allocated;
};

#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX          64
#else
#define BTREE_ITER_MAX          32
#endif
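
/*
 * The smaller limit under lockdep is because every locked btree node
 * counts against lockdep's fixed per-task held-lock limit; capping the
 * number of paths keeps us under it.
 */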

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
        btree_trans_commit_hook_fn      *fn;
        struct btree_trans_commit_hook  *next;
};
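
/*
 * Usage sketch (illustrative; my_hook and my_hook_fn are hypothetical):
 * embed the hook in a caller-owned struct and recover it with
 * container_of() in the callback, which is invoked at transaction commit:
 *
 *      struct my_hook {
 *              struct btree_trans_commit_hook  h;
 *              u64                             arg;
 *      };
 *
 *      static int my_hook_fn(struct btree_trans *trans,
 *                            struct btree_trans_commit_hook *h)
 *      {
 *              struct my_hook *m = container_of(h, struct my_hook, h);
 *
 *              return 0;       use m->arg and trans as needed
 *      }
 */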

#define BTREE_TRANS_MEM_MAX     (1U << 14)

struct btree_trans {
        struct bch_fs           *c;
        const char              *fn;
        struct list_head        list;
        struct btree            *locking;
        unsigned                locking_path_idx;
        struct bpos             locking_pos;
        u8                      locking_btree_id;
        u8                      locking_level;
        pid_t                   pid;
        int                     srcu_idx;

        u8                      nr_sorted;
        u8                      nr_updates;
        bool                    used_mempool:1;
        bool                    in_traverse_all:1;
        bool                    restarted:1;
        bool                    journal_transaction_names:1;
        /*
         * For when bch2_trans_update notices we'll be splitting a compressed
         * extent:
         */
        unsigned                extra_journal_res;

        u64                     paths_allocated;

        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;

        u8                      sorted[BTREE_ITER_MAX];
        struct btree_path       *paths;
        struct btree_insert_entry *updates;

        /* update path: */
        struct btree_trans_commit_hook *hooks;
        struct jset_entry       *extra_journal_entries;
        unsigned                extra_journal_entry_u64s;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        struct journal_preres   journal_preres;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
        unsigned                flags;
        unsigned                journal_u64s;
        unsigned                journal_preres_u64s;
        struct replicas_delta_list *fs_usage_deltas;
};

#define BTREE_FLAG(flag)                                                \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }
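
/*
 * For example, BTREE_FLAG(fake) below expands to btree_node_fake(),
 * set_btree_node_fake() and clear_btree_node_fake(), all operating on
 * the BTREE_NODE_fake bit in b->flags.
 */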

enum btree_flags {
        BTREE_NODE_read_in_flight,
        BTREE_NODE_read_error,
        BTREE_NODE_dirty,
        BTREE_NODE_need_write,
        BTREE_NODE_noevict,
        BTREE_NODE_write_idx,
        BTREE_NODE_accessed,
        BTREE_NODE_write_in_flight,
        BTREE_NODE_write_in_flight_inner,
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
        BTREE_NODE_need_rewrite,
        BTREE_NODE_never_write,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(write_in_flight_inner);
BTREE_FLAG(just_written);
BTREE_FLAG(dying);
BTREE_FLAG(fake);
BTREE_FLAG(need_rewrite);
BTREE_FLAG(never_write);

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}
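
/*
 * Writes are double buffered via b->writes[2]: the write_idx flag selects
 * the current write's buffer, and XORing with 1 yields the buffer of the
 * previous write.
 */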

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
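
/*
 * Node offsets are in units of u64s, counted from one u64 past the start
 * of the node (offset 0 names (u64 *) b->data + 1); the two helpers above
 * are exact inverses, and the EBUG_ON verifies the round trip.
 */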

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val) BKEY_TYPE_##kwd = val,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_btree,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}
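
/*
 * Illustratively: __btree_node_type(0, BTREE_ID_extents) is
 * BKEY_TYPE_extents, while any interior level (level > 0) gives
 * BKEY_TYPE_btree regardless of btree_id.
 */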

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        switch (type) {
        case BKEY_TYPE_extents:
        case BKEY_TYPE_reflink:
                return true;
        default:
                return false;
        }
}

static inline bool btree_node_is_extents(struct btree *b)
{
        return btree_node_type_is_extents(btree_node_type(b));
}

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        ((1U << BKEY_TYPE_extents)|                     \
         (1U << BKEY_TYPE_inodes)|                      \
         (1U << BKEY_TYPE_stripes)|                     \
         (1U << BKEY_TYPE_reflink)|                     \
         (1U << BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS                \
        ((1U << BKEY_TYPE_alloc)|                       \
         (1U << BKEY_TYPE_inodes)|                      \
         (1U << BKEY_TYPE_stripes)|                     \
         (1U << BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
         BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)

#define BTREE_ID_HAS_SNAPSHOTS                          \
        ((1U << BTREE_ID_extents)|                      \
         (1U << BTREE_ID_inodes)|                       \
         (1U << BTREE_ID_dirents)|                      \
         (1U << BTREE_ID_xattrs))

#define BTREE_ID_HAS_PTRS                               \
        ((1U << BTREE_ID_extents)|                      \
         (1U << BTREE_ID_reflink))

static inline bool btree_type_has_snapshots(enum btree_id id)
{
        return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
}

enum btree_update_flags {
        __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,

        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */

        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,

        __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
        __BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)

#define BTREE_TRIGGER_NORUN             (1U << __BTREE_TRIGGER_NORUN)

#define BTREE_TRIGGER_INSERT            (1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE         (1U << __BTREE_TRIGGER_OVERWRITE)

#define BTREE_TRIGGER_GC                (1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC          (1U << __BTREE_TRIGGER_NOATOMIC)

#define BTREE_TRIGGER_WANTS_OLD_AND_NEW         \
        ((1U << KEY_TYPE_alloc)|                \
         (1U << KEY_TYPE_alloc_v2)|             \
         (1U << KEY_TYPE_alloc_v3)|             \
         (1U << KEY_TYPE_stripe)|               \
         (1U << KEY_TYPE_inode)|                \
         (1U << KEY_TYPE_inode_v2)|             \
         (1U << KEY_TYPE_snapshot))

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}

struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s8                      error;
};

enum btree_insert_ret {
        BTREE_INSERT_OK,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_NEED_MARK_REPLICAS,
        BTREE_INSERT_NEED_JOURNAL_RES,
        BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

#endif /* _BCACHEFS_BTREE_TYPES_H */