+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H
#include <linux/list.h>
#include <linux/rhashtable.h>
+#include <linux/six.h>
#include "bkey_methods.h"
+#include "buckets_types.h"
+#include "darray.h"
#include "journal_types.h"
-#include "six.h"
struct open_bucket;
struct btree_update;
+struct btree_trans;
#define MAX_BSETS 3U

struct bset_tree {
 /* size of the binary tree and prev array */
 u16 size;

 /* function of size - precalculated for to_inorder() */
 u16 extra;

 u16 data_offset;
u16 aux_data_offset;
u16 end_offset;
-
- struct bpos max_key;
};
struct btree_write {
struct journal_entry_pin journal;
- struct closure_waitlist wait;
};
-struct btree_ob_ref {
- u8 nr;
- u8 refs[BCH_REPLICAS_MAX];
+struct btree_alloc {
+ struct open_buckets ob;
+ __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
-struct btree_alloc {
- struct btree_ob_ref ob;
- BKEY_PADDED(k);
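+
+/*
+ * Fields shared by cached btree nodes (struct btree) and cached keys (struct
+ * bkey_cached): both embed this as their first member, so locking code can
+ * operate on a struct btree_bkey_cached_common * and - a sketch - get back
+ * to the node with container_of(common, struct btree, c).
+ */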
+struct btree_bkey_cached_common {
+ struct six_lock lock;
+ u8 level;
+ u8 btree_id;
};
struct btree {
- /* Hottest entries first */
- struct rhash_head hash;
+ struct btree_bkey_cached_common c;
- /* Key/pointer for this btree node */
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-
- struct six_lock lock;
+ struct rhash_head hash;
+ u64 hash_val;
unsigned long flags;
u16 written;
- u8 level;
- u8 btree_id;
u8 nsets;
u8 nr_key_bits;
+ u16 version_ondisk;
struct bkey_format format;
struct btree_nr_keys nr;
u16 sib_u64s[2];
u16 whiteout_u64s;
- u16 uncompacted_whiteout_u64s;
- u8 page_order;
+ u8 byte_order;
u8 unpack_fn_len;
+ struct btree_write writes[2];
+
+ /* Key/pointer for this btree node */
+ __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+
/*
* XXX: add a delete sequence number, so when bch2_btree_node_relock()
* fails because the lock sequence number has changed - i.e. the
* contents were modified - we can still relock the node if it's still
* the one we want, without redoing the traversal
*/
unsigned long will_make_reachable;
- struct btree_ob_ref ob;
+ struct open_buckets ob;
/* lru list */
struct list_head list;
-
- struct btree_write writes[2];
-
-#ifdef CONFIG_BCACHEFS_DEBUG
- bool *expensive_debug_checks;
-#endif
};
struct btree_cache {
struct mutex lock;
struct list_head live;
struct list_head freeable;
- struct list_head freed;
+ struct list_head freed_pcpu;
+ struct list_head freed_nonpcpu;
/* Number of elements in live + freeable lists */
unsigned used;
unsigned reserve;
+ atomic_t dirty;
struct shrinker shrink;
/*
* If we need to allocate memory for a new btree node and that
* allocation fails, we can cannibalize another node in the btree cache
* - but it might be from the wrong btree:
*/
struct task_struct *alloc_lock;
struct closure_waitlist alloc_wait;
};
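+
+/*
+ * A btree node iterator is one (k, end) offset pair per bset in the node,
+ * kept sorted so that data[0] always refers to the iterator's current key;
+ * roughly, peek returns __btree_node_offset_to_key(b, iter->data[0].k).
+ */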
struct btree_node_iter {
- u8 is_extents;
-
struct btree_node_iter_set {
u16 k, end;
} data[MAX_BSETS];
};
+/*
+ * Iterate over all possible positions, synthesizing deleted keys for holes:
+ */
#define BTREE_ITER_SLOTS (1 << 0)
+/*
+ * Indicates that intent locks should be taken on leaf nodes, because we expect
+ * to be doing updates:
+ */
#define BTREE_ITER_INTENT (1 << 1)
+/*
+ * Causes the btree iterator code to prefetch additional btree nodes from disk:
+ */
#define BTREE_ITER_PREFETCH (1 << 2)
/*
- * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
- * @pos or the first key strictly greater than @pos
+ * Indicates that this iterator should not be reused until transaction commit,
+ * either because a pending update references it or because the update depends
+ * on that particular key being locked (e.g. by the str_hash code, for hash
+ * table consistency)
*/
-#define BTREE_ITER_IS_EXTENTS (1 << 3)
+#define BTREE_ITER_KEEP_UNTIL_COMMIT (1 << 3)
/*
- * indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
+ * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
+ * @pos or the first key strictly greater than @pos
*/
-#define BTREE_ITER_AT_END_OF_LEAF (1 << 4)
-#define BTREE_ITER_ERROR (1 << 5)
-
-enum btree_iter_uptodate {
+#define BTREE_ITER_IS_EXTENTS (1 << 4)
+#define BTREE_ITER_NOT_EXTENTS (1 << 5)
+#define BTREE_ITER_CACHED (1 << 6)
+#define BTREE_ITER_CACHED_NOFILL (1 << 7)
+#define BTREE_ITER_CACHED_NOCREATE (1 << 8)
+#define BTREE_ITER_WITH_KEY_CACHE (1 << 9)
+#define BTREE_ITER_WITH_UPDATES (1 << 10)
+#define BTREE_ITER_WITH_JOURNAL (1 << 11)
+#define __BTREE_ITER_ALL_SNAPSHOTS (1 << 12)
+#define BTREE_ITER_ALL_SNAPSHOTS (1 << 13)
+#define BTREE_ITER_FILTER_SNAPSHOTS (1 << 14)
+#define BTREE_ITER_NOPRESERVE (1 << 15)
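+
+/*
+ * Example (a sketch - the iterator helpers are declared in btree_iter.h):
+ * walking extents for an update, taking intent locks on leaf nodes:
+ *
+ *   bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos,
+ *                        BTREE_ITER_INTENT);
+ *   k = bch2_btree_iter_peek(&iter);
+ */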
+
+enum btree_path_uptodate {
BTREE_ITER_UPTODATE = 0,
- BTREE_ITER_NEED_PEEK = 1,
- BTREE_ITER_NEED_RELOCK = 2,
- BTREE_ITER_NEED_TRAVERSE = 3,
- BTREE_ITER_END = 4,
+ BTREE_ITER_NEED_RELOCK = 1,
+ BTREE_ITER_NEED_TRAVERSE = 2,
};
+#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1)
+#define BTREE_ITER_NO_NODE_DROP ((struct btree *) 2)
+#define BTREE_ITER_NO_NODE_LOCK_ROOT ((struct btree *) 3)
+#define BTREE_ITER_NO_NODE_UP ((struct btree *) 4)
+#define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5)
+#define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6)
+#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7)
+#define BTREE_ITER_NO_NODE_CACHED ((struct btree *) 8)
+
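+/*
+ * A btree_path is an actual traversal down to a btree node: it holds the
+ * position, the locks and the node pointers for each level. btree_iters are
+ * lightweight views onto paths; several iterators in one btree_trans may
+ * share a path, with @ref and @intent_ref counting the users.
+ */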
+struct btree_path {
+ u8 idx;
+ u8 sorted_idx;
+ u8 ref;
+ u8 intent_ref;
+
+ /* btree_iter_copy starts here: */
+ struct bpos pos;
+
+ enum btree_id btree_id:4;
+ bool cached:1;
+ bool preserve:1;
+ enum btree_path_uptodate uptodate:2;
+ /*
+ * When true, failing to relock this path will cause the transaction to
+ * restart:
+ */
+ bool should_be_locked:1;
+ unsigned level:3,
+ locks_want:4,
+ nodes_locked:4,
+ nodes_intent_locked:4;
+
+ struct btree_path_level {
+ struct btree *b;
+ struct btree_node_iter iter;
+ u32 lock_seq;
+ } l[BTREE_MAX_DEPTH];
+#ifdef CONFIG_BCACHEFS_DEBUG
+ unsigned long ip_allocated;
+#endif
+};
+
+static inline struct btree_path_level *path_l(struct btree_path *path)
+{
+ return path->l + path->level;
+}
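+
+/*
+ * i.e. the node a path currently points at is path_l(path)->b, and its
+ * position within that node is path_l(path)->iter.
+ */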
+
/*
* @pos - iterator's current position
- * @level - current btree depth
- * @nodes_intent_locked - bitmask indicating which locks are intent locks
+ * @path - the btree_path this iterator is a view onto; btree depth and lock
+ * state now live there
*/
struct btree_iter {
- struct bch_fs *c;
- struct bpos pos;
+ struct btree_trans *trans;
+ struct btree_path *path;
+ struct btree_path *update_path;
+ struct btree_path *key_cache_path;
- u8 flags;
- unsigned uptodate:4;
enum btree_id btree_id:4;
- unsigned level:4,
- locks_want:4,
- nodes_locked:4,
- nodes_intent_locked:4;
+ unsigned min_depth:4;
- struct btree_iter_level {
- struct btree *b;
- struct btree_node_iter iter;
- } l[BTREE_MAX_DEPTH];
+ /* btree_iter_copy starts here: */
+ u16 flags;
- u32 lock_seq[BTREE_MAX_DEPTH];
+ /* When we're filtering by snapshot, the snapshot ID we're looking for: */
+ unsigned snapshot;
+ struct bpos pos;
+ struct bpos pos_after_commit;
/*
* Current unpacked key - so that bch2_btree_iter_next()/
* bch2_btree_iter_next_slot() can correctly advance pos.
*/
struct bkey k;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ unsigned long ip_allocated;
+#endif
+};
+
+struct btree_key_cache {
+ struct mutex lock;
+ struct rhashtable table;
+ bool table_init_done;
+ struct list_head freed;
+ struct shrinker shrink;
+ unsigned shrink_iter;
+
+ size_t nr_freed;
+ atomic_long_t nr_keys;
+ atomic_long_t nr_dirty;
+};
+
+struct bkey_cached_key {
+ u32 btree_id;
+ struct bpos pos;
+} __attribute__((packed, aligned(4)));
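+
+/*
+ * packed and 4 byte aligned so the struct has no internal padding and can
+ * safely be hashed and compared as raw bytes for the key cache rhashtable.
+ */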
+
+#define BKEY_CACHED_ACCESSED 0
+#define BKEY_CACHED_DIRTY 1
+
+struct bkey_cached {
+ struct btree_bkey_cached_common c;
+
+ unsigned long flags;
+ u16 u64s;
+ bool valid;
+ u32 btree_trans_barrier_seq;
+ struct bkey_cached_key key;
+
+ struct rhash_head hash;
+ struct list_head list;
+
+ struct journal_preres res;
+ struct journal_entry_pin journal;
+ struct bkey_i *k;
+};
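+
+/*
+ * BKEY_CACHED_ACCESSED/DIRTY are bit numbers in @flags above, e.g. a dirty
+ * check is test_bit(BKEY_CACHED_DIRTY, &ck->flags).
+ */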
+
+struct btree_insert_entry {
+ unsigned flags;
+ u8 bkey_type;
+ enum btree_id btree_id:8;
+ u8 level:4;
+ bool cached:1;
+ bool insert_trigger_run:1;
+ bool overwrite_trigger_run:1;
/*
- * Circular linked list of linked iterators: linked iterators share
- * locks (e.g. two linked iterators may have the same node intent
- * locked, or read and write locked, at the same time), and insertions
- * through one iterator won't invalidate the other linked iterators.
+ * @old_k may be a key from the journal; @old_btree_u64s always refers
+ * to the size of the key being overwritten in the btree:
*/
+ u8 old_btree_u64s;
+ struct bkey_i *k;
+ struct btree_path *path;
+ /* key being overwritten: */
+ struct bkey old_k;
+ const struct bch_val *old_v;
+ unsigned long ip_allocated;
+};
- /* Must come last: */
- struct btree_iter *next;
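+
+/*
+ * Fewer paths when lockdep is enabled: lockdep caps how many locks a single
+ * task may hold at once, and every locked path counts against that limit.
+ */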
+#ifndef CONFIG_LOCKDEP
+#define BTREE_ITER_MAX 64
+#else
+#define BTREE_ITER_MAX 32
+#endif
+
+struct btree_trans_commit_hook;
+typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);
+
+struct btree_trans_commit_hook {
+ btree_trans_commit_hook_fn *fn;
+ struct btree_trans_commit_hook *next;
};
-#define BTREE_FLAG(flag) \
+#define BTREE_TRANS_MEM_MAX (1U << 14)
+
+struct btree_trans {
+ struct bch_fs *c;
+ const char *fn;
+ struct list_head list;
+ struct btree *locking;
+ unsigned locking_path_idx;
+ struct bpos locking_pos;
+ u8 locking_btree_id;
+ u8 locking_level;
+ u8 locking_lock_type;
+ pid_t pid;
+ int srcu_idx;
+
+ u8 nr_sorted;
+ u8 nr_updates;
+ u8 traverse_all_idx;
+ bool used_mempool:1;
+ bool in_traverse_all:1;
+ bool restarted:1;
+ bool memory_allocation_failure:1;
+ bool journal_transaction_names:1;
+ bool is_initial_gc:1;
+ /*
+ * For when bch2_trans_update notices we'll be splitting a compressed
+ * extent:
+ */
+ unsigned extra_journal_res;
+
+ u64 paths_allocated;
+
+ unsigned mem_top;
+ unsigned mem_bytes;
+ void *mem;
+
+ u8 sorted[BTREE_ITER_MAX];
+ struct btree_path *paths;
+ struct btree_insert_entry *updates;
+
+ /* update path: */
+ struct btree_trans_commit_hook *hooks;
+ DARRAY(u64) extra_journal_entries;
+ struct journal_entry_pin *journal_pin;
+
+ struct journal_res journal_res;
+ struct journal_preres journal_preres;
+ u64 *journal_seq;
+ struct disk_reservation *disk_res;
+ unsigned flags;
+ unsigned journal_u64s;
+ unsigned journal_preres_u64s;
+ struct replicas_delta_list *fs_usage_deltas;
+};
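+
+/*
+ * Sketch of typical usage; the init/begin/commit/exit helpers are declared
+ * elsewhere, and do_stuff() stands in for the caller's iterators and
+ * updates. On transaction restart, commit returns -EINTR and the whole
+ * sequence is retried from bch2_trans_begin():
+ *
+ *   bch2_trans_init(&trans, c, 0, 0);
+ *   do {
+ *           bch2_trans_begin(&trans);
+ *
+ *           ret = do_stuff(&trans) ?:
+ *                 bch2_trans_commit(&trans, NULL, NULL, 0);
+ *   } while (ret == -EINTR);
+ *   bch2_trans_exit(&trans);
+ */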
+
+#define BTREE_FLAGS() \
+ x(read_in_flight) \
+ x(read_error) \
+ x(dirty) \
+ x(need_write) \
+ x(write_blocked) \
+ x(will_make_reachable) \
+ x(noevict) \
+ x(write_idx) \
+ x(accessed) \
+ x(write_in_flight) \
+ x(write_in_flight_inner) \
+ x(just_written) \
+ x(dying) \
+ x(fake) \
+ x(need_rewrite) \
+ x(never_write)
+
+enum btree_flags {
+#define x(flag) BTREE_NODE_##flag,
+ BTREE_FLAGS()
+#undef x
+};
+
+#define x(flag) \
static inline bool btree_node_ ## flag(struct btree *b) \
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
\
static inline void set_btree_node_ ## flag(struct btree *b) \
{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
\
static inline void clear_btree_node_ ## flag(struct btree *b) \
{ clear_bit(BTREE_NODE_ ## flag, &b->flags); }
-enum btree_flags {
- BTREE_NODE_read_in_flight,
- BTREE_NODE_read_error,
- BTREE_NODE_dirty,
- BTREE_NODE_need_write,
- BTREE_NODE_noevict,
- BTREE_NODE_write_idx,
- BTREE_NODE_accessed,
- BTREE_NODE_write_in_flight,
- BTREE_NODE_just_written,
- BTREE_NODE_dying,
- BTREE_NODE_fake,
-};
-
-BTREE_FLAG(read_in_flight);
-BTREE_FLAG(read_error);
-BTREE_FLAG(dirty);
-BTREE_FLAG(need_write);
-BTREE_FLAG(noevict);
-BTREE_FLAG(write_idx);
-BTREE_FLAG(accessed);
-BTREE_FLAG(write_in_flight);
-BTREE_FLAG(just_written);
-BTREE_FLAG(dying);
-BTREE_FLAG(fake);
+BTREE_FLAGS()
+#undef x
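+
+/*
+ * i.e. the x-macro above expands to accessors like btree_node_dirty(),
+ * set_btree_node_dirty() and clear_btree_node_dirty() for each flag in
+ * BTREE_FLAGS().
+ */
+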
static inline struct btree_write *btree_current_write(struct btree *b)
{
 return b->writes + btree_node_write_idx(b);
}
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+ return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+ u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+ EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+ return ret;
+}
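+
+/*
+ * Offsets are in units of u64s from the start of the node's data, biased by
+ * one; a u16 offset can thus address any key in a node of up to 512KB. The
+ * two helpers above are inverses - the EBUG_ON() checks the round trip.
+ */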
+
static inline struct bset *bset(const struct btree *b,
const struct bset_tree *t)
{
- return (void *) b->data + t->data_offset * sizeof(u64);
+ return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+ t->end_offset =
+ __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+ const struct bset *i)
+{
+ t->data_offset = __btree_node_ptr_to_offset(b, i);
+ set_btree_bset_end(b, t);
}
static inline struct bset *btree_bset_first(struct btree *b)
{
 return bset(b, b->set);
}
static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
- size_t ret = (u64 *) k - (u64 *) b->data - 1;
-
- EBUG_ON(ret > U16_MAX);
- return ret;
+ return __btree_node_ptr_to_offset(b, k);
}
static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
- return (void *) ((u64 *) b->data + k + 1);
+ return __btree_node_offset_to_ptr(b, k);
}
-#define btree_bkey_first(_b, _t) (bset(_b, _t)->start)
+static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
+{
+ return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
+}
+
+#define btree_bkey_first(_b, _t) \
+({ \
+ EBUG_ON(bset(_b, _t)->start != \
+ __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+ \
+ bset(_b, _t)->start; \
+})
#define btree_bkey_last(_b, _t) \
({ \
__btree_node_offset_to_key(_b, (_t)->end_offset); \
})
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_u64s(struct bset_tree *t)
{
- t->end_offset =
- __btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
- btree_bkey_last(b, t);
+ return t->end_offset - t->data_offset -
+ sizeof(struct bset) / sizeof(u64);
}
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
- const struct bset *i)
+static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
- t->data_offset = (u64 *) i - (u64 *) b->data;
-
- EBUG_ON(bset(b, t) != i);
-
- set_btree_bset_end(b, t);
+ return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}
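+
+/*
+ * bset_dead_u64s() is the space consumed by keys that are no longer live
+ * (overwritten, or whiteouts): b->nr.bset_u64s[] counts the live u64s per
+ * bset, and compaction reclaims the difference.
+ */
+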
static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
 return i - (void *) b->data;
}
+enum btree_node_type {
+#define x(kwd, val) BKEY_TYPE_##kwd = val,
+ BCH_BTREE_IDS()
+#undef x
+ BKEY_TYPE_btree,
+};
+
+/* Type of a key in btree @id at level @level: */
+static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
+{
+ return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
+}
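+
+/*
+ * e.g. __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents; any
+ * interior node (level > 0) holds btree pointers, i.e. BKEY_TYPE_btree.
+ */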
+
/* Type of keys @b contains: */
-static inline enum bkey_type btree_node_type(struct btree *b)
+static inline enum btree_node_type btree_node_type(struct btree *b)
{
- return b->level ? BKEY_TYPE_BTREE : b->btree_id;
+ return __btree_node_type(b->c.level, b->c.btree_id);
}
-static inline const struct bkey_ops *btree_node_ops(struct btree *b)
+#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
+ ((1U << BKEY_TYPE_extents)| \
+ (1U << BKEY_TYPE_alloc)| \
+ (1U << BKEY_TYPE_inodes)| \
+ (1U << BKEY_TYPE_stripes)| \
+ (1U << BKEY_TYPE_reflink)| \
+ (1U << BKEY_TYPE_btree))
+
+#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \
+ ((1U << BKEY_TYPE_alloc)| \
+ (1U << BKEY_TYPE_inodes)| \
+ (1U << BKEY_TYPE_stripes)| \
+ (1U << BKEY_TYPE_snapshots))
+
+#define BTREE_NODE_TYPE_HAS_TRIGGERS \
+ (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
+ BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
+
+#define BTREE_ID_IS_EXTENTS \
+ ((1U << BTREE_ID_extents)| \
+ (1U << BTREE_ID_reflink)| \
+ (1U << BTREE_ID_freespace))
+
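+/*
+ * Note: this takes an enum btree_node_type but tests a mask of btree IDs;
+ * that works because leaf node types share their values with the
+ * corresponding enum btree_id (see enum btree_node_type above):
+ */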
+static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
- return &bch2_bkey_ops[btree_node_type(b)];
+ return (1U << type) & BTREE_ID_IS_EXTENTS;
}
-static inline bool btree_node_has_ptrs(struct btree *b)
+#define BTREE_ID_HAS_SNAPSHOTS \
+ ((1U << BTREE_ID_extents)| \
+ (1U << BTREE_ID_inodes)| \
+ (1U << BTREE_ID_dirents)| \
+ (1U << BTREE_ID_xattrs))
+
+#define BTREE_ID_HAS_PTRS \
+ ((1U << BTREE_ID_extents)| \
+ (1U << BTREE_ID_reflink))
+
+static inline bool btree_type_has_snapshots(enum btree_id id)
{
- return btree_type_has_ptrs(btree_node_type(b));
+ return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
}
-static inline bool btree_node_is_extents(struct btree *b)
+enum btree_update_flags {
+ __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
+ __BTREE_UPDATE_KEY_CACHE_RECLAIM,
+
+ __BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
+
+ __BTREE_TRIGGER_INSERT,
+ __BTREE_TRIGGER_OVERWRITE,
+
+ __BTREE_TRIGGER_GC,
+ __BTREE_TRIGGER_BUCKET_INVALIDATE,
+ __BTREE_TRIGGER_NOATOMIC,
+};
+
+#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
+#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
+
+#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
+
+#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
+#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
+
+#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
+#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
+#define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC)
+
+#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \
+ ((1U << KEY_TYPE_alloc)| \
+ (1U << KEY_TYPE_alloc_v2)| \
+ (1U << KEY_TYPE_alloc_v3)| \
+ (1U << KEY_TYPE_stripe)| \
+ (1U << KEY_TYPE_inode)| \
+ (1U << KEY_TYPE_inode_v2)| \
+ (1U << KEY_TYPE_snapshot))
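+
+/*
+ * When the old and new key are the same type and that type's bit is set
+ * here, the insert and overwrite triggers are run as one call that gets
+ * both keys, instead of two separate calls.
+ */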
+
+static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
- return btree_node_type(b) == BKEY_TYPE_EXTENTS;
+ return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
struct btree_root {
struct btree *b;
- struct btree_update *as;
-
/* On disk root - see async splits: */
__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
u8 level;
u8 alive;
+ s8 error;
};
-/*
- * Optional hook that will be called just prior to a btree node update, when
- * we're holding the write lock and we know what key is about to be overwritten:
- */
-
-struct btree_iter;
-struct btree_node_iter;
-
enum btree_insert_ret {
BTREE_INSERT_OK,
- /* extent spanned multiple leaf nodes: have to traverse to next node: */
- BTREE_INSERT_NEED_TRAVERSE,
- /* write lock held for too long */
- BTREE_INSERT_NEED_RESCHED,
/* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL,
- BTREE_INSERT_JOURNAL_RES_FULL,
- BTREE_INSERT_ENOSPC,
- BTREE_INSERT_NEED_GC_LOCK,
-};
-
-struct extent_insert_hook {
- enum btree_insert_ret
- (*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
- struct bkey_s_c, const struct bkey_i *);
+ BTREE_INSERT_NEED_MARK_REPLICAS,
+ BTREE_INSERT_NEED_JOURNAL_RES,
+ BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};
enum btree_gc_coalesce_fail_reason {
 BTREE_GC_COALESCE_FAIL_RESERVE_GET,
 BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
 BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
 btree_prev_sib,
 btree_next_sib,
};
-typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
- struct btree *,
- struct btree_node_iter *);
-
#endif /* _BCACHEFS_BTREE_TYPES_H */