1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BTREE_CACHE_H
3 #define _BCACHEFS_BTREE_CACHE_H
6 #include "btree_types.h"
/* Human-readable names for btree node state flags */
extern const char * const bch2_btree_node_flags[];

/* Recalculate the reserve of cached btree nodes kept for internal use */
void bch2_recalc_btree_reserve(struct bch_fs *);

/* Add/remove nodes in the btree node cache hash table: */
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
				unsigned, enum btree_id);

/*
 * Cannibalize lock: NOTE(review) — presumably allows stealing memory from
 * other cached nodes when allocation fails; confirm against the definitions
 * in btree_cache.c.
 */
void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);

/* Allocate an in-memory btree node: */
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct bch_fs *, bool);

/* Look up btree nodes, keyed by their btree pointer key: */
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
				  const struct bkey_i *, unsigned,
				  enum six_lock_type, unsigned long);

struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
					 enum btree_id, unsigned, bool);

int bch2_btree_node_prefetch(struct bch_fs *, struct btree_trans *, struct btree_path *,
			     const struct bkey_i *, enum btree_id, unsigned);

/* Drop a node from the cache */
void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);

/* Filesystem-lifetime setup/teardown of the btree node cache: */
void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);
41 static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
44 case KEY_TYPE_btree_ptr:
45 return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
46 case KEY_TYPE_btree_ptr_v2:
47 return bkey_i_to_btree_ptr_v2_c(k)->v.seq;
53 static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
55 return k->k.type == KEY_TYPE_btree_ptr_v2
56 ? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
60 /* is btree node in hash table? */
61 static inline bool btree_node_hashed(struct btree *b)
63 return b->hash_val != 0;
/*
 * Iterate over every btree node in the cache, by walking the rhashtable
 * bucket array directly: _tbl is the bucket table, _iter the bucket index,
 * _pos the rhashtable cursor, _b the current node.  Uses the RCU table and
 * entry accessors, so the caller must be inside an RCU read-side critical
 * section.
 */
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
	for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,	\
					  &(_c)->btree_cache.table),	\
	     _iter = 0;	_iter < (_tbl)->size; _iter++)			\
		rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
72 static inline size_t btree_bytes(struct bch_fs *c)
74 return c->opts.btree_node_size;
77 static inline size_t btree_max_u64s(struct bch_fs *c)
79 return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
82 static inline size_t btree_pages(struct bch_fs *c)
84 return btree_bytes(c) / PAGE_SIZE;
87 static inline unsigned btree_blocks(struct bch_fs *c)
89 return btree_sectors(c) >> c->block_bits;
/* Split a node on insert once it's more than 2/3 full (in u64s): */
#define BTREE_SPLIT_THRESHOLD(c)		(btree_max_u64s(c) * 2 / 3)

/* Consider merging a node with a sibling once it drops to 1/3 full: */
#define BTREE_FOREGROUND_MERGE_THRESHOLD(c)	(btree_max_u64s(c) * 1 / 3)
/*
 * Merge target is threshold + threshold/4 (i.e. 1.25x) — the gap keeps a
 * node that was just merged from immediately qualifying for a split again:
 */
#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c)			\
	(BTREE_FOREGROUND_MERGE_THRESHOLD(c) +			\
	 (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))
/* Root node of the btree that node _b belongs to, indexed by its btree id: */
#define btree_node_root(_c, _b)	((_c)->btree_roots[(_b)->c.btree_id].b)
/*
 * Print a description of a btree node / of the whole btree node cache into
 * a printbuf.  NOTE(review): the continuation line carrying the third
 * parameter of bch2_btree_node_to_text was missing from the extracted
 * source; reconstructed as the upstream `const struct btree *` — confirm
 * against btree_cache.c.
 */
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
			     const struct btree *);
void bch2_btree_cache_to_text(struct printbuf *, struct bch_fs *);
105 #endif /* _BCACHEFS_BTREE_CACHE_H */