/* Source: bcachefs-tools-debian — libbcachefs/btree_cache.h (via git.sesse.net gitweb) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BTREE_CACHE_H
3 #define _BCACHEFS_BTREE_CACHE_H
4
5 #include "bcachefs.h"
6 #include "btree_types.h"
7 #include "bkey_methods.h"
8
/* Human-readable names for btree-node state flags (debugfs/printout helpers). */
extern const char * const bch2_btree_node_flags[];

struct btree_iter;

/* Recompute the number of cached nodes reserved for internal operations. */
void bch2_recalc_btree_reserve(struct bch_fs *);

/* Hash-table membership for cached nodes, keyed by btree_ptr_hash_val(). */
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
                                unsigned, enum btree_id);

/*
 * Cannibalize lock: lets one thread reclaim memory from cached btree nodes
 * when an allocation would otherwise fail.
 */
void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);

/* Allocate an in-memory btree node (the __ variant skips the cache's freelists). */
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);

/* Look up (reading in if necessary) a btree node, with locking via a btree_path. */
struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
                                  const struct bkey_i *, unsigned,
                                  enum six_lock_type, unsigned long);

/* As above, but without a btree_path/iterator (e.g. for gc or debug paths). */
struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
                                         enum btree_id, unsigned, bool);

/* Kick off an asynchronous read of a node expected to be needed soon. */
int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
                             const struct bkey_i *, enum btree_id, unsigned);

/* Drop a node from the in-memory cache (e.g. after its key is deleted). */
void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);

/* Filesystem-lifetime setup/teardown of the btree node cache. */
void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);
41
42 static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
43 {
44         switch (k->k.type) {
45         case KEY_TYPE_btree_ptr:
46                 return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
47         case KEY_TYPE_btree_ptr_v2:
48                 /*
49                  * The cast/deref is only necessary to avoid sparse endianness
50                  * warnings:
51                  */
52                 return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
53         default:
54                 return 0;
55         }
56 }
57
58 static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
59 {
60         return k->k.type == KEY_TYPE_btree_ptr_v2
61                 ? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
62                 : NULL;
63 }
64
65 /* is btree node in hash table? */
66 static inline bool btree_node_hashed(struct btree *b)
67 {
68         return b->hash_val != 0;
69 }
70
/*
 * Iterate over every btree node in the cache's rhashtable.
 * Walks each bucket (_iter) of the table (_tbl) under RCU; caller must be
 * in an RCU read-side critical section. _b is the current node, _pos the
 * rhashtable cursor.
 */
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)                \
        for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,  \
                                          &(_c)->btree_cache.table),    \
             _iter = 0; _iter < (_tbl)->size; _iter++)                  \
                rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
76
77 static inline size_t btree_buf_bytes(const struct btree *b)
78 {
79         return 1UL << b->byte_order;
80 }
81
82 static inline size_t btree_buf_max_u64s(const struct btree *b)
83 {
84         return (btree_buf_bytes(b) - sizeof(struct btree_node)) / sizeof(u64);
85 }
86
87 static inline size_t btree_max_u64s(const struct bch_fs *c)
88 {
89         return (c->opts.btree_node_size - sizeof(struct btree_node)) / sizeof(u64);
90 }
91
92 static inline size_t btree_sectors(const struct bch_fs *c)
93 {
94         return c->opts.btree_node_size >> SECTOR_SHIFT;
95 }
96
97 static inline unsigned btree_blocks(const struct bch_fs *c)
98 {
99         return btree_sectors(c) >> c->block_bits;
100 }
101
/* Split a node once it's more than 2/3 full. */
#define BTREE_SPLIT_THRESHOLD(c)                (btree_max_u64s(c) * 2 / 3)

/*
 * Merge siblings once a node drops below 1/3 full; the hysteresis margin
 * (threshold + 1/4 of it) keeps a node from oscillating between split
 * and merge under mixed insert/delete workloads.
 */
#define BTREE_FOREGROUND_MERGE_THRESHOLD(c)     (btree_max_u64s(c) * 1 / 3)
#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c)                    \
        (BTREE_FOREGROUND_MERGE_THRESHOLD(c) +                  \
         (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))
108
109 static inline unsigned btree_id_nr_alive(struct bch_fs *c)
110 {
111         return BTREE_ID_NR + c->btree_roots_extra.nr;
112 }
113
114 static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned id)
115 {
116         if (likely(id < BTREE_ID_NR)) {
117                 return &c->btree_roots_known[id];
118         } else {
119                 unsigned idx = id - BTREE_ID_NR;
120
121                 EBUG_ON(idx >= c->btree_roots_extra.nr);
122                 return &c->btree_roots_extra.data[idx];
123         }
124 }
125
126 static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
127 {
128         return bch2_btree_id_root(c, b->c.btree_id)->b;
129 }
130
/* Human-readable name for a btree id (e.g. for error messages). */
const char *bch2_btree_id_str(enum btree_id);
/* Debug/printbuf formatters for node position, node contents, and cache stats. */
void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);
135
136 #endif /* _BCACHEFS_BTREE_CACHE_H */