/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

struct btree_reserve {
	struct disk_reservation	disk_res;
	unsigned		nr;
	struct btree		*b[BTREE_RESERVE_MAX];
};

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
				 struct bkey_format *);

/* Btree node freeing/allocation: */

/*
 * Tracks a btree node that has been (or is about to be) freed in memory, but
 * has _not_ yet been freed on disk (because the write that makes the new
 * node(s) visible and frees the old hasn't completed yet)
 */
struct pending_btree_node_free {
	bool			index_update_done;

	__le64			seq;
	enum btree_id		btree_id;
	unsigned		level;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Tracks an in progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;

	struct list_head		list;
	struct list_head		unwritten_list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			must_rewrite:1;
	unsigned			nodes_written:1;

	enum btree_id			btree_id;

	struct btree_reserve		*reserve;
	struct journal_preres		journal_preres;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_interior update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/*
	 * Nodes being freed:
	 * Protected by c->btree_node_pending_free_lock
	 */
	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
	unsigned			nr_pending;

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
	unsigned			nr_new_nodes;

	unsigned			journal_u64s;
	u64				journal_entries[
		(BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2];

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};

#define for_each_pending_btree_node_free(c, as, p)			\
	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
		for (p = as->pending; p < as->pending + as->nr_pending; p++)

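/*
 * Usage sketch (illustrative, not part of the original header): walking all
 * pending frees. The lock named here is an assumption for the example - use
 * whatever actually protects c->btree_interior_update_list:
 *
 *	struct btree_update *as;
 *	struct pending_btree_node_free *p;
 *
 *	mutex_lock(&c->btree_interior_update_lock);
 *	for_each_pending_btree_node_free(c, as, p)
 *		if (!p->index_update_done)
 *			...;	// node is still reachable on disk
 *	mutex_unlock(&c->btree_interior_update_lock);
 */
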
void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
				struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree *,
						  struct bkey_format);

void bch2_btree_update_done(struct btree_update *);
struct btree_update *
bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
			unsigned, struct closure *);

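/*
 * Lifecycle sketch (illustrative): interior node work is bracketed by
 * bch2_btree_update_start() and bch2_btree_update_done(); the argument
 * values and error handling below are assumptions for the example:
 *
 *	struct btree_update *as;
 *
 *	as = bch2_btree_update_start(c, iter->btree_id,
 *				     btree_update_reserve_required(c, b),
 *				     flags, &cl);
 *	if (IS_ERR(as))
 *		return PTR_ERR(as);
 *
 *	... allocate replacement node(s), insert new pointers into parent ...
 *
 *	bch2_btree_update_done(as);
 */
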
void bch2_btree_interior_update_will_free_node(struct btree_update *,
					       struct btree *);

void bch2_btree_insert_node(struct btree_update *, struct btree *,
			    struct btree_iter *, struct keylist *,
			    unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);

void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
				   unsigned, unsigned, enum btree_node_sibling);

static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
					struct btree_iter *iter,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree *b;

	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	b = iter->l[level].b;
	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
		return;

	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
}

static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
					       struct btree_iter *iter,
					       unsigned level,
					       unsigned flags)
{
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_prev_sib);
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_next_sib);
}

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->level) * 2 + 1;
	else
		return (depth - b->level) * 2 - 1;
}

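/*
 * Worked example (added commentary): with the root at level 3, depth is 4,
 * so for a leaf (b->level == 0) the reserve is (4 - 0) * 2 + 1 = 9 nodes:
 * up to two new nodes per level as the split cascades upward, plus one for
 * a new root. At BTREE_MAX_DEPTH no new root can be allocated, hence the
 * "* 2 - 1" case.
 */
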
static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
	return (void *) b->data + btree_bytes(c);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
							    struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
							  struct btree *b)
{
	return btree_data_end(c, b);
}

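/*
 * Layout note (added commentary): unwritten whiteouts accumulate at the
 * very end of the node buffer and grow downward toward the keys as
 * b->whiteout_u64s increases, which is why the start is computed by
 * subtracting from btree_data_end().
 */
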
static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

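/*
 * Added commentary: b->written counts 512-byte sectors, so the shift by 9
 * converts it to a byte offset; e.g. with b->written == 8, the first byte
 * not yet written out is at offset 8 << 9 = 4096 into the node.
 */
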
static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}

static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
						 struct btree *b,
						 void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s;
	ssize_t total = c->opts.btree_node_size << 6;

	return total - used;
}

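/*
 * Note on units (added commentary): btree_node_size is in 512-byte sectors
 * and a u64 is 8 bytes, so sectors * 512 / 8 == sectors << 6 yields the
 * node's total capacity in u64s, matching the shift above.
 */
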
static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
						   struct btree *b)
{
	ssize_t remaining = __bch_btree_u64s_remaining(c, b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 4 << 10;
}

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);

	if (unlikely(bset_written(b, bset(b, t)))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}

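/*
 * Added commentary: the first branch covers the case where the last bset
 * has already been written out, so any further inserts need a new bset
 * (started only if at least an eighth of a block remains); the second
 * starts a fresh bset once the current unwritten one outgrows
 * btree_write_set_buffer(), presumably to bound the cost of inserting
 * into a large sorted set.
 */
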
static inline void push_whiteout(struct bch_fs *c, struct btree *b,
				 struct bpos pos)
{
	struct bkey_packed k;

	BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);

	if (!bkey_pack_pos(&k, pos, b)) {
		struct bkey *u = (void *) &k;

		bkey_init(u);
		u->p = pos;
	}

	k.needs_whiteout = true;

	b->whiteout_u64s += k.u64s;
	bkey_copy(unwritten_whiteouts_start(c, b), &k);
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
					       struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_fake(b)))
		return false;

	return u64s <= bch_btree_keys_u64s_remaining(c, b);
}

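/*
 * Usage sketch (illustrative): a caller that already holds the write lock
 * checks the fit and falls back to a split when the node is full; the
 * variable names and retry convention are assumptions for the example:
 *
 *	if (!bch2_btree_node_insert_fits(c, b, insert->k.u64s))
 *		return -EINTR;	// unlock, split via bch2_btree_split_leaf(),
 *				// then retry the insert
 */
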
ssize_t bch2_btree_updates_print(struct bch_fs *, char *);

size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */