/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
                                 struct bkey_format *);

#define BTREE_UPDATE_NODES_MAX          ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

#define BTREE_UPDATE_JOURNAL_RES        (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
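
/*
 * For a sense of scale (illustrative only - the real constants live in
 * btree_types.h and btree_gc.h): assuming BTREE_MAX_DEPTH == 4 and
 * GC_MERGE_NODES == 4, this works out to
 *
 *      BTREE_UPDATE_NODES_MAX   = (4 - 2) * 2 + 4 = 8 nodes
 *      BTREE_UPDATE_JOURNAL_RES = 8 * (BKEY_BTREE_PTR_U64s_MAX + 1) u64s
 *
 * i.e. journal space for one maximum-size btree pointer key, plus one u64 of
 * overhead, per node the update might touch.
 */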

/*
 * Tracks an in-progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
        struct closure                  cl;
        struct bch_fs                   *c;

        struct list_head                list;
        struct list_head                unwritten_list;

        /* What kind of update are we doing? */
        enum {
                BTREE_INTERIOR_NO_UPDATE,
                BTREE_INTERIOR_UPDATING_NODE,
                BTREE_INTERIOR_UPDATING_ROOT,
                BTREE_INTERIOR_UPDATING_AS,
        } mode;

        unsigned                        must_rewrite:1;
        unsigned                        nodes_written:1;

        enum btree_id                   btree_id;

        struct disk_reservation         disk_res;
        struct journal_preres           journal_preres;

        /*
         * BTREE_INTERIOR_UPDATING_NODE:
         * The update that made the new nodes visible was a regular update to an
         * existing interior node - @b. We can't write out the update to @b
         * until the new nodes we created are finished writing, so we block @b
         * from writing by putting this btree_update on the
         * @b->write_blocked list with @write_blocked_list:
         */
        struct btree                    *b;
        struct list_head                write_blocked_list;

        /*
         * We may be freeing nodes that were dirty, and thus had journal entries
         * pinned: we need to transfer the oldest of those pins to the
         * btree_update operation, and release it when the new node(s)
         * are all persistent and reachable:
         */
        struct journal_entry_pin        journal;

        /* Preallocated nodes we reserve when we start the update: */
        struct btree                    *prealloc_nodes[BTREE_UPDATE_NODES_MAX];
        unsigned                        nr_prealloc_nodes;

        /* Nodes being freed: */
        struct keylist                  old_keys;
        u64                             _old_keys[BTREE_UPDATE_NODES_MAX *
                                                  BKEY_BTREE_PTR_VAL_U64s_MAX];

        /* Nodes being added: */
        struct keylist                  new_keys;
        u64                             _new_keys[BTREE_UPDATE_NODES_MAX *
                                                  BKEY_BTREE_PTR_VAL_U64s_MAX];

        /* New nodes that will be made reachable by this update: */
        struct btree                    *new_nodes[BTREE_UPDATE_NODES_MAX];
        unsigned                        nr_new_nodes;

        open_bucket_idx_t               open_buckets[BTREE_UPDATE_NODES_MAX *
                                                     BCH_REPLICAS_MAX];
        open_bucket_idx_t               nr_open_buckets;

        unsigned                        journal_u64s;
        u64                             journal_entries[BTREE_UPDATE_JOURNAL_RES];

        /* Only here to reduce stack usage on recursive splits: */
        struct keylist                  parent_keys;
        /*
         * Enough room for btree_split's keys without realloc - btree node
         * pointers never have crc/compression info, so we only need to account
         * for the pointers for three keys
         */
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};

void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
                                struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);

void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
                                                  struct btree *,
                                                  struct bkey_format);

void bch2_btree_update_done(struct btree_update *);
struct btree_update *
bch2_btree_update_start(struct btree_trans *, enum btree_id, unsigned,
                        unsigned, struct closure *);
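
/*
 * Sketch of how these hooks fit together, loosely following the foreground
 * split path (illustrative only - locking, error handling and the actual
 * node allocation are omitted, and this is not a verbatim copy of any one
 * caller):
 *
 *      as = bch2_btree_update_start(iter->trans, iter->btree_id,
 *                                   btree_update_reserve_required(c, b),
 *                                   flags, &cl);
 *      bch2_btree_interior_update_will_free_node(as, b);
 *      // allocate replacement node(s), registering them with
 *      // bch2_btree_update_add_new_node(), then insert the new pointers
 *      // into the parent with bch2_btree_insert_node()
 *      bch2_btree_update_done(as);
 */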

void bch2_btree_interior_update_will_free_node(struct btree_update *,
                                               struct btree *);
void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);

void bch2_btree_insert_node(struct btree_update *, struct btree *,
                            struct btree_iter *, struct keylist *,
                            unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);

void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
                                   unsigned, unsigned, enum btree_node_sibling);

static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
                                        struct btree_iter *iter,
                                        unsigned level, unsigned flags,
                                        enum btree_node_sibling sib)
{
        struct btree *b;

        if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
                return;

        if (!bch2_btree_node_relock(iter, level))
                return;

        b = iter->l[level].b;
        if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
                return;

        __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
}

static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
                                               struct btree_iter *iter,
                                               unsigned level,
                                               unsigned flags)
{
        bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
                                            btree_prev_sib);
        bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
                                            btree_next_sib);
}

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
                                                     struct btree *b)
{
        unsigned depth = btree_node_root(c, b)->c.level + 1;

        /*
         * Number of nodes we might have to allocate in a worst case btree
         * split operation - we split all the way up to the root, then allocate
         * a new root, unless we're already at max depth:
         */
        if (depth < BTREE_MAX_DEPTH)
                return (depth - b->c.level) * 2 + 1;
        else
                return (depth - b->c.level) * 2 - 1;
}
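
/*
 * Worked example (illustrative): with the root at level 2, splitting a leaf
 * (level 0) gives depth = 3 < BTREE_MAX_DEPTH, so we reserve
 * (3 - 0) * 2 + 1 = 7 nodes: two per level for splits at levels 0, 1 and 2,
 * plus one for the new root if the old root itself splits.
 */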

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
        b->sib_u64s[0] = b->nr.live_u64s;
        b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
        return (void *) b->data + btree_bytes(c);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
                                                            struct btree *b)
{
        return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
                                                          struct btree *b)
{
        return btree_data_end(c, b);
}
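
/*
 * Resulting node buffer layout, for illustration - unwritten whiteouts
 * accumulate at the very end of the buffer, growing down towards the keys:
 *
 *      b->data                                           btree_data_end()
 *      |<---------- bsets ---------->|<---- free ---->|<-- whiteouts -->|
 *                                                     ^
 *                                       unwritten_whiteouts_start()
 */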

static inline void *write_block(struct btree *b)
{
        /* b->written is in units of 512 byte sectors: */
        return (void *) b->data + (b->written << 9);
}

static inline bool __btree_addr_written(struct btree *b, void *p)
{
        return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
        return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
        return __btree_addr_written(b, k);
}

static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
                                                 struct btree *b,
                                                 void *end)
{
        ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
                b->whiteout_u64s;
        /* btree_node_size is in 512 byte sectors; << 6 converts to u64s: */
        ssize_t total = c->opts.btree_node_size << 6;

        /* Always leave one extra u64 for bch2_varint_decode: */
        total--;

        return total - used;
}

static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
                                                   struct btree *b)
{
        ssize_t remaining = __bch_btree_u64s_remaining(c, b,
                                btree_bkey_last(b, bset_tree_last(b)));

        BUG_ON(remaining < 0);

        if (bset_written(b, btree_bset_last(b)))
                return 0;

        return remaining;
}

static inline unsigned btree_write_set_buffer(struct btree *b)
{
        /*
         * Could buffer up larger amounts of keys for btrees with larger keys,
         * pending benchmarking:
         */
        return 4 << 10;
}
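
/*
 * want_new_bset() below decides when to start a new bset within the node
 * rather than appending to the last one. A rough reading (illustrative, not
 * a spec): a bset that has already been written out can no longer be
 * appended to, so enough remaining space means a new bset; an unwritten
 * bset is only abandoned once it outgrows btree_write_set_buffer(), keeping
 * inserts into the unsorted tail of the node cheap.
 */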

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
                                                     struct btree *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct btree_node_entry *bne = max(write_block(b),
                        (void *) btree_bkey_last(b, bset_tree_last(b)));
        ssize_t remaining_space =
                __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);

        if (unlikely(bset_written(b, bset(b, t)))) {
                if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
                        return bne;
        } else {
                if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
                    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
                        return bne;
        }

        return NULL;
}

static inline void push_whiteout(struct bch_fs *c, struct btree *b,
                                 struct bpos pos)
{
        struct bkey_packed k;

        BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);

        if (!bkey_pack_pos(&k, pos, b)) {
                struct bkey *u = (void *) &k;

                /* Position didn't pack: fall back to an unpacked whiteout */
                bkey_init(u);
                u->p = pos;
        }

        k.needs_whiteout = true;

        b->whiteout_u64s += k.u64s;
        bkey_copy(unwritten_whiteouts_start(c, b), &k);
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
                                               struct btree *b, unsigned u64s)
{
        if (unlikely(btree_node_need_rewrite(b)))
                return false;

        return u64s <= bch_btree_keys_u64s_remaining(c, b);
}

void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);

size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);

void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
                                        struct jset_entry *, struct jset_entry *);

void bch2_fs_btree_interior_update_exit(struct bch_fs *);
int bch2_fs_btree_interior_update_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */