#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

struct btree_reserve {
	struct disk_reservation	disk_res;
	unsigned		nr;
	struct btree		*b[BTREE_RESERVE_MAX];
};

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
				 struct bkey_format *);

/* Btree node freeing/allocation: */

/*
 * Tracks a btree node that has been (or is about to be) freed in memory, but
 * has _not_ yet been freed on disk (because the write that makes the new
 * node(s) visible and frees the old hasn't completed yet)
 */
struct pending_btree_node_free {
	bool			index_update_done;

	__le64			seq;
	enum btree_id		btree_id;
	unsigned		level;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Tracks an in progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new nodes visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;

	struct list_head		list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			must_rewrite:1;
	unsigned			nodes_written:1;

	enum btree_id			btree_id;

	struct btree_reserve		*reserve;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_update on the @b->write_blocked
	 * list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * BTREE_INTERIOR_UPDATING_AS: the btree node we updated was freed, so
	 * now we're blocking another btree_update:
	 * @parent_as - btree_update that's waiting on our nodes to finish
	 * writing, before it can make new nodes visible on disk
	 * @wait - list of child btree_updates that are waiting on this
	 * btree_update to make all the new nodes visible before they can free
	 * their old btree nodes
	 */
	struct btree_update		*parent_as;
	struct closure_waitlist		wait;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s) are all
	 * persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/*
	 * Nodes being freed:
	 * Protected by c->btree_node_pending_free_lock
	 */
	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
	unsigned			nr_pending;

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
	unsigned			nr_new_nodes;

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys:
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};

#define for_each_pending_btree_node_free(c, as, p)			\
	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
		for (p = as->pending; p < as->pending + as->nr_pending; p++)
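
/*
 * Example (an illustrative sketch added here, not code from this file):
 * counting every pending free across all in-flight interior updates.  Assumes
 * the caller holds the lock protecting c->btree_interior_update_list and the
 * pending[] arrays (c->btree_node_pending_free_lock, per the comment in
 * struct btree_update above):
 *
 *	struct btree_update *as;
 *	struct pending_btree_node_free *p;
 *	size_t nr = 0;
 *
 *	for_each_pending_btree_node_free(c, as, p)
 *		nr++;
 */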

void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
				struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree *,
						  struct bkey_format);

void bch2_btree_update_done(struct btree_update *);
struct btree_update *
bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
			unsigned, struct closure *);

void bch2_btree_interior_update_will_free_node(struct btree_update *,
					       struct btree *);

void bch2_btree_insert_node(struct btree_update *, struct btree *,
			    struct btree_iter *, struct keylist *,
			    unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
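
/*
 * Rough sketch of how the declarations above fit together for a single node
 * rewrite (a hedged reconstruction from this header, not copied from the .c
 * file; the format argument and error handling are illustrative): start an
 * update, allocate the replacement, mark the old node as about to be freed,
 * then make the new node visible and finish:
 *
 *	as = bch2_btree_update_start(c, iter->btree_id,
 *				     btree_update_reserve_required(c, b),
 *				     flags, cl);
 *	if (IS_ERR(as))
 *		return PTR_ERR(as);
 *
 *	n = __bch2_btree_node_alloc_replacement(as, b, new_format);
 *	bch2_btree_interior_update_will_free_node(as, b);
 *	... write out @n, insert its key into the parent with
 *	    bch2_btree_insert_node(), free @b in memory ...
 *	bch2_btree_update_done(as);
 */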

void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
				   unsigned, unsigned, enum btree_node_sibling);

static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
					struct btree_iter *iter,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree *b;

	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	b = iter->l[level].b;
	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
		return;

	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
}

static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
					       struct btree_iter *iter,
					       unsigned level,
					       unsigned flags)
{
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_prev_sib);
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_next_sib);
}
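
/*
 * Typical use (illustrative, not from this header): after an update leaves a
 * leaf below the merge threshold, the caller can try merging with both
 * siblings while the iterator still holds its locks:
 *
 *	bch2_foreground_maybe_merge(c, iter, 0, flags);
 */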

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->level) * 2 + 1;
	else
		return (depth - b->level) * 2 - 1;
}
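
/*
 * Worked example (added for illustration): with the root at level 2 and an
 * insert into a leaf (level 0), depth = 3.  A worst case split allocates two
 * new nodes at each of levels 0-2 plus a brand new root:
 * (3 - 0) * 2 + 1 = 7 nodes reserved.
 */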

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
	return (void *) b->data + btree_bytes(c);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
							    struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
							  struct btree *b)
{
	return btree_data_end(c, b);
}

static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}
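
/*
 * Note (added): b->written counts 512 byte sectors, so the shift by 9
 * converts it to a byte offset - everything in the node buffer before this
 * point has already been written to disk.
 */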

static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}

static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
						 struct btree *b,
						 void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s +
		b->uncompacted_whiteout_u64s;
	ssize_t total = c->opts.btree_node_size << 6;

	return total - used;
}
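
/*
 * Note (added): c->opts.btree_node_size is in 512 byte sectors, and each
 * sector holds 512 / sizeof(u64) = 64 u64s, hence the shift by 6 to get the
 * node size in u64s.
 */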

static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
						   struct btree *b)
{
	ssize_t remaining = __bch_btree_u64s_remaining(c, b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 4 << 10;
}

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset *i = btree_bset_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);

	if (unlikely(bset_written(b, i))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}
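
/*
 * In other words (summary added for clarity): if the last bset has already
 * been written we want a fresh bset, provided at least an eighth of a block
 * remains; if it hasn't been written yet, we only start a new bset once the
 * current one has outgrown btree_write_set_buffer() and at least an eighth of
 * that buffer still fits in the node.
 */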

static inline void unreserve_whiteout(struct btree *b, struct bkey_packed *k)
{
	if (bkey_written(b, k)) {
		EBUG_ON(b->uncompacted_whiteout_u64s <
			bkeyp_key_u64s(&b->format, k));
		b->uncompacted_whiteout_u64s -=
			bkeyp_key_u64s(&b->format, k);
	}
}

static inline void reserve_whiteout(struct btree *b, struct bkey_packed *k)
{
	if (bkey_written(b, k)) {
		BUG_ON(!k->needs_whiteout);
		b->uncompacted_whiteout_u64s +=
			bkeyp_key_u64s(&b->format, k);
	}
}
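
/*
 * Note (added): reserve_whiteout()/unreserve_whiteout() only count keys that
 * live in bsets already written to disk - those are the keys whose deletion
 * will need a whiteout in a later bset.  The running total feeds into
 * __bch_btree_u64s_remaining() above, so space for those eventual whiteouts
 * is accounted for up front.
 */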

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
					       struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_fake(b)))
		return false;

	return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
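
/*
 * Example (illustrative): a caller holding the write lock on @b would check
 * fit before inserting a key @k (a struct bkey_i):
 *
 *	if (!bch2_btree_node_insert_fits(c, b, k->k.u64s))
 *		... split the node or start a new bset instead ...
 */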

ssize_t bch2_btree_updates_print(struct bch_fs *, char *);

size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */