#ifndef _BCACHE_BTREE_UPDATE_INTERIOR_H
#define _BCACHE_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_update.h"

struct btree_reserve {
	struct disk_reservation	disk_res;
	unsigned		nr;
	struct btree		*b[BTREE_RESERVE_MAX];
};

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
				 struct bkey_format *);

/* Btree node freeing/allocation: */

/*
 * Tracks a btree node that has been (or is about to be) freed in memory, but
 * has _not_ yet been freed on disk (because the write that makes the new
 * node(s) visible and frees the old hasn't completed yet)
 */
struct pending_btree_node_free {
	bool			index_update_done;

	__le64			seq;
	enum btree_id		btree_id;
	unsigned		level;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Tracks an in progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;

	struct list_head		list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;
	enum btree_id			btree_id;

	unsigned			flags;
	struct btree_reserve		*reserve;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * BTREE_INTERIOR_UPDATING_AS: the btree node we updated was freed, so
	 * now we're blocking another btree_update
	 * @parent_as - btree_update that's waiting on our nodes to finish
	 * writing, before it can make new nodes visible on disk
	 * @wait - list of child btree_updates that are waiting on this
	 * btree_update to make all the new nodes visible before they can free
	 * their old btree nodes
	 */
	struct btree_update		*parent_as;
	struct closure_waitlist		wait;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/*
	 * Nodes being freed:
	 * Protected by c->btree_node_pending_free_lock
	 */
	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
	unsigned			nr_pending;

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
	unsigned			nr_new_nodes;

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
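
/*
 * Rough lifecycle of an interior node update - a sketch only, since exact
 * arguments (reserve size, flags) and error handling vary by call site:
 *
 *	struct closure cl;
 *	struct btree_update *as;
 *
 *	closure_init_stack(&cl);
 *
 *	as = bch2_btree_update_start(c, iter->btree_id,
 *				     btree_update_reserve_required(c, b),
 *				     0, &cl);
 *	if (IS_ERR(as))
 *		return PTR_ERR(as);
 *
 *	bch2_btree_interior_update_will_free_node(as, b);
 *	// ... allocate replacement node(s), insert new pointers into the
 *	// parent with bch2_btree_insert_node() ...
 *	bch2_btree_update_done(as);
 */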

#define BTREE_INTERIOR_UPDATE_MUST_REWRITE	(1 << 0)

#define for_each_pending_btree_node_free(c, as, p)			\
	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
		for (p = as->pending; p < as->pending + as->nr_pending; p++)
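
/*
 * Usage sketch for the iterator above (hypothetical caller; assumes whatever
 * lock protects c->btree_interior_update_list and the pending arrays is
 * held):
 *
 *	struct btree_update *as;
 *	struct pending_btree_node_free *p;
 *
 *	for_each_pending_btree_node_free(c, as, p)
 *		if (!p->index_update_done)
 *			pr_info("node pending free at level %u\n", p->level);
 */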

void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
				struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree *,
						  struct bkey_format);

void bch2_btree_update_done(struct btree_update *);
struct btree_update *
bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
			unsigned, struct closure *);

void bch2_btree_interior_update_will_free_node(struct btree_update *,
					       struct btree *);

void bch2_btree_insert_node(struct btree_update *, struct btree *,
			    struct btree_iter *, struct keylist *);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
int bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
				enum btree_node_sibling);

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
int bch2_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->level - b->level;

	return btree_reserve_required_nodes(depth);
}
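
/*
 * Example: with the root at level 2 and @b a leaf (level 0), depth is 2.
 * The intent (informally - btree_reserve_required_nodes() is defined
 * elsewhere) is to reserve enough new nodes for a split starting at @b to
 * cascade up through every interior level above it.
 */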

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
	return (void *) b->data + btree_bytes(c);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
							    struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
							  struct btree *b)
{
	return btree_data_end(c, b);
}

static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return (void *) i < write_block(b);
}

static inline bool bset_unwritten(struct btree *b, struct bset *i)
{
	return (void *) i > write_block(b);
}
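
/*
 * A note on the helpers above (informal, inferred from how b->written is used
 * here): b->written counts 512-byte sectors of the node already written to
 * disk, so write_block() points at the first byte not yet written out. A bset
 * that starts below that point has been written and is immutable in memory; a
 * bset past it is the still-dirty one that new keys go into.
 */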

static inline unsigned bset_end_sector(struct bch_fs *c, struct btree *b,
				       struct bset *i)
{
	return round_up(bset_byte_offset(b, vstruct_end(i)),
			block_bytes(c)) >> 9;
}

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 4 << 10;
}

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset *i = btree_bset_last(b);
	unsigned offset = max_t(unsigned, b->written << 9,
				bset_byte_offset(b, vstruct_end(i)));
	ssize_t n = (ssize_t) btree_bytes(c) - (ssize_t)
		(offset + sizeof(struct btree_node_entry) +
		 b->whiteout_u64s * sizeof(u64) +
		 b->uncompacted_whiteout_u64s * sizeof(u64));

	EBUG_ON(offset > btree_bytes(c));

	/*
	 * Start a new bset if the current one has already been written out, or
	 * if it's grown past the buffering threshold - in either case, only
	 * when enough space remains in the node:
	 */
	if ((unlikely(bset_written(b, i)) && n > 0) ||
	    (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
	     n > btree_write_set_buffer(b)))
		return (void *) b->data + offset;

	return NULL;
}

static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
				      struct bkey_packed *k)
{
	if (bset_written(b, bset(b, t))) {
		EBUG_ON(b->uncompacted_whiteout_u64s <
			bkeyp_key_u64s(&b->format, k));
		b->uncompacted_whiteout_u64s -=
			bkeyp_key_u64s(&b->format, k);
	}
}

static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
				    struct bkey_packed *k)
{
	if (bset_written(b, bset(b, t))) {
		BUG_ON(!k->needs_whiteout);
		b->uncompacted_whiteout_u64s +=
			bkeyp_key_u64s(&b->format, k);
	}
}

static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
						   struct btree *b)
{
	struct bset *i = btree_bset_last(b);
	unsigned used = bset_byte_offset(b, vstruct_end(i)) / sizeof(u64) +
		b->whiteout_u64s +
		b->uncompacted_whiteout_u64s;
	/* btree_node_size is in 512-byte sectors: bytes >> 3 == sectors << 6 */
	unsigned total = c->sb.btree_node_size << 6;

	EBUG_ON(used > total);

	if (bset_written(b, i))
		return 0;

	return total - used;
}
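
/*
 * Worked example (hypothetical numbers): with a 256k btree node,
 * c->sb.btree_node_size is 512 sectors, so total = 512 << 6 = 32768 u64s.
 * If the last bset ends 64k into the node (8192 u64s) and whiteouts account
 * for another 256 u64s, then 32768 - 8192 - 256 = 24320 u64s remain for new
 * keys.
 */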

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
					       struct btree *b, unsigned u64s)
{
	if (btree_node_is_extents(b)) {
		/*
		 * The insert key might split an existing key
		 * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case):
		 */
		u64s += BKEY_EXTENT_U64s_MAX;
	}

	return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
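
/*
 * Typical use (sketch, hypothetical caller): with the write lock held on @b,
 * check whether an insert fits before attempting it, and split otherwise:
 *
 *	if (!bch2_btree_node_insert_fits(c, b, insert->k->k.u64s))
 *		return bch2_btree_split_leaf(c, iter, flags);
 */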

static inline bool journal_res_insert_fits(struct btree_insert *trans,
					   struct btree_insert_entry *insert)
{
	unsigned u64s = 0;
	struct btree_insert_entry *i;

	/*
	 * If we didn't get a journal reservation, we're in journal replay and
	 * we're not journalling updates:
	 */
	if (!trans->journal_res.ref)
		return true;

	/* Journal space needed for @insert and every entry after it: */
	for (i = insert; i < trans->entries + trans->nr; i++)
		u64s += jset_u64s(i->k->k.u64s + i->extra_res);

	return u64s <= trans->journal_res.u64s;
}

#endif /* _BCACHE_BTREE_UPDATE_INTERIOR_H */