/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *, struct btree *,
				 struct bkey_format *);
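
/*
 * Worst case bound on the number of nodes a single update might touch:
 * roughly two nodes per level for a split that propagates all the way up,
 * plus the nodes GC may be coalescing. The journal reservation below leaves
 * room for one btree pointer key per node, plus one extra u64 each - assumed
 * to be for the jset_entry header:
 */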
#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))

/*
 * Tracks an in-progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;
	u64				start_time;

	struct list_head		list;
	struct list_head		unwritten_list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			nodes_written:1;
	unsigned			took_gc_lock:1;

	enum btree_id			btree_id;
	unsigned			update_level;

	struct disk_reservation		disk_res;
	struct journal_preres		journal_preres;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_update on @b's write_blocked
	 * list, linked via @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;
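
	/*
	 * Two separate reserves below; assumed to be filled and consumed
	 * separately for leaf and interior nodes:
	 */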
	/* Preallocated nodes we reserve when we start the update: */
	struct prealloc_nodes {
		struct btree		*b[BTREE_UPDATE_NODES_MAX];
		unsigned		nr;
	}				prealloc_nodes[2];

	/* Nodes being freed: */
	struct keylist			old_keys;
	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* Nodes being added: */
	struct keylist			new_keys;
	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_new_nodes;

	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_old_nodes;

	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
						     BCH_REPLICAS_MAX];
	open_bucket_idx_t		nr_open_buckets;

	unsigned			journal_u64s;
	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree_trans *,
						  struct btree *,
						  struct bkey_format);

int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);

int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
				  unsigned, unsigned, enum btree_node_sibling);
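
/*
 * b->sib_u64s[sib] caches an estimate of this node's size were it merged with
 * that sibling; if it's over the foreground merge threshold, the merge isn't
 * worth attempting and we return without taking locks:
 */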
static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
					struct btree_path *path,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree *b;

	EBUG_ON(!btree_node_locked(path, level));

	b = path->l[level].b;
	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
		return 0;

	return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
}
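
/*
 * Try merging with the previous sibling, then the next; ?: chains the two
 * attempts, stopping at the first nonzero (error) return:
 */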
static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
					      struct btree_path *path,
					      unsigned level,
					      unsigned flags)
{
	return  bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						    btree_prev_sib) ?:
		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						    btree_next_sib);
}

int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
			    struct btree *, unsigned);
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
			       struct btree *, struct bkey_i *,
			       unsigned, bool);
int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
					struct bkey_i *, unsigned, bool);

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
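
/*
 * Worked example: with the root at level 1, splitting a leaf (level 0) might
 * allocate two new leaves, two new level 1 nodes, and a new root:
 * depth = 2, so (2 - 0) * 2 + 1 = 5 nodes reserved.
 */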
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->c.level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->c.level) * 2 + 1;
	else
		return (depth - b->c.level) * 2 - 1;
}

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
	return (void *) b->data + btree_bytes(c);
}
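
/*
 * Whiteouts that haven't been written out yet live at the very end of the
 * node's buffer, growing downwards - the last b->whiteout_u64s u64s before
 * btree_data_end():
 */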
static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
							    struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
							  struct btree *b)
{
	return btree_data_end(c, b);
}
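
/* b->written is in 512 byte sectors, hence the shift by 9: */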
static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}
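
/*
 * u64s of space left in the node if new keys are appended at @end, accounting
 * for the unwritten whiteouts stored at the end of the buffer
 * (c->opts.btree_node_size is in bytes, hence the shift by 3 to get u64s):
 */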
static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
						 struct btree *b,
						 void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s;
	ssize_t total = c->opts.btree_node_size >> 3;

	/* Always leave one extra u64 for bch2_varint_decode: */
	used++;

	return total - used;
}

static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
						   struct btree *b)
{
	ssize_t remaining = __bch_btree_u64s_remaining(c, b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

#define BTREE_WRITE_SET_U64s_BITS	9
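
/* Size, in bytes, of the open bset we buffer up before starting a new one: */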
static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 8 << BTREE_WRITE_SET_U64s_BITS;
}
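
/*
 * Returns where a new bset would start - if we should start one because the
 * current bset has already been written, or has grown past the buffer size
 * above - provided enough space remains in the node; NULL otherwise:
 */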
static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);

	if (unlikely(bset_written(b, bset(b, t)))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}
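
/*
 * A whiteout is a deletion marker: when deleting a key that already exists in
 * a written bset, we can't just drop it from the unwritten bset - we have to
 * emit a whiteout so the deletion survives until the node is compacted:
 */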
static inline void push_whiteout(struct bch_fs *c, struct btree *b,
				 struct bpos pos)
{
	struct bkey_packed k;

	BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
	EBUG_ON(btree_node_just_written(b));

	if (!bkey_pack_pos(&k, pos, b)) {
		struct bkey *u = (void *) &k;

		bkey_init(u);
		u->p = pos;
	}

	k.needs_whiteout = true;

	b->whiteout_u64s += k.u64s;
	bkey_copy(unwritten_whiteouts_start(c, b), &k);
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
					       struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_need_rewrite(b)))
		return false;

	return u64s <= bch_btree_keys_u64s_remaining(c, b);
}

void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);

bool bch2_btree_interior_updates_flush(struct bch_fs *);

void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
					struct jset_entry *, struct jset_entry *);

void bch2_do_pending_node_rewrites(struct bch_fs *);
void bch2_free_pending_node_rewrites(struct bch_fs *);

void bch2_fs_btree_interior_update_exit(struct bch_fs *);
void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
int bch2_fs_btree_interior_update_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */