/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/six.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {
        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};
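
/*
 * Illustrative sketch (not part of the header) of the accounting invariants
 * implied above, as I read them: live_u64s should be the sum of the per-bset
 * counts, and packed_keys + unpacked_keys counts the live keys:
 *
 *	static inline u16 btree_nr_keys_sum(const struct btree_nr_keys *nr)
 *	{
 *		unsigned i;
 *		u16 sum = 0;
 *
 *		for (i = 0; i < MAX_BSETS; i++)
 *			sum += nr->bset_u64s[i];
 *		return sum;	// expected to equal nr->live_u64s
 *	}
 */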

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;

        struct bpos             max_key;
};
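
/*
 * A sketch of the "array as if it started at 1" layout referred to above
 * (the real, cacheline-aware version is in bset.c): for a node at index j,
 * the children sit at 2*j and 2*j + 1 and the parent at j / 2, so the
 * search tree needs no child pointers:
 *
 *	static inline unsigned tree_child(unsigned j, unsigned right)
 *	{
 *		return j * 2 + right;	// right is 0 or 1
 *	}
 */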

struct btree_write {
        struct journal_entry_pin        journal;
        struct closure_waitlist         wait;
};

struct btree_alloc {
        struct open_buckets     ob;
        BKEY_PADDED(k);
};

struct btree {
        /* Hottest entries first */
        struct rhash_head       hash;
        u64                     hash_val;

        struct six_lock         lock;

        unsigned long           flags;
        u16                     written;
        u8                      level;
        u8                      btree_id;
        u8                      nsets;
        u8                      nr_key_bits;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      page_order;
        u8                      unpack_fn_len;

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;

        struct btree_write      writes[2];

#ifdef CONFIG_BCACHEFS_DEBUG
        bool                    *expensive_debug_checks;
#endif

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        struct shrinker         shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;
};
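
/*
 * A minimal sketch of how the cannibalize lock above could be taken,
 * assuming the convention (suggested by alloc_lock's type) that it stores
 * the owning task; the real helpers live in btree_cache.c:
 *
 *	static inline bool try_cannibalize_lock(struct btree_cache *bc)
 *	{
 *		struct task_struct *old =
 *			cmpxchg(&bc->alloc_lock, NULL, current);
 *
 *		return !old || old == current;
 *	}
 */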

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};

enum btree_iter_type {
        BTREE_ITER_KEYS,
        BTREE_ITER_NODES,
};

#define BTREE_ITER_TYPE                 ((1 << 2) - 1)

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS                (1 << 2)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT               (1 << 3)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH             (1 << 4)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency)
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT    (1 << 5)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS           (1 << 6)
#define BTREE_ITER_ERROR                (1 << 7)
#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8)
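
/*
 * Example flag combination (illustrative): an iterator used for updates
 * would typically be created with
 *
 *	BTREE_ITER_SLOTS|BTREE_ITER_INTENT
 *
 * so that holes come back as synthesized deleted keys and leaf nodes are
 * intent locked from the start; the low two bits still carry the
 * btree_iter_type.
 */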

enum btree_iter_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_PEEK            = 1,
        BTREE_ITER_NEED_RELOCK          = 2,
        BTREE_ITER_NEED_TRAVERSE        = 3,
};

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @l are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 */
struct btree_iter {
        struct btree_trans      *trans;
        struct bpos             pos;
        struct bpos             pos_after_commit;

        u16                     flags;
        u8                      idx;

        enum btree_iter_uptodate uptodate:4;
        enum btree_id           btree_id:4;
        unsigned                level:4,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;

        struct btree_iter_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
        }                       l[BTREE_MAX_DEPTH];

        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
};
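
/*
 * Sketch of how the lock bitmasks above are read (illustrative; the real
 * accessors live in btree_locking.h): bit @level of nodes_locked says
 * whether l[level].b is locked at all, and the same bit of
 * nodes_intent_locked says whether that lock is an intent lock rather than
 * a read lock:
 *
 *	static inline bool iter_node_locked(const struct btree_iter *iter,
 *					    unsigned level)
 *	{
 *		return iter->nodes_locked & (1 << level);
 *	}
 */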

static inline enum btree_iter_type btree_iter_type(struct btree_iter *iter)
{
        return iter->flags & BTREE_ITER_TYPE;
}

struct btree_insert_entry {
        unsigned                trigger_flags;
        unsigned                trans_triggers_run:1;
        struct bkey_i           *k;
        struct btree_iter       *iter;
};

#define BTREE_ITER_MAX          64

struct btree_trans {
        struct bch_fs           *c;
        unsigned long           ip;

        u64                     iters_linked;
        u64                     iters_live;
        u64                     iters_touched;

        u8                      nr_iters;
        u8                      nr_updates;
        u8                      size;
        unsigned                used_mempool:1;
        unsigned                error:1;
        unsigned                nounlock:1;
        unsigned                need_reset:1;

        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;

        struct btree_iter       *iters;
        struct btree_insert_entry *updates;

        /* update path: */
        struct journal_res      journal_res;
        struct journal_preres   journal_preres;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
        unsigned                flags;
        unsigned                journal_u64s;
        unsigned                journal_preres_u64s;
        struct replicas_delta_list *fs_usage_deltas;

        struct btree_iter       iters_onstack[2];
        struct btree_insert_entry updates_onstack[2];
};
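
/*
 * Reading the fields above: iters_linked/iters_live/iters_touched appear to
 * be u64 bitmaps indexed by btree_iter.idx, which is why BTREE_ITER_MAX is
 * 64. An illustrative check under that reading:
 *
 *	static inline bool trans_iter_linked(const struct btree_trans *trans,
 *					     const struct btree_iter *iter)
 *	{
 *		return trans->iters_linked & (1ULL << iter->idx);
 *	}
 */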

#define BTREE_FLAG(flag)                                                \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
        BTREE_NODE_read_in_flight,
        BTREE_NODE_read_error,
        BTREE_NODE_dirty,
        BTREE_NODE_need_write,
        BTREE_NODE_noevict,
        BTREE_NODE_write_idx,
        BTREE_NODE_accessed,
        BTREE_NODE_write_in_flight,
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
        BTREE_NODE_old_extent_overwrite,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(dirty);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(just_written);
BTREE_FLAG(dying);
BTREE_FLAG(fake);
BTREE_FLAG(old_extent_overwrite);
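
/*
 * Each BTREE_FLAG() invocation above expands to three accessors; e.g.
 * BTREE_FLAG(dirty) generates
 *
 *	btree_node_dirty(b)		- test BTREE_NODE_dirty in b->flags
 *	set_btree_node_dirty(b)		- set it
 *	clear_btree_node_dirty(b)	- clear it
 *
 * all atomic bit operations on b->flags.
 */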

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}
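
/*
 * Offsets are in units of u64s, measured from one u64 past b->data, so the
 * two helpers above are exact inverses: for any pointer p into the node,
 *
 *	__btree_node_offset_to_ptr(b, __btree_node_ptr_to_offset(b, p)) == p
 *
 * which is precisely what the EBUG_ON() asserts.
 */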

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})
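
/*
 * Typical use of the two macros above - walking every key in one bset.
 * A sketch, assuming bkey_next() from bkey.h for advancing by one packed
 * key:
 *
 *	struct bkey_packed *k;
 *
 *	for (k = btree_bkey_first(b, t);
 *	     k != btree_bkey_last(b, t);
 *	     k = bkey_next(k))
 *		...
 */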

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_BTREE,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->level, b->btree_id);
}
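
/*
 * E.g. a leaf (level 0) of the extents btree holds BKEY_TYPE_EXTENTS keys,
 * while interior nodes of any btree hold BKEY_TYPE_BTREE keys (pointers to
 * child nodes):
 *
 *	__btree_node_type(0, BTREE_ID_EXTENTS) == BKEY_TYPE_EXTENTS
 *	__btree_node_type(1, BTREE_ID_EXTENTS) == BKEY_TYPE_BTREE
 */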

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        switch (type) {
        case BKEY_TYPE_EXTENTS:
        case BKEY_TYPE_REFLINK:
                return true;
        default:
                return false;
        }
}

static inline bool btree_node_is_extents(struct btree *b)
{
        return btree_node_type_is_extents(btree_node_type(b));
}

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        ((1U << BKEY_TYPE_EXTENTS)|                     \
         (1U << BKEY_TYPE_ALLOC)|                       \
         (1U << BKEY_TYPE_INODES)|                      \
         (1U << BKEY_TYPE_REFLINK)|                     \
         (1U << BKEY_TYPE_EC)|                          \
         (1U << BKEY_TYPE_BTREE))

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        ((1U << BKEY_TYPE_EXTENTS)|                     \
         (1U << BKEY_TYPE_INODES)|                      \
         (1U << BKEY_TYPE_REFLINK))

enum btree_trigger_flags {
        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
        __BTREE_TRIGGER_NOOVERWRITES,   /* Don't run triggers on overwrites */

        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,
        __BTREE_TRIGGER_OVERWRITE_SPLIT,

        __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
        __BTREE_TRIGGER_ALLOC_READ,
        __BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_TRIGGER_NORUN             (1U << __BTREE_TRIGGER_NORUN)
#define BTREE_TRIGGER_NOOVERWRITES      (1U << __BTREE_TRIGGER_NOOVERWRITES)

#define BTREE_TRIGGER_INSERT            (1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE         (1U << __BTREE_TRIGGER_OVERWRITE)
#define BTREE_TRIGGER_OVERWRITE_SPLIT   (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)

#define BTREE_TRIGGER_GC                (1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_ALLOC_READ        (1U << __BTREE_TRIGGER_ALLOC_READ)
#define BTREE_TRIGGER_NOATOMIC          (1U << __BTREE_TRIGGER_NOATOMIC)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
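
/*
 * E.g. btree_node_type_needs_gc(BKEY_TYPE_EXTENTS) is true because that
 * bit is set in BTREE_NODE_TYPE_HAS_TRIGGERS above; any type outside the
 * mask has no triggers and returns false.
 */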

struct btree_root {
        struct btree            *b;

        struct btree_update     *as;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s8                      error;
};

/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */

enum btree_insert_ret {
        BTREE_INSERT_OK,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_ENOSPC,
        BTREE_INSERT_NEED_MARK_REPLICAS,
        BTREE_INSERT_NEED_JOURNAL_RES,
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
                                                        struct btree *,
                                                        struct btree_node_iter *);

#endif /* _BCACHEFS_BTREE_TYPES_H */