/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/six.h>

#include "bkey_methods.h"
#include "buckets_types.h"
#include "journal_types.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS               3U

struct btree_nr_keys {
        /*
         * Amount of live metadata (i.e. size of node after a compaction) in
         * units of u64s
         */
        u16                     live_u64s;
        u16                     bset_u64s[MAX_BSETS];

        /* live keys only: */
        u16                     packed_keys;
        u16                     unpacked_keys;
};
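
/*
 * A hedged sketch (not part of the original header): the per-bset counts are
 * expected to sum to the node-wide total, which allows a cheap consistency
 * check. nr_keys_consistent() is a hypothetical helper:
 *
 *        static inline bool nr_keys_consistent(const struct btree_nr_keys *nr)
 *        {
 *                unsigned i, sum = 0;
 *
 *                for (i = 0; i < MAX_BSETS; i++)
 *                        sum += nr->bset_u64s[i];
 *                return sum == nr->live_u64s;
 *        }
 */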

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        u16                     size;

        /* function of size - precalculated for to_inorder() */
        u16                     extra;

        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;

        struct bpos             max_key;
};

struct btree_write {
        struct journal_entry_pin        journal;
};

struct btree_alloc {
        struct open_buckets     ob;
        BKEY_PADDED(k);
};

struct btree_bkey_cached_common {
        struct six_lock         lock;
        u8                      level;
        u8                      btree_id;
};
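
/*
 * struct btree_bkey_cached_common is the common prefix embedded as @c in both
 * struct btree (below) and struct bkey_cached, so code that only needs the
 * lock, level and btree id can operate on either. A minimal sketch, assuming
 * the six_trylock_type() helper from <linux/six.h>:
 *
 *        static inline bool trylock_node(struct btree_bkey_cached_common *b,
 *                                        enum six_lock_type type)
 *        {
 *                return six_trylock_type(&b->lock, type);
 *        }
 */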

struct btree {
        struct btree_bkey_cached_common c;

        struct rhash_head       hash;
        u64                     hash_val;

        unsigned long           flags;
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;

        struct bkey_format      format;

        struct btree_node       *data;
        void                    *aux_data;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
         * to the memory we have allocated for this btree node. Additionally,
         * set[0]->data points to the entire btree node as it exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];

        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
        u8                      byte_order;
        u8                      unpack_fn_len;

        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
         * contents were modified - we can still relock the node if it's still
         * the one we want, without redoing the traversal
         */

        /*
         * For asynchronous splits/interior node updates:
         * When we do a split, we allocate new child nodes and update the parent
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
         * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;

        /*
         * Also for asynchronous splits/interior node updates:
         * If a btree node isn't reachable yet, we don't want to kick off
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
        unsigned long           will_make_reachable;

        struct open_buckets     ob;

        /* lru list */
        struct list_head        list;

        struct btree_write      writes[2];

        /* Key/pointer for this btree node */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};
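
/*
 * The live bsets of a node are set[0..nsets). A sketch of the usual iteration
 * pattern (the real code wraps this in a for_each_bset() helper; bset() is
 * defined later in this header, and process() stands in for per-bset work):
 *
 *        struct bset_tree *t;
 *
 *        for (t = b->set; t < b->set + b->nsets; t++)
 *                process(b, bset(b, t));
 */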

struct btree_cache {
        struct rhashtable       table;
        bool                    table_init_done;
        /*
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct mutex            lock;
        struct list_head        live;
        struct list_head        freeable;
        struct list_head        freed;

        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
        atomic_t                dirty;
        struct shrinker         shrink;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        struct task_struct      *alloc_lock;
        struct closure_waitlist alloc_wait;
};
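
/*
 * Sketch of the cannibalize-lock protocol described above (the real helpers
 * live in btree_cache.c; this is an illustrative reconstruction):
 *
 *        static bool try_cannibalize_lock(struct btree_cache *bc)
 *        {
 *                struct task_struct *old;
 *
 *                old = cmpxchg(&bc->alloc_lock, NULL, current);
 *                return !old || old == current;
 *        }
 *
 * On failure, callers wait on @alloc_wait until the holder clears @alloc_lock.
 */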

struct btree_node_iter {
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
};

enum btree_iter_type {
        BTREE_ITER_KEYS,
        BTREE_ITER_NODES,
        BTREE_ITER_CACHED,
};

#define BTREE_ITER_TYPE                 ((1 << 2) - 1)

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
#define BTREE_ITER_SLOTS                (1 << 2)
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
#define BTREE_ITER_INTENT               (1 << 3)
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
#define BTREE_ITER_PREFETCH             (1 << 4)
/*
 * Indicates that this iterator should not be reused until transaction commit,
 * either because a pending update references it or because the update depends
 * on that particular key being locked (e.g. by the str_hash code, for hash
 * table consistency)
 */
#define BTREE_ITER_KEEP_UNTIL_COMMIT    (1 << 5)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS           (1 << 6)
#define BTREE_ITER_ERROR                (1 << 7)
#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8)
#define BTREE_ITER_CACHED_NOFILL        (1 << 9)
#define BTREE_ITER_CACHED_NOCREATE      (1 << 10)

#define BTREE_ITER_USER_FLAGS                           \
        (BTREE_ITER_SLOTS                               \
        |BTREE_ITER_INTENT                              \
        |BTREE_ITER_PREFETCH                            \
        |BTREE_ITER_CACHED_NOFILL                       \
        |BTREE_ITER_CACHED_NOCREATE)
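
/*
 * Flags are OR'd together when creating an iterator. An illustrative sketch,
 * assuming the bch2_trans_get_iter() interface from btree_iter.h:
 *
 *        iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, pos,
 *                                   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 *
 * asks for every position (holes synthesized as deleted keys) with intent
 * locks taken on leaf nodes.
 */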

enum btree_iter_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_PEEK            = 1,
        BTREE_ITER_NEED_RELOCK          = 2,
        BTREE_ITER_NEED_TRAVERSE        = 3,
};

#define BTREE_ITER_NO_NODE_GET_LOCKS    ((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP         ((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT    ((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP           ((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN         ((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT         ((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR        ((struct btree *) 7)

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 */
struct btree_iter {
        struct btree_trans      *trans;
        struct bpos             pos;
        struct bpos             pos_after_commit;

        u16                     flags;
        u8                      idx;

        enum btree_id           btree_id:4;
        enum btree_iter_uptodate uptodate:4;
        unsigned                level:4,
                                min_depth:4,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;

        struct btree_iter_level {
                struct btree    *b;
                struct btree_node_iter iter;
                u32             lock_seq;
        }                       l[BTREE_MAX_DEPTH];

        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
        unsigned long           ip_allocated;
};

static inline enum btree_iter_type
btree_iter_type(const struct btree_iter *iter)
{
        return iter->flags & BTREE_ITER_TYPE;
}

static inline bool btree_iter_is_cached(const struct btree_iter *iter)
{
        return btree_iter_type(iter) == BTREE_ITER_CACHED;
}

static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
{
        return iter->l + iter->level;
}

struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
        bool                    table_init_done;
        struct list_head        freed;
        struct list_head        clean;
        struct list_head        dirty;
        struct shrinker         shrink;

        size_t                  nr_freed;
        size_t                  nr_keys;
        size_t                  nr_dirty;
};

struct bkey_cached_key {
        u32                     btree_id;
        struct bpos             pos;
} __attribute__((packed, aligned(4)));

#define BKEY_CACHED_ACCESSED            0
#define BKEY_CACHED_DIRTY               1

struct bkey_cached {
        struct btree_bkey_cached_common c;

        unsigned long           flags;
        u8                      u64s;
        bool                    valid;
        u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;

        struct rhash_head       hash;
        struct list_head        list;

        struct journal_preres   res;
        struct journal_entry_pin journal;

        struct bkey_i           *k;
};

struct btree_insert_entry {
        unsigned                trigger_flags;
        unsigned                trans_triggers_run:1;
        struct bkey_i           *k;
        struct btree_iter       *iter;
};

#ifndef CONFIG_LOCKDEP
#define BTREE_ITER_MAX          64
#else
#define BTREE_ITER_MAX          32
#endif

struct btree_trans {
        struct bch_fs           *c;
#ifdef CONFIG_BCACHEFS_DEBUG
        struct list_head        list;
        struct btree            *locking;
        unsigned                locking_iter_idx;
        struct bpos             locking_pos;
        u8                      locking_btree_id;
        u8                      locking_level;
        pid_t                   pid;
#endif
        unsigned long           ip;
        int                     srcu_idx;

        u64                     iters_linked;
        u64                     iters_live;
        u64                     iters_touched;

        u8                      nr_iters;
        u8                      nr_updates;
        u8                      nr_updates2;
        u8                      size;
        unsigned                used_mempool:1;
        unsigned                error:1;
        unsigned                nounlock:1;
        unsigned                need_reset:1;
        unsigned                in_traverse_all:1;

        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;

        struct btree_iter       *iters;
        struct btree_insert_entry *updates;
        struct btree_insert_entry *updates2;

        /* update path: */
        struct jset_entry       *extra_journal_entries;
        unsigned                extra_journal_entry_u64s;
        struct journal_entry_pin *journal_pin;

        struct journal_res      journal_res;
        struct journal_preres   journal_preres;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
        unsigned                flags;
        unsigned                journal_u64s;
        unsigned                journal_preres_u64s;
        struct replicas_delta_list *fs_usage_deltas;
};
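
/*
 * Typical transaction lifecycle, as a hedged sketch (bch2_trans_init() and
 * bch2_trans_exit() are declared in btree_iter.h; -EINTR here means
 * "transaction restart", not a signal, and do_update() is a hypothetical
 * update body):
 *
 *        struct btree_trans trans;
 *        int ret;
 *
 *        bch2_trans_init(&trans, c, 0, 0);
 *        do {
 *                ret = do_update(&trans);
 *        } while (ret == -EINTR);
 *        bch2_trans_exit(&trans);
 */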

#define BTREE_FLAG(flag)                                                \
static inline bool btree_node_ ## flag(struct btree *b)                 \
{       return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
                                                                        \
static inline void set_btree_node_ ## flag(struct btree *b)             \
{       set_bit(BTREE_NODE_ ## flag, &b->flags); }                      \
                                                                        \
static inline void clear_btree_node_ ## flag(struct btree *b)           \
{       clear_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
        BTREE_NODE_read_in_flight,
        BTREE_NODE_read_error,
        BTREE_NODE_dirty,
        BTREE_NODE_need_write,
        BTREE_NODE_noevict,
        BTREE_NODE_write_idx,
        BTREE_NODE_accessed,
        BTREE_NODE_write_in_flight,
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
        BTREE_NODE_old_extent_overwrite,
        BTREE_NODE_need_rewrite,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(just_written);
BTREE_FLAG(dying);
BTREE_FLAG(fake);
BTREE_FLAG(old_extent_overwrite);
BTREE_FLAG(need_rewrite);
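
/*
 * Each BTREE_FLAG() invocation above generates three accessors; for example
 * BTREE_FLAG(accessed) expands to the equivalent of:
 *
 *        static inline bool btree_node_accessed(struct btree *b)
 *        {        return test_bit(BTREE_NODE_accessed, &b->flags); }
 *
 *        static inline void set_btree_node_accessed(struct btree *b)
 *        {        set_bit(BTREE_NODE_accessed, &b->flags); }
 *
 *        static inline void clear_btree_node_accessed(struct btree *b)
 *        {        clear_bit(BTREE_NODE_accessed, &b->flags); }
 *
 * Note that BTREE_NODE_dirty has no BTREE_FLAG() invocation here: its
 * accessors are defined elsewhere, presumably because dirtying a node
 * involves extra accounting.
 */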

static inline struct btree_write *btree_current_write(struct btree *b)
{
        return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
        return b->writes + (btree_node_write_idx(b) ^ 1);
}
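
/*
 * Writes are double buffered via b->writes[2]: the BTREE_NODE_write_idx flag
 * selects the current slot, and XOR-ing with 1 yields the previous one. As a
 * hedged sketch, an update pins the journal against the current write
 * (bch2_journal_pin_add() is assumed from the journal code; the flush
 * callback is hypothetical):
 *
 *        struct btree_write *w = btree_current_write(b);
 *
 *        bch2_journal_pin_add(&c->journal, seq, &w->journal, flush_fn);
 */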

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
        EBUG_ON(!b->nsets);
        return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
        return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
        u16 ret = (u64 *) p - 1 - (u64 *) b->data;

        EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
        return ret;
}

static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
{
        return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
        t->end_offset =
                __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
                                  const struct bset *i)
{
        t->data_offset = __btree_node_ptr_to_offset(b, i);
        set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
        return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
        return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
        return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
        return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)                                        \
({                                                                      \
        EBUG_ON(bset(_b, _t)->start !=                                  \
                __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
                                                                        \
        bset(_b, _t)->start;                                            \
})

#define btree_bkey_last(_b, _t)                                         \
({                                                                      \
        EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=     \
                vstruct_last(bset(_b, _t)));                            \
                                                                        \
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
})
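
/*
 * With the two macros above, walking every key in a bset looks like this
 * sketch (bkey_next() is assumed from bkey.h; some versions of this code use
 * a variant that also skips noop keys, and process_key() stands in for
 * per-key work):
 *
 *        struct bkey_packed *k;
 *
 *        for (k = btree_bkey_first(b, t);
 *             k != btree_bkey_last(b, t);
 *             k = bkey_next(k))
 *                process_key(b, k);
 */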

static inline unsigned bset_u64s(struct bset_tree *t)
{
        return t->end_offset - t->data_offset -
                sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
        return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
        return i - (void *) b->data;
}

enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
        BCH_BTREE_IDS()
#undef x
        BKEY_TYPE_BTREE,
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
        return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
}

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
        return __btree_node_type(b->c.level, b->c.btree_id);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
        switch (type) {
        case BKEY_TYPE_EXTENTS:
        case BKEY_TYPE_REFLINK:
                return true;
        default:
                return false;
        }
}

static inline bool btree_node_is_extents(struct btree *b)
{
        return btree_node_type_is_extents(btree_node_type(b));
}

static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
{
        return __btree_node_type(iter->level, iter->btree_id);
}

static inline bool btree_iter_is_extents(struct btree_iter *iter)
{
        return btree_node_type_is_extents(btree_iter_key_type(iter));
}

#define BTREE_NODE_TYPE_HAS_TRIGGERS                    \
        ((1U << BKEY_TYPE_EXTENTS)|                     \
         (1U << BKEY_TYPE_ALLOC)|                       \
         (1U << BKEY_TYPE_INODES)|                      \
         (1U << BKEY_TYPE_REFLINK)|                     \
         (1U << BKEY_TYPE_EC)|                          \
         (1U << BKEY_TYPE_BTREE))

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS              \
        ((1U << BKEY_TYPE_EXTENTS)|                     \
         (1U << BKEY_TYPE_INODES)|                      \
         (1U << BKEY_TYPE_EC)|                          \
         (1U << BKEY_TYPE_REFLINK))

enum btree_trigger_flags {
        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */

        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,
        __BTREE_TRIGGER_OVERWRITE_SPLIT,

        __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
        __BTREE_TRIGGER_NOATOMIC,
};

#define BTREE_TRIGGER_NORUN             (1U << __BTREE_TRIGGER_NORUN)

#define BTREE_TRIGGER_INSERT            (1U << __BTREE_TRIGGER_INSERT)
#define BTREE_TRIGGER_OVERWRITE         (1U << __BTREE_TRIGGER_OVERWRITE)
#define BTREE_TRIGGER_OVERWRITE_SPLIT   (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)

#define BTREE_TRIGGER_GC                (1U << __BTREE_TRIGGER_GC)
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
#define BTREE_TRIGGER_NOATOMIC          (1U << __BTREE_TRIGGER_NOATOMIC)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
        return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
}
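
/*
 * Example: BKEY_TYPE_EXTENTS is set in BTREE_NODE_TYPE_HAS_TRIGGERS above, so
 *
 *        btree_node_type_needs_gc(BKEY_TYPE_EXTENTS)
 *
 * evaluates to true, while a key type without triggers (e.g.
 * BKEY_TYPE_DIRENTS) returns false.
 */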

struct btree_root {
        struct btree            *b;

        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
        s8                      error;
};

/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */

enum btree_insert_ret {
        BTREE_INSERT_OK,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
        BTREE_INSERT_ENOSPC,
        BTREE_INSERT_NEED_MARK_REPLICAS,
        BTREE_INSERT_NEED_JOURNAL_RES,
        BTREE_INSERT_NEED_JOURNAL_RECLAIM,
};

enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_RESERVE_GET,
        BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
        btree_prev_sib,
        btree_next_sib,
};

typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
                                                        struct btree *,
                                                        struct btree_node_iter *);

#endif /* _BCACHEFS_BTREE_TYPES_H */