Update bcachefs sources to 5a3a4087af bcachefs: Convert a BUG_ON() to a warning
diff --git a/libbcachefs/btree_types.h b/libbcachefs/btree_types.h
index a0f5b579fe2a92f50ac69160aa9f81fb12ec25b2..b0da0963091181ac05e3800dc8675988f4221aad 100644
--- a/libbcachefs/btree_types.h
+++ b/libbcachefs/btree_types.h
@@ -1,17 +1,18 @@
-#ifndef _BCACHE_BTREE_TYPES_H
-#define _BCACHE_BTREE_TYPES_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_TYPES_H
+#define _BCACHEFS_BTREE_TYPES_H
 
 #include <linux/list.h>
 #include <linux/rhashtable.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
+#include <linux/six.h>
 
 #include "bkey_methods.h"
+#include "buckets_types.h"
 #include "journal_types.h"
-#include "six.h"
 
 struct open_bucket;
-struct btree_interior_update;
+struct btree_update;
+struct btree_trans;
 
 #define MAX_BSETS              3U
 
@@ -55,6 +56,11 @@ struct btree_write {
        struct closure_waitlist         wait;
 };
 
+struct btree_alloc {
+       struct open_buckets     ob;
+       BKEY_PADDED(k);
+};
+
 struct btree {
        /* Hottest entries first */
        struct rhash_head       hash;
@@ -105,7 +111,7 @@ struct btree {
         * node to point to them: we update the parent in memory immediately,
         * but then we must wait until the children have been written out before
         * the update to the parent can be written - this is a list of the
-        * btree_interior_updates that are blocking this node from being
+        * btree_updates that are blocking this node from being
         * written:
         */
        struct list_head        write_blocked;
@@ -116,9 +122,9 @@ struct btree {
         * another write - because that write also won't yet be reachable and
         * marking it as completed before it's reachable would be incorrect:
         */
-       struct list_head        reachable;
+       unsigned long           will_make_reachable;
 
-       struct open_bucket      *ob;
+       struct open_buckets     ob;
 
        /* lru list */
        struct list_head        list;
@@ -130,6 +136,176 @@ struct btree {
 #endif
 };
 
+struct btree_cache {
+       struct rhashtable       table;
+       bool                    table_init_done;
+       /*
+        * We never free a struct btree, except on shutdown - we just put it on
+        * the btree_cache_freed list and reuse it later. This simplifies the
+        * code, and it doesn't cost us much memory as the memory usage is
+        * dominated by buffers that hold the actual btree node data and those
+        * can be freed - and the number of struct btrees allocated is
+        * effectively bounded.
+        *
+        * btree_cache_freeable effectively is a small cache - we use it because
+        * high order page allocations can be rather expensive, and it's quite
+        * common to delete and allocate btree nodes in quick succession. It
+        * should never grow past ~2-3 nodes in practice.
+        */
+       struct mutex            lock;
+       struct list_head        live;
+       struct list_head        freeable;
+       struct list_head        freed;
+
+       /* Number of elements in live + freeable lists */
+       unsigned                used;
+       unsigned                reserve;
+       struct shrinker         shrink;
+
+       /*
+        * If we need to allocate memory for a new btree node and that
+        * allocation fails, we can cannibalize another node in the btree cache
+        * to satisfy the allocation - lock to guarantee only one thread does
+        * this at a time:
+        */
+       struct task_struct      *alloc_lock;
+       struct closure_waitlist alloc_wait;
+};
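
The comment above describes a two-tier reuse scheme: freeable holds a few nodes whose data buffers are still attached, while freed holds bare struct btrees whose buffers have been released. A minimal sketch of the allocation order this implies (hypothetical helper, not the actual allocation path; the real code also handles cannibalizing under alloc_lock):

/* Hypothetical sketch: where a "new" node comes from, cheapest source first. */
static struct btree *btree_node_reuse_sketch(struct btree_cache *bc)
{
        struct btree *b;

        mutex_lock(&bc->lock);

        /* 1) A node with its data buffer still allocated: ready to use. */
        b = list_first_entry_or_null(&bc->freeable, struct btree, list);
        if (!b)
                /* 2) A bare struct btree: caller must allocate the buffer. */
                b = list_first_entry_or_null(&bc->freed, struct btree, list);
        if (b)
                list_del_init(&b->list);

        mutex_unlock(&bc->lock);
        return b;       /* NULL: fall back to allocating or cannibalizing */
}
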
+
+struct btree_node_iter {
+       struct btree_node_iter_set {
+               u16     k, end;
+       } data[MAX_BSETS];
+};
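
Each per-bset position is stored as a u16 offset into the node rather than a pointer (see __btree_node_offset_to_key() further down), which keeps the whole iterator at 12 bytes. A hedged sketch of the end-of-set test this encoding implies (name illustrative):

static inline bool btree_node_iter_set_done(const struct btree_node_iter_set *set)
{
        return set->k == set->end;
}
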
+
+enum btree_iter_type {
+       BTREE_ITER_KEYS,
+       BTREE_ITER_SLOTS,
+       BTREE_ITER_NODES,
+};
+
+#define BTREE_ITER_TYPE                        ((1 << 2) - 1)
+
+#define BTREE_ITER_INTENT              (1 << 2)
+#define BTREE_ITER_PREFETCH            (1 << 3)
+/*
+ * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
+ * @pos or the first key strictly greater than @pos
+ */
+#define BTREE_ITER_IS_EXTENTS          (1 << 4)
+#define BTREE_ITER_ERROR               (1 << 5)
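
BTREE_ITER_TYPE is a mask rather than a flag: the iterator type from the enum above lives in the low two bits of the flags word, and the boolean flags sit above it. A small sketch of unpacking the type (hypothetical helper; the real accessor lives elsewhere in the btree code):

static inline enum btree_iter_type btree_iter_flags_type(unsigned flags)
{
        return flags & BTREE_ITER_TYPE;         /* low 2 bits: KEYS/SLOTS/NODES */
}

/* e.g. btree_iter_flags_type(BTREE_ITER_SLOTS|BTREE_ITER_INTENT) == BTREE_ITER_SLOTS */
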
+
+enum btree_iter_uptodate {
+       BTREE_ITER_UPTODATE             = 0,
+       BTREE_ITER_NEED_PEEK            = 1,
+       BTREE_ITER_NEED_RELOCK          = 2,
+       BTREE_ITER_NEED_TRAVERSE        = 3,
+};
+
+/*
+ * @pos                        - iterator's current position
+ * @level              - current btree depth
+ * @locks_want         - btree level below which we start taking intent locks
+ * @nodes_locked       - bitmask indicating which nodes in @nodes are locked
+ * @nodes_intent_locked        - bitmask indicating which locks are intent locks
+ */
+struct btree_iter {
+       u8                      idx;
+
+       struct btree_trans      *trans;
+       struct bpos             pos;
+
+       u8                      flags;
+       enum btree_iter_uptodate uptodate:4;
+       enum btree_id           btree_id:4;
+       unsigned                level:4,
+                               locks_want:4,
+                               nodes_locked:4,
+                               nodes_intent_locked:4;
+
+       struct btree_iter_level {
+               struct btree    *b;
+               struct btree_node_iter iter;
+               u32             lock_seq;
+       }                       l[BTREE_MAX_DEPTH];
+
+       /*
+        * Current unpacked key - so that bch2_btree_iter_next()/
+        * bch2_btree_iter_next_slot() can correctly advance pos.
+        */
+       struct bkey             k;
+
+       u64                     id;
+};
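
nodes_locked and nodes_intent_locked are bitmasks indexed by btree level, parallel to the l[] array: bit n describes the lock held on l[n].b. A hedged sketch of the per-level tests this layout implies (names illustrative; the real helpers live in the btree locking code):

static inline bool iter_node_locked(const struct btree_iter *iter, unsigned level)
{
        return iter->nodes_locked & (1U << level);
}

static inline bool iter_node_intent_locked(const struct btree_iter *iter, unsigned level)
{
        return iter->nodes_intent_locked & (1U << level);
}
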
+
+struct deferred_update {
+       struct journal_preres   res;
+       struct journal_entry_pin journal;
+
+       spinlock_t              lock;
+       unsigned                dirty:1;
+
+       u8                      allocated_u64s;
+       enum btree_id           btree_id;
+
+       /* must be last: */
+       struct bkey_i           k;
+};
+
+struct btree_insert_entry {
+       struct bkey_i           *k;
+
+       union {
+       struct btree_iter       *iter;
+       struct deferred_update  *d;
+       };
+
+       bool                    deferred;
+};
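
The union is discriminated by the deferred flag: an entry targets either a btree position through an iterator or a deferred_update. A minimal sketch of how a consumer picks the active member (hypothetical helper):

static inline enum btree_id insert_entry_btree_id(const struct btree_insert_entry *i)
{
        return i->deferred ? i->d->btree_id : i->iter->btree_id;
}
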
+
+#define BTREE_ITER_MAX         64
+
+struct btree_trans {
+       struct bch_fs           *c;
+       unsigned long           ip;
+       u64                     commit_start;
+
+       u64                     iters_linked;
+       u64                     iters_live;
+       u64                     iters_touched;
+       u64                     iters_unlink_on_restart;
+       u64                     iters_unlink_on_commit;
+
+       u8                      nr_iters;
+       u8                      nr_updates;
+       u8                      size;
+       unsigned                used_mempool:1;
+       unsigned                error:1;
+       unsigned                nounlock:1;
+
+       unsigned                mem_top;
+       unsigned                mem_bytes;
+       void                    *mem;
+
+       struct btree_iter       *iters;
+       struct btree_insert_entry *updates;
+       u8                      *updates_sorted;
+
+       /* update path: */
+       struct journal_res      journal_res;
+       struct journal_preres   journal_preres;
+       u64                     *journal_seq;
+       struct disk_reservation *disk_res;
+       unsigned                flags;
+       unsigned                journal_u64s;
+
+       struct btree_iter       iters_onstack[2];
+       struct btree_insert_entry updates_onstack[6];
+       u8                      updates_sorted_onstack[6];
+
+       struct replicas_delta_list *fs_usage_deltas;
+};
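
A transaction starts out on the small embedded arrays and only moves to heap or mempool storage (see used_mempool) once it outgrows them; size tracks the current iterator capacity. A sketch of the initialization this layout suggests (assumed from the fields above, not the actual bch2_trans_init()):

static void trans_init_sketch(struct btree_trans *trans, struct bch_fs *c)
{
        memset(trans, 0, sizeof(*trans));       /* sketch: zero everything */

        trans->c              = c;
        trans->size           = ARRAY_SIZE(trans->iters_onstack);
        trans->iters          = trans->iters_onstack;
        trans->updates        = trans->updates_onstack;
        trans->updates_sorted = trans->updates_sorted_onstack;
}
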
+
 #define BTREE_FLAG(flag)                                               \
 static inline bool btree_node_ ## flag(struct btree *b)                        \
 {      return test_bit(BTREE_NODE_ ## flag, &b->flags); }              \
@@ -141,8 +317,8 @@ static inline void clear_btree_node_ ## flag(struct btree *b)               \
 {      clear_bit(BTREE_NODE_ ## flag, &b->flags); }
 
 enum btree_flags {
+       BTREE_NODE_read_in_flight,
        BTREE_NODE_read_error,
-       BTREE_NODE_write_error,
        BTREE_NODE_dirty,
        BTREE_NODE_need_write,
        BTREE_NODE_noevict,
@@ -150,10 +326,12 @@ enum btree_flags {
        BTREE_NODE_accessed,
        BTREE_NODE_write_in_flight,
        BTREE_NODE_just_written,
+       BTREE_NODE_dying,
+       BTREE_NODE_fake,
 };
 
+BTREE_FLAG(read_in_flight);
 BTREE_FLAG(read_error);
-BTREE_FLAG(write_error);
 BTREE_FLAG(dirty);
 BTREE_FLAG(need_write);
 BTREE_FLAG(noevict);
@@ -161,6 +339,8 @@ BTREE_FLAG(write_idx);
 BTREE_FLAG(accessed);
 BTREE_FLAG(write_in_flight);
 BTREE_FLAG(just_written);
+BTREE_FLAG(dying);
+BTREE_FLAG(fake);
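
Each BTREE_FLAG() invocation above expands into the btree_node_<flag>() test and clear_btree_node_<flag>() helpers shown in the macro, so the flag names double as accessor names. An illustrative call site (not taken from this commit):

static inline void btree_node_write_done_example(struct btree *b)
{
        if (btree_node_dirty(b))                /* tests BTREE_NODE_dirty */
                clear_btree_node_dirty(b);      /* clears it once the write completes */
}
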
 
 static inline struct btree_write *btree_current_write(struct btree *b)
 {
@@ -178,10 +358,38 @@ static inline struct bset_tree *bset_tree_last(struct btree *b)
        return b->set + b->nsets - 1;
 }
 
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+       return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+       u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+       EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+       return ret;
+}
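
Offsets are counted in u64 words starting just past the first u64 of b->data (hence the + 1 / - 1), so the two helpers are exact inverses. A small illustrative round trip:

static inline void btree_node_offset_roundtrip_example(struct btree *b)
{
        u16 off = 16;
        void *p = __btree_node_offset_to_ptr(b, off);   /* == (u64 *) b->data + 17 */

        EBUG_ON(__btree_node_ptr_to_offset(b, p) != off);
}
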
+
 static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
 {
-       return (void *) b->data + t->data_offset * sizeof(u64);
+       return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+       t->end_offset =
+               __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+                                 const struct bset *i)
+{
+       t->data_offset = __btree_node_ptr_to_offset(b, i);
+       set_btree_bset_end(b, t);
 }
 
 static inline struct bset *btree_bset_first(struct btree *b)
@@ -197,19 +405,27 @@ static inline struct bset *btree_bset_last(struct btree *b)
 static inline u16
 __btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
 {
-       size_t ret = (u64 *) k - (u64 *) b->data - 1;
-
-       EBUG_ON(ret > U16_MAX);
-       return ret;
+       return __btree_node_ptr_to_offset(b, k);
 }
 
 static inline struct bkey_packed *
 __btree_node_offset_to_key(const struct btree *b, u16 k)
 {
-       return (void *) ((u64 *) b->data + k + 1);
+       return __btree_node_offset_to_ptr(b, k);
 }
 
-#define btree_bkey_first(_b, _t)       (bset(_b, _t)->start)
+static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
+{
+       return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
+}
+
+#define btree_bkey_first(_b, _t)                                       \
+({                                                                     \
+       EBUG_ON(bset(_b, _t)->start !=                                  \
+               __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+                                                                       \
+       bset(_b, _t)->start;                                            \
+})
 
 #define btree_bkey_last(_b, _t)                                                \
 ({                                                                     \
@@ -219,58 +435,71 @@ __btree_node_offset_to_key(const struct btree *b, u16 k)
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
 })
 
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_byte_offset(struct btree *b, void *i)
 {
-       t->end_offset =
-               __btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
-       btree_bkey_last(b, t);
+       return i - (void *) b->data;
 }
 
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
-                                 const struct bset *i)
-{
-       t->data_offset = (u64 *) i - (u64 *) b->data;
-
-       EBUG_ON(bset(b, t) != i);
-
-       set_btree_bset_end(b, t);
-}
+enum btree_node_type {
+#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
+       BCH_BTREE_IDS()
+#undef x
+       BKEY_TYPE_BTREE,
+};
 
-static inline unsigned bset_byte_offset(struct btree *b, void *i)
+/* Type of a key in btree @id at level @level: */
+static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
 {
-       return i - (void *) b->data;
+       return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
 }
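
Interior nodes hold btree node pointers no matter which btree they belong to; only leaves (level 0) take their key type from the btree id. For example (BTREE_ID_EXTENTS is assumed from BCH_BTREE_IDS(), which is defined outside this header):

static inline void __btree_node_type_example(void)
{
        /* Leaf of the extents btree: keys are extents. */
        EBUG_ON(__btree_node_type(0, BTREE_ID_EXTENTS) != BKEY_TYPE_EXTENTS);
        /* Any interior node: keys are btree node pointers. */
        EBUG_ON(__btree_node_type(1, BTREE_ID_EXTENTS) != BKEY_TYPE_BTREE);
}
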
 
 /* Type of keys @b contains: */
-static inline enum bkey_type btree_node_type(struct btree *b)
+static inline enum btree_node_type btree_node_type(struct btree *b)
 {
-       return b->level ? BKEY_TYPE_BTREE : b->btree_id;
+       return __btree_node_type(b->level, b->btree_id);
 }
 
-static inline const struct bkey_ops *btree_node_ops(struct btree *b)
+static inline bool btree_node_type_is_extents(enum btree_node_type type)
 {
-       return bch2_bkey_ops[btree_node_type(b)];
+       switch (type) {
+       case BKEY_TYPE_EXTENTS:
+       case BKEY_TYPE_REFLINK:
+               return true;
+       default:
+               return false;
+       }
 }
 
-static inline bool btree_node_has_ptrs(struct btree *b)
+static inline bool btree_node_is_extents(struct btree *b)
 {
-       return btree_type_has_ptrs(btree_node_type(b));
+       return btree_node_type_is_extents(btree_node_type(b));
 }
 
-static inline bool btree_node_is_extents(struct btree *b)
+static inline bool btree_node_type_needs_gc(enum btree_node_type type)
 {
-       return btree_node_type(b) == BKEY_TYPE_EXTENTS;
+       switch (type) {
+       case BKEY_TYPE_ALLOC:
+       case BKEY_TYPE_BTREE:
+       case BKEY_TYPE_EXTENTS:
+       case BKEY_TYPE_INODES:
+       case BKEY_TYPE_EC:
+       case BKEY_TYPE_REFLINK:
+               return true;
+       default:
+               return false;
+       }
 }
 
 struct btree_root {
        struct btree            *b;
 
-       struct btree_interior_update *as;
+       struct btree_update     *as;
 
        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
+       s8                      error;
 };
 
 /*
@@ -278,32 +507,13 @@ struct btree_root {
  * we're holding the write lock and we know what key is about to be overwritten:
  */
 
-struct btree_iter;
-struct btree_node_iter;
-
-enum extent_insert_hook_ret {
-       BTREE_HOOK_DO_INSERT,
-       BTREE_HOOK_NO_INSERT,
-       BTREE_HOOK_RESTART_TRANS,
-};
-
-struct extent_insert_hook {
-       enum extent_insert_hook_ret
-       (*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
-             struct bkey_s_c, const struct bkey_i *);
-};
-
 enum btree_insert_ret {
        BTREE_INSERT_OK,
-       /* extent spanned multiple leaf nodes: have to traverse to next node: */
-       BTREE_INSERT_NEED_TRAVERSE,
-       /* write lock held for too long */
-       BTREE_INSERT_NEED_RESCHED,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
-       BTREE_INSERT_JOURNAL_RES_FULL,
        BTREE_INSERT_ENOSPC,
-       BTREE_INSERT_NEED_GC_LOCK,
+       BTREE_INSERT_NEED_MARK_REPLICAS,
+       BTREE_INSERT_NEED_JOURNAL_RES,
 };
 
 enum btree_gc_coalesce_fail_reason {
@@ -312,8 +522,13 @@ enum btree_gc_coalesce_fail_reason {
        BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
 };
 
+enum btree_node_sibling {
+       btree_prev_sib,
+       btree_next_sib,
+};
+
 typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
                                                        struct btree *,
                                                        struct btree_node_iter *);
 
-#endif /* _BCACHE_BTREE_TYPES_H */
+#endif /* _BCACHEFS_BTREE_TYPES_H */