Update bcachefs sources to 1a510b00b6 bcachefs: Increase BTREE_TRANS_MEM_MAX
index ea25b04b7517116b3fa31e269a9b000f7b09e9a3..ec5195daead4998bad4f16d511d48ee7bf6c3ea4 100644
@@ -47,8 +47,6 @@ struct bset_tree {
        u16                     data_offset;
        u16                     aux_data_offset;
        u16                     end_offset;
-
-       struct bpos             max_key;
 };
 
 struct btree_write {
@@ -57,7 +55,7 @@ struct btree_write {
 
 struct btree_alloc {
        struct open_buckets     ob;
-       BKEY_PADDED(k);
+       __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
 struct btree_bkey_cached_common {
@@ -76,6 +74,7 @@ struct btree {
        u16                     written;
        u8                      nsets;
        u8                      nr_key_bits;
+       u16                     version_ondisk;
 
        struct bkey_format      format;
 
@@ -94,9 +93,14 @@ struct btree {
        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
-       u8                      page_order;
+       u8                      byte_order;
        u8                      unpack_fn_len;
 
+       struct btree_write      writes[2];
+
+       /* Key/pointer for this btree node */
+       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+
        /*
         * XXX: add a delete sequence number, so when bch2_btree_node_relock()
         * fails because the lock sequence number has changed - i.e. the
@@ -127,15 +131,6 @@ struct btree {
 
        /* lru list */
        struct list_head        list;
-
-       struct btree_write      writes[2];
-
-#ifdef CONFIG_BCACHEFS_DEBUG
-       bool                    *expensive_debug_checks;
-#endif
-
-       /* Key/pointer for this btree node */
-       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
 struct btree_cache {
@@ -162,6 +157,7 @@ struct btree_cache {
        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
+       atomic_t                dirty;
        struct shrinker         shrink;
 
        /*
@@ -213,17 +209,13 @@ enum btree_iter_type {
  * @pos or the first key strictly greater than @pos
  */
 #define BTREE_ITER_IS_EXTENTS          (1 << 6)
-#define BTREE_ITER_ERROR               (1 << 7)
-#define BTREE_ITER_SET_POS_AFTER_COMMIT        (1 << 8)
-#define BTREE_ITER_CACHED_NOFILL       (1 << 9)
-#define BTREE_ITER_CACHED_NOCREATE     (1 << 10)
-
-#define BTREE_ITER_USER_FLAGS                          \
-       (BTREE_ITER_SLOTS                               \
-       |BTREE_ITER_INTENT                              \
-       |BTREE_ITER_PREFETCH                            \
-       |BTREE_ITER_CACHED_NOFILL                       \
-       |BTREE_ITER_CACHED_NOCREATE)
+#define BTREE_ITER_NOT_EXTENTS         (1 << 7)
+#define BTREE_ITER_ERROR               (1 << 8)
+#define BTREE_ITER_SET_POS_AFTER_COMMIT        (1 << 9)
+#define BTREE_ITER_CACHED_NOFILL       (1 << 10)
+#define BTREE_ITER_CACHED_NOCREATE     (1 << 11)
+#define BTREE_ITER_WITH_UPDATES                (1 << 12)
+#define BTREE_ITER_ALL_SNAPSHOTS       (1 << 13)
 
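(Aside, not part of the diff: the BTREE_ITER_* flags above form a plain bitmask that callers OR together when creating an iterator. Below is a minimal usage sketch, assuming the bch2_trans_get_iter()/bch2_trans_iter_put() helpers of this era; the function name example_extent_iter and the inode argument are made up for illustration.)

static void example_extent_iter(struct btree_trans *trans, u64 inum)
{
	struct btree_iter *iter;

	/* flag bits from the header above, combined with bitwise OR */
	iter = bch2_trans_get_iter(trans, BTREE_ID_extents, POS(inum, 0),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	/* ... look up or update keys through iter ... */
	bch2_trans_iter_put(trans, iter);
}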
 enum btree_iter_uptodate {
        BTREE_ITER_UPTODATE             = 0,
@@ -239,6 +231,7 @@ enum btree_iter_uptodate {
 #define BTREE_ITER_NO_NODE_DOWN                ((struct btree *) 5)
 #define BTREE_ITER_NO_NODE_INIT                ((struct btree *) 6)
 #define BTREE_ITER_NO_NODE_ERROR       ((struct btree *) 7)
+#define BTREE_ITER_NO_NODE_CACHED      ((struct btree *) 8)
 
 /*
  * @pos                        - iterator's current position
@@ -249,14 +242,29 @@ enum btree_iter_uptodate {
  */
 struct btree_iter {
        struct btree_trans      *trans;
-       struct bpos             pos;
-       struct bpos             pos_after_commit;
+       unsigned long           ip_allocated;
 
-       u16                     flags;
        u8                      idx;
+       u8                      child_idx;
+
+       /* btree_iter_copy starts here: */
+       u16                     flags;
+
+       /* When we're filtering by snapshot, the snapshot ID we're looking for: */
+       unsigned                snapshot;
+
+       struct bpos             pos;
+       struct bpos             real_pos;
+       struct bpos             pos_after_commit;
 
        enum btree_id           btree_id:4;
-       enum btree_iter_uptodate uptodate:4;
+       enum btree_iter_uptodate uptodate:3;
+       /*
+        * True if we've returned a key (and thus are expected to keep it
+        * locked), false after set_pos - for avoiding spurious transaction
+        * restarts in bch2_trans_relock():
+        */
+       bool                    should_be_locked:1;
        unsigned                level:4,
                                min_depth:4,
                                locks_want:4,
@@ -274,7 +282,6 @@ struct btree_iter {
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
-       unsigned long           ip_allocated;
 };
 
 static inline enum btree_iter_type
@@ -283,6 +290,11 @@ btree_iter_type(const struct btree_iter *iter)
        return iter->flags & BTREE_ITER_TYPE;
 }
 
+static inline bool btree_iter_is_cached(const struct btree_iter *iter)
+{
+       return btree_iter_type(iter) == BTREE_ITER_CACHED;
+}
+
 static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
 {
        return iter->l + iter->level;
@@ -291,8 +303,14 @@ static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
 struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
+       bool                    table_init_done;
        struct list_head        freed;
-       struct list_head        clean;
+       struct shrinker         shrink;
+       unsigned                shrink_iter;
+
+       size_t                  nr_freed;
+       atomic_long_t           nr_keys;
+       atomic_long_t           nr_dirty;
 };
 
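(Aside, not part of the diff: the new nr_freed/nr_keys/nr_dirty counters pair with the shrinker now embedded in struct btree_key_cache, presumably so the cache can report how much of it is reclaimable. A minimal sketch of a count_objects callback built on those counters follows; the function name and the exact accounting are illustrative assumptions.)

static unsigned long example_key_cache_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	struct btree_key_cache *bc =
		container_of(shrink, struct btree_key_cache, shrink);

	/* dirty keys can't be reclaimed until they've been written back */
	return atomic_long_read(&bc->nr_keys) -
	       atomic_long_read(&bc->nr_dirty);
}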
 struct bkey_cached_key {
@@ -300,7 +318,8 @@ struct bkey_cached_key {
        struct bpos             pos;
 } __attribute__((packed, aligned(4)));
 
-#define BKEY_CACHED_DIRTY              0
+#define BKEY_CACHED_ACCESSED           0
+#define BKEY_CACHED_DIRTY              1
 
 struct bkey_cached {
        struct btree_bkey_cached_common c;
@@ -308,6 +327,7 @@ struct bkey_cached {
        unsigned long           flags;
        u8                      u64s;
        bool                    valid;
+       u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;
 
        struct rhash_head       hash;
@@ -321,6 +341,9 @@ struct bkey_cached {
 
 struct btree_insert_entry {
        unsigned                trigger_flags;
+       u8                      bkey_type;
+       enum btree_id           btree_id:8;
+       u8                      level;
        unsigned                trans_triggers_run:1;
        struct bkey_i           *k;
        struct btree_iter       *iter;
@@ -332,6 +355,16 @@ struct btree_insert_entry {
 #define BTREE_ITER_MAX         32
 #endif
 
+struct btree_trans_commit_hook;
+typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);
+
+struct btree_trans_commit_hook {
+       btree_trans_commit_hook_fn      *fn;
+       struct btree_trans_commit_hook  *next;
+};
+
+#define BTREE_TRANS_MEM_MAX    (1U << 14)
+
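(Aside, not part of the diff: btree_trans_commit_hook forms a singly linked list of callbacks hung off the transaction through the new hooks field in struct btree_trans below. A minimal sketch of registering and running such hooks, using only the fields declared above; the helper names are hypothetical.)

static void example_register_hook(struct btree_trans *trans,
				  struct btree_trans_commit_hook *h)
{
	/* push onto the list the commit path will walk */
	h->next		= trans->hooks;
	trans->hooks	= h;
}

static int example_run_hooks(struct btree_trans *trans)
{
	struct btree_trans_commit_hook *h;
	int ret;

	for (h = trans->hooks; h; h = h->next) {
		ret = h->fn(trans, h);
		if (ret)
			return ret;
	}
	return 0;
}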
 struct btree_trans {
        struct bch_fs           *c;
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -344,20 +377,21 @@ struct btree_trans {
        pid_t                   pid;
 #endif
        unsigned long           ip;
+       int                     srcu_idx;
 
-       u64                     iters_linked;
-       u64                     iters_live;
-       u64                     iters_touched;
-
-       u8                      nr_iters;
        u8                      nr_updates;
-       u8                      nr_updates2;
-       u8                      size;
        unsigned                used_mempool:1;
        unsigned                error:1;
-       unsigned                nounlock:1;
-       unsigned                need_reset:1;
        unsigned                in_traverse_all:1;
+       /*
+        * For when bch2_trans_update notices we'll be splitting a compressed
+        * extent:
+        */
+       unsigned                extra_journal_res;
+
+       u64                     iters_linked;
+       u64                     iters_live;
+       u64                     iters_touched;
 
        unsigned                mem_top;
        unsigned                mem_bytes;
@@ -365,9 +399,9 @@ struct btree_trans {
 
        struct btree_iter       *iters;
        struct btree_insert_entry *updates;
-       struct btree_insert_entry *updates2;
 
        /* update path: */
+       struct btree_trans_commit_hook *hooks;
        struct jset_entry       *extra_journal_entries;
        unsigned                extra_journal_entry_u64s;
        struct journal_entry_pin *journal_pin;
@@ -380,10 +414,6 @@ struct btree_trans {
        unsigned                journal_u64s;
        unsigned                journal_preres_u64s;
        struct replicas_delta_list *fs_usage_deltas;
-
-       struct btree_iter       iters_onstack[2];
-       struct btree_insert_entry updates_onstack[2];
-       struct btree_insert_entry updates2_onstack[2];
 };
 
 #define BTREE_FLAG(flag)                                               \
@@ -408,12 +438,12 @@ enum btree_flags {
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
-       BTREE_NODE_old_extent_overwrite,
+       BTREE_NODE_need_rewrite,
+       BTREE_NODE_never_write,
 };
 
 BTREE_FLAG(read_in_flight);
 BTREE_FLAG(read_error);
-BTREE_FLAG(dirty);
 BTREE_FLAG(need_write);
 BTREE_FLAG(noevict);
 BTREE_FLAG(write_idx);
@@ -422,7 +452,8 @@ BTREE_FLAG(write_in_flight);
 BTREE_FLAG(just_written);
 BTREE_FLAG(dying);
 BTREE_FLAG(fake);
-BTREE_FLAG(old_extent_overwrite);
+BTREE_FLAG(need_rewrite);
+BTREE_FLAG(never_write);
 
 static inline struct btree_write *btree_current_write(struct btree *b)
 {
@@ -534,16 +565,16 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
 }
 
 enum btree_node_type {
-#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
+#define x(kwd, val) BKEY_TYPE_##kwd = val,
        BCH_BTREE_IDS()
 #undef x
-       BKEY_TYPE_BTREE,
+       BKEY_TYPE_btree,
 };
 
 /* Type of a key in btree @id at level @level: */
 static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
 {
-       return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
+       return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
 }
 
 /* Type of keys @b contains: */
@@ -555,8 +586,8 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
 static inline bool btree_node_type_is_extents(enum btree_node_type type)
 {
        switch (type) {
-       case BKEY_TYPE_EXTENTS:
-       case BKEY_TYPE_REFLINK:
+       case BKEY_TYPE_extents:
+       case BKEY_TYPE_reflink:
                return true;
        default:
                return false;
@@ -568,45 +599,70 @@ static inline bool btree_node_is_extents(struct btree *b)
        return btree_node_type_is_extents(btree_node_type(b));
 }
 
-#define BTREE_NODE_TYPE_HAS_TRIGGERS                   \
-       ((1U << BKEY_TYPE_EXTENTS)|                     \
-        (1U << BKEY_TYPE_ALLOC)|                       \
-        (1U << BKEY_TYPE_INODES)|                      \
-        (1U << BKEY_TYPE_REFLINK)|                     \
-        (1U << BKEY_TYPE_EC)|                          \
-        (1U << BKEY_TYPE_BTREE))
+static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
+{
+       return __btree_node_type(iter->level, iter->btree_id);
+}
+
+static inline bool btree_iter_is_extents(struct btree_iter *iter)
+{
+       return btree_node_type_is_extents(btree_iter_key_type(iter));
+}
 
 #define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS             \
-       ((1U << BKEY_TYPE_EXTENTS)|                     \
-        (1U << BKEY_TYPE_INODES)|                      \
-        (1U << BKEY_TYPE_REFLINK))
+       ((1U << BKEY_TYPE_extents)|                     \
+        (1U << BKEY_TYPE_inodes)|                      \
+        (1U << BKEY_TYPE_stripes)|                     \
+        (1U << BKEY_TYPE_reflink)|                     \
+        (1U << BKEY_TYPE_btree))
+
+#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS               \
+       ((1U << BKEY_TYPE_alloc)|                       \
+        (1U << BKEY_TYPE_stripes))
+
+#define BTREE_NODE_TYPE_HAS_TRIGGERS                   \
+       (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
+        BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
+
+#define BTREE_ID_HAS_SNAPSHOTS                         \
+       ((1U << BTREE_ID_extents)|                      \
+        (1U << BTREE_ID_inodes)|                       \
+        (1U << BTREE_ID_dirents)|                      \
+        (1U << BTREE_ID_xattrs))
+
+#define BTREE_ID_HAS_PTRS                              \
+       ((1U << BTREE_ID_extents)|                      \
+        (1U << BTREE_ID_reflink))
+
+static inline bool btree_type_has_snapshots(enum btree_id id)
+{
+       return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
+}
 
 enum btree_trigger_flags {
        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
-       __BTREE_TRIGGER_NOOVERWRITES,   /* Don't run triggers on overwrites */
 
        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,
-       __BTREE_TRIGGER_OVERWRITE_SPLIT,
 
        __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
-       __BTREE_TRIGGER_ALLOC_READ,
        __BTREE_TRIGGER_NOATOMIC,
 };
 
 #define BTREE_TRIGGER_NORUN            (1U << __BTREE_TRIGGER_NORUN)
-#define BTREE_TRIGGER_NOOVERWRITES     (1U << __BTREE_TRIGGER_NOOVERWRITES)
 
 #define BTREE_TRIGGER_INSERT           (1U << __BTREE_TRIGGER_INSERT)
 #define BTREE_TRIGGER_OVERWRITE                (1U << __BTREE_TRIGGER_OVERWRITE)
-#define BTREE_TRIGGER_OVERWRITE_SPLIT  (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)
 
 #define BTREE_TRIGGER_GC               (1U << __BTREE_TRIGGER_GC)
 #define BTREE_TRIGGER_BUCKET_INVALIDATE        (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-#define BTREE_TRIGGER_ALLOC_READ       (1U << __BTREE_TRIGGER_ALLOC_READ)
 #define BTREE_TRIGGER_NOATOMIC         (1U << __BTREE_TRIGGER_NOATOMIC)
 
+#define BTREE_TRIGGER_WANTS_OLD_AND_NEW                \
+       ((1U << KEY_TYPE_stripe)|               \
+        (1U << KEY_TYPE_inode))
+
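(Aside, not part of the diff: these per-type masks are tested the same way btree_type_has_snapshots() above and btree_node_type_needs_gc() below do it: shift by the type and AND against the mask. A sketch of an equivalent predicate for transactional triggers; the helper name is made up.)

static inline bool example_type_has_trans_triggers(enum btree_node_type type)
{
	return (1U << type) & BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS;
}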
 static inline bool btree_node_type_needs_gc(enum btree_node_type type)
 {
        return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
@@ -631,9 +687,9 @@ enum btree_insert_ret {
        BTREE_INSERT_OK,
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
-       BTREE_INSERT_ENOSPC,
        BTREE_INSERT_NEED_MARK_REPLICAS,
        BTREE_INSERT_NEED_JOURNAL_RES,
+       BTREE_INSERT_NEED_JOURNAL_RECLAIM,
 };
 
 enum btree_gc_coalesce_fail_reason {