Update bcachefs sources to 95ff72a6c1 fixup! mm: Centralize & improve oom reporting...
diff --git a/libbcachefs/btree_types.h b/libbcachefs/btree_types.h
index b86a721f90acee8c6a1dc7043ec5e80788e34767..1e4d1fecc6bd336539025e15d7de80b3f50028a0 100644
--- a/libbcachefs/btree_types.h
+++ b/libbcachefs/btree_types.h
@@ -182,22 +182,16 @@ struct btree_node_iter {
  * Iterate over all possible positions, synthesizing deleted keys for holes:
  */
 #define BTREE_ITER_SLOTS               (1 << 0)
+#define BTREE_ITER_ALL_LEVELS          (1 << 1)
 /*
  * Indicates that intent locks should be taken on leaf nodes, because we expect
  * to be doing updates:
  */
-#define BTREE_ITER_INTENT              (1 << 1)
+#define BTREE_ITER_INTENT              (1 << 2)
 /*
  * Causes the btree iterator code to prefetch additional btree nodes from disk:
  */
-#define BTREE_ITER_PREFETCH            (1 << 2)
-/*
- * Indicates that this iterator should not be reused until transaction commit,
- * either because a pending update references it or because the update depends
- * on that particular key being locked (e.g. by the str_hash code, for hash
- * table consistency)
- */
-#define BTREE_ITER_KEEP_UNTIL_COMMIT   (1 << 3)
+#define BTREE_ITER_PREFETCH            (1 << 3)
 /*
  * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
  * @pos or the first key strictly greater than @pos
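
The renumbered flags above are OR'd together by callers when opening an iterator. A minimal usage sketch, assuming the bch2_trans_iter_init()/bch2_btree_iter_peek_slot()/bch2_trans_iter_exit() interface from this tree; trans, inum, the btree ID and the (absent) error handling are illustrative only:

	struct btree_iter iter;
	struct bkey_s_c k;

	/*
	 * Take intent locks (we expect to update) and iterate slots, so a
	 * hole comes back as a synthesized deleted key rather than ENOENT:
	 */
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	k = bch2_btree_iter_peek_slot(&iter);
	if (!bkey_err(k)) {
		/* inspect or update k here */
	}

	bch2_trans_iter_exit(trans, &iter);
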
@@ -282,7 +276,8 @@ struct btree_iter {
        struct btree_path       *key_cache_path;
 
        enum btree_id           btree_id:4;
-       unsigned                min_depth:4;
+       unsigned                min_depth:3;
+       unsigned                advanced:1;
 
        /* btree_iter_copy starts here: */
        u16                     flags;
@@ -297,11 +292,20 @@ struct btree_iter {
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
+
+       /* BTREE_ITER_WITH_JOURNAL: */
+       size_t                  journal_idx;
+       struct bpos             journal_pos;
 #ifdef CONFIG_BCACHEFS_DEBUG
        unsigned long           ip_allocated;
 #endif
 };
 
+struct btree_key_cache_freelist {
+       struct bkey_cached      *objs[16];
+       unsigned                nr;
+};
+
 struct btree_key_cache {
        struct mutex            lock;
        struct rhashtable       table;
@@ -309,8 +313,9 @@ struct btree_key_cache {
        struct list_head        freed;
        struct shrinker         shrink;
        unsigned                shrink_iter;
+       struct btree_key_cache_freelist __percpu *pcpu_freed;
 
-       size_t                  nr_freed;
+       atomic_long_t           nr_freed;
        atomic_long_t           nr_keys;
        atomic_long_t           nr_dirty;
 };
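
The new pcpu_freed member puts a small per-CPU stack of up to 16 bkey_cached objects in front of the shared freed list, so the common allocate/free paths can avoid taking bc->lock. A rough sketch of the allocation-side fast path this enables, with a hypothetical helper name (the real logic lives in btree_key_cache.c):

	static struct bkey_cached *
	example_alloc_from_pcpu(struct btree_key_cache *bc)
	{
		struct btree_key_cache_freelist *f;
		struct bkey_cached *ck = NULL;

		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);
		if (f->nr)
			ck = f->objs[--f->nr];
		preempt_enable();

		/* NULL: caller falls back to bc->freed under bc->lock */
		return ck;
	}
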
@@ -349,6 +354,7 @@ struct btree_insert_entry {
        bool                    cached:1;
        bool                    insert_trigger_run:1;
        bool                    overwrite_trigger_run:1;
+       bool                    key_cache_already_flushed:1;
        /*
         * @old_k may be a key from the journal; @old_btree_u64s always refers
         * to the size of the key being overwritten in the btree:
@@ -376,7 +382,7 @@ struct btree_trans_commit_hook {
        struct btree_trans_commit_hook  *next;
 };
 
-#define BTREE_TRANS_MEM_MAX    (1U << 14)
+#define BTREE_TRANS_MEM_MAX    (1U << 16)
 
 struct btree_trans {
        struct bch_fs           *c;
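
(For scale, the BTREE_TRANS_MEM_MAX bump a few lines above goes from 1U << 14 = 16 KiB to 1U << 16 = 64 KiB; this is the cap on the trans->mem buffer that bch2_trans_kmalloc() carves per-transaction allocations out of.)
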
@@ -388,7 +394,7 @@ struct btree_trans {
        u8                      locking_btree_id;
        u8                      locking_level;
        u8                      locking_lock_type;
-       pid_t                   pid;
+       struct task_struct      *task;
        int                     srcu_idx;
 
        u8                      nr_sorted;
@@ -398,7 +404,6 @@ struct btree_trans {
        bool                    in_traverse_all:1;
        bool                    restarted:1;
        bool                    memory_allocation_failure:1;
-       bool                    journal_transaction_names:1;
        bool                    is_initial_gc:1;
        /*
         * For when bch2_trans_update notices we'll be splitting a compressed
@@ -639,40 +644,10 @@ static inline bool btree_type_has_snapshots(enum btree_id id)
        return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
 }
 
-enum btree_update_flags {
-       __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
-       __BTREE_UPDATE_KEY_CACHE_RECLAIM,
-
-       __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
-
-       __BTREE_TRIGGER_INSERT,
-       __BTREE_TRIGGER_OVERWRITE,
-
-       __BTREE_TRIGGER_GC,
-       __BTREE_TRIGGER_BUCKET_INVALIDATE,
-       __BTREE_TRIGGER_NOATOMIC,
-};
-
-#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
-#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
-
-#define BTREE_TRIGGER_NORUN            (1U << __BTREE_TRIGGER_NORUN)
-
-#define BTREE_TRIGGER_INSERT           (1U << __BTREE_TRIGGER_INSERT)
-#define BTREE_TRIGGER_OVERWRITE                (1U << __BTREE_TRIGGER_OVERWRITE)
-
-#define BTREE_TRIGGER_GC               (1U << __BTREE_TRIGGER_GC)
-#define BTREE_TRIGGER_BUCKET_INVALIDATE        (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-#define BTREE_TRIGGER_NOATOMIC         (1U << __BTREE_TRIGGER_NOATOMIC)
-
-#define BTREE_TRIGGER_WANTS_OLD_AND_NEW                \
-       ((1U << KEY_TYPE_alloc)|                \
-        (1U << KEY_TYPE_alloc_v2)|             \
-        (1U << KEY_TYPE_alloc_v3)|             \
-        (1U << KEY_TYPE_stripe)|               \
-        (1U << KEY_TYPE_inode)|                \
-        (1U << KEY_TYPE_inode_v2)|             \
-        (1U << KEY_TYPE_snapshot))
+static inline bool btree_type_has_ptrs(enum btree_id id)
+{
+       return (1 << id) & BTREE_ID_HAS_PTRS;
+}
 
 static inline bool btree_node_type_needs_gc(enum btree_node_type type)
 {