Update bcachefs sources to 3856459b1b bcachefs: bch2_btree_iter_peek_node_and_restart()
diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h
index aef8adf8c0321238ae91d81cfcda1de506da9fdd..dcfd7ceacc5926da051a11d72b1bdc12ec59b106 100644
@@ -6,31 +6,13 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-struct btree_reserve {
-       struct disk_reservation disk_res;
-       unsigned                nr;
-       struct btree            *b[BTREE_RESERVE_MAX];
-};
-
 void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
                                struct bkey_format *);
 
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
-       bool                    index_update_done;
+#define BTREE_UPDATE_NODES_MAX         ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
-       __le64                  seq;
-       enum btree_id           btree_id;
-       unsigned                level;
-       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES       (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
 
 /*
  * Tracks an in progress split/rewrite of a btree node and the update to the
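A quick sanity check of the new sizing macros, with assumed values for the
constants (BTREE_MAX_DEPTH == 4 in btree_types.h and GC_MERGE_NODES == 4 in
btree_gc.h at the time of writing; verify against your tree):

	BTREE_UPDATE_NODES_MAX   = (4 - 2) * 2 + 4 = 8

i.e. up to two new nodes per interior level below the root, plus nodes picked
up for GC coalescing; and

	BTREE_UPDATE_JOURNAL_RES = 8 * (BKEY_BTREE_PTR_U64s_MAX + 1)

i.e. one btree pointer key per node, the +1 presumably covering per-key
overhead in the journal entry.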
@@ -53,6 +35,7 @@ struct pending_btree_node_free {
 struct btree_update {
        struct closure                  cl;
        struct bch_fs                   *c;
+       u64                             start_time;
 
        struct list_head                list;
        struct list_head                unwritten_list;
@@ -65,13 +48,13 @@ struct btree_update {
                BTREE_INTERIOR_UPDATING_AS,
        } mode;
 
-       unsigned                        must_rewrite:1;
        unsigned                        nodes_written:1;
+       unsigned                        took_gc_lock:1;
 
        enum btree_id                   btree_id;
-       u8                              level;
+       unsigned                        update_level;
 
-       struct btree_reserve            *reserve;
+       struct disk_reservation         disk_res;
        struct journal_preres           journal_preres;
 
        /*
@@ -93,20 +76,36 @@ struct btree_update {
         */
        struct journal_entry_pin        journal;
 
-       /*
-        * Nodes being freed:
-        * Protected by c->btree_node_pending_free_lock
-        */
-       struct pending_btree_node_free  pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
-       unsigned                        nr_pending;
+       /* Preallocated nodes we reserve when we start the update: */
+       struct prealloc_nodes {
+               struct btree            *b[BTREE_UPDATE_NODES_MAX];
+               unsigned                nr;
+       }                               prealloc_nodes[2];
+
+       /* Nodes being freed: */
+       struct keylist                  old_keys;
+       u64                             _old_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_U64s_MAX];
+
+       /* Nodes being added: */
+       struct keylist                  new_keys;
+       u64                             _new_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_U64s_MAX];
 
        /* New nodes, that will be made reachable by this update: */
-       struct btree                    *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+       struct btree                    *new_nodes[BTREE_UPDATE_NODES_MAX];
        unsigned                        nr_new_nodes;
 
+       struct btree                    *old_nodes[BTREE_UPDATE_NODES_MAX];
+       __le64                          old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+       unsigned                        nr_old_nodes;
+
+       open_bucket_idx_t               open_buckets[BTREE_UPDATE_NODES_MAX *
+                                                    BCH_REPLICAS_MAX];
+       open_bucket_idx_t               nr_open_buckets;
+
        unsigned                        journal_u64s;
-       u64                             journal_entries[
-               (BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2];
+       u64                             journal_entries[BTREE_UPDATE_JOURNAL_RES];
 
        /* Only here to reduce stack usage on recursive splits: */
        struct keylist                  parent_keys;
@@ -118,63 +117,41 @@ struct btree_update {
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-#define for_each_pending_btree_node_free(c, as, p)                     \
-       list_for_each_entry(as, &c->btree_interior_update_list, list)   \
-               for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-                               struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+                                                 struct btree_trans *,
                                                  struct btree *,
                                                  struct bkey_format);
 
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
-                       unsigned, struct closure *);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-                                              struct btree *);
+int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);
 
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
-                           struct btree_iter *, struct keylist *,
-                           unsigned);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
+int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
+                                 unsigned, unsigned, enum btree_node_sibling);
 
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
-                                  unsigned, unsigned, enum btree_node_sibling);
-
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
-                                       struct btree_iter *iter,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+                                       struct btree_path *path,
                                        unsigned level, unsigned flags,
                                        enum btree_node_sibling sib)
 {
        struct btree *b;
 
-       if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
-               return;
-
-       if (!bch2_btree_node_relock(iter, level))
-               return;
+       EBUG_ON(!btree_node_locked(path, level));
 
-       b = iter->l[level].b;
-       if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
-               return;
+       b = path->l[level].b;
+       if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
+               return 0;
 
-       __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+       return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
 }
 
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
-                                              struct btree_iter *iter,
-                                              unsigned level,
-                                              unsigned flags)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+                                             struct btree_path *path,
+                                             unsigned level,
+                                             unsigned flags)
 {
-       bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-                                           btree_prev_sib);
-       bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-                                           btree_next_sib);
+       return  bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+                                                   btree_prev_sib) ?:
+               bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+                                                   btree_next_sib);
 }
 
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
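Note the converted merge helpers now return int and chain with GCC's "?:"
extension, so the next-sibling merge runs only when merging with the previous
sibling returned 0. A minimal standalone illustration of the operator (not
bcachefs code):

	/* GNU C: "a ?: b" evaluates a once and yields it if non-zero,
	 * otherwise evaluates and yields b - errors short-circuit: */
	static inline int try_both(int (*first)(void), int (*second)(void))
	{
		return first() ?: second();
	}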
@@ -183,7 +160,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
                                                     struct btree *b)
 {
-       unsigned depth = btree_node_root(c, b)->level + 1;
+       unsigned depth = btree_node_root(c, b)->c.level + 1;
 
        /*
         * Number of nodes we might have to allocate in a worst case btree
@@ -191,9 +168,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
         * a new root, unless we're already at max depth:
         */
        if (depth < BTREE_MAX_DEPTH)
-               return (depth - b->level) * 2 + 1;
+               return (depth - b->c.level) * 2 + 1;
        else
-               return (depth - b->level) * 2 - 1;
+               return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
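The reserve arithmetic above is easiest to check with concrete numbers. A
hypothetical example: root at level 2, so depth == 3, and we're splitting a
leaf (b->c.level == 0); since depth < BTREE_MAX_DEPTH a split may also
allocate a new root, giving

	(3 - 0) * 2 + 1 = 7 nodes

i.e. up to two new nodes per level as the split propagates up, plus one for
the new root. At max depth no new root is possible, hence the "* 2 - 1"
branch.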
@@ -245,7 +222,10 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
 {
        ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
                b->whiteout_u64s;
-       ssize_t total = c->opts.btree_node_size << 6;
+       ssize_t total = c->opts.btree_node_size >> 3;
+
+       /* Always leave one extra u64 for bch2_varint_decode: */
+       used++;
 
        return total - used;
 }
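The shift change appears to track the btree_node_size option moving from
512-byte sectors to bytes; the total in u64s is the same quantity in new
units, i.e. size / sizeof(u64). With an assumed 256KiB node:

	old (sectors):   512 << 6 == 32768 u64s
	new (bytes):  262144 >> 3 == 32768 u64s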
@@ -264,13 +244,15 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
        return remaining;
 }
 
+#define BTREE_WRITE_SET_U64s_BITS      9
+
 static inline unsigned btree_write_set_buffer(struct btree *b)
 {
        /*
         * Could buffer up larger amounts of keys for btrees with larger keys,
         * pending benchmarking:
         */
-       return 4 << 10;
+       return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
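The rewritten buffer size below is numerically unchanged: 8 <<
BTREE_WRITE_SET_U64s_BITS == 8 << 9 == 4096, the same as the old 4 << 10;
only the expression is now derived from the new constant.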
@@ -300,6 +282,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
        struct bkey_packed k;
 
        BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+       EBUG_ON(btree_node_just_written(b));
 
        if (!bkey_pack_pos(&k, pos, b)) {
                struct bkey *u = (void *) &k;
@@ -321,14 +304,24 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
 static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
                                               struct btree *b, unsigned u64s)
 {
-       if (unlikely(btree_node_fake(b)))
+       if (unlikely(btree_node_need_rewrite(b)))
                return false;
 
        return u64s <= bch_btree_keys_u64s_remaining(c, b);
 }
 
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+                                       struct jset_entry *, struct jset_entry *);
+
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */