Update bcachefs sources to 4837f82ee1 bcachefs: Use cached iterators for alloc btree
[bcachefs-tools-debian] / libbcachefs / btree_update_interior.h
index e6f050718586b1dcd9a143863c3d725c86f60cad..4a5b9dcfbdd0235713a7513a19c2fd9922c09a8d 100644
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
 #define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
 
@@ -5,31 +6,13 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-struct btree_reserve {
-       struct disk_reservation disk_res;
-       unsigned                nr;
-       struct btree            *b[BTREE_RESERVE_MAX];
-};
-
 void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
                                struct bkey_format *);
 
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
-       bool                    index_update_done;
+#define BTREE_UPDATE_NODES_MAX         ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
-       __le64                  seq;
-       enum btree_id           btree_id;
-       unsigned                level;
-       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES       (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
 
 /*
  * Tracks an in progress split/rewrite of a btree node and the update to the
@@ -54,6 +37,7 @@ struct btree_update {
        struct bch_fs                   *c;
 
        struct list_head                list;
+       struct list_head                unwritten_list;
 
        /* What kind of update are we doing? */
        enum {
@@ -68,7 +52,8 @@ struct btree_update {
 
        enum btree_id                   btree_id;
 
-       struct btree_reserve            *reserve;
+       struct disk_reservation         disk_res;
+       struct journal_preres           journal_preres;
 
        /*
         * BTREE_INTERIOR_UPDATING_NODE:
@@ -81,18 +66,6 @@ struct btree_update {
        struct btree                    *b;
        struct list_head                write_blocked_list;
 
-       /*
-        * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so now
-        * we're now blocking another btree_update
-        * @parent_as - btree_update that's waiting on our nodes to finish
-        * writing, before it can make new nodes visible on disk
-        * @wait - list of child btree_updates that are waiting on this
-        * btree_update to make all the new nodes visible before they can free
-        * their old btree nodes
-        */
-       struct btree_update             *parent_as;
-       struct closure_waitlist         wait;
-
        /*
         * We may be freeing nodes that were dirty, and thus had journal entries
         * pinned: we need to transfer the oldest of those pins to the
@@ -101,19 +74,31 @@ struct btree_update {
         */
        struct journal_entry_pin        journal;
 
-       u64                             journal_seq;
+       /* Preallocated nodes we reserve when we start the update: */
+       struct btree                    *prealloc_nodes[BTREE_UPDATE_NODES_MAX];
+       unsigned                        nr_prealloc_nodes;
 
-       /*
-        * Nodes being freed:
-        * Protected by c->btree_node_pending_free_lock
-        */
-       struct pending_btree_node_free  pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
-       unsigned                        nr_pending;
+       /* Nodes being freed: */
+       struct keylist                  old_keys;
+       u64                             _old_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_VAL_U64s_MAX];
+
+       /* Nodes being added: */
+       struct keylist                  new_keys;
+       u64                             _new_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_VAL_U64s_MAX];
 
        /* New nodes, that will be made reachable by this update: */
-       struct btree                    *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+       struct btree                    *new_nodes[BTREE_UPDATE_NODES_MAX];
        unsigned                        nr_new_nodes;
 
+       open_bucket_idx_t               open_buckets[BTREE_UPDATE_NODES_MAX *
+                                                    BCH_REPLICAS_MAX];
+       open_bucket_idx_t               nr_open_buckets;
+
+       unsigned                        journal_u64s;
+       u64                             journal_entries[BTREE_UPDATE_JOURNAL_RES];
+
        /* Only here to reduce stack usage on recursive splits: */
        struct keylist                  parent_keys;
        /*
@@ -124,14 +109,11 @@ struct btree_update {
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-#define for_each_pending_btree_node_free(c, as, p)                     \
-       list_for_each_entry(as, &c->btree_interior_update_list, list)   \
-               for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
 void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
                                struct btree_iter *);
 void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);
+
+void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);
 
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
                                                  struct btree *,
@@ -139,11 +121,12 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
 
 void bch2_btree_update_done(struct btree_update *);
 struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
+bch2_btree_update_start(struct btree_trans *, enum btree_id, unsigned,
                        unsigned, struct closure *);
 
 void bch2_btree_interior_update_will_free_node(struct btree_update *,
                                               struct btree *);
+void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
 
 void bch2_btree_insert_node(struct btree_update *, struct btree *,
                            struct btree_iter *, struct keylist *,
@@ -160,15 +143,6 @@ static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
 {
        struct btree *b;
 
-       /*
-        * iterators are inconsistent when they hit end of leaf, until
-        * traversed again
-        *
-        * XXX inconsistent how?
-        */
-       if (iter->flags & BTREE_ITER_AT_END_OF_LEAF)
-               return;
-
        if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
                return;
 
@@ -199,7 +173,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
                                                     struct btree *b)
 {
-       unsigned depth = btree_node_root(c, b)->level + 1;
+       unsigned depth = btree_node_root(c, b)->c.level + 1;
 
        /*
         * Number of nodes we might have to allocate in a worst case btree
@@ -207,9 +181,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
         * a new root, unless we're already at max depth:
         */
        if (depth < BTREE_MAX_DEPTH)
-               return (depth - b->level) * 2 + 1;
+               return (depth - b->c.level) * 2 + 1;
        else
-               return (depth - b->level) * 2 - 1;
+               return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
@@ -240,14 +214,19 @@ static inline void *write_block(struct btree *b)
        return (void *) b->data + (b->written << 9);
 }
 
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+       return p < write_block(b);
+}
+
 static inline bool bset_written(struct btree *b, struct bset *i)
 {
-       return (void *) i < write_block(b);
+       return __btree_addr_written(b, i);
 }
 
-static inline bool bset_unwritten(struct btree *b, struct bset *i)
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
 {
-       return (void *) i > write_block(b);
+       return __btree_addr_written(b, k);
 }
 
 static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
@@ -255,8 +234,7 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
                                                 void *end)
 {
        ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
-               b->whiteout_u64s +
-               b->uncompacted_whiteout_u64s;
+               b->whiteout_u64s;
        ssize_t total = c->opts.btree_node_size << 6;
 
        return total - used;
@@ -288,17 +266,17 @@ static inline unsigned btree_write_set_buffer(struct btree *b)
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
                                                     struct btree *b)
 {
-       struct bset *i = btree_bset_last(b);
+       struct bset_tree *t = bset_tree_last(b);
        struct btree_node_entry *bne = max(write_block(b),
                        (void *) btree_bkey_last(b, bset_tree_last(b)));
        ssize_t remaining_space =
                __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
 
-       if (unlikely(bset_written(b, i))) {
+       if (unlikely(bset_written(b, bset(b, t)))) {
                if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
                        return bne;
        } else {
-               if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
+               if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
                    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
                        return bne;
        }
@@ -306,25 +284,24 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
        return NULL;
 }
 
-static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
-                                     struct bkey_packed *k)
+static inline void push_whiteout(struct bch_fs *c, struct btree *b,
+                                struct bpos pos)
 {
-       if (bset_written(b, bset(b, t))) {
-               EBUG_ON(b->uncompacted_whiteout_u64s <
-                       bkeyp_key_u64s(&b->format, k));
-               b->uncompacted_whiteout_u64s -=
-                       bkeyp_key_u64s(&b->format, k);
-       }
-}
+       struct bkey_packed k;
 
-static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
-                                   struct bkey_packed *k)
-{
-       if (bset_written(b, bset(b, t))) {
-               BUG_ON(!k->needs_whiteout);
-               b->uncompacted_whiteout_u64s +=
-                       bkeyp_key_u64s(&b->format, k);
+       BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+
+       if (!bkey_pack_pos(&k, pos, b)) {
+               struct bkey *u = (void *) &k;
+
+               bkey_init(u);
+               u->p = pos;
        }
+
+       k.needs_whiteout = true;
+
+       b->whiteout_u64s += k.u64s;
+       bkey_copy(unwritten_whiteouts_start(c, b), &k);
 }
 
 /*
@@ -332,42 +309,23 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
  * insert into could be written out from under us)
  */
 static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
-                                             struct btree *b, unsigned u64s)
+                                              struct btree *b, unsigned u64s)
 {
        if (unlikely(btree_node_fake(b)))
                return false;
 
-       if (btree_node_is_extents(b)) {
-               /* The insert key might split an existing key
-                * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case:
-                */
-               u64s += BKEY_EXTENT_U64s_MAX;
-       }
-
        return u64s <= bch_btree_keys_u64s_remaining(c, b);
 }
 
-static inline bool journal_res_insert_fits(struct btree_insert *trans,
-                                          struct btree_insert_entry *insert)
-{
-       unsigned u64s = 0;
-       struct btree_insert_entry *i;
-
-       /*
-        * If we didn't get a journal reservation, we're in journal replay and
-        * we're not journalling updates:
-        */
-       if (!trans->journal_res.ref)
-               return true;
-
-       for (i = insert; i < trans->entries + trans->nr; i++)
-               u64s += jset_u64s(i->k->k.u64s + i->extra_res);
-
-       return u64s <= trans->journal_res.u64s;
-}
-
 ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
 
 size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
 
+void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+                                       struct jset_entry *, struct jset_entry *);
+
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
+
 #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
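
For a sense of scale on the two new constants: BTREE_UPDATE_NODES_MAX bounds how many btree nodes a single interior update may allocate, and BTREE_UPDATE_JOURNAL_RES is the journal space reserved for logging a pointer to each of them (the +1 presumably covering the jset entry header). Below is a minimal standalone sketch of the arithmetic, not code from the patch: BTREE_MAX_DEPTH and GC_MERGE_NODES are assumed to be 4, as in the bcachefs sources around this revision, and BKEY_BTREE_PTR_U64s_MAX is a placeholder; the real header pulls all three in from the bcachefs headers.

/*
 * Standalone illustration of the new reservation sizing.  All three base
 * values below are assumptions for this sketch only (see the note above).
 */
#include <stdio.h>

#define BTREE_MAX_DEPTH			4	/* assumed */
#define GC_MERGE_NODES			4	/* assumed */
#define BKEY_BTREE_PTR_U64s_MAX		12	/* placeholder, illustration only */

#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))

int main(void)
{
	/* Upper bound on nodes one interior update may allocate: */
	printf("BTREE_UPDATE_NODES_MAX   = %d nodes\n", BTREE_UPDATE_NODES_MAX);

	/* Journal u64s reserved up front: one max-size btree pointer key
	 * plus one extra u64 per node: */
	printf("BTREE_UPDATE_JOURNAL_RES = %d u64s\n", BTREE_UPDATE_JOURNAL_RES);
	return 0;
}

In struct btree_update above, prealloc_nodes[], new_nodes[], old_keys/_old_keys, new_keys/_new_keys, open_buckets[] and journal_entries[] are all dimensioned from these two constants, so everything the update might need is reserved when it starts.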