Move c_src dirs back to toplevel
diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h
index 25bfc7ab9ee01937b92b9e56e5fa45f496df6ae4..adfc62083844cf3b93d16d25d8269564f5b022a3 100644
--- a/libbcachefs/btree_update_interior.h
+++ b/libbcachefs/btree_update_interior.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
 #define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
 
@@ -5,31 +6,9 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-struct btree_reserve {
-       struct disk_reservation disk_res;
-       unsigned                nr;
-       struct btree            *b[BTREE_RESERVE_MAX];
-};
-
-void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
-                               struct bkey_format *);
+#define BTREE_UPDATE_NODES_MAX         ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
-       bool                    index_update_done;
-
-       __le64                  seq;
-       enum btree_id           btree_id;
-       unsigned                level;
-       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES       (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
 
 /*
  * Tracks an in progress split/rewrite of a btree node and the update to the
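
For orientation on the two limits introduced above: elsewhere in bcachefs,
BTREE_MAX_DEPTH and GC_MERGE_NODES are both 4; treating those values as an
assumption, the arithmetic works out as in this sketch:

    /*
     * Worked example, assuming BTREE_MAX_DEPTH == 4 and
     * GC_MERGE_NODES == 4 (their values elsewhere in bcachefs):
     *
     *   BTREE_UPDATE_NODES_MAX   = (4 - 2) * 2 + 4 = 8
     *     worst case, an update allocates two replacement nodes per
     *     interior level, plus the nodes pinned by gc coalescing;
     *
     *   BTREE_UPDATE_JOURNAL_RES = 8 * (BKEY_BTREE_PTR_U64s_MAX + 1)
     *     one btree pointer key, plus one u64 of jset entry
     *     overhead, journalled for each of those nodes.
     */
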
@@ -52,8 +31,10 @@ struct pending_btree_node_free {
 struct btree_update {
        struct closure                  cl;
        struct bch_fs                   *c;
+       u64                             start_time;
 
        struct list_head                list;
+       struct list_head                unwritten_list;
 
        /* What kind of update are we doing? */
        enum {
@@ -63,12 +44,13 @@ struct btree_update {
                BTREE_INTERIOR_UPDATING_AS,
        } mode;
 
-       unsigned                        must_rewrite:1;
        unsigned                        nodes_written:1;
+       unsigned                        took_gc_lock:1;
 
        enum btree_id                   btree_id;
+       unsigned                        update_level;
 
-       struct btree_reserve            *reserve;
+       struct disk_reservation         disk_res;
 
        /*
         * BTREE_INTERIOR_UPDATING_NODE:
@@ -81,18 +63,6 @@ struct btree_update {
        struct btree                    *b;
        struct list_head                write_blocked_list;
 
-       /*
-        * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so now
-        * we're now blocking another btree_update
-        * @parent_as - btree_update that's waiting on our nodes to finish
-        * writing, before it can make new nodes visible on disk
-        * @wait - list of child btree_updates that are waiting on this
-        * btree_update to make all the new nodes visible before they can free
-        * their old btree nodes
-        */
-       struct btree_update             *parent_as;
-       struct closure_waitlist         wait;
-
        /*
         * We may be freeing nodes that were dirty, and thus had journal entries
         * pinned: we need to transfer the oldest of those pins to the
@@ -101,19 +71,37 @@ struct btree_update {
         */
        struct journal_entry_pin        journal;
 
-       u64                             journal_seq;
+       /* Preallocated nodes we reserve when we start the update: */
+       struct prealloc_nodes {
+               struct btree            *b[BTREE_UPDATE_NODES_MAX];
+               unsigned                nr;
+       }                               prealloc_nodes[2];
 
-       /*
-        * Nodes being freed:
-        * Protected by c->btree_node_pending_free_lock
-        */
-       struct pending_btree_node_free  pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
-       unsigned                        nr_pending;
+       /* Nodes being freed: */
+       struct keylist                  old_keys;
+       u64                             _old_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_U64s_MAX];
+
+       /* Nodes being added: */
+       struct keylist                  new_keys;
+       u64                             _new_keys[BTREE_UPDATE_NODES_MAX *
+                                                 BKEY_BTREE_PTR_U64s_MAX];
 
        /* New nodes, that will be made reachable by this update: */
-       struct btree                    *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+       struct btree                    *new_nodes[BTREE_UPDATE_NODES_MAX];
        unsigned                        nr_new_nodes;
 
+       struct btree                    *old_nodes[BTREE_UPDATE_NODES_MAX];
+       __le64                          old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+       unsigned                        nr_old_nodes;
+
+       open_bucket_idx_t               open_buckets[BTREE_UPDATE_NODES_MAX *
+                                                    BCH_REPLICAS_MAX];
+       open_bucket_idx_t               nr_open_buckets;
+
+       unsigned                        journal_u64s;
+       u64                             journal_entries[BTREE_UPDATE_JOURNAL_RES];
+
        /* Only here to reduce stack usage on recursive splits: */
        struct keylist                  parent_keys;
        /*
@@ -124,68 +112,70 @@ struct btree_update {
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-#define for_each_pending_btree_node_free(c, as, p)                     \
-       list_for_each_entry(as, &c->btree_interior_update_list, list)   \
-               for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-                               struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);
-
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+                                                 struct btree_trans *,
                                                  struct btree *,
                                                  struct bkey_format);
 
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
-                       unsigned, struct closure *);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-                                              struct btree *);
-
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
-                           struct btree_iter *, struct keylist *);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
+int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);
 
-int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
-                                 unsigned, enum btree_node_sibling);
+int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
+                                 unsigned, unsigned, enum btree_node_sibling);
 
-static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
-                                       struct btree_iter *iter,
-                                       unsigned level,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+                                       btree_path_idx_t path_idx,
+                                       unsigned level, unsigned flags,
                                        enum btree_node_sibling sib)
 {
+       struct btree_path *path = trans->paths + path_idx;
        struct btree *b;
 
-       if (!bch2_btree_node_relock(iter, level))
-               return 0;
+       EBUG_ON(!btree_node_locked(path, level));
 
-       b = iter->l[level].b;
-       if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
+       b = path->l[level].b;
+       if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
                return 0;
 
-       return __bch2_foreground_maybe_merge(c, iter, level, sib);
+       return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
 }
 
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
-                                              struct btree_iter *iter,
-                                              unsigned level)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+                                             btree_path_idx_t path,
+                                             unsigned level,
+                                             unsigned flags)
 {
-       bch2_foreground_maybe_merge_sibling(c, iter, level, btree_prev_sib);
-       bch2_foreground_maybe_merge_sibling(c, iter, level, btree_next_sib);
+       return  bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+                                                   btree_prev_sib) ?:
+               bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+                                                   btree_next_sib);
 }
 
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
+                           struct btree *, unsigned);
+void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
+                              struct btree *, struct bkey_i *,
+                              unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+                                       struct bkey_i *, unsigned, bool);
+
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
 void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
                                                     struct btree *b)
 {
-       unsigned depth = btree_node_root(c, b)->level - b->level;
+       unsigned depth = btree_node_root(c, b)->c.level + 1;
 
-       return btree_reserve_required_nodes(depth);
+       /*
+        * Number of nodes we might have to allocate in a worst case btree
+        * split operation - we split all the way up to the root, then allocate
+        * a new root, unless we're already at max depth:
+        */
+       if (depth < BTREE_MAX_DEPTH)
+               return (depth - b->c.level) * 2 + 1;
+       else
+               return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
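
A quick check of the new reserve arithmetic (a sketch, again assuming
BTREE_MAX_DEPTH == 4):

    /*
     * Splitting a leaf (b->c.level == 0) under a root at level 2:
     *   depth   = 2 + 1 = 3, which is < BTREE_MAX_DEPTH
     *   reserve = (3 - 0) * 2 + 1 = 7
     * i.e. two replacement nodes at each of levels 0..2, plus one
     * new root.
     *
     * With the root already at level 3, no new root can be added:
     *   depth   = 3 + 1 = 4
     *   reserve = (4 - 0) * 2 - 1 = 7
     * i.e. two nodes per level except the root, which is only
     * rewritten, not split.
     */
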
@@ -216,14 +206,19 @@ static inline void *write_block(struct btree *b)
        return (void *) b->data + (b->written << 9);
 }
 
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+       return p < write_block(b);
+}
+
 static inline bool bset_written(struct btree *b, struct bset *i)
 {
-       return (void *) i < write_block(b);
+       return __btree_addr_written(b, i);
 }
 
-static inline bool bset_unwritten(struct btree *b, struct bset *i)
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
 {
-       return (void *) i > write_block(b);
+       return __btree_addr_written(b, k);
 }
 
 static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
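
write_block() points just past the last sector written out, so the new
__btree_addr_written() reduces both bset_written() and bkey_written() to a
single address comparison:

    /*
     * Annotation: b->written counts 512-byte sectors, so
     *
     *   write_block(b) == (void *) b->data + (b->written << 9)
     *
     * and p < write_block(b) means p lies in data that is already on
     * disk. E.g. with b->written == 8, everything below
     * b->data + 4096 is immutable in memory and can only be
     * superseded by whiteouts or by keys in later bsets.
     */
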
@@ -231,9 +226,11 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
                                                 void *end)
 {
        ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
-               b->whiteout_u64s +
-               b->uncompacted_whiteout_u64s;
-       ssize_t total = c->opts.btree_node_size << 6;
+               b->whiteout_u64s;
+       ssize_t total = c->opts.btree_node_size >> 3;
+
+       /* Always leave one extra u64 for bch2_varint_decode: */
+       used++;
 
        return total - used;
 }
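
To make the unit change concrete (btree_node_size is now in bytes, hence
the >> 3 to convert to u64s), here is a worked example with an assumed
4096-byte node:

    /*
     * Hypothetical numbers, for arithmetic only: btree_node_size ==
     * 4096, end pointing 1024 bytes into the node, 10 u64s of
     * pending whiteouts:
     *
     *   total     = 4096 >> 3         = 512 u64s
     *   used      = 1024 / 8 + 10 + 1 = 139 u64s
     *   remaining = 512 - 139         = 373 u64s
     *
     * The +1 is the spare u64 kept for bch2_varint_decode(), which
     * may read past the end of the value it is decoding.
     */
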
@@ -252,29 +249,31 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
        return remaining;
 }
 
+#define BTREE_WRITE_SET_U64s_BITS      9
+
 static inline unsigned btree_write_set_buffer(struct btree *b)
 {
        /*
         * Could buffer up larger amounts of keys for btrees with larger keys,
         * pending benchmarking:
         */
-       return 4 << 10;
+       return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
                                                     struct btree *b)
 {
-       struct bset *i = btree_bset_last(b);
+       struct bset_tree *t = bset_tree_last(b);
        struct btree_node_entry *bne = max(write_block(b),
                        (void *) btree_bkey_last(b, bset_tree_last(b)));
        ssize_t remaining_space =
-               __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+               __bch_btree_u64s_remaining(c, b, bne->keys.start);
 
-       if (unlikely(bset_written(b, i))) {
+       if (unlikely(bset_written(b, bset(b, t)))) {
                if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
                        return bne;
        } else {
-               if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
+               if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
                    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
                        return bne;
        }
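
Reading of the two branches in want_new_bset() (annotation, not source):

    /*
     * - If the last bset has already been written to disk it can no
     *   longer be appended to, so a new btree_node_entry is wanted
     *   whenever more than a block's worth of u64s remains for it
     *   (block_bytes(c) >> 3 converts bytes to u64s).
     * - If it is still in memory, a new bset is only started once the
     *   current one outgrows btree_write_set_buffer() (8 << 9 = 4KiB
     *   here, bounding the memmoves done by sorted insertion) and a
     *   buffer's worth of space remains.
     */
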
@@ -282,25 +281,25 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
        return NULL;
 }
 
-static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
-                                     struct bkey_packed *k)
+static inline void push_whiteout(struct bch_fs *c, struct btree *b,
+                                struct bpos pos)
 {
-       if (bset_written(b, bset(b, t))) {
-               EBUG_ON(b->uncompacted_whiteout_u64s <
-                       bkeyp_key_u64s(&b->format, k));
-               b->uncompacted_whiteout_u64s -=
-                       bkeyp_key_u64s(&b->format, k);
-       }
-}
+       struct bkey_packed k;
 
-static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
-                                   struct bkey_packed *k)
-{
-       if (bset_written(b, bset(b, t))) {
-               BUG_ON(!k->needs_whiteout);
-               b->uncompacted_whiteout_u64s +=
-                       bkeyp_key_u64s(&b->format, k);
+       BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+       EBUG_ON(btree_node_just_written(b));
+
+       if (!bkey_pack_pos(&k, pos, b)) {
+               struct bkey *u = (void *) &k;
+
+               bkey_init(u);
+               u->p = pos;
        }
+
+       k.needs_whiteout = true;
+
+       b->whiteout_u64s += k.u64s;
+       bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
 }
 
 /*
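
push_whiteout() replaces the old reserve/unreserve accounting: a deletion
against an already-written bset now materializes the whiteout key
immediately. A minimal sketch of the intended call pattern (hypothetical
caller, for illustration only):

    /*
     * Sketch, assuming the insert-fits check below has already
     * passed: a key in a written bset can't be dropped in place, so
     * record a whiteout at its position instead.
     */
    if (bkey_written(b, k))
        push_whiteout(c, b, bkey_unpack_pos(b, k));
    else
        k->type = KEY_TYPE_deleted; /* still in memory: clobber in place */
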
@@ -308,42 +307,27 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
  * insert into could be written out from under us)
  */
 static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
-                                             struct btree *b, unsigned u64s)
+                                              struct btree *b, unsigned u64s)
 {
-       if (unlikely(btree_node_fake(b)))
+       if (unlikely(btree_node_need_rewrite(b)))
                return false;
 
-       if (btree_node_is_extents(b)) {
-               /* The insert key might split an existing key
-                * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case:
-                */
-               u64s += BKEY_EXTENT_U64s_MAX;
-       }
-
        return u64s <= bch_btree_keys_u64s_remaining(c, b);
 }
 
-static inline bool journal_res_insert_fits(struct btree_insert *trans,
-                                          struct btree_insert_entry *insert)
-{
-       unsigned u64s = 0;
-       struct btree_insert_entry *i;
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
 
-       /*
-        * If we didn't get a journal reservation, we're in journal replay and
-        * we're not journalling updates:
-        */
-       if (!trans->journal_res.ref)
-               return true;
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
 
-       for (i = insert; i < trans->entries + trans->nr; i++)
-               u64s += jset_u64s(i->k->k.u64s + i->extra_res);
-
-       return u64s <= trans->journal_res.u64s;
-}
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+                                       struct jset_entry *, unsigned long);
 
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */