X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=libbcachefs%2Fbtree_update_interior.h;h=dcfd7ceacc5926da051a11d72b1bdc12ec59b106;hb=fa358537725c8065b058b558125cf15359936f94;hp=2d8e0b7f3aaf1a27790ccc20f81851e004b23275;hpb=b74c93d43cb5e02c169569b4d4ca312548b964ba;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h
index 2d8e0b7..dcfd7ce 100644
--- a/libbcachefs/btree_update_interior.h
+++ b/libbcachefs/btree_update_interior.h
@@ -6,31 +6,13 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-struct btree_reserve {
-	struct disk_reservation	disk_res;
-	unsigned		nr;
-	struct btree		*b[BTREE_RESERVE_MAX];
-};
-
 void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
 				 struct bkey_format *);
 
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
-	bool			index_update_done;
+#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
-	__le64			seq;
-	enum btree_id		btree_id;
-	unsigned		level;
-	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
 
 /*
  * Tracks an in progress split/rewrite of a btree node and the update to the
@@ -53,8 +35,10 @@ struct pending_btree_node_free
 struct btree_update {
 	struct closure			cl;
 	struct bch_fs			*c;
+	u64				start_time;
 
 	struct list_head		list;
+	struct list_head		unwritten_list;
 
 	/* What kind of update are we doing? */
 	enum {
@@ -64,12 +48,14 @@ struct btree_update {
 		BTREE_INTERIOR_UPDATING_AS,
 	} mode;
 
-	unsigned			must_rewrite:1;
 	unsigned			nodes_written:1;
+	unsigned			took_gc_lock:1;
 
 	enum btree_id			btree_id;
+	unsigned			update_level;
 
-	struct btree_reserve		*reserve;
+	struct disk_reservation	disk_res;
+	struct journal_preres		journal_preres;
 
 	/*
 	 * BTREE_INTERIOR_UPDATING_NODE:
@@ -82,18 +68,6 @@ struct btree_update {
 	struct btree			*b;
 	struct list_head		write_blocked_list;
 
-	/*
-	 * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so now
-	 * we're now blocking another btree_update
-	 * @parent_as - btree_update that's waiting on our nodes to finish
-	 * writing, before it can make new nodes visible on disk
-	 * @wait - list of child btree_updates that are waiting on this
-	 * btree_update to make all the new nodes visible before they can free
-	 * their old btree nodes
-	 */
-	struct btree_update		*parent_as;
-	struct closure_waitlist	wait;
-
 	/*
 	 * We may be freeing nodes that were dirty, and thus had journal entries
 	 * pinned: we need to transfer the oldest of those pins to the
@@ -102,19 +76,37 @@ struct btree_update {
 	 */
 	struct journal_entry_pin	journal;
 
-	u64				journal_seq;
+	/* Preallocated nodes we reserve when we start the update: */
+	struct prealloc_nodes {
+		struct btree		*b[BTREE_UPDATE_NODES_MAX];
+		unsigned		nr;
+	} prealloc_nodes[2];
 
-	/*
-	 * Nodes being freed:
-	 * Protected by c->btree_node_pending_free_lock
-	 */
-	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
-	unsigned			nr_pending;
+	/* Nodes being freed: */
+	struct keylist			old_keys;
+	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
+						  BKEY_BTREE_PTR_U64s_MAX];
+
+	/* Nodes being added: */
+	struct keylist			new_keys;
+	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
+						  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* New nodes, that will be made reachable by this update: */
-	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
 	unsigned			nr_new_nodes;
 
+	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
+	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+	unsigned			nr_old_nodes;
+
+	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
+						     BCH_REPLICAS_MAX];
+	open_bucket_idx_t		nr_open_buckets;
+
+	unsigned			journal_u64s;
+	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];
+
 	/* Only here to reduce stack usage on recursive splits: */
 	struct keylist			parent_keys;
 	/*
@@ -125,63 +117,41 @@ struct btree_update {
 	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-#define for_each_pending_btree_node_free(c, as, p)			\
-	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
-		for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-				struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+						  struct btree_trans *,
 						  struct btree *,
 						  struct bkey_format);
 
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
-			unsigned, struct closure *);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-					       struct btree *);
-
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
-			    struct btree_iter *, struct keylist *,
-			    unsigned);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
+int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);
 
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
-				   unsigned, unsigned, enum btree_node_sibling);
+int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
+				  unsigned, unsigned, enum btree_node_sibling);
 
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
-					struct btree_iter *iter,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+					struct btree_path *path,
 					unsigned level, unsigned flags,
 					enum btree_node_sibling sib)
 {
 	struct btree *b;
 
-	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
-		return;
+	EBUG_ON(!btree_node_locked(path, level));
 
-	if (!bch2_btree_node_relock(iter, level))
-		return;
-
-	b = iter->l[level].b;
-	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
-		return;
+	b = path->l[level].b;
+	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
+		return 0;
 
-	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+	return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
 }
 
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
-					       struct btree_iter *iter,
-					       unsigned level,
-					       unsigned flags)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+					      struct btree_path *path,
+					      unsigned level,
+					      unsigned flags)
 {
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_prev_sib);
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_next_sib);
+	return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+						   btree_prev_sib) ?:
+	       bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						   btree_next_sib);
 }
 
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
@@ -190,7 +160,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
 						     struct btree *b)
 {
-	unsigned depth = btree_node_root(c, b)->level + 1;
+	unsigned depth = btree_node_root(c, b)->c.level + 1;
 
 	/*
 	 * Number of nodes we might have to allocate in a worst case btree
@@ -198,9 +168,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
 	 * a new root, unless we're already at max depth:
 	 */
 	if (depth < BTREE_MAX_DEPTH)
-		return (depth - b->level) * 2 + 1;
+		return (depth - b->c.level) * 2 + 1;
 	else
-		return (depth - b->level) * 2 - 1;
+		return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
@@ -252,7 +222,10 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
 {
 	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
 		b->whiteout_u64s;
-	ssize_t total = c->opts.btree_node_size << 6;
+	ssize_t total = c->opts.btree_node_size >> 3;
+
+	/* Always leave one extra u64 for bch2_varint_decode: */
+	used++;
 
 	return total - used;
 }
@@ -271,13 +244,15 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
 	return remaining;
 }
 
+#define BTREE_WRITE_SET_U64s_BITS	9
+
 static inline unsigned btree_write_set_buffer(struct btree *b)
 {
 	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
-	return 4 << 10;
+	return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
@@ -302,18 +277,24 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
 }
 
 static inline void push_whiteout(struct bch_fs *c, struct btree *b,
-				 struct bkey_packed *k)
+				 struct bpos pos)
 {
-	unsigned u64s = bkeyp_key_u64s(&b->format, k);
-	struct bkey_packed *dst;
+	struct bkey_packed k;
 
-	BUG_ON(u64s > bch_btree_keys_u64s_remaining(c, b));
+	BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+	EBUG_ON(btree_node_just_written(b));
 
-	b->whiteout_u64s += bkeyp_key_u64s(&b->format, k);
-	dst = unwritten_whiteouts_start(c, b);
-	memcpy_u64s(dst, k, u64s);
-	dst->u64s = u64s;
-	dst->type = KEY_TYPE_deleted;
+	if (!bkey_pack_pos(&k, pos, b)) {
+		struct bkey *u = (void *) &k;
+
+		bkey_init(u);
+		u->p = pos;
+	}
+
+	k.needs_whiteout = true;
+
+	b->whiteout_u64s += k.u64s;
+	bkey_copy(unwritten_whiteouts_start(c, b), &k);
 }
 
 /*
@@ -323,14 +304,24 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
 static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
 					       struct btree *b, unsigned u64s)
 {
-	if (unlikely(btree_node_fake(b)))
+	if (unlikely(btree_node_need_rewrite(b)))
 		return false;
 
 	return u64s <= bch_btree_keys_u64s_remaining(c, b);
 }
 
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+					struct jset_entry *, struct jset_entry *);
+
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
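
Aside on the reserve arithmetic in btree_update_reserve_required() above: a worst-case
split cascades from the node's level all the way up, allocating two nodes per level
(the two split halves), plus one more node if the root itself splits and a new root
must be allocated - unless the tree is already at BTREE_MAX_DEPTH. The following is a
minimal standalone C sketch of that arithmetic, not part of the patch;
reserve_required() and the example values are illustrative assumptions:

/* Standalone sketch mirroring the worst-case node-reserve computation
 * from btree_update_reserve_required() in the patch above. */
#include <stdio.h>

#define BTREE_MAX_DEPTH	4	/* matches bcachefs, but treat as an assumption */

static unsigned reserve_required(unsigned root_level, unsigned node_level)
{
	unsigned depth = root_level + 1;

	/* Two new nodes per level up to the root, plus a possible new root: */
	return depth < BTREE_MAX_DEPTH
		? (depth - node_level) * 2 + 1
		: (depth - node_level) * 2 - 1;	/* can't grow past max depth */
}

int main(void)
{
	/* Splitting a leaf (level 0) under a root at level 2: prints 7 */
	printf("%u\n", reserve_required(2, 0));
	return 0;
}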
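
Aside on the merge helpers: converting bch2_foreground_maybe_merge() from void to int
lets errors (e.g. a transaction restart) propagate to the caller, and the two sibling
merges are chained with the GNU `a ?: b` extension, which evaluates `a` once and
yields it if nonzero, without evaluating `b`. Below is a standalone sketch of just
that control flow; merge_prev() and merge_next() are hypothetical stand-ins, not
bcachefs functions:

/* Sketch of the error-chaining idiom used by the new
 * bch2_foreground_maybe_merge(); requires the GNU ?: extension
 * (gcc/clang). */
#include <errno.h>
#include <stdio.h>

static int merge_prev(void) { return -EINTR; }	/* fails: e.g. restart */
static int merge_next(void) { return 0; }	/* never reached here */

static int maybe_merge(void)
{
	/* a ?: b returns a if nonzero, so the first error short-circuits: */
	return merge_prev() ?: merge_next();
}

int main(void)
{
	printf("%d\n", maybe_merge());	/* prints -EINTR's value (-4 on Linux) */
	return 0;
}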