X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_update_interior.h;h=adfc62083844cf3b93d16d25d8269564f5b022a3;hb=b5fd066153c40a70a29caa1ea7987723ab687763;hp=e129b24ece76138862f703d7c0375b81a43661af;hpb=8351bbc05bc163758d3410ce6d6cab8eb4441609;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h index e129b24..adfc620 100644 --- a/libbcachefs/btree_update_interior.h +++ b/libbcachefs/btree_update_interior.h @@ -1,34 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H #define _BCACHEFS_BTREE_UPDATE_INTERIOR_H #include "btree_cache.h" +#include "btree_locking.h" #include "btree_update.h" -struct btree_reserve { - struct disk_reservation disk_res; - unsigned nr; - struct btree *b[BTREE_RESERVE_MAX]; -}; - -void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *); -bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *, - struct bkey_format *); +#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES) -/* Btree node freeing/allocation: */ - -/* - * Tracks a btree node that has been (or is about to be) freed in memory, but - * has _not_ yet been freed on disk (because the write that makes the new - * node(s) visible and frees the old hasn't completed yet) - */ -struct pending_btree_node_free { - bool index_update_done; - - __le64 seq; - enum btree_id btree_id; - unsigned level; - __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX); -}; +#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1)) /* * Tracks an in progress split/rewrite of a btree node and the update to the @@ -51,8 +31,10 @@ struct pending_btree_node_free { struct btree_update { struct closure cl; struct bch_fs *c; + u64 start_time; struct list_head list; + struct list_head unwritten_list; /* What kind of update are we doing? 
*/ enum { @@ -61,10 +43,14 @@ struct btree_update { BTREE_INTERIOR_UPDATING_ROOT, BTREE_INTERIOR_UPDATING_AS, } mode; + + unsigned nodes_written:1; + unsigned took_gc_lock:1; + enum btree_id btree_id; + unsigned update_level; - unsigned flags; - struct btree_reserve *reserve; + struct disk_reservation disk_res; /* * BTREE_INTERIOR_UPDATING_NODE: @@ -77,18 +63,6 @@ struct btree_update { struct btree *b; struct list_head write_blocked_list; - /* - * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so now - * we're now blocking another btree_update - * @parent_as - btree_update that's waiting on our nodes to finish - * writing, before it can make new nodes visible on disk - * @wait - list of child btree_updates that are waiting on this - * btree_update to make all the new nodes visible before they can free - * their old btree nodes - */ - struct btree_update *parent_as; - struct closure_waitlist wait; - /* * We may be freeing nodes that were dirty, and thus had journal entries * pinned: we need to transfer the oldest of those pins to the @@ -97,19 +71,37 @@ struct btree_update { */ struct journal_entry_pin journal; - u64 journal_seq; + /* Preallocated nodes we reserve when we start the update: */ + struct prealloc_nodes { + struct btree *b[BTREE_UPDATE_NODES_MAX]; + unsigned nr; + } prealloc_nodes[2]; - /* - * Nodes being freed: - * Protected by c->btree_node_pending_free_lock - */ - struct pending_btree_node_free pending[BTREE_MAX_DEPTH + GC_MERGE_NODES]; - unsigned nr_pending; + /* Nodes being freed: */ + struct keylist old_keys; + u64 _old_keys[BTREE_UPDATE_NODES_MAX * + BKEY_BTREE_PTR_U64s_MAX]; + + /* Nodes being added: */ + struct keylist new_keys; + u64 _new_keys[BTREE_UPDATE_NODES_MAX * + BKEY_BTREE_PTR_U64s_MAX]; /* New nodes, that will be made reachable by this update: */ - struct btree *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES]; + struct btree *new_nodes[BTREE_UPDATE_NODES_MAX]; unsigned nr_new_nodes; + struct btree *old_nodes[BTREE_UPDATE_NODES_MAX]; + __le64 old_nodes_seq[BTREE_UPDATE_NODES_MAX]; + unsigned nr_old_nodes; + + open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX * + BCH_REPLICAS_MAX]; + open_bucket_idx_t nr_open_buckets; + + unsigned journal_u64s; + u64 journal_entries[BTREE_UPDATE_JOURNAL_RES]; + /* Only here to reduce stack usage on recursive splits: */ struct keylist parent_keys; /* @@ -120,44 +112,70 @@ struct btree_update { u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3]; }; -#define BTREE_INTERIOR_UPDATE_MUST_REWRITE (1 << 0) - -#define for_each_pending_btree_node_free(c, as, p) \ - list_for_each_entry(as, &c->btree_interior_update_list, list) \ - for (p = as->pending; p < as->pending + as->nr_pending; p++) - -void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *, - struct btree_iter *); -void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *); -void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *); - struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *, + struct btree_trans *, struct btree *, struct bkey_format); -void bch2_btree_update_done(struct btree_update *); -struct btree_update * -bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned, - unsigned, struct closure *); +int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned); -void bch2_btree_interior_update_will_free_node(struct btree_update *, - struct btree *); +int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t, + unsigned, unsigned, enum btree_node_sibling); + +static inline 
int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans, + btree_path_idx_t path_idx, + unsigned level, unsigned flags, + enum btree_node_sibling sib) +{ + struct btree_path *path = trans->paths + path_idx; + struct btree *b; -void bch2_btree_insert_node(struct btree_update *, struct btree *, - struct btree_iter *, struct keylist *); -int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned); -int bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *, - enum btree_node_sibling); + EBUG_ON(!btree_node_locked(path, level)); + + b = path->l[level].b; + if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold) + return 0; + + return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib); +} + +static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, + btree_path_idx_t path, + unsigned level, + unsigned flags) +{ + return bch2_foreground_maybe_merge_sibling(trans, path, level, flags, + btree_prev_sib) ?: + bch2_foreground_maybe_merge_sibling(trans, path, level, flags, + btree_next_sib); +} + +int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, + struct btree *, unsigned); +void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *); +int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *, + struct btree *, struct bkey_i *, + unsigned, bool); +int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *, + struct bkey_i *, unsigned, bool); void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *); -int bch2_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *); +void bch2_btree_root_alloc(struct bch_fs *, enum btree_id); static inline unsigned btree_update_reserve_required(struct bch_fs *c, struct btree *b) { - unsigned depth = btree_node_root(c, b)->level - b->level; + unsigned depth = btree_node_root(c, b)->c.level + 1; - return btree_reserve_required_nodes(depth); + /* + * Number of nodes we might have to allocate in a worst case btree + * split operation - we split all the way up to the root, then allocate + * a new root, unless we're already at max depth: + */ + if (depth < BTREE_MAX_DEPTH) + return (depth - b->c.level) * 2 + 1; + else + return (depth - b->c.level) * 2 - 1; } static inline void btree_node_reset_sib_u64s(struct btree *b) @@ -188,89 +206,100 @@ static inline void *write_block(struct btree *b) return (void *) b->data + (b->written << 9); } +static inline bool __btree_addr_written(struct btree *b, void *p) +{ + return p < write_block(b); +} + static inline bool bset_written(struct btree *b, struct bset *i) { - return (void *) i < write_block(b); + return __btree_addr_written(b, i); } -static inline bool bset_unwritten(struct btree *b, struct bset *i) +static inline bool bkey_written(struct btree *b, struct bkey_packed *k) { - return (void *) i > write_block(b); + return __btree_addr_written(b, k); } -static inline unsigned bset_end_sector(struct bch_fs *c, struct btree *b, - struct bset *i) +static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c, + struct btree *b, + void *end) { - return round_up(bset_byte_offset(b, vstruct_end(i)), - block_bytes(c)) >> 9; + ssize_t used = bset_byte_offset(b, end) / sizeof(u64) + + b->whiteout_u64s; + ssize_t total = c->opts.btree_node_size >> 3; + + /* Always leave one extra u64 for bch2_varint_decode: */ + used++; + + return total - used; } +static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, + struct btree *b) +{ + ssize_t remaining = 
__bch_btree_u64s_remaining(c, b, + btree_bkey_last(b, bset_tree_last(b))); + + BUG_ON(remaining < 0); + + if (bset_written(b, btree_bset_last(b))) + return 0; + + return remaining; +} + +#define BTREE_WRITE_SET_U64s_BITS 9 + static inline unsigned btree_write_set_buffer(struct btree *b) { /* * Could buffer up larger amounts of keys for btrees with larger keys, * pending benchmarking: */ - return 4 << 10; + return 8 << BTREE_WRITE_SET_U64s_BITS; } static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b) { - struct bset *i = btree_bset_last(b); - unsigned offset = max_t(unsigned, b->written << 9, - bset_byte_offset(b, vstruct_end(i))); - ssize_t n = (ssize_t) btree_bytes(c) - (ssize_t) - (offset + sizeof(struct btree_node_entry) + - b->whiteout_u64s * sizeof(u64) + - b->uncompacted_whiteout_u64s * sizeof(u64)); - - EBUG_ON(offset > btree_bytes(c)); - - if ((unlikely(bset_written(b, i)) && n > 0) || - (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) && - n > btree_write_set_buffer(b))) - return (void *) b->data + offset; + struct bset_tree *t = bset_tree_last(b); + struct btree_node_entry *bne = max(write_block(b), + (void *) btree_bkey_last(b, bset_tree_last(b))); + ssize_t remaining_space = + __bch_btree_u64s_remaining(c, b, bne->keys.start); + + if (unlikely(bset_written(b, bset(b, t)))) { + if (remaining_space > (ssize_t) (block_bytes(c) >> 3)) + return bne; + } else { + if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) && + remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3)) + return bne; + } return NULL; } -static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t, - struct bkey_packed *k) +static inline void push_whiteout(struct bch_fs *c, struct btree *b, + struct bpos pos) { - if (bset_written(b, bset(b, t))) { - EBUG_ON(b->uncompacted_whiteout_u64s < - bkeyp_key_u64s(&b->format, k)); - b->uncompacted_whiteout_u64s -= - bkeyp_key_u64s(&b->format, k); - } -} + struct bkey_packed k; -static inline void reserve_whiteout(struct btree *b, struct bset_tree *t, - struct bkey_packed *k) -{ - if (bset_written(b, bset(b, t))) { - BUG_ON(!k->needs_whiteout); - b->uncompacted_whiteout_u64s += - bkeyp_key_u64s(&b->format, k); - } -} + BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s); + EBUG_ON(btree_node_just_written(b)); -static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, - struct btree *b) -{ - struct bset *i = btree_bset_last(b); - unsigned used = bset_byte_offset(b, vstruct_end(i)) / sizeof(u64) + - b->whiteout_u64s + - b->uncompacted_whiteout_u64s; - unsigned total = c->opts.btree_node_size << 6; + if (!bkey_pack_pos(&k, pos, b)) { + struct bkey *u = (void *) &k; - EBUG_ON(used > total); + bkey_init(u); + u->p = pos; + } - if (bset_written(b, i)) - return 0; + k.needs_whiteout = true; - return total - used; + b->whiteout_u64s += k.u64s; + bkey_p_copy(unwritten_whiteouts_start(c, b), &k); } /* @@ -278,35 +307,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, * insert into could be written out from under us) */ static inline bool bch2_btree_node_insert_fits(struct bch_fs *c, - struct btree *b, unsigned u64s) + struct btree *b, unsigned u64s) { - if (btree_node_is_extents(b)) { - /* The insert key might split an existing key - * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case: - */ - u64s += BKEY_EXTENT_U64s_MAX; - } + if (unlikely(btree_node_need_rewrite(b))) + return false; return u64s <= bch_btree_keys_u64s_remaining(c, b); } -static inline bool 
journal_res_insert_fits(struct btree_insert *trans, - struct btree_insert_entry *insert) -{ - unsigned u64s = 0; - struct btree_insert_entry *i; +void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *); - /* - * If we didn't get a journal reservation, we're in journal replay and - * we're not journalling updates: - */ - if (!trans->journal_res.ref) - return true; +bool bch2_btree_interior_updates_flush(struct bch_fs *); - for (i = insert; i < trans->entries + trans->nr; i++) - u64s += jset_u64s(i->k->k.u64s + i->extra_res); +void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *); +struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *, + struct jset_entry *, unsigned long); - return u64s <= trans->journal_res.u64s; -} +void bch2_do_pending_node_rewrites(struct bch_fs *); +void bch2_free_pending_node_rewrites(struct bch_fs *); + +void bch2_fs_btree_interior_update_exit(struct bch_fs *); +void bch2_fs_btree_interior_update_init_early(struct bch_fs *); +int bch2_fs_btree_interior_update_init(struct bch_fs *); #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
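
The new btree_update_reserve_required() in the hunk above replaces the old btree_reserve_required_nodes() helper with an inline worst-case calculation. Below is a minimal standalone sketch of that arithmetic, not the bcachefs implementation itself: BTREE_MAX_DEPTH is hard-coded to 4 purely for illustration, and root_level/node_level stand in for the btree_node_root(c, b)->c.level and b->c.level fields.

#include <stdio.h>

#define BTREE_MAX_DEPTH 4	/* illustrative stand-in for the bcachefs constant */

/*
 * Worst-case number of nodes a split starting at node_level may have to
 * allocate when the root sits at root_level: every level from node_level
 * up to the root can split into two new nodes, plus one new root -- unless
 * the tree is already at maximum depth (that branch reserves two fewer
 * nodes, matching the header above).
 */
static unsigned worst_case_split_reserve(unsigned root_level, unsigned node_level)
{
	unsigned depth = root_level + 1;

	if (depth < BTREE_MAX_DEPTH)
		return (depth - node_level) * 2 + 1;
	else
		return (depth - node_level) * 2 - 1;
}

int main(void)
{
	/* Leaf split under a root at level 1: two levels x two nodes + a new root = 5 */
	printf("%u\n", worst_case_split_reserve(1, 0));
	return 0;
}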
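
The reworked space accounting (__bch_btree_u64s_remaining() / bch_btree_keys_u64s_remaining() above) measures everything in u64s: the node size shifted down by 3, minus the end-of-keys byte offset converted to u64s, minus the pending whiteouts, minus one spare u64 kept for bch2_varint_decode. A rough standalone sketch of that bookkeeping follows, with plain integer parameters standing in for the struct bch_fs / struct btree fields; the names here are illustrative, not bcachefs API.

#include <stdio.h>

/*
 * Rough model of __bch_btree_u64s_remaining(): how many u64s of key space
 * are still free in a btree node buffer.  node_bytes stands in for
 * c->opts.btree_node_size, end_byte_offset for bset_byte_offset(b, end),
 * and whiteout_u64s for b->whiteout_u64s.
 */
static long long u64s_remaining(unsigned node_bytes,
				unsigned end_byte_offset,
				unsigned whiteout_u64s)
{
	long long total = node_bytes >> 3;		/* node size in u64s */
	long long used  = end_byte_offset / 8		/* keys written so far, in u64s */
			+ whiteout_u64s			/* pending whiteouts */
			+ 1;				/* spare u64 for varint decode */

	return total - used;
}

int main(void)
{
	/* 256K node with 200K of keys and 64 u64s of pending whiteouts */
	printf("%lld u64s free\n", u64s_remaining(256 << 10, 200 << 10, 64));
	return 0;
}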