X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_update_interior.h;h=adfc62083844cf3b93d16d25d8269564f5b022a3;hb=b5fd066153c40a70a29caa1ea7987723ab687763;hp=aef8adf8c0321238ae91d81cfcda1de506da9fdd;hpb=a62d8713f84f49d723aebc9d0271abf4c9dae335;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h
index aef8adf..adfc620 100644
--- a/libbcachefs/btree_update_interior.h
+++ b/libbcachefs/btree_update_interior.h
@@ -6,31 +6,9 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-struct btree_reserve {
-	struct disk_reservation	disk_res;
-	unsigned		nr;
-	struct btree		*b[BTREE_RESERVE_MAX];
-};
-
-void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
-				struct bkey_format *);
+#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
-	bool			index_update_done;
-
-	__le64			seq;
-	enum btree_id		btree_id;
-	unsigned		level;
-	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
 
 /*
  * Tracks an in progress split/rewrite of a btree node and the update to the
@@ -53,6 +31,7 @@ struct pending_btree_node_free {
 struct btree_update {
 	struct closure			cl;
 	struct bch_fs			*c;
+	u64				start_time;
 
 	struct list_head		list;
 	struct list_head		unwritten_list;
@@ -65,14 +44,13 @@ struct btree_update {
 		BTREE_INTERIOR_UPDATING_AS,
 	} mode;
 
-	unsigned			must_rewrite:1;
 	unsigned			nodes_written:1;
+	unsigned			took_gc_lock:1;
 
 	enum btree_id			btree_id;
-	u8				level;
+	unsigned			update_level;
 
-	struct btree_reserve		*reserve;
-	struct journal_preres		journal_preres;
+	struct disk_reservation		disk_res;
 
 	/*
 	 * BTREE_INTERIOR_UPDATING_NODE:
@@ -93,20 +71,36 @@ struct btree_update {
 	 */
 	struct journal_entry_pin	journal;
 
-	/*
-	 * Nodes being freed:
-	 * Protected by c->btree_node_pending_free_lock
-	 */
-	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
-	unsigned			nr_pending;
+	/* Preallocated nodes we reserve when we start the update: */
+	struct prealloc_nodes {
+		struct btree		*b[BTREE_UPDATE_NODES_MAX];
+		unsigned		nr;
+	}				prealloc_nodes[2];
+
+	/* Nodes being freed: */
+	struct keylist			old_keys;
+	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
+						  BKEY_BTREE_PTR_U64s_MAX];
+
+	/* Nodes being added: */
+	struct keylist			new_keys;
+	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
+						  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* New nodes, that will be made reachable by this update: */
-	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
 	unsigned			nr_new_nodes;
 
+	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
+	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+	unsigned			nr_old_nodes;
+
+	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
+						     BCH_REPLICAS_MAX];
+	open_bucket_idx_t		nr_open_buckets;
+
 	unsigned			journal_u64s;
-	u64				journal_entries[
-		(BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2];
+	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];
 
 	/* Only here to reduce stack usage on recursive splits: */
 	struct keylist			parent_keys;
@@ -118,72 +112,60 @@ struct btree_update {
 	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-#define for_each_pending_btree_node_free(c, as, p)			\
-	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
-		for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-				struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+						  struct btree_trans *,
 						  struct btree *,
 						  struct bkey_format);
 
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
-			unsigned, struct closure *);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-					       struct btree *);
+int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);
 
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
-			    struct btree_iter *, struct keylist *,
-			    unsigned);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
+int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
+				  unsigned, unsigned, enum btree_node_sibling);
 
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
-				   unsigned, unsigned, enum btree_node_sibling);
-
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
-					struct btree_iter *iter,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+					btree_path_idx_t path_idx,
 					unsigned level, unsigned flags,
 					enum btree_node_sibling sib)
 {
+	struct btree_path *path = trans->paths + path_idx;
 	struct btree *b;
 
-	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
-		return;
-
-	if (!bch2_btree_node_relock(iter, level))
-		return;
+	EBUG_ON(!btree_node_locked(path, level));
 
-	b = iter->l[level].b;
-	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
-		return;
+	b = path->l[level].b;
+	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
+		return 0;
 
-	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
 }
 
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
-					       struct btree_iter *iter,
-					       unsigned level,
-					       unsigned flags)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+					      btree_path_idx_t path,
+					      unsigned level,
+					      unsigned flags)
 {
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_prev_sib);
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_next_sib);
+	return  bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+						    btree_prev_sib) ?:
+		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+						    btree_next_sib);
 }
 
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
+			    struct btree *, unsigned);
+void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
+			       struct btree *, struct bkey_i *,
+			       unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+					struct bkey_i *, unsigned, bool);
+
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
 void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 
 static inline unsigned btree_update_reserve_required(struct bch_fs *c,
 						      struct btree *b)
 {
-	unsigned depth = btree_node_root(c, b)->level + 1;
+	unsigned depth = btree_node_root(c, b)->c.level + 1;
 
 	/*
 	 * Number of nodes we might have to allocate in a worst case btree
@@ -191,9 +173,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
 	 * a new root, unless we're already at max depth:
 	 */
 	if (depth < BTREE_MAX_DEPTH)
-		return (depth - b->level) * 2 + 1;
+		return (depth - b->c.level) * 2 + 1;
 	else
-		return (depth - b->level) * 2 - 1;
+		return (depth - b->c.level) * 2 - 1;
 }
 
 static inline void btree_node_reset_sib_u64s(struct btree *b)
@@ -245,7 +227,10 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
 {
 	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
 		b->whiteout_u64s;
-	ssize_t total = c->opts.btree_node_size << 6;
+	ssize_t total = c->opts.btree_node_size >> 3;
+
+	/* Always leave one extra u64 for bch2_varint_decode: */
+	used++;
 
 	return total - used;
 }
@@ -264,13 +249,15 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
 	return remaining;
 }
 
+#define BTREE_WRITE_SET_U64s_BITS	9
+
 static inline unsigned btree_write_set_buffer(struct btree *b)
 {
 	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
 	 * pending benchmarking:
 	 */
-	return 4 << 10;
+	return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
@@ -280,7 +267,7 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
 	struct btree_node_entry *bne = max(write_block(b),
 			(void *) btree_bkey_last(b, bset_tree_last(b)));
 	ssize_t remaining_space =
-		__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+		__bch_btree_u64s_remaining(c, b, bne->keys.start);
 
 	if (unlikely(bset_written(b, bset(b, t)))) {
 		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
@@ -300,6 +287,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
 	struct bkey_packed k;
 
 	BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+	EBUG_ON(btree_node_just_written(b));
 
 	if (!bkey_pack_pos(&k, pos, b)) {
 		struct bkey *u = (void *) &k;
@@ -311,7 +299,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
 	k.needs_whiteout = true;
 
 	b->whiteout_u64s += k.u64s;
-	bkey_copy(unwritten_whiteouts_start(c, b), &k);
+	bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
 }
 
 /*
@@ -321,14 +309,25 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
 static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
 					       struct btree *b, unsigned u64s)
 {
-	if (unlikely(btree_node_fake(b)))
+	if (unlikely(btree_node_need_rewrite(b)))
 		return false;
 
 	return u64s <= bch_btree_keys_u64s_remaining(c, b);
 }
 
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+						       struct jset_entry *, unsigned long);
+
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
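A few notes on the patch above, since the arithmetic is easy to misread (these notes are not part of the diff). First, the worst-case reserve: a split allocates two replacement nodes at every level it cascades through, plus one more if it must grow a new root, which is what btree_update_reserve_required() computes. The following standalone sketch mirrors that arithmetic; the values of BTREE_MAX_DEPTH and GC_MERGE_NODES here are illustrative stand-ins for the real constants in the bcachefs headers:

	/* Illustrative stand-in values; the real ones live in the bcachefs headers. */
	#define BTREE_MAX_DEPTH		4
	#define GC_MERGE_NODES		4

	/* Mirrors the patched macro: two new nodes per splittable level,
	 * plus slack for GC node coalescing. */
	#define BTREE_UPDATE_NODES_MAX	((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

	/*
	 * Mirrors btree_update_reserve_required(): splitting a node at
	 * `level` under a root at `root_level` can allocate two nodes per
	 * level from `level` up through the root, plus one for a new root -
	 * unless the tree is already at maximum depth and cannot grow one,
	 * in which case the topmost level contributes one node instead of
	 * three.
	 */
	static unsigned reserve_required(unsigned root_level, unsigned level)
	{
		unsigned depth = root_level + 1;

		return depth < BTREE_MAX_DEPTH
			? (depth - level) * 2 + 1
			: (depth - level) * 2 - 1;
	}

For example, a leaf split under a level-2 root reserves (3 - 0) * 2 + 1 = 7 nodes: two new leaves, two interior nodes, the two halves of a split root, and the new root above them.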
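Second, error handling in the merge path: the merge helpers change from void to int in this patch, so failures now propagate to the caller. bch2_foreground_maybe_merge() chains the two sibling merges with the GNU "?:" operator, which evaluates its right-hand side only when the left-hand side is zero - so the prev-sibling merge runs first, and if it fails its error is returned without attempting the next sibling. A minimal sketch of the idiom, with merge_one_side() as a hypothetical stand-in for the sibling helper:

	/* 0 means success, nonzero an error code, as in the patched helpers. */
	static int merge_one_side(int sib)
	{
		return 0;
	}

	static int merge_both_sides(void)
	{
		return merge_one_side(0) ?:	/* prev sibling; short-circuits on error */
		       merge_one_side(1);	/* next sibling */
	}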
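Finally, the units in __bch_btree_u64s_remaining(): the total changes from "c->opts.btree_node_size << 6" to ">> 3", presumably because the option is now stored in bytes rather than 512-byte sectors - both forms yield the node size in u64s, since 512 / 8 = 64 = 1 << 6 and bytes / 8 = bytes >> 3. The new "used++" reserves one u64 of headroom, which the accompanying comment attributes to bch2_varint_decode.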