#include "btree_locking.h"
#include "btree_update.h"
-struct btree_reserve {
- struct disk_reservation disk_res;
- unsigned nr;
- struct btree *b[BTREE_RESERVE_MAX];
-};
-
-void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
- struct bkey_format *);
+#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
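+
+/*
+ * Rough sizing rationale (a reading of the constant, not verbatim from the
+ * code): a split can cascade up from the leaf, allocating up to two
+ * replacement nodes per level below the root, plus up to GC_MERGE_NODES
+ * nodes handed to us for coalescing.
+ */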
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
- bool index_update_done;
-
- __le64 seq;
- enum btree_id btree_id;
- unsigned level;
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
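+
+/*
+ * Journal space reserved per node: one btree ptr key
+ * (BKEY_BTREE_PTR_U64s_MAX) plus one u64 for the jset_entry header.
+ */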
/*
 * Tracks an in progress split/rewrite of a btree node and the update to the
 * parent node:
 */
struct btree_update {
struct closure cl;
struct bch_fs *c;
+ u64 start_time;
struct list_head list;
+ struct list_head unwritten_list;
/* What kind of update are we doing? */
enum {
 BTREE_INTERIOR_NO_UPDATE,
 BTREE_INTERIOR_UPDATING_NODE,
 BTREE_INTERIOR_UPDATING_ROOT,
 BTREE_INTERIOR_UPDATING_AS,
} mode;
- unsigned must_rewrite:1;
unsigned nodes_written:1;
+ unsigned took_gc_lock:1;
enum btree_id btree_id;
+ unsigned update_level;
- struct btree_reserve *reserve;
+ struct disk_reservation disk_res;
/*
 * BTREE_INTERIOR_UPDATING_NODE:
 * The update that made the new nodes visible was a regular update to an
 * existing interior node - @b. We can't write out the update to @b
 * until the new nodes we created are finished writing, so we block @b
 * from writing by putting this btree_update on the @b->write_blocked
 * list with @write_blocked_list:
 */
struct btree *b;
struct list_head write_blocked_list;
- /*
- * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so now
- * we're now blocking another btree_update
- * @parent_as - btree_update that's waiting on our nodes to finish
- * writing, before it can make new nodes visible on disk
- * @wait - list of child btree_updates that are waiting on this
- * btree_update to make all the new nodes visible before they can free
- * their old btree nodes
- */
- struct btree_update *parent_as;
- struct closure_waitlist wait;
-
/*
* We may be freeing nodes that were dirty, and thus had journal entries
 * pinned: we need to transfer the oldest of those pins to the
 * btree_update operation, and release it when the new node(s)
 * are all persistent and reachable:
*/
struct journal_entry_pin journal;
- u64 journal_seq;
+ /* Preallocated nodes we reserve when we start the update: */
+ struct prealloc_nodes {
+ struct btree *b[BTREE_UPDATE_NODES_MAX];
+ unsigned nr;
+ } prealloc_nodes[2];
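+ /*
+ * (Two lists: index 0 holds nodes destined for leaves, index 1 nodes for
+ * interior positions - kept separate, presumably so interior allocations
+ * can draw on a deeper reserve.)
+ */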
- /*
- * Nodes being freed:
- * Protected by c->btree_node_pending_free_lock
- */
- struct pending_btree_node_free pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
- unsigned nr_pending;
+ /* Nodes being freed: */
+ struct keylist old_keys;
+ u64 _old_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_U64s_MAX];
+
+ /* Nodes being added: */
+ struct keylist new_keys;
+ u64 _new_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_U64s_MAX];
/* New nodes, that will be made reachable by this update: */
- struct btree *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+ struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
unsigned nr_new_nodes;
+ struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
+ __le64 old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+ unsigned nr_old_nodes;
+
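+ /*
+ * Open bucket references for the new nodes' allocations, held until the
+ * nodes are written (one per replica per node):
+ */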
+ open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX *
+ BCH_REPLICAS_MAX];
+ open_bucket_idx_t nr_open_buckets;
+
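+ /* Journal space, reserved when the update starts, for the keys that
+ * describe this update (sized per BTREE_UPDATE_JOURNAL_RES above): */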
+ unsigned journal_u64s;
+ u64 journal_entries[BTREE_UPDATE_JOURNAL_RES];
+
/* Only here to reduce stack usage on recursive splits: */
struct keylist parent_keys;
 /*
 * Enough room for btree_split's keys without realloc - btree node
 * pointers never have crc/compression info, so we only need to account
 * for the pointers for three keys
 */
u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
-#define for_each_pending_btree_node_free(c, as, p) \
- list_for_each_entry(as, &c->btree_interior_update_list, list) \
- for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
- struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+ struct btree_trans *,
struct btree *,
struct bkey_format);
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
- unsigned, struct closure *);
+int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
- struct btree *);
+int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
+ unsigned, unsigned, enum btree_node_sibling);
-void bch2_btree_insert_node(struct btree_update *, struct btree *,
- struct btree_iter *, struct keylist *,
- unsigned);
-int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
-
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
- unsigned, unsigned, enum btree_node_sibling);
-
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
- struct btree_iter *iter,
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+ btree_path_idx_t path_idx,
unsigned level, unsigned flags,
enum btree_node_sibling sib)
{
+ struct btree_path *path = trans->paths + path_idx;
struct btree *b;
- if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
- return;
-
- if (!bch2_btree_node_relock(iter, level))
- return;
+ EBUG_ON(!btree_node_locked(path, level));
- b = iter->l[level].b;
- if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
- return;
+ b = path->l[level].b;
+ if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
+ return 0;
- __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+ return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
}
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
- struct btree_iter *iter,
- unsigned level,
- unsigned flags)
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+ btree_path_idx_t path,
+ unsigned level,
+ unsigned flags)
{
- bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
- btree_prev_sib);
- bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
- btree_next_sib);
+ return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+ btree_prev_sib) ?:
+ bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+ btree_next_sib);
}
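+
+/*
+ * Hypothetical usage sketch (names assumed, not from this file): after an
+ * update shrinks a leaf, re-check both siblings and propagate any
+ * transaction restart:
+ *
+ *	int ret = bch2_foreground_maybe_merge(trans, iter->path,
+ *					      b->c.level, flags);
+ *	if (ret)
+ *		goto err;
+ */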
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
+ struct btree *, unsigned);
+void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
+ struct btree *, struct bkey_i *,
+ unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+ struct bkey_i *, unsigned, bool);
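+/*
+ * Rough summary: bch2_btree_node_rewrite() allocates a replacement for a
+ * single node and swaps it in via the parent; bch2_btree_node_update_key()
+ * replaces just the key (pointer) for an existing node.
+ */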
+
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
{
- unsigned depth = btree_node_root(c, b)->level + 1;
+ unsigned depth = btree_node_root(c, b)->c.level + 1;
/*
 * Number of nodes we might have to allocate in a worst case btree
 * split operation - we split all the way up to the root, then allocate a
 * new root, unless we're already at max depth:
*/
if (depth < BTREE_MAX_DEPTH)
- return (depth - b->level) * 2 + 1;
+ return (depth - b->c.level) * 2 + 1;
else
- return (depth - b->level) * 2 - 1;
+ return (depth - b->c.level) * 2 - 1;
}
static inline void btree_node_reset_sib_u64s(struct btree *b)
static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
						 struct btree *b,
						 void *end)
{
ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
- b->whiteout_u64s +
- b->uncompacted_whiteout_u64s;
- ssize_t total = c->opts.btree_node_size << 6;
+ b->whiteout_u64s;
+ ssize_t total = c->opts.btree_node_size >> 3;
+
+ /* Always leave one extra u64 for bch2_varint_decode: */
+ used++;
return total - used;
}
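+
+/*
+ * E.g. with the default 256k btree nodes: total = (256 << 10) >> 3 = 32768
+ * u64s, minus whatever is used up to @end, whiteouts, and the spare u64 for
+ * bch2_varint_decode().
+ */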
return remaining;
}
+#define BTREE_WRITE_SET_U64s_BITS 9
+
static inline unsigned btree_write_set_buffer(struct btree *b)
{
/*
* Could buffer up larger amounts of keys for btrees with larger keys,
* pending benchmarking:
*/
- return 4 << 10;
+ return 8 << BTREE_WRITE_SET_U64s_BITS;
}
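+
+/* 8 << BTREE_WRITE_SET_U64s_BITS = 4k (512 u64s of 8 bytes each), the same
+ * value as the old 4 << 10 literal. */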
static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
(void *) btree_bkey_last(b, bset_tree_last(b)));
ssize_t remaining_space =
- __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+ __bch_btree_u64s_remaining(c, b, bne->keys.start);
if (unlikely(bset_written(b, bset(b, t)))) {
if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
}
-static inline void unreserve_whiteout(struct btree *b, struct bkey_packed *k)
+static inline void push_whiteout(struct bch_fs *c, struct btree *b,
+ struct bpos pos)
{
- if (bkey_written(b, k)) {
- EBUG_ON(b->uncompacted_whiteout_u64s <
- bkeyp_key_u64s(&b->format, k));
- b->uncompacted_whiteout_u64s -=
- bkeyp_key_u64s(&b->format, k);
- }
-}
+ struct bkey_packed k;
-static inline void reserve_whiteout(struct btree *b, struct bkey_packed *k)
-{
- if (bkey_written(b, k)) {
- BUG_ON(!k->needs_whiteout);
- b->uncompacted_whiteout_u64s +=
- bkeyp_key_u64s(&b->format, k);
+ BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+ EBUG_ON(btree_node_just_written(b));
+
+ if (!bkey_pack_pos(&k, pos, b)) {
+ struct bkey *u = (void *) &k;
+
+ bkey_init(u);
+ u->p = pos;
}
+
+ k.needs_whiteout = true;
+
+ b->whiteout_u64s += k.u64s;
+ bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
}
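+
+/*
+ * Usage sketch (inferred; names hypothetical): deleting a key that lives in
+ * an already-written bset can't modify that bset in place, so we append a
+ * whiteout - a deleted key with needs_whiteout set - which compaction folds
+ * in at the next write:
+ *
+ *	if (bkey_written(b, k))
+ *		push_whiteout(c, b, insert->k.p);
+ */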
/*
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
struct btree *b, unsigned u64s)
{
- if (unlikely(btree_node_fake(b)))
+ if (unlikely(btree_node_need_rewrite(b)))
return false;
return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
-ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+ struct jset_entry *, unsigned long);
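+/*
+ * (Roughly speaking, btree roots are also stashed in each journal write;
+ * these helpers convert between the in-memory btree roots and their jset
+ * entries for recovery.)
+ */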
+
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */