+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#include "btree_cache.h"
+#include "btree_locking.h"
#include "btree_update.h"
struct btree_reserve {
 struct disk_reservation disk_res;
 unsigned nr;
 struct btree *b[BTREE_RESERVE_MAX];
};

struct btree_update {
 /* What kind of update are we doing? */
 enum {
 BTREE_INTERIOR_NO_UPDATE,
 BTREE_INTERIOR_UPDATING_NODE,
 BTREE_INTERIOR_UPDATING_ROOT,
 BTREE_INTERIOR_UPDATING_AS,
 } mode;
+
+ unsigned must_rewrite:1;
+ unsigned nodes_written:1;
+
enum btree_id btree_id;
- unsigned flags;
struct btree_reserve *reserve;
 /*
 * Enough room for btree_split's keys without realloc - btree node
 * pointers never have crc/compression info, so we only need to
 * account for the pointers for three keys
 */
 u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
-#define BTREE_INTERIOR_UPDATE_MUST_REWRITE (1 << 0)
-
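+/*
+ * Iterate over every btree node pending free, across all in-flight interior
+ * updates:
+ */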
#define for_each_pending_btree_node_free(c, as, p) \
list_for_each_entry(as, &c->btree_interior_update_list, list) \
for (p = as->pending; p < as->pending + as->nr_pending; p++)
void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
struct btree *,
 struct bkey_format);
void bch2_btree_insert_node(struct btree_update *, struct btree *,
- struct btree_iter *, struct keylist *);
+ struct btree_iter *, struct keylist *,
+ unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
-int bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
- enum btree_node_sibling);
+
+void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
+ unsigned, unsigned, enum btree_node_sibling);
+
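+/*
+ * Merge the node @iter points to at @level with its @sib sibling, if the
+ * estimated size of the merged node is under the foreground merge threshold:
+ */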
+static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level, unsigned flags,
+ enum btree_node_sibling sib)
+{
+ struct btree *b;
+
+ if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
+ return;
+
+ if (!bch2_btree_node_relock(iter, level))
+ return;
+
+ b = iter->l[level].b;
+ if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
+ return;
+
+ __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+}
+
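+/* Check both the previous and the next sibling for a possible merge: */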
+static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level,
+ unsigned flags)
+{
+ bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+ btree_prev_sib);
+ bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+ btree_next_sib);
+}
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
{
- unsigned depth = btree_node_root(c, b)->level - b->level;
+ unsigned depth = btree_node_root(c, b)->level + 1;
- return btree_reserve_required_nodes(depth);
+ /*
+ * Number of nodes we might have to allocate in a worst case btree
+ * split operation - we split all the way up to the root, then allocate
+ * a new root, unless we're already at max depth:
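+ *
+ * For example: with the root at level 2, an insert into a leaf
+ * (b->level == 0) gives depth = 3, so we reserve 3 * 2 + 1 = 7 nodes -
+ * two new nodes per level that splits, plus one for the new root.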
+ */
+ if (depth < BTREE_MAX_DEPTH)
+ return (depth - b->level) * 2 + 1;
+ else
+ return (depth - b->level) * 2 - 1;
}
static inline void btree_node_reset_sib_u64s(struct btree *b)
{
 b->sib_u64s[0] = b->nr.live_u64s;
 b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *write_block(struct btree *b)
{
 return (void *) b->data + (b->written << 9);
}
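+/* has @p, an address inside @b, already been written to disk? */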
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+ return p < write_block(b);
+}
+
static inline bool bset_written(struct btree *b, struct bset *i)
{
- return (void *) i < write_block(b);
+ return __btree_addr_written(b, i);
}
-static inline bool bset_unwritten(struct btree *b, struct bset *i)
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
- return (void *) i > write_block(b);
+ return __btree_addr_written(b, k);
}
-static inline unsigned bset_end_sector(struct bch_fs *c, struct btree *b,
- struct bset *i)
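+/*
+ * u64s of space remaining in @b between @end and the unwritten whiteouts
+ * stored at the end of the node:
+ */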
+static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
+ struct btree *b,
+ void *end)
{
- return round_up(bset_byte_offset(b, vstruct_end(i)),
- block_bytes(c)) >> 9;
+ ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
+ b->whiteout_u64s;
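+ /* btree_node_size is in 512 byte sectors; * 512 / 8 = << 6 gives u64s: */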
+ ssize_t total = c->opts.btree_node_size << 6;
+
+ return total - used;
+}
+
+static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
+ struct btree *b)
+{
+ ssize_t remaining = __bch_btree_u64s_remaining(c, b,
+ btree_bkey_last(b, bset_tree_last(b)));
+
+ BUG_ON(remaining < 0);
+
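+ /* a bset that's already been written out can't have keys appended: */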
+ if (bset_written(b, btree_bset_last(b)))
+ return 0;
+
+ return remaining;
}
static inline unsigned btree_write_set_buffer(struct btree *b)
{
 /*
 * Could buffer up larger amounts of keys for btrees with larger keys,
 * pending benchmarking:
 */
 return 4 << 10;
}
static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
struct btree *b)
{
- struct bset *i = btree_bset_last(b);
- unsigned offset = max_t(unsigned, b->written << 9,
- bset_byte_offset(b, vstruct_end(i)));
- ssize_t n = (ssize_t) btree_bytes(c) - (ssize_t)
- (offset + sizeof(struct btree_node_entry) +
- b->whiteout_u64s * sizeof(u64) +
- b->uncompacted_whiteout_u64s * sizeof(u64));
-
- EBUG_ON(offset > btree_bytes(c));
-
- if ((unlikely(bset_written(b, i)) && n > 0) ||
- (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
- n > btree_write_set_buffer(b)))
- return (void *) b->data + offset;
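+ /*
+ * Start a new bset if the current one has been written out (written
+ * bsets can't be appended to), or if it has outgrown the write set
+ * buffer; in either case, only if enough space remains in the node:
+ */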
+ struct bset_tree *t = bset_tree_last(b);
+ struct btree_node_entry *bne = max(write_block(b),
+ (void *) btree_bkey_last(b, t));
+ ssize_t remaining_space =
+ __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+
+ if (unlikely(bset_written(b, bset(b, t)))) {
+ if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
+ return bne;
+ } else {
+ if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
+ remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
+ return bne;
+ }
return NULL;
}
-static inline void unreserve_whiteout(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
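+/*
+ * Whiteouts for keys in bsets that have already been written out are queued
+ * up at the end of the btree node (growing down), and get written out with
+ * the node so the deletion reaches disk:
+ */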
+static inline void push_whiteout(struct bch_fs *c, struct btree *b,
+ struct bkey_packed *k)
{
- if (bset_written(b, bset(b, t))) {
- EBUG_ON(b->uncompacted_whiteout_u64s <
- bkeyp_key_u64s(&b->format, k));
- b->uncompacted_whiteout_u64s -=
- bkeyp_key_u64s(&b->format, k);
- }
-}
-
-static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
-{
- if (bset_written(b, bset(b, t))) {
- BUG_ON(!k->needs_whiteout);
- b->uncompacted_whiteout_u64s +=
- bkeyp_key_u64s(&b->format, k);
- }
-}
-
-static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
- struct btree *b)
-{
- struct bset *i = btree_bset_last(b);
- unsigned used = bset_byte_offset(b, vstruct_end(i)) / sizeof(u64) +
- b->whiteout_u64s +
- b->uncompacted_whiteout_u64s;
- unsigned total = c->opts.btree_node_size << 6;
-
- EBUG_ON(used > total);
-
- if (bset_written(b, i))
- return 0;
-
- return total - used;
+ unsigned u64s = bkeyp_key_u64s(&b->format, k);
+ struct bkey_packed *dst;
+
+ BUG_ON(u64s > bch_btree_keys_u64s_remaining(c, b));
+
+ b->whiteout_u64s += u64s;
+ dst = unwritten_whiteouts_start(c, b);
+ memcpy_u64s(dst, k, u64s);
+ dst->u64s = u64s;
+ dst->type = KEY_TYPE_deleted;
}
/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
*/
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
- struct btree *b, unsigned u64s)
+ struct btree *b, unsigned u64s)
{
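+ /* fake nodes (placeholder roots for empty btrees) are never inserted into: */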
if (unlikely(btree_node_fake(b)))
return false;
- if (btree_node_is_extents(b)) {
- /* The insert key might split an existing key
- * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case:
- */
- u64s += BKEY_EXTENT_U64s_MAX;
- }
-
return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
-static inline bool journal_res_insert_fits(struct btree_insert *trans,
- struct btree_insert_entry *insert)
-{
- unsigned u64s = 0;
- struct btree_insert_entry *i;
-
- /*
- * If we didn't get a journal reservation, we're in journal replay and
- * we're not journalling updates:
- */
- if (!trans->journal_res.ref)
- return true;
-
- for (i = insert; i < trans->entries + trans->nr; i++)
- u64s += jset_u64s(i->k->k.u64s + i->extra_res);
-
- return u64s <= trans->journal_res.u64s;
-}
+
+ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
+
+size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */