#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
+#include "trace.h"
#include "util.h"
#include <asm/unaligned.h>
#include <linux/random.h>
#include <linux/prefetch.h>
-/* hack.. */
-#include "alloc_types.h"
-#include <trace/events/bcachefs.h>
-
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
struct btree *);
struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
- unsigned offset = __btree_node_key_to_offset(b, k);
- struct bset_tree *t;
-
- for_each_bset(b, t)
- if (offset <= t->end_offset) {
- EBUG_ON(offset < btree_bkey_first_offset(t));
- return t;
- }
-
- BUG();
+ return bch2_bkey_to_bset_inlined(b, k);
}
/*
* by the time we actually do the insert will all be deleted.
*/
-void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
+void bch2_dump_bset(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned set)
{
struct bkey_packed *_k, *_n;
- struct bkey k, n;
- char buf[120];
+ struct bkey uk, n;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
if (!i->u64s)
return;
- for (_k = i->start, k = bkey_unpack_key(b, _k);
+ for (_k = i->start;
_k < vstruct_last(i);
- _k = _n, k = n) {
- _n = bkey_next_skip_noops(_k, vstruct_last(i));
+ _k = _n) {
+ _n = bkey_p_next(_k);
+
+ if (!_k->u64s) {
+ printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
+ _k->_data - i->_data);
+ break;
+ }
- bch2_bkey_to_text(&PBUF(buf), &k);
- printk(KERN_ERR "block %u key %5u: %s\n", set,
- __btree_node_key_to_offset(b, _k), buf);
+ k = bkey_disassemble(b, _k, &uk);
+
+ printbuf_reset(&buf);
+ if (c)
+ bch2_bkey_val_to_text(&buf, c, k);
+ else
+ bch2_bkey_to_text(&buf, k.k);
+ printk(KERN_ERR "block %u key %5zu: %s\n", set,
+ _k->_data - i->_data, buf.buf);
if (_n == vstruct_last(i))
continue;
n = bkey_unpack_key(b, _n);
- if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
+ if (bpos_lt(n.p, k.k->p)) {
printk(KERN_ERR "Key skipped backwards\n");
continue;
}
- /*
- * Weird check for duplicate non extent keys: extents are
- * deleted iff they have 0 size, so if it has zero size and it's
- * not deleted these aren't extents:
- */
- if (((!k.size && !bkey_deleted(&k)) ||
- (!n.size && !bkey_deleted(&n))) &&
- !bkey_deleted(&k) &&
- !bkey_cmp(n.p, k.p))
+ if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
printk(KERN_ERR "Duplicate keys\n");
}
+
+ printbuf_exit(&buf);
}
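
The char-buffer-to-printbuf conversion above is the pattern repeated throughout this patch. A minimal sketch of the lifecycle, using only calls that appear in this diff (PRINTBUF, printbuf_reset, prt_printf, printbuf_exit); nr_keys is a hypothetical loop bound:

	struct printbuf buf = PRINTBUF;	/* starts empty; allocates on first print */
	unsigned i;

	for (i = 0; i < nr_keys; i++) {
		printbuf_reset(&buf);	/* rewind, reusing the allocation */
		prt_printf(&buf, "key %u", i);
		printk(KERN_ERR "%s\n", buf.buf);
	}

	printbuf_exit(&buf);		/* frees buf.buf */

Note that the early return at the top of bch2_dump_bset() needs no printbuf_exit(): PRINTBUF starts with no allocation, so there is nothing to leak before the first print.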
-void bch2_dump_btree_node(struct btree *b)
+void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
struct bset_tree *t;
console_lock();
for_each_bset(b, t)
- bch2_dump_bset(b, bset(b, t), t - b->set);
+ bch2_dump_bset(c, b, bset(b, t), t - b->set);
console_unlock();
}
struct btree_node_iter *iter)
{
struct btree_node_iter_set *set;
+ struct printbuf buf = PRINTBUF;
printk(KERN_ERR "btree node iter with %u/%u sets:\n",
__btree_node_iter_used(iter), b->nsets);
struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
struct bset_tree *t = bch2_bkey_to_bset(b, k);
struct bkey uk = bkey_unpack_key(b, k);
- char buf[100];
- bch2_bkey_to_text(&PBUF(buf), &uk);
+ printbuf_reset(&buf);
+ bch2_bkey_to_text(&buf, &uk);
printk(KERN_ERR "set %zu key %u: %s\n",
- t - b->set, set->k, buf);
+ t - b->set, set->k, buf.buf);
}
+
+ printbuf_exit(&buf);
}
#ifdef CONFIG_BCACHEFS_DEBUG
for_each_bset(b, t)
bset_tree_for_each_key(b, t, k)
- if (!bkey_whiteout(k))
+ if (!bkey_deleted(k))
btree_keys_account_key_add(&nr, t - b->set, k);
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
struct btree_node_iter_set *set;
struct bkey ku = bkey_unpack_key(b, k);
struct bkey nu = bkey_unpack_key(b, n);
- char buf1[80], buf2[80];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
- bch2_dump_btree_node(b);
- bch2_bkey_to_text(&PBUF(buf1), &ku);
- bch2_bkey_to_text(&PBUF(buf2), &nu);
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &ku);
+ bch2_bkey_to_text(&buf2, &nu);
printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
- buf1, buf2);
+ buf1.buf, buf2.buf);
printk(KERN_ERR "iter was:");
btree_node_iter_for_each(_iter, set) {
- struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
- struct bset_tree *t = bch2_bkey_to_bset(b, k);
+ struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k2);
printk(" [%zi %zi]", t - b->set,
- k->_data - bset(b, t)->_data);
+ k2->_data - bset(b, t)->_data);
}
panic("\n");
}
return;
/* Verify no duplicates: */
- btree_node_iter_for_each(iter, set)
+ btree_node_iter_for_each(iter, set) {
+ BUG_ON(set->k > set->end);
btree_node_iter_for_each(iter, s2)
BUG_ON(set != s2 && set->end == s2->end);
+ }
/* Verify that set->end is correct: */
btree_node_iter_for_each(iter, set) {
{
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
- struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+ struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
#if 0
BUG_ON(prev &&
bkey_iter_cmp(b, prev, insert) > 0);
bkey_iter_cmp(b, prev, insert) > 0) {
struct bkey k1 = bkey_unpack_key(b, prev);
struct bkey k2 = bkey_unpack_key(b, insert);
- char buf1[100];
- char buf2[100];
- bch2_dump_btree_node(b);
- bch2_bkey_to_text(&PBUF(buf1), &k1);
- bch2_bkey_to_text(&PBUF(buf2), &k2);
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &k1);
+ bch2_bkey_to_text(&buf2, &k2);
panic("prev > insert:\n"
"prev key %s\n"
"insert key %s\n",
- buf1, buf2);
+ buf1.buf, buf2.buf);
}
#endif
#if 0
bkey_iter_cmp(b, insert, next) > 0) {
struct bkey k1 = bkey_unpack_key(b, insert);
struct bkey k2 = bkey_unpack_key(b, next);
- char buf1[100];
- char buf2[100];
- bch2_dump_btree_node(b);
- bch2_bkey_to_text(&PBUF(buf1), &k1);
- bch2_bkey_to_text(&PBUF(buf2), &k2);
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &k1);
+ bch2_bkey_to_text(&buf2, &k2);
panic("insert > next:\n"
"insert key %s\n"
"next key %s\n",
- buf1, buf2);
+ buf1.buf, buf2.buf);
}
#endif
}
}
struct ro_aux_tree {
- struct bkey_float f[0];
+ u8 nothing[0];
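+ /*
+  * C forbids a flexible array member as a struct's only member; the
+  * zero-size array above satisfies that rule while keeping sizeof == 0.
+  */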
+ struct bkey_float f[];
};
struct rw_aux_tree {
struct bpos k;
};
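/*
 * Each rw_aux_tree entry also carries a key offset (see
 * bch2_bset_fix_lookup_table() below); storing the unpacked pos here is
 * what lets bset_search_write_set() binary search on bpos without
 * unpacking keys.
 */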
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It defines the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliary search tree - when we're done searching the bset_float tree
- * we have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE 128
-
-/* Space required for the btree node keys */
-static inline size_t btree_keys_bytes(struct btree *b)
-{
- return PAGE_SIZE << b->page_order;
-}
-
-static inline size_t btree_keys_cachelines(struct btree *b)
-{
- return btree_keys_bytes(b) / BSET_CACHELINE;
-}
-
-static inline size_t btree_aux_data_bytes(struct btree *b)
-{
- return btree_keys_cachelines(b) * 8;
-}
-
-static inline size_t btree_aux_data_u64s(struct btree *b)
-{
- return btree_aux_data_bytes(b) / sizeof(u64);
-}
-
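
For reference, a sketch of the sizing arithmetic the removed helpers performed (presumably relocated rather than dropped, since the aux-tree capacity checks below still depend on it). Assuming BSET_CACHELINE stays 128; page_order stands in for the node's allocation order:

	size_t bytes      = PAGE_SIZE << page_order;	/* order 0: 4096 */
	size_t cachelines = bytes / 128;		/* BSET_CACHELINE: 32 */
	size_t aux_bytes  = cachelines * 8;		/* 8 aux bytes each: 256 */
	size_t aux_u64s   = aux_bytes / sizeof(u64);	/* 32 */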
static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
BUG_ON(t->aux_data_offset == U16_MAX);
return ro_aux_tree_base(b, t)->f + idx;
}
-static void bset_aux_tree_verify(struct btree *b)
+static void bset_aux_tree_verify(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- struct bset_tree *t;
+ const struct bset_tree *t;
for_each_bset(b, t) {
if (t->aux_data_offset == U16_MAX)
#endif
}
-/* Memory allocation */
-
-void bch2_btree_keys_free(struct btree *b)
-{
- vfree(b->aux_data);
- b->aux_data = NULL;
-}
-
-#ifndef PAGE_KERNEL_EXEC
-# define PAGE_KERNEL_EXEC PAGE_KERNEL
-#endif
-
-int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
-{
- b->page_order = page_order;
- b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
- PAGE_KERNEL_EXEC);
- if (!b->aux_data)
- return -ENOMEM;
-
- return 0;
-}
-
-void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
+void bch2_btree_keys_init(struct btree *b)
{
unsigned i;
b->nsets = 0;
memset(&b->nr, 0, sizeof(b->nr));
-#ifdef CONFIG_BCACHEFS_DEBUG
- b->expensive_debug_checks = expensive_debug_checks;
-#endif
+
for (i = 0; i < MAX_BSETS; i++)
b->set[i].data_offset = U16_MAX;
unsigned j)
{
return cacheline_to_bkey(b, t,
- __eytzinger1_to_inorder(j, t->size, t->extra),
+ __eytzinger1_to_inorder(j, t->size - 1, t->extra),
bkey_float(b, t, j)->key_offset);
}
{
unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
- return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
+ return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
struct bkey_packed *k = btree_bkey_first(b, t);
unsigned j = 0;
- if (!btree_keys_expensive_checks(b))
+ if (!bch2_expensive_debug_checks)
return;
BUG_ON(bset_has_ro_aux_tree(t));
goto start;
while (1) {
if (rw_aux_to_bkey(b, t, j) == k) {
- BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
+ BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
bkey_unpack_pos(b, k)));
start:
if (++j == t->size)
rw_aux_tree(b, t)[j - 1].offset);
}
- k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ k = bkey_p_next(k);
BUG_ON(k >= btree_bkey_last(b, t));
}
}
return (u16) v;
}
-static void make_bfloat(struct btree *b, struct bset_tree *t,
- unsigned j,
- struct bkey_packed *min_key,
- struct bkey_packed *max_key)
+static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
+ unsigned j,
+ struct bkey_packed *min_key,
+ struct bkey_packed *max_key)
{
struct bkey_float *f = bkey_float(b, t, j);
struct bkey_packed *m = tree_to_bkey(b, t, j);
- struct bkey_packed *l, *r;
+ struct bkey_packed *l = is_power_of_2(j)
+ ? min_key
+ : tree_to_prev_bkey(b, t, j >> ffs(j));
+ struct bkey_packed *r = is_power_of_2(j + 1)
+ ? max_key
+ : tree_to_bkey(b, t, j >> (ffz(j) + 1));
unsigned mantissa;
int shift, exponent, high_bit;
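	/*
	 * Ancestor math for the initializers above: the bits of j below its
	 * leading 1 encode the path from the root (0 = left, 1 = right), so
	 * j >> ffs(j) is the inorder predecessor of j's subtree and
	 * j >> (ffz(j) + 1) its inorder successor. E.g. j = 6 (0b110):
	 * predecessor 6 >> ffs(6) = 1, successor 6 >> (ffz(6) + 1) = 3.
	 */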
- if (is_power_of_2(j)) {
- l = min_key;
-
- if (!l->u64s) {
- if (!bkey_pack_pos(l, b->data->min_key, b)) {
- struct bkey_i tmp;
-
- bkey_init(&tmp.k);
- tmp.k.p = b->data->min_key;
- bkey_copy(l, &tmp);
- }
- }
- } else {
- l = tree_to_prev_bkey(b, t, j >> ffs(j));
-
- EBUG_ON(m < l);
- }
-
- if (is_power_of_2(j + 1)) {
- r = max_key;
-
- if (!r->u64s) {
- if (!bkey_pack_pos(r, t->max_key, b)) {
- struct bkey_i tmp;
-
- bkey_init(&tmp.k);
- tmp.k.p = t->max_key;
- bkey_copy(r, &tmp);
- }
- }
- } else {
- r = tree_to_bkey(b, t, j >> (ffz(j) + 1));
-
- EBUG_ON(m > r);
- }
-
/*
* for failed bfloats, the lookup code falls back to comparing against
* the original key.
}
/* bytes remaining - only valid for last bset: */
-static unsigned __bset_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
bset_aux_tree_verify(b);
return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}
-static unsigned bset_ro_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
return __bset_tree_capacity(b, t) /
(sizeof(struct bkey_float) + sizeof(u8));
}
-static unsigned bset_rw_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}
-static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
+static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
struct bkey_packed *k;
}
}
-static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
+static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
- struct bkey_packed min_key, max_key;
+ struct bkey_i min_key, max_key;
unsigned j, cacheline = 1;
- /* signal to make_bfloat() that they're uninitialized: */
- min_key.u64s = max_key.u64s = 0;
-
t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
bset_ro_tree_capacity(b, t));
retry:
t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
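	/*
	 * t->size includes the unused index-0 slot of the bkey_float array;
	 * the 1-indexed eytzinger tree thus has t->size - 1 nodes, which is
	 * the size the eytzinger1 helpers now take.
	 */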
/* First we figure out where the first key in each cacheline is */
- eytzinger1_for_each(j, t->size) {
+ eytzinger1_for_each(j, t->size - 1) {
while (bkey_to_cacheline(b, t, k) < cacheline)
- prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ prev = k, k = bkey_p_next(k);
if (k >= btree_bkey_last(b, t)) {
/* XXX: this path sucks */
}
while (k != btree_bkey_last(b, t))
- prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ prev = k, k = bkey_p_next(k);
+
+ if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
+ bkey_init(&min_key.k);
+ min_key.k.p = b->data->min_key;
+ }
- t->max_key = bkey_unpack_pos(b, prev);
+ if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
+ bkey_init(&max_key.k);
+ max_key.k.p = b->data->max_key;
+ }
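+ /*
+  * Bounds that can't be packed in this node's key format fall back to
+  * unpacked keys carrying just the position.
+  */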
/* Then we build the tree */
- eytzinger1_for_each(j, t->size)
- make_bfloat(b, t, j, &min_key, &max_key);
+ eytzinger1_for_each(j, t->size - 1)
+ make_bfloat(b, t, j,
+ bkey_to_packed(&min_key),
+ bkey_to_packed(&max_key));
}
static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
do {
p = j ? tree_to_bkey(b, t,
__inorder_to_eytzinger1(j--,
- t->size, t->extra))
+ t->size - 1, t->extra))
: btree_bkey_first(b, t);
} while (p >= k);
break;
struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
while ((p = __bkey_prev(b, t, k)) && !ret) {
- for (i = p; i != k; i = bkey_next_skip_noops(i, k))
+ for (i = p; i != k; i = bkey_p_next(i))
if (i->type >= min_key_type)
ret = i;
k = p;
}
- if (btree_keys_expensive_checks(b)) {
+ if (bch2_expensive_debug_checks) {
BUG_ON(ret >= orig_k);
for (i = ret
- ? bkey_next_skip_noops(ret, orig_k)
+ ? bkey_p_next(ret)
: btree_bkey_first(b, t);
i != orig_k;
- i = bkey_next_skip_noops(i, orig_k))
+ i = bkey_p_next(i))
BUG_ON(i->type >= min_key_type);
}
/* Insert */
-static void rw_aux_tree_fix_invalidated_key(struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *k)
-{
- unsigned offset = __btree_node_key_to_offset(b, k);
- unsigned j = rw_aux_tree_bsearch(b, t, offset);
-
- if (j < t->size &&
- rw_aux_tree(b, t)[j].offset == offset)
- rw_aux_tree_set(b, t, j, k);
-
- bch2_bset_verify_rw_aux_tree(b, t);
-}
-
-static void ro_aux_tree_fix_invalidated_key(struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *k)
-{
- struct bkey_packed min_key, max_key;
- unsigned inorder, j;
-
- EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
- /* signal to make_bfloat() that they're uninitialized: */
- min_key.u64s = max_key.u64s = 0;
-
- if (bkey_next_skip_noops(k, btree_bkey_last(b, t)) == btree_bkey_last(b, t)) {
- t->max_key = bkey_unpack_pos(b, k);
-
- for (j = 1; j < t->size; j = j * 2 + 1)
- make_bfloat(b, t, j, &min_key, &max_key);
- }
-
- inorder = bkey_to_cacheline(b, t, k);
-
- if (inorder &&
- inorder < t->size) {
- j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
-
- if (k == tree_to_bkey(b, t, j)) {
- /* Fix the node this key corresponds to */
- make_bfloat(b, t, j, &min_key, &max_key);
-
- /* Children for which this key is the right boundary */
- for (j = eytzinger1_left_child(j);
- j < t->size;
- j = eytzinger1_right_child(j))
- make_bfloat(b, t, j, &min_key, &max_key);
- }
- }
-
- if (inorder + 1 < t->size) {
- j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
-
- if (k == tree_to_prev_bkey(b, t, j)) {
- make_bfloat(b, t, j, &min_key, &max_key);
-
- /* Children for which this key is the left boundary */
- for (j = eytzinger1_right_child(j);
- j < t->size;
- j = eytzinger1_left_child(j))
- make_bfloat(b, t, j, &min_key, &max_key);
- }
- }
-}
-
-/**
- * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
- * modified, fix any auxiliary search tree by remaking all the nodes in the
- * auxiliary search tree that @k corresponds to
- */
-void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
-{
- struct bset_tree *t = bch2_bkey_to_bset(b, k);
-
- switch (bset_aux_tree_type(t)) {
- case BSET_NO_AUX_TREE:
- break;
- case BSET_RO_AUX_TREE:
- ro_aux_tree_fix_invalidated_key(b, t, k);
- break;
- case BSET_RW_AUX_TREE:
- rw_aux_tree_fix_invalidated_key(b, t, k);
- break;
- }
-}
-
static void bch2_bset_fix_lookup_table(struct btree *b,
struct bset_tree *t,
struct bkey_packed *_where,
t->size -= j - l;
for (j = l; j < t->size; j++)
- rw_aux_tree(b, t)[j].offset += shift;
+ rw_aux_tree(b, t)[j].offset += shift;
EBUG_ON(l < t->size &&
rw_aux_tree(b, t)[l].offset ==
struct bkey_packed *k = start;
while (1) {
- k = bkey_next_skip_noops(k, end);
+ k = bkey_p_next(k);
if (k == end)
break;
if (bch2_bkey_pack_key(&packed, &insert->k, f))
src = &packed;
- if (!bkey_whiteout(&insert->k))
+ if (!bkey_deleted(&insert->k))
btree_keys_account_key_add(&b->nr, t - b->set, src);
if (src->u64s != clobber_u64s) {
- u64 *src_p = where->_data + clobber_u64s;
- u64 *dst_p = where->_data + src->u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
+ u64 *dst_p = (u64 *) where->_data + src->u64s;
EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
(int) clobber_u64s - src->u64s);
set_btree_bset_end(b, t);
}
- memcpy_u64s(where, src,
+ memcpy_u64s_small(where, src,
bkeyp_key_u64s(f, src));
memcpy_u64s(bkeyp_val(f, where), &insert->v,
bkeyp_val_u64s(f, src));
- bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
+ if (src->u64s != clobber_u64s)
+ bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
bch2_verify_btree_nr_keys(b);
}
unsigned clobber_u64s)
{
struct bset_tree *t = bset_tree_last(b);
- u64 *src_p = where->_data + clobber_u64s;
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
u64 *dst_p = where->_data;
bch2_bset_verify_rw_aux_tree(b, t);
__flatten
static struct bkey_packed *bset_search_write_set(const struct btree *b,
struct bset_tree *t,
- struct bpos *search,
- const struct bkey_packed *packed_search)
+ struct bpos *search)
{
unsigned l = 0, r = t->size;
while (l + 1 != r) {
unsigned m = (l + r) >> 1;
- if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
+ if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
l = m;
else
r = m;
static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
- asm(".intel_syntax noprefix;"
- "prefetcht0 [%0 - 127 + 64 * 0];"
- "prefetcht0 [%0 - 127 + 64 * 1];"
- "prefetcht0 [%0 - 127 + 64 * 2];"
- "prefetcht0 [%0 - 127 + 64 * 3];"
- ".att_syntax prefix;"
+ asm("prefetcht0 (-127 + 64 * 0)(%0);"
+ "prefetcht0 (-127 + 64 * 1)(%0);"
+ "prefetcht0 (-127 + 64 * 2)(%0);"
+ "prefetcht0 (-127 + 64 * 3)(%0);"
:
: "r" (p + 127));
#else
__flatten
static struct bkey_packed *bset_search_tree(const struct btree *b,
- struct bset_tree *t,
- struct bpos *search,
+ const struct bset_tree *t,
+ const struct bpos *search,
const struct bkey_packed *packed_search)
{
struct ro_aux_tree *base = ro_aux_tree_base(b, t);
prefetch(&base->f[n << 4]);
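	/*
	 * n << 4 is the first of n's sixteen level-4 descendants; at 4 bytes
	 * per bkey_float they span one 64-byte cacheline, prefetched four
	 * levels ahead of the descent.
	 */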
f = &base->f[n];
-
- if (!unlikely(packed_search))
- goto slowpath;
if (unlikely(f->exponent >= BFLOAT_FAILED))
goto slowpath;
n = n * 2 + (cmp < 0);
} while (n < t->size);
- inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
+ inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
/*
* n would have been the node we recursed to - the low bit tells us if
if (unlikely(!inorder))
return btree_bkey_first(b, t);
- f = &base->f[eytzinger1_prev(n >> 1, t->size)];
+ f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
}
return cacheline_to_bkey(b, t, inorder, f->key_offset);
case BSET_NO_AUX_TREE:
return btree_bkey_first(b, t);
case BSET_RW_AUX_TREE:
- return bset_search_write_set(b, t, search, lossy_packed_search);
+ return bset_search_write_set(b, t, search);
case BSET_RO_AUX_TREE:
- /*
- * Each node in the auxiliary search tree covers a certain range
- * of bits, and keys above and below the set it covers might
- * differ outside those bits - so we have to special case the
- * start and end - handle that here:
- */
-
- if (bkey_cmp(*search, t->max_key) > 0)
- return btree_bkey_last(b, t);
-
return bset_search_tree(b, t, search, lossy_packed_search);
default:
- unreachable();
+ BUG();
}
}
{
if (lossy_packed_search)
while (m != btree_bkey_last(b, t) &&
- bkey_iter_cmp_p_or_unp(b, search, lossy_packed_search,
- m) > 0)
- m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+ bkey_iter_cmp_p_or_unp(b, m,
+ lossy_packed_search, search) < 0)
+ m = bkey_p_next(m);
if (!packed_search)
while (m != btree_bkey_last(b, t) &&
- bkey_iter_pos_cmp(b, search, m) > 0)
- m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+ bkey_iter_pos_cmp(b, m, search) < 0)
+ m = bkey_p_next(m);
- if (btree_keys_expensive_checks(b)) {
+ if (bch2_expensive_debug_checks) {
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
BUG_ON(prev &&
- bkey_iter_cmp_p_or_unp(b, search, packed_search,
- prev) <= 0);
+ bkey_iter_cmp_p_or_unp(b, prev,
+ packed_search, search) >= 0);
}
return m;
}
-/*
- * Returns the first key greater than or equal to @search
- */
-static __always_inline __flatten
-struct bkey_packed *bch2_bset_search(struct btree *b,
- struct bset_tree *t,
- struct bpos *search,
- struct bkey_packed *packed_search,
- const struct bkey_packed *lossy_packed_search)
-{
- struct bkey_packed *m = __bch2_bset_search(b, t, search,
- lossy_packed_search);
-
- return bch2_bset_search_linear(b, t, search,
- packed_search, lossy_packed_search, m);
-}
-
/* Btree node iterator */
static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
bch2_btree_node_iter_sort(iter, b);
}
-noinline __flatten __attribute__((cold))
+noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
struct btree *b, struct bpos *search)
{
- struct bset_tree *t;
+ struct bkey_packed *k;
trace_bkey_pack_pos_fail(search);
- for_each_bset(b, t)
- __bch2_btree_node_iter_push(iter, b,
- bch2_bset_search(b, t, search, NULL, NULL),
- btree_bkey_last(b, t));
+ bch2_btree_node_iter_init_from_start(iter, b);
- bch2_btree_node_iter_sort(iter, b);
+ while ((k = bch2_btree_node_iter_peek(iter, b)) &&
+ bkey_iter_pos_cmp(b, k, search) < 0)
+ bch2_btree_node_iter_advance(iter, b);
}
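
The slow path above is now built from the public iterator calls instead of per-bset searches. The same three calls give the general pattern for walking a whole node - a sketch, with the per-key body elided:

	struct btree_node_iter iter;
	struct bkey_packed *k;

	bch2_btree_node_iter_init_from_start(&iter, b);

	while ((k = bch2_btree_node_iter_peek(&iter, b))) {
		/* ... use k ... */
		bch2_btree_node_iter_advance(&iter, b);
	}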
/**
- * bch_btree_node_iter_init - initialize a btree node iterator, starting from a
+ * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
* given position
*
+ * @iter: iterator to initialize
+ * @b: btree node to search
+ * @search: search key
+ *
* Main entry point to the lookup code for individual btree nodes:
*
* NOTE:
* to the search key is going to have 0 sectors after the search key.
*
* But this does mean that we can't just search for
- * bkey_successor(start_of_range) to get the first extent that overlaps with
+ * bpos_successor(start_of_range) to get the first extent that overlaps with
* the range we want - if we're unlucky and there's an extent that ends
* exactly where we searched, then there could be a deleted key at the same
* position and we'd get that when we search instead of the preceding extent
struct bkey_packed *k[MAX_BSETS];
unsigned i;
- EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
+ EBUG_ON(bpos_lt(*search, b->data->min_key));
+ EBUG_ON(bpos_gt(*search, b->data->max_key));
bset_aux_tree_verify(b);
memset(iter, 0, sizeof(*iter));
EBUG_ON(iter->data->k > iter->data->end);
- while (!__btree_node_iter_set_end(iter, 0) &&
- !__bch2_btree_node_iter_peek_all(iter, b)->u64s)
- iter->data->k++;
-
if (unlikely(__btree_node_iter_set_end(iter, 0))) {
- bch2_btree_node_iter_set_drop(iter, iter->data);
+ /* avoid an expensive memmove call: */
+ iter->data[0] = iter->data[1];
+ iter->data[1] = iter->data[2];
+ iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
return;
}
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
struct btree *b)
{
- if (btree_keys_expensive_checks(b)) {
+ if (bch2_expensive_debug_checks) {
bch2_btree_node_iter_verify(iter, b);
bch2_btree_node_iter_next_check(iter, b);
}
struct bset_tree *t;
unsigned end = 0;
- bch2_btree_node_iter_verify(iter, b);
+ if (bch2_expensive_debug_checks)
+ bch2_btree_node_iter_verify(iter, b);
for_each_bset(b, t) {
k = bch2_bkey_prev_all(b, t,
iter->data[0].k = __btree_node_key_to_offset(b, prev);
iter->data[0].end = end;
- bch2_btree_node_iter_verify(iter, b);
+ if (bch2_expensive_debug_checks)
+ bch2_btree_node_iter_verify(iter, b);
return prev;
}
-struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
- struct btree *b,
- unsigned min_key_type)
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
+ struct btree *b)
{
struct bkey_packed *prev;
do {
prev = bch2_btree_node_iter_prev_all(iter, b);
- } while (prev && prev->type < min_key_type);
+ } while (prev && bkey_deleted(prev));
return prev;
}
/* Mergesort */
-void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
+void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
- struct bset_tree *t;
+ const struct bset_tree *t;
for_each_bset(b, t) {
enum bset_aux_tree_type type = bset_aux_tree_type(t);
struct bkey uk;
unsigned j, inorder;
- if (out->pos != out->end)
- *out->pos = '\0';
-
if (!bset_has_ro_aux_tree(t))
return;
if (!inorder || inorder >= t->size)
return;
- j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
+ j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
if (k != tree_to_bkey(b, t, j))
return;
switch (bkey_float(b, t, j)->exponent) {
case BFLOAT_FAILED:
uk = bkey_unpack_key(b, k);
- pr_buf(out,
+ prt_printf(out,
" failed unpacked at depth %u\n"
- "\t%llu:%llu\n",
- ilog2(j),
- uk.p.inode, uk.p.offset);
+ "\t",
+ ilog2(j));
+ bch2_bpos_to_text(out, uk.p);
+ prt_printf(out, "\n");
break;
}
}