/* btree_node_iter_large: */
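/*
 * Heap comparator for the oversized node iterator used when reading a
 * btree node: orders heap entries by comparing the keys their offsets
 * point at within the node.
 */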
#define btree_node_iter_cmp_heap(h, _l, _r) \
- __btree_node_iter_cmp((iter)->is_extents, b, \
+ __btree_node_iter_cmp(b, \
__btree_node_offset_to_key(b, (_l).k), \
__btree_node_offset_to_key(b, (_r).k))
sort_iter_sort(iter, sort_extent_whiteouts_cmp);
while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
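+ /* skip keys marked as deleted: */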
+ if (bkey_deleted(in))
+ continue;
+
EBUG_ON(bkeyp_val_u64s(f, in));
EBUG_ON(in->type != KEY_TYPE_DISCARD);
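/*
 * In lazy mode, report dead space only when the bset crosses the lazy
 * compaction threshold, or when a compaction is already underway and
 * this bset hasn't been written out yet:
 */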
if (mode == COMPACT_LAZY) {
if (should_compact_bset_lazy(b, t) ||
- (compacting && bset_unwritten(b, bset(b, t))))
+ (compacting && !bset_written(b, bset(b, t))))
return dead_u64s;
} else {
if (bset_written(b, bset(b, t)))
struct bkey_packed *k, *n, *out, *start, *end;
struct btree_node_entry *src = NULL, *dst = NULL;
- if (t != b->set && bset_unwritten(b, i)) {
+ if (t != b->set && !bset_written(b, i)) {
src = container_of(i, struct btree_node_entry, keys);
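/*
 * Unwritten bsets get moved down, as far as the first unwritten block -
 * but never on top of the keys of the previous bset:
 */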
dst = max(write_block(b),
(void *) btree_bkey_last(b, t - 1));
continue;
if (bkey_whiteout(k)) {
- unreserve_whiteout(b, t, k);
+ unreserve_whiteout(b, k);
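/* keep the whiteout but drop its value: copy just the key, marked zero size: */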
memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
set_bkeyp_val_u64s(f, u_pos, 0);
u_pos = bkey_next(u_pos);
start = btree_bkey_first(b, t);
end = btree_bkey_last(b, t);
- if (bset_unwritten(b, i) &&
+ if (!bset_written(b, i) &&
t != b->set) {
struct bset *dst =
max_t(struct bset *, write_block(b),
bch2_bset_set_no_aux_tree(dst, dst->set);
- bch2_btree_node_iter_init_from_start(&src_iter, src,
- btree_node_is_extents(src));
+ bch2_btree_node_iter_init_from_start(&src_iter, src);
if (btree_node_ops(src)->key_normalize ||
btree_node_ops(src)->key_merge)
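/* find the first bset that hasn't been written to disk: */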
for (unwritten_idx = 0;
unwritten_idx < b->nsets;
unwritten_idx++)
- if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
+ if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
break;
if (b->nsets - unwritten_idx > 1) {
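/* only the last bset, while still unwritten, gets a writeable aux tree: */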
for_each_bset(b, t)
bch2_bset_build_aux_tree(b, t,
- bset_unwritten(b, bset(b, t)) &&
+ !bset_written(b, bset(b, t)) &&
t == bset_tree_last(b));
}
char *out = buf, *end = buf + len;
out += scnprintf(out, end - out,
- "error validating btree node %s "
+ "error validating btree node %s"
"at btree %u level %u/%u\n"
"pos %llu:%llu node offset %u",
write ? "before write " : "",
bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
btree_err(BTREE_ERR_FIXABLE, c, b, i,
- "invalid bkey:\n%s\n%s", buf, invalid);
+ "invalid bkey:\n%s\n%s", invalid, buf);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_next(k),
int ret, retry_read = 0, write = READ;
iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
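/* the iterator comes from a mempool and may be recycled - start it out empty: */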
- __bch2_btree_node_iter_large_init(iter, btree_node_is_extents(b));
+ iter->used = 0;
if (bch2_meta_read_fault("btree"))
btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
const char *invalid = bch2_bkey_val_invalid(c, type, u);
- if (invalid) {
+ if (invalid ||
+ (inject_invalid_keys(c) &&
+ !bversion_cmp(u.k->version, MAX_VERSION))) {
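+ /* format the offending key for the error message, then drop it from the bset: */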
char buf[160];
bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
memmove_u64s_down(k, bkey_next(k),
(u64 *) vstruct_end(i) - (u64 *) k);
+ set_btree_bset_end(b, b->set);
continue;
}
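/*
 * node iterator: traverse to the node itself at b->level, rather than
 * to the keys inside it:
 */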
__bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
BTREE_MAX_DEPTH,
- b->level, 0);
+ b->level, BTREE_ITER_NODES);
retry:
ret = bch2_btree_iter_traverse(&iter);
if (ret)
clear_btree_node_just_written(b);
/*
- * Note: immediately after write, bset_unwritten()/bset_written() don't
- * work - the amount of data we had to write after compaction might have
- * been smaller than the offset of the last bset.
+ * Note: immediately after write, bset_written() doesn't work - the
+ * amount of data we had to write after compaction might have been
+ * smaller than the offset of the last bset.
*
* However, we know that all bsets have been written here, as long as
* we're still holding the write lock: