+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_methods.h"
+#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"
+#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
-/* btree_node_iter_large: */
-
-#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)
-
-void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
- struct btree *b,
- const struct bkey_packed *k,
- const struct bkey_packed *end)
-{
- if (k != end) {
- struct btree_node_iter_set n =
- ((struct btree_node_iter_set) {
- __btree_node_key_to_offset(b, k),
- __btree_node_key_to_offset(b, end)
- });
-
- __heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
- }
-}
-
-void bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
- struct btree *b)
-{
- iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;
-
- EBUG_ON(!iter->used);
- EBUG_ON(iter->data->k > iter->data->end);
-
- if (iter->data->k == iter->data->end)
- heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
- else
- heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
-}
-
static void verify_no_dups(struct btree *b,
struct bkey_packed *start,
- struct bkey_packed *end)
+ struct bkey_packed *end,
+ bool extents)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- struct bkey_packed *k;
+ struct bkey_packed *k, *p;
- for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
- struct bkey l = bkey_unpack_key(b, k);
- struct bkey r = bkey_unpack_key(b, bkey_next(k));
+ if (start == end)
+ return;
- BUG_ON(btree_node_is_extents(b)
+ for (p = start, k = bkey_next_skip_noops(start, end);
+ k != end;
+ p = k, k = bkey_next_skip_noops(k, end)) {
+ struct bkey l = bkey_unpack_key(b, p);
+ struct bkey r = bkey_unpack_key(b, k);
+
+ BUG_ON(extents
? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
: bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
- //BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
+ //BUG_ON(bkey_cmp_packed(&b->format, p, k) >= 0);
}
#endif
}
-static void clear_needs_whiteout(struct bset *i)
-{
- struct bkey_packed *k;
-
- for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
- k->needs_whiteout = false;
-}
-
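+/* Set or clear needs_whiteout on every key in a bset: */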
-static void set_needs_whiteout(struct bset *i)
+static void set_needs_whiteout(struct bset *i, int v)
{
struct bkey_packed *k;
- for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
- k->needs_whiteout = true;
+ for (k = i->start;
+ k != vstruct_last(i);
+ k = bkey_next_skip_noops(k, vstruct_last(i)))
+ k->needs_whiteout = v;
}
static void btree_bounce_free(struct bch_fs *c, unsigned order,
static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
bool *used_mempool)
{
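+ /*
+ * The bounce pool can fall back to vmalloc-backed memory, which doesn't
+ * honour per-call GFP flags - hence the scoped memalloc_nofs to keep
+ * reclaim from recursing into the filesystem:
+ */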
+ unsigned flags = memalloc_nofs_save();
void *p;
BUG_ON(order > btree_page_order(c));
*used_mempool = false;
p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
- if (p)
- return p;
-
- *used_mempool = true;
- return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
-}
-
-typedef int (*sort_cmp_fn)(struct btree *,
- struct bkey_packed *,
- struct bkey_packed *);
-
-struct sort_iter {
- struct btree *b;
- unsigned used;
-
- struct sort_iter_set {
- struct bkey_packed *k, *end;
- } data[MAX_BSETS + 1];
-};
-
-static void sort_iter_init(struct sort_iter *iter, struct btree *b)
-{
- memset(iter, 0, sizeof(*iter));
- iter->b = b;
-}
-
-static inline void __sort_iter_sift(struct sort_iter *iter,
- unsigned from,
- sort_cmp_fn cmp)
-{
- unsigned i;
-
- for (i = from;
- i + 1 < iter->used &&
- cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
- i++)
- swap(iter->data[i], iter->data[i + 1]);
-}
-
-static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
-{
-
- __sort_iter_sift(iter, 0, cmp);
-}
-
-static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- unsigned i = iter->used;
-
- while (i--)
- __sort_iter_sift(iter, i, cmp);
-}
-
-static void sort_iter_add(struct sort_iter *iter,
- struct bkey_packed *k,
- struct bkey_packed *end)
-{
- BUG_ON(iter->used >= ARRAY_SIZE(iter->data));
-
- if (k != end)
- iter->data[iter->used++] = (struct sort_iter_set) { k, end };
-}
-
-static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
-{
- return iter->used ? iter->data->k : NULL;
-}
-
-static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
-{
- iter->data->k = bkey_next(iter->data->k);
-
- BUG_ON(iter->data->k > iter->data->end);
-
- if (iter->data->k == iter->data->end)
- array_remove_item(iter->data, iter->used, 0);
- else
- sort_iter_sift(iter, cmp);
+ if (!p) {
+ *used_mempool = true;
+ p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+ }
+ memalloc_nofs_restore(flags);
+ return p;
}
-static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
- sort_cmp_fn cmp)
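+/* Sort an array of pointers to packed bkeys in place, ascending by key: */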
+static void sort_bkey_ptrs(const struct btree *bt,
+ struct bkey_packed **ptrs, unsigned nr)
{
- struct bkey_packed *ret = sort_iter_peek(iter);
+ unsigned n = nr, a = nr / 2, b, c, d;
- if (ret)
- sort_iter_advance(iter, cmp);
-
- return ret;
-}
-
-static inline int sort_key_whiteouts_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r);
-}
-
-static unsigned sort_key_whiteouts(struct bkey_packed *dst,
- struct sort_iter *iter)
-{
- struct bkey_packed *in, *out = dst;
+ if (!a)
+ return;
- sort_iter_sort(iter, sort_key_whiteouts_cmp);
+ /* Heap sort: see lib/sort.c: */
+ while (1) {
+ if (a)
+ a--;
+ else if (--n)
+ swap(ptrs[0], ptrs[n]);
+ else
+ break;
- while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
- bkey_copy(out, in);
- out = bkey_next(out);
+ for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
+ b = bkey_cmp_packed(bt,
+ ptrs[c],
+ ptrs[d]) >= 0 ? c : d;
+ if (d == n)
+ b = c;
+
+ while (b != a &&
+ bkey_cmp_packed(bt,
+ ptrs[a],
+ ptrs[b]) >= 0)
+ b = (b - 1) / 2;
+ c = b;
+ while (b != a) {
+ b = (b - 1) / 2;
+ swap(ptrs[b], ptrs[c]);
+ }
}
-
- return (u64 *) out - (u64 *) dst;
}
-static inline int sort_extent_whiteouts_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
+static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
- struct bkey ul = bkey_unpack_key(b, l);
- struct bkey ur = bkey_unpack_key(b, r);
-
- return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
-}
-
-static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
- struct sort_iter *iter)
-{
- const struct bkey_format *f = &iter->b->format;
- struct bkey_packed *in, *out = dst;
- struct bkey_i l, r;
- bool prev = false, l_packed = false;
- u64 max_packed_size = bkey_field_max(f, BKEY_FIELD_SIZE);
- u64 max_packed_offset = bkey_field_max(f, BKEY_FIELD_OFFSET);
- u64 new_size;
-
- max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);
-
- sort_iter_sort(iter, sort_extent_whiteouts_cmp);
-
- while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
- if (bkey_deleted(in))
- continue;
-
- EBUG_ON(bkeyp_val_u64s(f, in));
- EBUG_ON(in->type != KEY_TYPE_DISCARD);
-
- r.k = bkey_unpack_key(iter->b, in);
+ struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
+ bool used_mempool = false;
+ unsigned order;
- if (prev &&
- bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
- if (bkey_cmp(l.k.p, r.k.p) >= 0)
- continue;
+ if (!b->whiteout_u64s)
+ return;
- new_size = l_packed
- ? min(max_packed_size, max_packed_offset -
- bkey_start_offset(&l.k))
- : KEY_SIZE_MAX;
+ order = get_order(b->whiteout_u64s * sizeof(u64));
- new_size = min(new_size, r.k.p.offset -
- bkey_start_offset(&l.k));
+ new_whiteouts = btree_bounce_alloc(c, order, &used_mempool);
- BUG_ON(new_size < l.k.size);
+ ptrs = ptrs_end = ((void *) new_whiteouts + (PAGE_SIZE << order));
- bch2_key_resize(&l.k, new_size);
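+ /*
+ * Fill an array of pointers to the unwritten whiteouts, growing down
+ * from the end of the bounce buffer:
+ */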
+ for (k = unwritten_whiteouts_start(c, b);
+ k != unwritten_whiteouts_end(c, b);
+ k = bkey_next(k))
+ *--ptrs = k;
- if (bkey_cmp(l.k.p, r.k.p) >= 0)
- continue;
+ sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
- bch2_cut_front(l.k.p, &r);
- }
-
- if (prev) {
- if (!bch2_bkey_pack(out, &l, f)) {
- BUG_ON(l_packed);
- bkey_copy(out, &l);
- }
- out = bkey_next(out);
- }
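+ /* Copy the whiteouts back, now in sorted order: */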
+ k = new_whiteouts;
- l = r;
- prev = true;
- l_packed = bkey_packed(in);
+ while (ptrs != ptrs_end) {
+ bkey_copy(k, *ptrs);
+ k = bkey_next(k);
+ ptrs++;
}
- if (prev) {
- if (!bch2_bkey_pack(out, &l, f)) {
- BUG_ON(l_packed);
- bkey_copy(out, &l);
- }
- out = bkey_next(out);
- }
+ verify_no_dups(b, new_whiteouts,
+ (void *) ((u64 *) new_whiteouts + b->whiteout_u64s),
+ btree_node_old_extent_overwrite(b));
- return (u64 *) out - (u64 *) dst;
+ memcpy_u64s(unwritten_whiteouts_start(c, b),
+ new_whiteouts, b->whiteout_u64s);
+
+ btree_bounce_free(c, order, used_mempool, new_whiteouts);
}
-static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
- bool compacting,
- enum compact_mode mode)
+static bool should_compact_bset(struct btree *b, struct bset_tree *t,
+ bool compacting, enum compact_mode mode)
{
- unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
- unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
+ if (!bset_dead_u64s(b, t))
+ return false;
- if (mode == COMPACT_LAZY) {
- if (should_compact_bset_lazy(b, t) ||
- (compacting && !bset_written(b, bset(b, t))))
- return dead_u64s;
- } else {
- if (bset_written(b, bset(b, t)))
- return dead_u64s;
+ switch (mode) {
+ case COMPACT_LAZY:
+ return should_compact_bset_lazy(b, t) ||
+ (compacting && !bset_written(b, bset(b, t)));
+ case COMPACT_ALL:
+ return true;
+ default:
+ BUG();
}
-
- return 0;
}
-bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
- enum compact_mode mode)
+static bool bch2_compact_extent_whiteouts(struct bch_fs *c,
+ struct btree *b,
+ enum compact_mode mode)
{
const struct bkey_format *f = &b->format;
struct bset_tree *t;
unsigned order, whiteout_u64s = 0, u64s;
bool used_mempool, compacting = false;
+ BUG_ON(!btree_node_is_extents(b));
+
for_each_bset(b, t)
- whiteout_u64s += should_compact_bset(b, t,
- whiteout_u64s != 0, mode);
+ if (should_compact_bset(b, t, whiteout_u64s != 0, mode))
+ whiteout_u64s += bset_dead_u64s(b, t);
if (!whiteout_u64s)
return false;
+ bch2_sort_whiteouts(c, b);
+
sort_iter_init(&sort_iter, b);
whiteout_u64s += b->whiteout_u64s;
if (t != b->set && !bset_written(b, i)) {
src = container_of(i, struct btree_node_entry, keys);
dst = max(write_block(b),
- (void *) btree_bkey_last(b, t -1));
+ (void *) btree_bkey_last(b, t - 1));
}
+ if (src != dst)
+ compacting = true;
+
if (!should_compact_bset(b, t, compacting, mode)) {
if (src != dst) {
memmove(dst, src, sizeof(*src) +
out = i->start;
for (k = start; k != end; k = n) {
- n = bkey_next(k);
+ n = bkey_next_skip_noops(k, end);
- if (bkey_deleted(k) && btree_node_is_extents(b))
+ if (bkey_deleted(k))
continue;
+ BUG_ON(bkey_whiteout(k) &&
+ k->needs_whiteout &&
+ bkey_written(b, k));
+
if (bkey_whiteout(k) && !k->needs_whiteout)
continue;
if (bkey_whiteout(k)) {
- unreserve_whiteout(b, k);
memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
set_bkeyp_val_u64s(f, u_pos, 0);
u_pos = bkey_next(u_pos);
- } else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
+ } else {
bkey_copy(out, k);
out = bkey_next(out);
}
sort_iter_add(&sort_iter, u_start, u_pos);
- if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
- i->u64s = cpu_to_le16((u64 *) out - i->_data);
- set_btree_bset_end(b, t);
- bch2_bset_set_no_aux_tree(b, t);
- }
+ i->u64s = cpu_to_le16((u64 *) out - i->_data);
+ set_btree_bset_end(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
}
b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
BUG_ON((void *) unwritten_whiteouts_start(c, b) <
(void *) btree_bkey_last(b, bset_tree_last(b)));
- u64s = btree_node_is_extents(b)
- ? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
- &sort_iter)
- : sort_key_whiteouts(unwritten_whiteouts_start(c, b),
- &sort_iter);
+ u64s = bch2_sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
+ &sort_iter);
BUG_ON(u64s > b->whiteout_u64s);
- BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
BUG_ON(u_pos != whiteouts && !u64s);
if (u64s != b->whiteout_u64s) {
verify_no_dups(b,
unwritten_whiteouts_start(c, b),
- unwritten_whiteouts_end(c, b));
+ unwritten_whiteouts_end(c, b),
+ true);
btree_bounce_free(c, order, used_mempool, whiteouts);
- if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
- bch2_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
bch_btree_keys_u64s_remaining(c, b);
bch2_verify_btree_nr_keys(b);
return true;
}
-static bool bch2_drop_whiteouts(struct btree *b)
+static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
struct bset_tree *t;
bool ret = false;
for_each_bset(b, t) {
struct bset *i = bset(b, t);
struct bkey_packed *k, *n, *out, *start, *end;
+ struct btree_node_entry *src = NULL, *dst = NULL;
+
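+ /*
+ * Unwritten bsets may need to be shifted down so they start right
+ * after the previous bset (or the last written block):
+ */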
+ if (t != b->set && !bset_written(b, i)) {
+ src = container_of(i, struct btree_node_entry, keys);
+ dst = max(write_block(b),
+ (void *) btree_bkey_last(b, t - 1));
+ }
+
+ if (src != dst)
+ ret = true;
- if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
+ if (!should_compact_bset(b, t, ret, mode)) {
+ if (src != dst) {
+ memmove(dst, src, sizeof(*src) +
+ le16_to_cpu(src->keys.u64s) *
+ sizeof(u64));
+ i = &dst->keys;
+ set_btree_bset(b, t, i);
+ }
continue;
+ }
start = btree_bkey_first(b, t);
end = btree_bkey_last(b, t);
- if (!bset_written(b, i) &&
- t != b->set) {
- struct bset *dst =
- max_t(struct bset *, write_block(b),
- (void *) btree_bkey_last(b, t -1));
-
- memmove(dst, i, sizeof(struct bset));
- i = dst;
+ if (src != dst) {
+ memmove(dst, src, sizeof(*src));
+ i = &dst->keys;
set_btree_bset(b, t, i);
}
out = i->start;
for (k = start; k != end; k = n) {
- n = bkey_next(k);
+ n = bkey_next_skip_noops(k, end);
if (!bkey_whiteout(k)) {
bkey_copy(out, k);
out = bkey_next(out);
+ } else {
+ BUG_ON(k->needs_whiteout);
}
}
i->u64s = cpu_to_le16((u64 *) out - i->_data);
+ set_btree_bset_end(b, t);
bch2_bset_set_no_aux_tree(b, t);
ret = true;
}
bch2_verify_btree_nr_keys(b);
- return ret;
-}
-
-static inline int sort_keys_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r) ?:
- (int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
- (int) l->needs_whiteout - (int) r->needs_whiteout;
-}
-
-static unsigned sort_keys(struct bkey_packed *dst,
- struct sort_iter *iter,
- bool filter_whiteouts)
-{
- const struct bkey_format *f = &iter->b->format;
- struct bkey_packed *in, *next, *out = dst;
-
- sort_iter_sort(iter, sort_keys_cmp);
-
- while ((in = sort_iter_next(iter, sort_keys_cmp))) {
- if (bkey_whiteout(in) &&
- (filter_whiteouts || !in->needs_whiteout))
- continue;
-
- if (bkey_whiteout(in) &&
- (next = sort_iter_peek(iter)) &&
- !bkey_cmp_packed(iter->b, in, next)) {
- BUG_ON(in->needs_whiteout &&
- next->needs_whiteout);
- /*
- * XXX racy, called with read lock from write path
- *
- * leads to spurious BUG_ON() in bkey_unpack_key() in
- * debug mode
- */
- next->needs_whiteout |= in->needs_whiteout;
- continue;
- }
-
- if (bkey_whiteout(in)) {
- memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
- set_bkeyp_val_u64s(f, out, 0);
- } else {
- bkey_copy(out, in);
- }
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
-}
+ bch2_btree_build_aux_trees(b);
-static inline int sort_extents_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
-{
- return bkey_cmp_packed(b, l, r) ?:
- (int) bkey_deleted(l) - (int) bkey_deleted(r);
+ return ret;
}
-static unsigned sort_extents(struct bkey_packed *dst,
- struct sort_iter *iter,
- bool filter_whiteouts)
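+/*
+ * With the new extent update path, whiteouts can simply be dropped; nodes
+ * written with the old extent overwrite code still need full extent
+ * whiteout compaction:
+ */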
+bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
+ enum compact_mode mode)
{
- struct bkey_packed *in, *out = dst;
-
- sort_iter_sort(iter, sort_extents_cmp);
-
- while ((in = sort_iter_next(iter, sort_extents_cmp))) {
- if (bkey_deleted(in))
- continue;
-
- if (bkey_whiteout(in) &&
- (filter_whiteouts || !in->needs_whiteout))
- continue;
-
- bkey_copy(out, in);
- out = bkey_next(out);
- }
-
- return (u64 *) out - (u64 *) dst;
+ return !btree_node_old_extent_overwrite(b)
+ ? bch2_drop_whiteouts(b, mode)
+ : bch2_compact_extent_whiteouts(c, b, mode);
}
static void btree_node_sort(struct bch_fs *c, struct btree *b,
start_time = local_clock();
- if (btree_node_is_extents(b))
+ if (btree_node_old_extent_overwrite(b))
filter_whiteouts = bset_written(b, start_bset);
- u64s = btree_node_is_extents(b)
- ? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
- : sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
+ u64s = (btree_node_old_extent_overwrite(b)
+ ? bch2_sort_extents
+ : bch2_sort_keys)(out->keys.start,
+ &sort_iter,
+ filter_whiteouts);
out->keys.u64s = cpu_to_le16(u64s);
BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
if (sorting_entire_node)
- bch2_time_stats_update(&c->times[BCH_TIME_btree_sort],
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
start_time);
/* Make sure we preserve bset journal_seq: */
bch2_verify_btree_nr_keys(b);
}
-/* Sort + repack in a new format: */
-static struct btree_nr_keys sort_repack(struct bset *dst,
- struct btree *src,
- struct btree_node_iter *src_iter,
- struct bkey_format *out_f,
- bool filter_whiteouts)
-{
- struct bkey_format *in_f = &src->format;
- struct bkey_packed *in, *out = vstruct_last(dst);
- struct btree_nr_keys nr;
-
- memset(&nr, 0, sizeof(nr));
-
- while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
- if (filter_whiteouts && bkey_whiteout(in))
- continue;
-
- if (bch2_bkey_transform(out_f, out, bkey_packed(in)
- ? in_f : &bch2_bkey_format_current, in))
- out->format = KEY_FORMAT_LOCAL_BTREE;
- else
- bch2_bkey_unpack(src, (void *) out, in);
-
- btree_keys_account_key_add(&nr, 0, out);
- out = bkey_next(out);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
-/* Sort, repack, and merge: */
-static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
- struct bset *dst,
- struct btree *src,
- struct btree_node_iter *iter,
- struct bkey_format *out_f,
- bool filter_whiteouts,
- key_filter_fn filter,
- key_merge_fn merge)
-{
- struct bkey_packed *k, *prev = NULL, *out;
- struct btree_nr_keys nr;
- BKEY_PADDED(k) tmp;
-
- memset(&nr, 0, sizeof(nr));
-
- while ((k = bch2_btree_node_iter_next_all(iter, src))) {
- if (filter_whiteouts && bkey_whiteout(k))
- continue;
-
- /*
- * The filter might modify pointers, so we have to unpack the
- * key and values to &tmp.k:
- */
- bch2_bkey_unpack(src, &tmp.k, k);
-
- if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
- continue;
-
- /* prev is always unpacked, for key merging: */
-
- if (prev &&
- merge &&
- merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
- continue;
-
- /*
- * the current key becomes the new prev: advance prev, then
- * copy the current key - but first pack prev (in place):
- */
- if (prev) {
- bch2_bkey_pack(prev, (void *) prev, out_f);
-
- btree_keys_account_key_add(&nr, 0, prev);
- prev = bkey_next(prev);
- } else {
- prev = vstruct_last(dst);
- }
-
- bkey_copy(prev, &tmp.k);
- }
-
- if (prev) {
- bch2_bkey_pack(prev, (void *) prev, out_f);
- btree_keys_account_key_add(&nr, 0, prev);
- out = bkey_next(prev);
- } else {
- out = vstruct_last(dst);
- }
-
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
void bch2_btree_sort_into(struct bch_fs *c,
struct btree *dst,
struct btree *src)
bch2_btree_node_iter_init_from_start(&src_iter, src);
- if (btree_node_ops(src)->key_normalize ||
- btree_node_ops(src)->key_merge)
- nr = sort_repack_merge(c, btree_bset_first(dst),
+ if (btree_node_is_extents(src))
+ nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
src, &src_iter,
&dst->format,
- true,
- btree_node_ops(src)->key_normalize,
- btree_node_ops(src)->key_merge);
+ true);
else
- nr = sort_repack(btree_bset_first(dst),
+ nr = bch2_sort_repack(btree_bset_first(dst),
src, &src_iter,
&dst->format,
true);
- bch2_time_stats_update(&c->times[BCH_TIME_btree_sort], start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+ start_time);
set_btree_bset_end(dst, dst->set);
struct btree_node_entry *bne;
bool did_sort;
- EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->l[b->level].b != b);
+ EBUG_ON(!(b->c.lock.state.seq & 1));
+ EBUG_ON(iter && iter->l[b->c.level].b != b);
did_sort = btree_node_compact(c, b, iter);
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
bytes);
- nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
+ nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
}
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
struct btree *b, struct bset *i,
unsigned offset, int write)
{
- pr_buf(out, "error validating btree node %s"
- "at btree %u level %u/%u\n"
- "pos %llu:%llu node offset %u",
+ pr_buf(out, "error validating btree node %sat btree %u level %u/%u\n"
+ "pos ",
write ? "before write " : "",
- b->btree_id, b->level,
- c->btree_roots[b->btree_id].level,
- b->key.k.p.inode, b->key.k.p.offset,
- b->written);
+ b->c.btree_id, b->c.level,
+ c->btree_roots[b->c.btree_id].level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+
+ pr_buf(out, " node offset %u", b->written);
if (i)
pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}
static int validate_bset(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors,
- unsigned *whiteout_u64s, int write,
- bool have_retry)
+ int write, bool have_retry)
{
- struct bkey_packed *k, *prev = NULL;
- struct bpos prev_pos = POS_MIN;
- enum bkey_type type = btree_node_type(b);
- bool seen_non_whiteout = false;
+ unsigned version = le16_to_cpu(i->version);
const char *err;
int ret = 0;
- if (i == &b->data->keys) {
+ btree_err_on((version != BCH_BSET_VERSION_OLD &&
+ version < bcachefs_metadata_version_min) ||
+ version >= bcachefs_metadata_version_max,
+ BTREE_ERR_FATAL, c, b, i,
+ "unsupported bset version");
+
+ if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "bset past end of btree node")) {
+ i->u64s = 0;
+ return 0;
+ }
+
+ btree_err_on(b->written && !i->u64s,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "empty bset");
+
+ if (!b->written) {
+ struct btree_node *bn =
+ container_of(i, struct btree_node, keys);
/* These indicate that we read the wrong btree node: */
- btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ /* XXX endianness */
+ btree_err_on(bp->seq != bn->keys.seq,
+ BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "incorrect sequence number (wrong btree node)");
+ }
+
+ btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect btree id");
- btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
+ btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect level");
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
- u64 *p = (u64 *) &b->data->ptr;
+ u64 *p = (u64 *) &bn->ptr;
*p = swab64(*p);
- bch2_bpos_swab(&b->data->min_key);
- bch2_bpos_swab(&b->data->max_key);
}
- btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
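+ /*
+ * Key and format checks run against current in-memory conventions:
+ * on read, convert the node before checking; on write, convert back
+ * afterwards (below):
+ */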
+ if (!write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
+ BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "incorrect min_key: got %llu:%llu should be %llu:%llu",
+ b->data->min_key.inode,
+ b->data->min_key.offset,
+ bp->min_key.inode,
+ bp->min_key.offset);
+ }
+
+ btree_err_on(bkey_cmp(bn->max_key, b->key.k.p),
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect max key");
+ if (write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
/* XXX: ideally we would be validating min_key too */
#if 0
/*
* not correct anymore, due to btree node write error
* handling
*
- * need to add b->data->seq to btree keys and verify
+ * need to add bn->seq to btree keys and verify
* against that
*/
btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
- b->data->ptr),
+ bn->ptr),
BTREE_ERR_FATAL, c, b, i,
"incorrect backpointer");
#endif
- err = bch2_bkey_format_validate(&b->data->format);
+ err = bch2_bkey_format_validate(&bn->format);
btree_err_on(err,
BTREE_ERR_FATAL, c, b, i,
"invalid bkey format: %s", err);
- }
-
- if (btree_err_on(le16_to_cpu(i->version) != BCACHE_BSET_VERSION,
- BTREE_ERR_FIXABLE, c, b, i,
- "unsupported bset version")) {
- i->version = cpu_to_le16(BCACHE_BSET_VERSION);
- i->u64s = 0;
- return 0;
- }
- if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
- BTREE_ERR_FIXABLE, c, b, i,
- "bset past end of btree node")) {
- i->u64s = 0;
- return 0;
+ compat_bformat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &bn->format);
}
+fsck_err:
+ return ret;
+}
- btree_err_on(b->written && !i->u64s,
- BTREE_ERR_FIXABLE, c, b, i,
- "empty bset");
+static int validate_bset_keys(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned *whiteout_u64s,
+ int write, bool have_retry)
+{
+ unsigned version = le16_to_cpu(i->version);
+ struct bkey_packed *k, *prev = NULL;
+ bool seen_non_whiteout = false;
+ int ret = 0;
if (!BSET_SEPARATE_WHITEOUTS(i)) {
seen_non_whiteout = true;
for (k = i->start;
k != vstruct_last(i);) {
- struct bkey_s_c u;
+ struct bkey_s u;
struct bkey tmp;
const char *invalid;
- if (btree_err_on(!k->u64s,
- BTREE_ERR_FIXABLE, c, b, i,
- "KEY_U64s 0: %zu bytes of metadata lost",
- vstruct_end(i) - (void *) k)) {
- i->u64s = cpu_to_le16((u64 *) k - i->_data);
- break;
- }
-
if (btree_err_on(bkey_next(k) > vstruct_last(i),
BTREE_ERR_FIXABLE, c, b, i,
"key extends past end of bset")) {
continue;
}
- if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
- bch2_bkey_swab(type, &b->format, k);
+ /* XXX: validate k->u64s */
+ if (!write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
- u = bkey_disassemble(b, k, &tmp);
+ u = __bkey_disassemble(b, k, &tmp);
- invalid = __bch2_bkey_invalid(c, type, u) ?:
- bch2_bkey_in_btree_node(b, u) ?:
- (write ? bch2_bkey_val_invalid(c, type, u) : NULL);
+ invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
+ bch2_bkey_in_btree_node(b, u.s_c) ?:
+ (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
if (invalid) {
char buf[160];
- bch2_bkey_val_to_text(&PBUF(buf), c, type, u);
+ bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
btree_err(BTREE_ERR_FIXABLE, c, b, i,
"invalid bkey:\n%s\n%s", invalid, buf);
continue;
}
+ if (write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
+
/*
* with the separate whiteouts thing (used for extents), the
* second set of keys actually can have whiteouts too, so we
if (!seen_non_whiteout &&
(!bkey_whiteout(k) ||
- (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
+ (prev && bkey_iter_cmp(b, prev, k) > 0))) {
*whiteout_u64s = k->_data - i->_data;
seen_non_whiteout = true;
- } else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
+ } else if (prev && bkey_iter_cmp(b, prev, k) > 0) {
+ char buf1[80];
+ char buf2[80];
+ struct bkey up = bkey_unpack_key(b, prev);
+
+ bch2_bkey_to_text(&PBUF(buf1), &up);
+ bch2_bkey_to_text(&PBUF(buf2), u.k);
+
+ bch2_dump_bset(b, i, 0);
btree_err(BTREE_ERR_FATAL, c, b, i,
- "keys out of order: %llu:%llu > %llu:%llu",
- prev_pos.inode,
- prev_pos.offset,
- u.k->p.inode,
- bkey_start_offset(u.k));
+ "keys out of order: %s > %s",
+ buf1, buf2);
/* XXX: repair this */
}
- prev_pos = u.k->p;
prev = k;
- k = bkey_next(k);
+ k = bkey_next_skip_noops(k, vstruct_last(i));
}
-
- SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
fsck_err:
return ret;
}
int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
{
struct btree_node_entry *bne;
- struct btree_node_iter_large *iter;
+ struct sort_iter *iter;
struct btree_node *sorted;
struct bkey_packed *k;
struct bset *i;
- bool used_mempool;
+ bool used_mempool, blacklisted;
unsigned u64s;
int ret, retry_read = 0, write = READ;
iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
- iter->used = 0;
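+ /*
+ * Each bset contributes two ranges to the sort iterator (whiteouts,
+ * then keys), and a node can have up to btree_blocks() + 1 bsets:
+ */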
+ sort_iter_init(iter, b);
+ iter->size = (btree_blocks(c) + 1) * 2;
if (bch2_meta_read_fault("btree"))
btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
BTREE_ERR_MUST_RETRY, c, b, NULL,
"bad btree header");
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ btree_err_on(b->data->keys.seq != bp->seq,
+ BTREE_ERR_MUST_RETRY, c, b, NULL,
+ "got wrong btree node (seq %llx want %llx)",
+ b->data->keys.seq, bp->seq);
+ }
+
while (b->written < c->opts.btree_node_size) {
unsigned sectors, whiteout_u64s = 0;
struct nonce nonce;
bset_encrypt(c, i, b->written << 9);
- sectors = vstruct_sectors(b->data, c->block_bits);
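+ /*
+ * Nodes written before the new extent overwrite scheme may contain
+ * overlapping extents, and have to take the old read and compaction
+ * paths:
+ */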
+ if (btree_node_is_extents(b) &&
+ !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data))
+ set_btree_node_old_extent_overwrite(b);
- btree_node_set_format(b, b->data->format);
+ sectors = vstruct_sectors(b->data, c->block_bits);
} else {
bne = write_block(b);
i = &bne->keys;
sectors = vstruct_sectors(bne, c->block_bits);
}
- ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
+ ret = validate_bset(c, b, i, sectors,
+ READ, have_retry);
+ if (ret)
+ goto fsck_err;
+
+ if (!b->written)
+ btree_node_set_format(b, b->data->format);
+
+ ret = validate_bset_keys(c, b, i, &whiteout_u64s,
READ, have_retry);
if (ret)
goto fsck_err;
+ SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
+
b->written += sectors;
- ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
- if (ret < 0) {
- btree_err(BTREE_ERR_FATAL, c, b, i,
- "insufficient memory");
- goto err;
- }
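+ /*
+ * A blacklisted journal sequence number means the bset was never
+ * committed - its contents must be ignored:
+ */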
+ blacklisted = bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(i->journal_seq),
+ true);
- if (ret) {
- btree_err_on(first,
- BTREE_ERR_FIXABLE, c, b, i,
- "first btree node bset has blacklisted journal seq");
- if (!first)
- continue;
- }
+ btree_err_on(blacklisted && first,
+ BTREE_ERR_FIXABLE, c, b, i,
+ "first btree node bset has blacklisted journal seq");
+ if (blacklisted && !first)
+ continue;
- bch2_btree_node_iter_large_push(iter, b,
- i->start,
- vstruct_idx(i, whiteout_u64s));
+ sort_iter_add(iter, i->start,
+ vstruct_idx(i, whiteout_u64s));
- bch2_btree_node_iter_large_push(iter, b,
- vstruct_idx(i, whiteout_u64s),
- vstruct_last(i));
+ sort_iter_add(iter,
+ vstruct_idx(i, whiteout_u64s),
+ vstruct_last(i));
}
for (bne = write_block(b);
set_btree_bset(b, b->set, &b->data->keys);
- b->nr = btree_node_is_extents(b)
- ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
- : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
+ b->nr = (btree_node_old_extent_overwrite(b)
+ ? bch2_extent_sort_fix_overlapping
+ : bch2_key_sort_fix_overlapping)(c, &sorted->keys, iter);
u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data;
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
- enum bkey_type type = btree_node_type(b);
struct bkey tmp;
- struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
- const char *invalid = bch2_bkey_val_invalid(c, type, u);
+ struct bkey_s u = __bkey_disassemble(b, k, &tmp);
+ const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
if (invalid ||
(inject_invalid_keys(c) &&
!bversion_cmp(u.k->version, MAX_VERSION))) {
char buf[160];
- bch2_bkey_val_to_text(&PBUF(buf), c, type, u);
+ bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
btree_err(BTREE_ERR_FIXABLE, c, b, i,
"invalid bkey %s: %s", buf, invalid);
continue;
}
- k = bkey_next(k);
+ if (u.k->type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
+
+ bp.v->mem_ptr = 0;
+ }
+
+ k = bkey_next_skip_noops(k, vstruct_last(i));
}
bch2_bset_build_aux_tree(b, b->set, false);
- set_needs_whiteout(btree_bset_first(b));
+ set_needs_whiteout(btree_bset_first(b), true);
btree_node_reset_sib_u64s(b);
out:
mempool_free(iter, &c->fill_iter);
return retry_read;
-err:
fsck_err:
if (ret == BTREE_RETRY_READ) {
retry_read = 1;
bch2_mark_io_failure(&failed, &rb->pick);
- can_retry = bch2_btree_pick_ptr(c, b, &failed, &rb->pick) > 0;
+ can_retry = bch2_bkey_pick_read_device(c,
+ bkey_i_to_s_c(&b->key),
+ &failed, &rb->pick) > 0;
if (!bio->bi_status &&
!bch2_btree_node_read_done(c, b, can_retry))
}
}
- bch2_time_stats_update(&c->times[BCH_TIME_btree_read], rb->start_time);
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
+ rb->start_time);
bio_put(&rb->bio);
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
trace_btree_read(c, b);
- ret = bch2_btree_pick_ptr(c, b, NULL, &pick);
+ ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
+ NULL, &pick);
if (bch2_fs_fatal_err_on(ret <= 0, c,
"btree node read error: no device to read from")) {
set_btree_node_read_error(b);
INIT_WORK(&rb->work, btree_node_read_work);
bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
bio->bi_iter.bi_sector = pick.ptr.offset;
- bio->bi_iter.bi_size = btree_bytes(c);
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = b;
- bch2_bio_map(bio, b->data);
+ bch2_bio_map(bio, b->data, btree_bytes(c));
set_btree_node_read_in_flight(b);
bch2_btree_set_root_for_read(c, b);
err:
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return ret;
}
closure_put(&((struct btree_update *) new)->cl);
bch2_journal_pin_drop(&c->journal, &w->journal);
- closure_wake_up(&w->wait);
}
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
struct btree *b = wbio->wbio.bio.bi_private;
__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
- struct bkey_i_extent *new_key;
- struct bkey_s_extent e;
struct bch_extent_ptr *ptr;
- struct btree_iter iter;
+ struct btree_trans trans;
+ struct btree_iter *iter;
int ret;
- __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
- BTREE_MAX_DEPTH,
- b->level, BTREE_ITER_NODES);
+ bch2_trans_init(&trans, c, 0, 0);
+
+ iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH, b->c.level, 0);
retry:
- ret = bch2_btree_iter_traverse(&iter);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
goto err;
/* has node been freed? */
- if (iter.l[b->level].b != b) {
+ if (iter->l[b->c.level].b != b) {
/* node has been freed: */
BUG_ON(!btree_node_dying(b));
goto out;
bkey_copy(&tmp.k, &b->key);
- new_key = bkey_i_to_extent(&tmp.k);
- e = extent_i_to_s(new_key);
-
- bch2_extent_drop_ptrs(e, ptr,
+ bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
- if (!bch2_extent_nr_ptrs(e.c))
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&tmp.k)))
goto err;
- ret = bch2_btree_node_update_key(c, &iter, b, new_key);
+ ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
if (ret == -EINTR)
goto retry;
if (ret)
goto err;
out:
- bch2_btree_iter_unlock(&iter);
+ bch2_trans_exit(&trans);
bio_put(&wbio->wbio.bio);
btree_node_write_done(c, b);
return;
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors)
{
- const struct bch_extent_ptr *ptr;
unsigned whiteout_u64s = 0;
int ret;
- extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
- break;
+ if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
+ return -1;
- ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
+ ret = validate_bset(c, b, i, sectors, WRITE, false) ?:
+ validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false);
if (ret)
bch2_inconsistent_error(c);
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
BKEY_PADDED(key) k;
- struct bkey_s_extent e;
struct bch_extent_ptr *ptr;
struct sort_iter sort_iter;
struct nonce nonce;
u64 seq = 0;
bool used_mempool;
unsigned long old, new;
+ bool validate_before_checksum = false;
void *data;
if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
if (!(old & (1 << BTREE_NODE_dirty)))
return;
- if (b->written &&
- !btree_node_may_write(b))
+ if (!btree_node_may_write(b))
return;
if (old & (1 << BTREE_NODE_write_in_flight)) {
} while (cmpxchg_acquire(&b->flags, old, new) != old);
BUG_ON(btree_node_fake(b));
- BUG_ON(!list_empty(&b->write_blocked));
BUG_ON((b->will_make_reachable != 0) != !b->written);
BUG_ON(b->written >= c->opts.btree_node_size);
BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
- /*
- * We can't block on six_lock_write() here; another thread might be
- * trying to get a journal reservation with read locks held, and getting
- * a journal reservation might be blocked on flushing the journal and
- * doing btree writes:
- */
- if (lock_type_held == SIX_LOCK_intent &&
- six_trylock_write(&b->lock)) {
- __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
- six_unlock_write(&b->lock);
- } else {
- __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
- }
-
- BUG_ON(b->uncompacted_whiteout_u64s);
+ bch2_sort_whiteouts(c, b);
sort_iter_init(&sort_iter, b);
i->journal_seq = cpu_to_le64(seq);
i->u64s = 0;
- if (!btree_node_is_extents(b)) {
+ if (!btree_node_old_extent_overwrite(b)) {
sort_iter_add(&sort_iter,
unwritten_whiteouts_start(c, b),
unwritten_whiteouts_end(c, b));
b->whiteout_u64s = 0;
- u64s = btree_node_is_extents(b)
- ? sort_extents(vstruct_last(i), &sort_iter, false)
- : sort_keys(i->start, &sort_iter, false);
+ u64s = btree_node_old_extent_overwrite(b)
+ ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
+ : bch2_sort_keys(i->start, &sort_iter, false);
le16_add_cpu(&i->u64s, u64s);
- clear_needs_whiteout(i);
+ set_needs_whiteout(i, false);
/* do we have data to write? */
if (b->written && !i->u64s)
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
BUG_ON(i->seq != b->data->keys.seq);
- i->version = cpu_to_le16(BCACHE_BSET_VERSION);
+ i->version = c->sb.version < bcachefs_metadata_version_new_versioning
+ ? cpu_to_le16(BCH_BSET_VERSION_OLD)
+ : cpu_to_le16(c->sb.version);
SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
+ if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
+ validate_before_checksum = true;
+
+ /* validate_bset will be modifying: */
+ if (le16_to_cpu(i->version) < bcachefs_metadata_version_max)
+ validate_before_checksum = true;
+
/* if we're going to be encrypting, check metadata validity first: */
- if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
+ if (validate_before_checksum &&
validate_bset_for_write(c, b, i, sectors_to_write))
goto err;
bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
/* if we're not encrypting, check metadata after checksumming: */
- if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
+ if (!validate_before_checksum &&
validate_bset_for_write(c, b, i, sectors_to_write))
goto err;
* reflect that those writes were done and the data flushed from the
* journal:
*
+ * Also on journal error, the pending write may have updates that were
+ * never journalled (interior nodes, see btree_update_nodes_written()) -
+ * it's critical that we don't do the write in that case otherwise we
+ * will have updates visible that weren't in the journal:
+ *
* Make sure to update b->written so bch2_btree_init_next() doesn't
* break:
*/
wbio->data = data;
wbio->wbio.order = order;
wbio->wbio.used_mempool = used_mempool;
- wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
- wbio->wbio.bio.bi_iter.bi_size = sectors_to_write << 9;
+ wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
wbio->wbio.bio.bi_end_io = btree_node_write_endio;
wbio->wbio.bio.bi_private = b;
- bch2_bio_map(&wbio->wbio.bio, data);
+ bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
/*
* If we're appending to a leaf node, we don't technically need FUA -
*/
bkey_copy(&k.key, &b->key);
- e = bkey_i_to_s_extent(&k.key);
- extent_for_each_ptr(e, ptr)
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
ptr->offset += b->written;
b->written += sectors_to_write;
+ /* XXX: submitting IO with btree locks held: */
bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
return;
err:
return false;
BUG_ON(b->whiteout_u64s);
- BUG_ON(b->uncompacted_whiteout_u64s);
clear_btree_node_just_written(b);
btree_node_sort(c, b, NULL, 0, b->nsets, true);
invalidated_iter = true;
} else {
- invalidated_iter = bch2_drop_whiteouts(b);
+ invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
}
for_each_bset(b, t)
- set_needs_whiteout(bset(b, t));
+ set_needs_whiteout(bset(b, t), true);
bch2_btree_verify(c, b);
BUG_ON(lock_type_held == SIX_LOCK_write);
if (lock_type_held == SIX_LOCK_intent ||
- six_lock_tryupgrade(&b->lock)) {
+ six_lock_tryupgrade(&b->c.lock)) {
__bch2_btree_node_write(c, b, SIX_LOCK_intent);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
- six_trylock_write(&b->lock)) {
+ six_trylock_write(&b->c.lock)) {
bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
if (lock_type_held == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
} else {
__bch2_btree_node_write(c, b, SIX_LOCK_read);
}
rcu_read_lock();
for_each_cached_btree(b, c, tbl, i, pos) {
unsigned long flags = READ_ONCE(b->flags);
- unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;
- if (//!(flags & (1 << BTREE_NODE_dirty)) &&
- !b->writes[0].wait.list.first &&
- !b->writes[1].wait.list.first &&
- !(b->will_make_reachable & 1))
+ if (!(flags & (1 << BTREE_NODE_dirty)))
continue;
- pr_buf(&out, "%p d %u l %u w %u b %u r %u:%lu c %u p %u\n",
+ pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
b,
(flags & (1 << BTREE_NODE_dirty)) != 0,
- b->level,
+ (flags & (1 << BTREE_NODE_need_write)) != 0,
+ b->c.level,
b->written,
!list_empty_careful(&b->write_blocked),
b->will_make_reachable != 0,
- b->will_make_reachable & 1,
- b->writes[ idx].wait.list.first != NULL,
- b->writes[!idx].wait.list.first != NULL);
+ b->will_make_reachable & 1);
}
rcu_read_unlock();