#include <linux/random.h>
#include <trace/events/bcachefs.h>
-static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
- struct btree_path *, struct btree *,
- struct keylist *, unsigned);
+static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
+ struct btree_path *, struct btree *,
+ struct keylist *, unsigned);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
static struct btree_path *get_unlocked_mut_path(struct btree_trans *trans,
struct btree_path *path;
path = bch2_path_get(trans, btree_id, pos, level + 1, level,
- BTREE_ITER_INTENT, _THIS_IP_);
- path = bch2_btree_path_make_mut(trans, path, true, _THIS_IP_);
+ BTREE_ITER_NOPRESERVE|
+ BTREE_ITER_INTENT, _RET_IP_);
+ path = bch2_btree_path_make_mut(trans, path, true, _RET_IP_);
bch2_btree_path_downgrade(trans, path);
__bch2_btree_path_unlock(trans, path);
return path;
break;
bp = bkey_s_c_to_btree_ptr_v2(k);
- if (bpos_cmp(next_node, bp.v->min_key)) {
+ if (!bpos_eq(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, next_node);
bch2_bpos_to_text(&buf2, bp.v->min_key);
bch2_btree_node_iter_advance(&iter, b);
if (bch2_btree_node_iter_end(&iter)) {
- if (bpos_cmp(k.k->p, b->key.k.p)) {
+ if (!bpos_eq(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, b->key.k.p);
bch2_bpos_to_text(&buf2, k.k->p);
}
}
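+/*
+ * Free a node that was allocated by this btree_update but never made
+ * reachable: put it back in the update's preallocated-nodes pool and
+ * invalidate any btree_paths still pointing at it.
+ */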
+static void bch2_btree_node_free_never_used(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b)
+{
+ struct bch_fs *c = as->c;
+ struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
+ struct btree_path *path;
+ unsigned level = b->c.level;
+
+ BUG_ON(!list_empty(&b->write_blocked));
+ BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
+
+ b->will_make_reachable = 0;
+ closure_put(&as->cl);
+
+ clear_btree_node_will_make_reachable(b);
+ clear_btree_node_accessed(b);
+ clear_btree_node_dirty_acct(c, b);
+ clear_btree_node_need_write(b);
+
+ mutex_lock(&c->btree_cache.lock);
+ list_del_init(&b->list);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_unlock(&c->btree_cache.lock);
+
+ BUG_ON(p->nr >= ARRAY_SIZE(p->b));
+ p->b[p->nr++] = b;
+
+ six_unlock_intent(&b->c.lock);
+
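+ /* Any paths still pointing at this node must be unlocked and invalidated: */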
+ trans_for_each_path(trans, path)
+ if (path->l[level].b == b) {
+ btree_node_unlock(trans, path, level);
+ path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+ }
+}
+
static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct disk_reservation *res,
struct closure *cl,
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve;
enum alloc_reserve alloc_reserve;
+ int ret;
if (flags & BTREE_INSERT_USE_RESERVE) {
nr_reserve = 0;
mutex_unlock(&c->btree_reserve_cache_lock);
retry:
- wp = bch2_alloc_sectors_start_trans(trans,
+ ret = bch2_alloc_sectors_start_trans(trans,
c->opts.metadata_target ?:
c->opts.foreground_target,
0,
&devs_have,
res->nr_replicas,
c->opts.metadata_replicas_required,
- alloc_reserve, 0, cl);
- if (IS_ERR(wp))
- return ERR_CAST(wp);
+ alloc_reserve, 0, cl, &wp);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
if (wp->sectors_free < btree_sectors(c)) {
struct open_bucket *ob;
b->data->max_key = pos;
}
-struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b,
- struct bkey_format format)
+static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b)
{
- struct btree *n;
+ struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
+ struct bkey_format format = bch2_btree_calc_format(b);
- n = bch2_btree_node_alloc(as, trans, b->c.level);
+ /*
+ * The keys might expand with the new format - if they wouldn't fit in
+ * the btree node anymore, use the old format for now:
+ */
+ if (!bch2_btree_node_format_fits(as->c, b, &format))
+ format = b->format;
SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
bch2_btree_sort_into(as->c, n, b);
btree_node_reset_sib_u64s(n);
-
- n->key.k.p = b->key.k.p;
return n;
}
-static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *b)
-{
- struct bkey_format new_f = bch2_btree_calc_format(b);
-
- /*
- * The keys might expand with the new format - if they wouldn't fit in
- * the btree node anymore, use the old format for now:
- */
- if (!bch2_btree_node_format_fits(as->c, b, &new_f))
- new_f = b->format;
-
- return __bch2_btree_node_alloc_replacement(as, trans, b, new_f);
-}
-
static struct btree *__btree_root_alloc(struct btree_update *as,
struct btree_trans *trans, unsigned level)
{
btree_node_set_format(b, b->data->format);
bch2_btree_build_aux_trees(b);
- bch2_btree_update_add_new_node(as, b);
- six_unlock_write(&b->c.lock);
-
return b;
}
bch2_trans_unlock(&trans);
bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
- "error %i in btree_update_nodes_written()", ret);
+ "%s(): error %s", __func__, bch2_err_str(ret));
err:
if (as->b) {
struct btree_path *path;
* we're in journal error state:
*/
+ /*
+ * Ensure the transaction is unlocked before using
+ * btree_node_lock_nopath() (the use of which is always suspect;
+ * we need to work on removing it in the future)
+ *
+ * It should already be unlocked, but get_unlocked_mut_path() ->
+ * bch2_path_get() calls bch2_path_upgrade() before we call
+ * path_make_mut(), so we may rarely end up with a locked path
+ * besides the one we have here:
+ */
+ bch2_trans_unlock(&trans);
btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(&trans, path, b);
mutex_unlock(&c->btree_interior_update_lock);
btree_update_add_key(as, &as->new_keys, b);
+
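+ /*
+ * btree_ptr_v2 keys track sectors_written; update it to cover the
+ * node's current contents:
+ */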
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
+ unsigned sectors = round_up(bytes, block_bytes(c)) >> 9;
+
+ bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+ cpu_to_le16(sectors);
+ }
}
/*
nr_nodes[!!update_level] += 1 + split;
update_level++;
- if (!btree_path_node(path, update_level))
- break;
+ ret = bch2_btree_path_upgrade(trans, path, update_level + 1);
+ if (ret)
+ return ERR_PTR(ret);
- /*
- * XXX: figure out how far we might need to split,
- * instead of locking/reserving all the way to the root:
- */
- split = update_level + 1 < BTREE_MAX_DEPTH;
- }
+ if (!btree_path_node(path, update_level)) {
+ /* Allocating new root? */
+ nr_nodes[1] += split;
+ update_level = BTREE_MAX_DEPTH;
+ break;
+ }
- /* Might have to allocate a new root: */
- if (update_level < BTREE_MAX_DEPTH)
- nr_nodes[1] += 1;
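+ /*
+ * If the new btree pointer keys from the level below fit in the
+ * node at this level, the split stops here; no need to lock or
+ * reserve any higher:
+ */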
+ if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
+ BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
+ break;
- ret = bch2_btree_path_upgrade(trans, path, U8_MAX);
- if (ret)
- return ERR_PTR(ret);
+ split = true;
+ }
if (flags & BTREE_INSERT_GC_LOCK_HELD)
lockdep_assert_held(&c->gc_lock);
as->mode = BTREE_INTERIOR_NO_UPDATE;
as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD);
as->btree_id = path->btree_id;
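+ /* Splits at update_level and above weren't locked/reserved for; they restart: */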
+ as->update_level = update_level;
INIT_LIST_HEAD(&as->list);
INIT_LIST_HEAD(&as->unwritten_list);
INIT_LIST_HEAD(&as->write_blocked_list);
}
if (ret) {
- trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
+ trace_and_count(c, btree_reserve_get_fail, trans->fn,
+ _RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
goto err;
}
struct btree *old;
trace_and_count(c, btree_node_set_root, c, b);
- BUG_ON(!b->written);
old = btree_node_root(c, b);
struct bch_fs *c = as->c;
struct bkey_packed *k;
struct printbuf buf = PRINTBUF;
+ unsigned long old, new, v;
BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
!btree_ptr_sectors_written(insert));
bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
set_btree_node_dirty_acct(c, b);
- set_btree_node_need_write(b);
+
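+ /* Atomically set need_write and the write type to BTREE_WRITE_interior: */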
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ new |= BTREE_WRITE_interior;
+ new |= 1 << BTREE_NODE_need_write;
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
printbuf_exit(&buf);
}
;
while (!bch2_keylist_empty(keys)) {
- bch2_insert_fixup_btree_ptr(as, trans, path, b,
- &node_iter, bch2_keylist_front(keys));
+ struct bkey_i *k = bch2_keylist_front(keys);
+
+ if (bpos_gt(k->k.p, b->key.k.p))
+ break;
+
+ bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
bch2_keylist_pop_front(keys);
}
}
- * Move keys from n1 (original replacement node, now lower node) to n2 (higher
- * node)
+ * Split b into two new nodes: n[0] (lower) and n[1] (higher), distributing
+ * roughly the first 3/5 of the live u64s to n[0]
*/
-static struct btree *__btree_split_node(struct btree_update *as,
- struct btree_trans *trans,
- struct btree *n1)
+static void __btree_split_node(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b,
+ struct btree *n[2])
{
- struct bkey_format_state s;
- size_t nr_packed = 0, nr_unpacked = 0;
- struct btree *n2;
- struct bset *set1, *set2;
- struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL;
- struct bpos n1_pos;
-
- n2 = bch2_btree_node_alloc(as, trans, n1->c.level);
+ struct bkey_packed *k;
+ struct bpos n1_pos = POS_MIN;
+ struct btree_node_iter iter;
+ struct bset *bsets[2];
+ struct bkey_format_state format[2];
+ struct bkey_packed *out[2];
+ struct bkey uk;
+ unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+ int i;
- n2->data->max_key = n1->data->max_key;
- n2->data->format = n1->format;
- SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
- n2->key.k.p = n1->key.k.p;
+ for (i = 0; i < 2; i++) {
+ BUG_ON(n[i]->nsets != 1);
- bch2_btree_update_add_new_node(as, n2);
+ bsets[i] = btree_bset_first(n[i]);
+ out[i] = bsets[i]->start;
- set1 = btree_bset_first(n1);
- set2 = btree_bset_first(n2);
+ SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
+ bch2_bkey_format_init(&format[i]);
+ }
- /*
- * Has to be a linear search because we don't have an auxiliary
- * search tree yet
- */
- k = set1->start;
- while (1) {
- struct bkey_packed *n = bkey_next(k);
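+ /*
+ * First pass: find the pivot (targeting ~3/5 of the live u64s in the
+ * lower node) and build a key format for each new node:
+ */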
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
+
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
+ uk = bkey_unpack_key(b, k);
+ if (!i)
+ n1_pos = uk.p;
+ bch2_bkey_format_add_key(&format[i], &uk);
+ }
- if (n == vstruct_last(set1))
- break;
- if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
- break;
+ btree_set_min(n[0], b->data->min_key);
+ btree_set_max(n[0], n1_pos);
+ btree_set_min(n[1], bpos_successor(n1_pos));
+ btree_set_max(n[1], b->data->max_key);
- if (bkey_packed(k))
- nr_packed++;
- else
- nr_unpacked++;
+ for (i = 0; i < 2; i++) {
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
- prev = k;
- k = n;
+ n[i]->data->format = bch2_bkey_format_done(&format[i]);
+ btree_node_set_format(n[i], n[i]->data->format);
}
- BUG_ON(!prev);
- set2_start = k;
- set2_end = vstruct_last(set1);
-
- set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data);
- set_btree_bset_end(n1, n1->set);
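+ /* Second pass: copy the keys, repacking them into each node's new format: */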
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
- n1->nr.live_u64s = le16_to_cpu(set1->u64s);
- n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s);
- n1->nr.packed_keys = nr_packed;
- n1->nr.unpacked_keys = nr_unpacked;
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
- n1_pos = bkey_unpack_pos(n1, prev);
- if (as->c->sb.version < bcachefs_metadata_version_snapshot)
- n1_pos.snapshot = U32_MAX;
-
- btree_set_max(n1, n1_pos);
- btree_set_min(n2, bpos_successor(n1->key.k.p));
+ if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
+ ? &b->format : &bch2_bkey_format_current, k))
+ out[i]->format = KEY_FORMAT_LOCAL_BTREE;
+ else
+ bch2_bkey_unpack(b, (void *) out[i], k);
- bch2_bkey_format_init(&s);
- bch2_bkey_format_add_pos(&s, n2->data->min_key);
- bch2_bkey_format_add_pos(&s, n2->data->max_key);
+ out[i]->needs_whiteout = false;
- for (k = set2_start; k != set2_end; k = bkey_next(k)) {
- struct bkey uk = bkey_unpack_key(n1, k);
- bch2_bkey_format_add_key(&s, &uk);
+ btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
+ out[i] = bkey_next(out[i]);
}
- n2->data->format = bch2_bkey_format_done(&s);
- btree_node_set_format(n2, n2->data->format);
+ for (i = 0; i < 2; i++) {
+ bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);
- out = set2->start;
- memset(&n2->nr, 0, sizeof(n2->nr));
+ BUG_ON(!bsets[i]->u64s);
- for (k = set2_start; k != set2_end; k = bkey_next(k)) {
- BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k)
- ? &n1->format : &bch2_bkey_format_current, k));
- out->format = KEY_FORMAT_LOCAL_BTREE;
- btree_keys_account_key_add(&n2->nr, 0, out);
- out = bkey_next(out);
- }
-
- set2->u64s = cpu_to_le16((u64 *) out - set2->_data);
- set_btree_bset_end(n2, n2->set);
-
- BUG_ON(!set1->u64s);
- BUG_ON(!set2->u64s);
+ set_btree_bset_end(n[i], n[i]->set);
- btree_node_reset_sib_u64s(n1);
- btree_node_reset_sib_u64s(n2);
+ btree_node_reset_sib_u64s(n[i]);
- bch2_verify_btree_nr_keys(n1);
- bch2_verify_btree_nr_keys(n2);
+ bch2_verify_btree_nr_keys(n[i]);
- if (n1->c.level) {
- btree_node_interior_verify(as->c, n1);
- btree_node_interior_verify(as->c, n2);
+ if (b->c.level)
+ btree_node_interior_verify(as->c, n[i]);
}
-
- return n2;
}
/*
struct btree *b,
struct keylist *keys)
{
- struct btree_node_iter node_iter;
- struct bkey_i *k = bch2_keylist_front(keys);
- struct bkey_packed *src, *dst, *n;
- struct bset *i;
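+ /* Only insert the keys that fall within this node's range: */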
+ if (!bch2_keylist_empty(keys) &&
+ bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
+ struct btree_node_iter node_iter;
- bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
+ bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
- __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
- /*
- * We can't tolerate whiteouts here - with whiteouts there can be
- * duplicate keys, and it would be rather bad if we picked a duplicate
- * for the pivot:
- */
- i = btree_bset_first(b);
- src = dst = i->start;
- while (src != vstruct_last(i)) {
- n = bkey_next(src);
- if (!bkey_deleted(src)) {
- memmove_u64s_down(dst, src, src->u64s);
- dst = bkey_next(dst);
- }
- src = n;
+ btree_node_interior_verify(as->c, b);
}
-
- /* Also clear out the unwritten whiteouts area: */
- b->whiteout_u64s = 0;
-
- i->u64s = cpu_to_le16((u64 *) dst - i->_data);
- set_btree_bset_end(b, b->set);
-
- BUG_ON(b->nsets != 1 ||
- b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
-
- btree_node_interior_verify(as->c, b);
}
-static void btree_split(struct btree_update *as, struct btree_trans *trans,
- struct btree_path *path, struct btree *b,
- struct keylist *keys, unsigned flags)
+static int btree_split(struct btree_update *as, struct btree_trans *trans,
+ struct btree_path *path, struct btree *b,
+ struct keylist *keys, unsigned flags)
{
struct bch_fs *c = as->c;
struct btree *parent = btree_node_parent(path, b);
struct btree *n1, *n2 = NULL, *n3 = NULL;
struct btree_path *path1 = NULL, *path2 = NULL;
u64 start_time = local_clock();
+ int ret = 0;
BUG_ON(!parent && (b != btree_node_root(c, b)));
- BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level));
+ BUG_ON(parent && !btree_node_intent_locked(path, b->c.level + 1));
bch2_btree_interior_update_will_free_node(as, b);
- n1 = bch2_btree_node_alloc_replacement(as, trans, b);
+ if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
+ struct btree *n[2];
- if (keys)
- btree_split_insert_keys(as, trans, path, n1, keys);
-
- if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
trace_and_count(c, btree_node_split, c, b);
- n2 = __btree_split_node(as, trans, n1);
+ n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
+ n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
+
+ __btree_split_node(as, trans, b, n);
+
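+ /* Each key goes to whichever new node covers its position: */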
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ btree_split_insert_keys(as, trans, path, n2, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
+
+ bch2_btree_update_add_new_node(as, n1);
+ bch2_btree_update_add_new_node(as, n2);
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);
mark_btree_node_locked(trans, path2, n2->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path2, n2);
- bch2_btree_update_add_new_node(as, n1);
-
- bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
- bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
-
/*
* Note that on recursive parent_keys == keys, so we
* can't start adding new keys to parent_keys before emptying it
/* Depth increases, make a new root */
n3 = __btree_root_alloc(as, trans, b->c.level + 1);
+ bch2_btree_update_add_new_node(as, n3);
+ six_unlock_write(&n3->c.lock);
+
path2->locks_want++;
BUG_ON(btree_node_locked(path2, n3->c.level));
six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
n3->sib_u64s[1] = U16_MAX;
btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
-
- bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
}
} else {
trace_and_count(c, btree_node_compact, c, b);
+ n1 = bch2_btree_node_alloc_replacement(as, trans, b);
+
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
+
bch2_btree_build_aux_trees(n1);
+ bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);
path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path1, n1);
- bch2_btree_update_add_new_node(as, n1);
-
- bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
-
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
}
if (parent) {
/* Split a non root node */
- bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ if (ret)
+ goto err;
} else if (n3) {
bch2_btree_set_root(as, trans, path, n3);
} else {
bch2_btree_set_root(as, trans, path, n1);
}
- bch2_btree_update_get_open_buckets(as, n1);
- if (n2)
- bch2_btree_update_get_open_buckets(as, n2);
- if (n3)
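+ /*
+ * The new nodes aren't written until here, when the update can no
+ * longer fail: on error they're freed, never having been written.
+ */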
+ if (n3) {
bch2_btree_update_get_open_buckets(as, n3);
+ bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
+ }
+ if (n2) {
+ bch2_btree_update_get_open_buckets(as, n2);
+ bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
+ }
+ bch2_btree_update_get_open_buckets(as, n1);
+ bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
/*
* The old node must be freed (in memory) _before_ unlocking the new
if (n2)
six_unlock_intent(&n2->c.lock);
six_unlock_intent(&n1->c.lock);
-
+out:
if (path2) {
__bch2_btree_path_unlock(trans, path2);
bch2_path_put(trans, path2, true);
? BCH_TIME_btree_node_split
: BCH_TIME_btree_node_compact],
start_time);
+ return ret;
+err:
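+ /* The new nodes were never made reachable; free them: */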
+ if (n3)
+ bch2_btree_node_free_never_used(as, trans, n3);
+ if (n2)
+ bch2_btree_node_free_never_used(as, trans, n2);
+ bch2_btree_node_free_never_used(as, trans, n1);
+ goto out;
}
static void
- * If a split occurred, this function will return early. This can only happen
- * for leaf nodes -- inserts into interior nodes have to be atomic.
+ * Returns 0 on success; splitting a full node may fail with an error or a
+ * transaction restart, which is returned to the caller.
*/
-static void bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
- struct btree_path *path, struct btree *b,
- struct keylist *keys, unsigned flags)
+static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
+ struct btree_path *path, struct btree *b,
+ struct keylist *keys, unsigned flags)
{
struct bch_fs *c = as->c;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
+ int ret;
lockdep_assert_held(&c->gc_lock);
- BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level));
+ BUG_ON(!btree_node_intent_locked(path, b->c.level));
BUG_ON(!b->c.level);
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
- bch2_btree_node_lock_for_insert(trans, path, b);
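+ /*
+ * Randomly (roughly 1 in 64 calls) inject a split-race transaction
+ * restart, to keep that restart path exercised:
+ */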
+ if (!(local_clock() & 63))
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+
+ ret = bch2_btree_node_lock_write(trans, path, &b->c);
+ if (ret)
+ return ret;
+
+ bch2_btree_node_prep_for_write(trans, path, b);
if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
bch2_btree_node_unlock_write(trans, path, b);
bch2_btree_node_unlock_write(trans, path, b);
btree_node_interior_verify(c, b);
- return;
+ return 0;
split:
- btree_split(as, trans, path, b, keys, flags);
+ /*
+ * We could attempt to avoid the transaction restart, by calling
+ * bch2_btree_path_upgrade() and allocating more nodes:
+ */
+ if (b->c.level >= as->update_level)
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+
+ return btree_split(as, trans, path, b, keys, flags);
}
int bch2_btree_split_leaf(struct btree_trans *trans,
if (IS_ERR(as))
return PTR_ERR(as);
- btree_split(as, trans, path, b, NULL, flags);
+ ret = btree_split(as, trans, path, b, NULL, flags);
+ if (ret) {
+ bch2_btree_update_free(as, trans);
+ return ret;
+ }
+
bch2_btree_update_done(as, trans);
- for (l = path->level + 1; btree_path_node(path, l) && !ret; l++)
+ for (l = path->level + 1; btree_node_intent_locked(path, l) && !ret; l++)
ret = bch2_foreground_maybe_merge(trans, path, l, flags);
return ret;
b = path->l[level].b;
- if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
- (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+ if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+ (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
b->sib_u64s[sib] = U16_MAX;
return 0;
}
next = m;
}
- if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+ if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch2_bpos_to_text(&buf1, prev->data->max_key);
bch2_bpos_to_text(&buf2, next->data->min_key);
bch_err(c,
- "btree topology error in btree merge:\n"
+ "%s(): btree topology error:\n"
" prev ends at %s\n"
" next starts at %s",
- buf1.buf, buf2.buf);
+ __func__, buf1.buf, buf2.buf);
printbuf_exit(&buf1);
printbuf_exit(&buf2);
bch2_topology_error(c);
btree_set_min(n, prev->data->min_key);
btree_set_max(n, next->data->max_key);
- bch2_btree_update_add_new_node(as, n);
-
n->data->format = new_f;
btree_node_set_format(n, new_f);
bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p);
mark_btree_node_locked(trans, new_path, n->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, new_path, n);
- bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
-
bkey_init(&delete.k);
delete.k.p = prev->key.k.p;
bch2_keylist_add(&as->parent_keys, &delete);
bch2_trans_verify_paths(trans);
- bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ if (ret)
+ goto err_free_update;
bch2_trans_verify_paths(trans);
bch2_btree_update_get_open_buckets(as, n);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bch2_btree_node_free_inmem(trans, path, b);
bch2_btree_node_free_inmem(trans, sib_path, m);
bch2_path_put(trans, sib_path, true);
bch2_trans_verify_locks(trans);
return ret;
+err_free_update:
+ bch2_btree_node_free_never_used(as, trans, n);
+ bch2_btree_update_free(as, trans);
+ goto out;
}
/**
bch2_btree_interior_update_will_free_node(as, b);
n = bch2_btree_node_alloc_replacement(as, trans, b);
- bch2_btree_update_add_new_node(as, n);
bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
trace_and_count(c, btree_node_rewrite, c, b);
- bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
-
if (parent) {
bch2_keylist_add(&as->parent_keys, &n->key);
- bch2_btree_insert_node(as, trans, iter->path, parent,
- &as->parent_keys, flags);
+ ret = bch2_btree_insert_node(as, trans, iter->path, parent,
+ &as->parent_keys, flags);
+ if (ret)
+ goto err;
} else {
bch2_btree_set_root(as, trans, iter->path, n);
}
bch2_btree_update_get_open_buckets(as, n);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bch2_btree_node_free_inmem(trans, iter->path, b);
six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as, trans);
- bch2_path_put(trans, new_path, true);
out:
+ if (new_path)
+ bch2_path_put(trans, new_path, true);
bch2_btree_path_downgrade(trans, iter->path);
return ret;
+err:
+ bch2_btree_node_free_never_used(as, trans, n);
+ bch2_btree_update_free(as, trans);
+ goto out;
}
struct async_btree_rewrite {
goto out;
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
-out :
+out:
bch2_trans_iter_exit(trans, &iter);
return ret;
_THIS_IP_);
BUG_ON(iter2.path->level != b->c.level);
- BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+ BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
btree_path_set_level_up(trans, iter2.path);