#include <trace/events/bcachefs.h>
static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
+static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
+static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *);
+static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
return true;
}
-static inline bool btree_iter_get_locks(struct btree_iter *iter,
- bool upgrade, bool trace)
+static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
+ unsigned long trace_ip)
{
unsigned l = iter->level;
int fail_idx = -1;
if (!(upgrade
? bch2_btree_node_upgrade(iter, l)
: bch2_btree_node_relock(iter, l))) {
- if (trace)
- (upgrade
- ? trace_node_upgrade_fail
- : trace_node_relock_fail)(l, iter->l[l].lock_seq,
- is_btree_node(iter, l)
- ? 0
- : (unsigned long) iter->l[l].b,
- is_btree_node(iter, l)
- ? iter->l[l].b->c.lock.state.seq
- : 0);
+ (upgrade
+ ? trace_node_upgrade_fail
+ : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+ iter->btree_id, &iter->real_pos,
+ l, iter->l[l].lock_seq,
+ is_btree_node(iter, l)
+ ? 0
+ : (unsigned long) iter->l[l].b,
+ is_btree_node(iter, l)
+ ? iter->l[l].b->c.lock.state.seq
+ : 0);
fail_idx = l;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- linked->locks_want = max_t(unsigned,
- linked->locks_want,
- __fls(linked->nodes_locked) + 1);
- if (!btree_iter_get_locks(linked, true, false)) {
- deadlock_iter = linked;
- reason = 1;
- }
+ deadlock_iter = linked;
+ reason = 1;
}
if (linked->btree_id != iter->btree_id) {
* we're about to lock, it must have the ancestors locked too:
*/
if (level > __fls(linked->nodes_locked)) {
- linked->locks_want =
- max(level + 1, max_t(unsigned,
- linked->locks_want,
- iter->locks_want));
- if (!btree_iter_get_locks(linked, true, false)) {
- deadlock_iter = linked;
- reason = 5;
- }
+ deadlock_iter = linked;
+ reason = 5;
}
/* Must lock btree nodes in key order: */
btree_iter_type(linked))) <= 0) {
deadlock_iter = linked;
reason = 7;
- }
-
- /*
- * Recheck if this is a node we already have locked - since one
- * of the get_locks() calls might've successfully
- * upgraded/relocked it:
- */
- if (linked->l[level].b == b &&
- btree_node_locked_type(linked, level) >= type) {
- six_lock_increment(&b->c.lock, type);
- return true;
+ BUG_ON(trans->in_traverse_all);
}
}
if (unlikely(deadlock_iter)) {
trace_trans_restart_would_deadlock(iter->trans->ip, ip,
- reason,
+ trans->in_traverse_all, reason,
deadlock_iter->btree_id,
btree_iter_type(deadlock_iter),
+ &deadlock_iter->real_pos,
iter->btree_id,
- btree_iter_type(iter));
+ btree_iter_type(iter),
+ &pos);
return false;
}
#endif
__flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
- return btree_iter_get_locks(iter, false, trace);
+ return btree_iter_get_locks(iter, false, trace_ip);
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
iter->locks_want = new_locks_want;
- if (btree_iter_get_locks(iter, true, true))
+ if (btree_iter_get_locks(iter, true, _THIS_IP_))
return true;
/*
- * Ancestor nodes must be locked before child nodes, so set locks_want
- * on iterators that might lock ancestors before us to avoid getting
- * -EINTR later:
+ * XXX: this is ugly - we'd prefer to not be mucking with other
+ * iterators in the btree_trans here.
+ *
+ * On failure to upgrade the iterator, setting iter->locks_want and
+ * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
+ * get the locks we want on transaction restart.
+ *
+ * But if this iterator was a clone, on transaction restart what we did
+ * to this iterator isn't going to be preserved.
+ *
+ * Possibly we could add an iterator field for the parent iterator when
+ * an iterator is a copy - for now, we'll just upgrade any other
+ * iterators with the same btree id.
+ *
+ * The code below used to be needed to ensure ancestor nodes get locked
+ * before their child nodes - now that's handled by
+ * bch2_btree_iter_traverse_all().
*/
trans_for_each_iter(iter->trans, linked)
if (linked != iter &&
+ btree_iter_type(linked) == btree_iter_type(iter) &&
linked->btree_id == iter->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_iter_get_locks(linked, true, false);
+ btree_iter_get_locks(linked, true, _THIS_IP_);
}
return false;
/* Btree transaction locking: */
+static inline bool btree_iter_should_be_locked(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
+ iter->should_be_locked;
+}
+
bool bch2_trans_relock(struct btree_trans *trans)
{
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- if (btree_iter_keep(trans, iter) &&
- !bch2_btree_iter_relock(iter, true))
+ if (!bch2_btree_iter_relock(iter, _RET_IP_) &&
+ btree_iter_should_be_locked(trans, iter)) {
+ trace_trans_restart_relock(trans->ip, _RET_IP_,
+ iter->btree_id, &iter->real_pos);
return false;
+ }
return true;
}
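/*
 * Illustrative sketch, not part of this patch: the restart decision
 * bch2_trans_relock() makes above, with made-up stand-in types. Every
 * iterator gets a relock attempt, but only a failure on an iterator the
 * caller still depends on - one that has returned a key (should_be_locked)
 * or is flagged BTREE_ITER_KEEP_UNTIL_COMMIT - forces the caller to restart
 * the transaction with -EINTR.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_iter {
    bool keep_until_commit;
    bool should_be_locked;
    bool relock_ok;        /* outcome of the (stubbed) relock attempt */
};

static bool sketch_trans_relock(struct sketch_iter *iters, size_t nr)
{
    for (size_t i = 0; i < nr; i++)
        if (!iters[i].relock_ok &&
            (iters[i].keep_until_commit || iters[i].should_be_locked))
            return false;    /* transaction must restart */

    return true;
}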
ret = bkey_disassemble(l->b, k, u);
- if (bch2_debug_check_bkeys)
+ /*
+ * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
+ * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
+ * being overwritten but doesn't change k->size. But this is ok, because
+ * those keys are never written out; we just have to avoid a spurious
+ * assertion here:
+ */
+ if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
return ret;
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
- struct btree_iter_level *l,
- struct bkey *u)
+ struct btree_iter_level *l)
{
- return __btree_iter_unpack(iter, l, u,
+ return __btree_iter_unpack(iter, l, &iter->k,
bch2_btree_node_iter_peek_all(&l->iter, l->b));
}
if (iter->flags & BTREE_ITER_PREFETCH)
btree_iter_prefetch(iter);
+ if (btree_node_read_locked(iter, level + 1))
+ btree_node_unlock(iter, level + 1);
iter->level = level;
+
+ bch2_btree_iter_verify_locks(iter);
err:
bch2_bkey_buf_exit(&tmp, c);
return ret;
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
-static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
+ unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
u8 sorted[BTREE_ITER_MAX];
- unsigned i, nr_sorted = 0;
+ int i, nr_sorted = 0;
+ bool relock_fail;
if (trans->in_traverse_all)
return -EINTR;
trans->in_traverse_all = true;
retry_all:
nr_sorted = 0;
+ relock_fail = false;
- trans_for_each_iter(trans, iter)
+ trans_for_each_iter(trans, iter) {
+ if (!bch2_btree_iter_relock(iter, _THIS_IP_))
+ relock_fail = true;
sorted[nr_sorted++] = iter->idx;
+ }
+
+ if (!relock_fail) {
+ trans->in_traverse_all = false;
+ return 0;
+ }
#define btree_iter_cmp_by_idx(_l, _r) \
btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])
bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx
+
+ for (i = nr_sorted - 2; i >= 0; --i) {
+ struct btree_iter *iter1 = trans->iters + sorted[i];
+ struct btree_iter *iter2 = trans->iters + sorted[i + 1];
+
+ if (iter1->btree_id == iter2->btree_id &&
+ iter1->locks_want < iter2->locks_want)
+ __bch2_btree_iter_upgrade(iter1, iter2->locks_want);
+ else if (!iter1->locks_want && iter2->locks_want)
+ __bch2_btree_iter_upgrade(iter1, 1);
+ }
+
bch2_trans_unlock(trans);
cond_resched();
bch2_btree_cache_cannibalize_unlock(c);
trans->in_traverse_all = false;
+
+ trace_trans_traverse_all(trans->ip, trace_ip);
return ret;
}
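/*
 * Illustrative sketch, not part of this patch: the sort plus locks_want
 * propagation that __btree_iter_traverse_all() does above, reduced to plain
 * arrays. Iterators are re-traversed in one global lock order (btree id, then
 * position - the real btree_iter_lock_cmp() also orders cached iterators and
 * compares full struct bpos keys), and an iterator earlier in that order
 * inherits the locks_want of the one after it on the same btree, so the later
 * iterator's intent locks are never requested out of order.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_iter {
    unsigned btree_id;
    uint64_t pos;            /* stand-in for struct bpos */
    unsigned locks_want;
};

static int sketch_iter_cmp(const struct sketch_iter *l, const struct sketch_iter *r)
{
    if (l->btree_id != r->btree_id)
        return l->btree_id < r->btree_id ? -1 : 1;
    return l->pos < r->pos ? -1 : l->pos > r->pos ? 1 : 0;
}

static void sketch_sort_and_propagate(struct sketch_iter **sorted, size_t nr)
{
    /* bubble sort, as in the real code - nr is at most BTREE_ITER_MAX: */
    for (size_t i = 0; i + 1 < nr; i++)
        for (size_t j = 0; j + 1 < nr - i; j++)
            if (sketch_iter_cmp(sorted[j], sorted[j + 1]) > 0) {
                struct sketch_iter *tmp = sorted[j];
                sorted[j] = sorted[j + 1];
                sorted[j + 1] = tmp;
            }

    /* walk backwards, pulling locks_want towards the front of the order: */
    for (size_t i = nr; i-- > 1; ) {
        struct sketch_iter *prev = sorted[i - 1], *cur = sorted[i];

        if (prev->btree_id == cur->btree_id &&
            prev->locks_want < cur->locks_want)
            prev->locks_want = cur->locks_want;
        else if (!prev->locks_want && cur->locks_want)
            prev->locks_want = 1;
    }
}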
int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
- return __btree_iter_traverse_all(trans, 0);
+ return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
unsigned long trace_ip)
{
unsigned depth_want = iter->level;
+ int ret = 0;
/*
* if we need interior nodes locked, call btree_iter_relock() to make
*/
if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
iter->locks_want > 1)
- bch2_btree_iter_relock(iter, false);
+ bch2_btree_iter_relock(iter, _THIS_IP_);
- if (btree_iter_type(iter) == BTREE_ITER_CACHED)
- return bch2_btree_iter_traverse_cached(iter);
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+ ret = bch2_btree_iter_traverse_cached(iter);
+ goto out;
+ }
if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
- return 0;
+ goto out;
if (unlikely(iter->level >= BTREE_MAX_DEPTH))
- return 0;
+ goto out;
iter->level = btree_iter_up_until_good_node(iter, 0);
* btree_iter_lock_root() comes next and that it can't fail
*/
while (iter->level > depth_want) {
- int ret = btree_iter_node(iter, iter->level)
+ ret = btree_iter_node(iter, iter->level)
? btree_iter_down(iter, trace_ip)
: btree_iter_lock_root(iter, depth_want, trace_ip);
if (unlikely(ret)) {
- if (ret == 1)
- return 0;
+ if (ret == 1) {
+ /*
+ * Got to the end of the btree (in
+ * BTREE_ITER_NODES mode)
+ */
+ ret = 0;
+ goto out;
+ }
iter->level = depth_want;
iter->l[iter->level].b =
BTREE_ITER_NO_NODE_DOWN;
}
- return ret;
+ goto out;
}
}
iter->uptodate = BTREE_ITER_NEED_PEEK;
-
+out:
+ trace_iter_traverse(iter->trans->ip, trace_ip,
+ iter->btree_id, &iter->real_pos, ret);
bch2_btree_iter_verify(iter);
- return 0;
+ return ret;
}
static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
ret = bch2_trans_cond_resched(trans) ?:
btree_iter_traverse_one(iter, _RET_IP_);
if (unlikely(ret))
- ret = __btree_iter_traverse_all(trans, ret);
+ ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
return ret;
}
int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
+ int ret;
+
btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
- return btree_iter_traverse(iter);
+ ret = btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ iter->should_be_locked = true;
+ return 0;
}
/* Iterate across nodes (leaf and interior nodes) */
iter->pos = iter->real_pos = b->key.k.p;
bch2_btree_iter_verify(iter);
+ iter->should_be_locked = true;
return b;
}
iter->pos = iter->real_pos = b->key.k.p;
bch2_btree_iter_verify(iter);
+ iter->should_be_locked = true;
return b;
}
static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
+ struct bpos old_pos = iter->real_pos;
int cmp = bpos_cmp(new_pos, iter->real_pos);
unsigned l = iter->level;
goto out;
iter->real_pos = new_pos;
+ iter->should_be_locked = false;
if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
btree_node_unlock(iter, 0);
- iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+ iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
return;
}
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
bch2_btree_iter_verify(iter);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+ iter->btree_id,
+ &old_pos, &new_pos, l);
+#endif
}
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
return ret;
}
-static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos)
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter,
+ struct bpos pos)
{
struct btree_insert_entry *i;
- trans_for_each_update2(trans, i)
- if ((cmp_int(btree_id, i->iter->btree_id) ?:
- bkey_cmp(pos, i->k->k.p)) <= 0) {
- if (btree_id == i->iter->btree_id)
+ if (!(iter->flags & BTREE_ITER_WITH_UPDATES))
+ return NULL;
+
+ trans_for_each_update(iter->trans, i)
+ if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
+ bkey_cmp(pos, i->k->k.p)) <= 0) {
+ if (iter->btree_id == i->iter->btree_id)
return i->k;
break;
}
return NULL;
}
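/*
 * Illustrative sketch, not part of this patch: the scan that
 * btree_trans_peek_updates() performs over the transaction's pending updates,
 * reduced to a plain array. "struct sketch_update" and a single integer
 * position are made-up stand-ins for btree_insert_entry and struct bpos;
 * like the loop above, the sketch assumes the updates are kept sorted by
 * (btree id, key position).
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_update {
    unsigned btree_id;
    uint64_t pos;        /* stand-in for the update's key position */
};

/*
 * Return the first pending update in @btree_id at or after @pos, or NULL if
 * the next update in sort order belongs to a different btree (or none exist):
 */
static struct sketch_update *
sketch_peek_updates(struct sketch_update *updates, size_t nr,
                    unsigned btree_id, uint64_t pos)
{
    for (size_t i = 0; i < nr; i++) {
        struct sketch_update *u = &updates[i];

        if (u->btree_id < btree_id ||
            (u->btree_id == btree_id && u->pos < pos))
            continue;

        return u->btree_id == btree_id ? u : NULL;
    }

    return NULL;
}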
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates)
+/**
+ * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
+ * current position
+ */
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
struct bpos search_key = btree_iter_search_key(iter);
- struct bkey_i *next_update = with_updates
- ? btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
- : NULL;
+ struct bkey_i *next_update;
struct bkey_s_c k;
int ret;
EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
-
+start:
+ next_update = btree_trans_peek_updates(iter, search_key);
btree_iter_set_search_pos(iter, search_key);
while (1) {
k = btree_iter_level_peek(iter, &iter->l[0]);
if (next_update &&
- bpos_cmp(next_update->k.p, iter->real_pos) <= 0)
+ bpos_cmp(next_update->k.p, iter->real_pos) <= 0) {
+ iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
+ }
if (likely(k.k)) {
if (bkey_deleted(k.k)) {
- btree_iter_set_search_pos(iter,
- bkey_successor(iter, k.k->p));
- continue;
+ search_key = bkey_successor(iter, k.k->p);
+ goto start;
}
break;
* iter->pos should be monotonically increasing, and always be equal to
* the key we just returned - except extents can straddle iter->pos:
*/
- if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter->pos = k.k->p;
+ else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
iter->pos = bkey_start_pos(k.k);
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
+ iter->should_be_locked = true;
return k;
}
-/**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
- */
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
-{
- return __btree_iter_peek(iter, false);
-}
-
/**
* bch2_btree_iter_next: returns first key greater than iterator's current
* position
return bch2_btree_iter_peek(iter);
}
-struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
-{
- return __btree_iter_peek(iter, true);
-}
-
-struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
-{
- if (!bch2_btree_iter_advance(iter))
- return bkey_s_c_null;
-
- return bch2_btree_iter_peek_with_updates(iter);
-}
-
/**
* bch2_btree_iter_peek_prev: returns first key less than or equal to
* iterator's current position
int ret;
EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
+ iter->should_be_locked = true;
return k;
no_key:
/*
return bch2_btree_iter_peek_prev(iter);
}
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
+ struct bpos search_key;
struct bkey_s_c k;
- struct bpos pos, next_start;
+ int ret;
- /* keys & holes can't span inode numbers: */
- if (iter->pos.offset == KEY_OFFSET_MAX) {
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
+ btree_iter_type(iter) != BTREE_ITER_CACHED);
+ bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ /* extents can't span inode numbers: */
+ if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos));
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
}
- pos = iter->pos;
- k = bch2_btree_iter_peek(iter);
- iter->pos = pos;
-
- if (bkey_err(k))
- return k;
-
- if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0)
- return k;
-
- next_start = k.k ? bkey_start_pos(k.k) : POS_MAX;
-
- bkey_init(&iter->k);
- iter->k.p = iter->pos;
- bch2_key_resize(&iter->k,
- min_t(u64, KEY_SIZE_MAX,
- (next_start.inode == iter->pos.inode
- ? next_start.offset
- : KEY_OFFSET_MAX) -
- iter->pos.offset));
-
- EBUG_ON(!iter->k.size);
+ search_key = btree_iter_search_key(iter);
+ btree_iter_set_search_pos(iter, search_key);
- bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ ret = btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- return (struct bkey_s_c) { &iter->k, NULL };
-}
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
+ !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ struct bkey_i *next_update;
+ struct bkey_cached *ck;
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
-{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k;
- int ret;
+ switch (btree_iter_type(iter)) {
+ case BTREE_ITER_KEYS:
+ k = btree_iter_level_peek_all(iter, &iter->l[0]);
+ EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
+ break;
+ case BTREE_ITER_CACHED:
+ ck = (void *) iter->l[0].b;
+ EBUG_ON(iter->btree_id != ck->key.btree_id ||
+ bkey_cmp(iter->pos, ck->key.pos));
+ BUG_ON(!ck->valid);
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_verify(iter);
- bch2_btree_iter_verify_entry_exit(iter);
+ k = bkey_i_to_s_c(ck->k);
+ break;
+ case BTREE_ITER_NODES:
+ BUG();
+ }
- btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
+ next_update = btree_trans_peek_updates(iter, search_key);
+ if (next_update &&
+ (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ }
+ } else {
+ if (iter->flags & BTREE_ITER_INTENT) {
+ struct btree_iter *child =
+ btree_iter_child_alloc(iter, _THIS_IP_);
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- return __bch2_btree_iter_peek_slot_extents(iter);
+ btree_iter_copy(child, iter);
+ k = bch2_btree_iter_peek(child);
- ret = btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ if (k.k && !bkey_err(k))
+ iter->k = child->k;
+ } else {
+ struct bpos pos = iter->pos;
- k = btree_iter_level_peek_all(iter, l, &iter->k);
+ k = bch2_btree_iter_peek(iter);
+ iter->pos = pos;
+ }
- EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
+ if (unlikely(bkey_err(k)))
+ return k;
+ }
- if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
- /* hole */
- bkey_init(&iter->k);
- iter->k.p = iter->pos;
- k = (struct bkey_s_c) { &iter->k, NULL };
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ if (!k.k ||
+ ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS)
+ ? bpos_cmp(iter->pos, k.k->p)
+ : bkey_cmp(iter->pos, k.k->p))) {
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos;
+ k = (struct bkey_s_c) { &iter->k, NULL };
+ }
+ } else {
+ struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX;
+
+ if (bkey_cmp(iter->pos, next) < 0) {
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos;
+ bch2_key_resize(&iter->k,
+ min_t(u64, KEY_SIZE_MAX,
+ (next.inode == iter->pos.inode
+ ? next.offset
+ : KEY_OFFSET_MAX) -
+ iter->pos.offset));
+
+ k = (struct bkey_s_c) { &iter->k, NULL };
+ EBUG_ON(!k.k->size);
+ }
}
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
+ iter->should_be_locked = true;
+
return k;
}
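/*
 * Illustrative sketch, not part of this patch: the hole-size arithmetic in the
 * extents branch of bch2_btree_iter_peek_slot() above, with positions reduced
 * to (inode, offset) pairs and made-up stand-ins for KEY_SIZE_MAX /
 * KEY_OFFSET_MAX. For example, with iter->pos = (5, 100) and the next key
 * starting at (5, 4096), the synthesized hole has size 3996; if the next key
 * lives in a different inode, the hole runs to the end of inode 5's offset
 * space, clamped to the maximum key size.
 */
#include <stdint.h>

#define SKETCH_KEY_SIZE_MAX    UINT32_MAX
#define SKETCH_KEY_OFFSET_MAX  UINT64_MAX

struct sketch_pos { uint64_t inode, offset; };

static uint64_t sketch_hole_size(struct sketch_pos pos, struct sketch_pos next_start)
{
    uint64_t end = next_start.inode == pos.inode
        ? next_start.offset
        : SKETCH_KEY_OFFSET_MAX;
    uint64_t size = end - pos.offset;

    return size < SKETCH_KEY_SIZE_MAX ? size : SKETCH_KEY_SIZE_MAX;
}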
return bch2_btree_iter_peek_slot(iter);
}
-struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
-{
- struct bkey_cached *ck;
- int ret;
-
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
- bch2_btree_iter_verify(iter);
-
- ret = btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- ck = (void *) iter->l[0].b;
-
- EBUG_ON(iter->btree_id != ck->key.btree_id ||
- bkey_cmp(iter->pos, ck->key.pos));
- BUG_ON(!ck->valid);
-
- return bkey_i_to_s_c(ck->k);
-}
-
static inline void bch2_btree_iter_init(struct btree_trans *trans,
struct btree_iter *iter, enum btree_id btree_id)
{
iter->trans = trans;
iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
iter->btree_id = btree_id;
+ iter->real_pos = POS_MIN;
iter->level = 0;
iter->min_depth = 0;
iter->locks_want = 0;
/* new transactional stuff: */
+static void btree_iter_child_free(struct btree_iter *iter)
+{
+ struct btree_iter *child = btree_iter_child(iter);
+
+ if (child) {
+ bch2_trans_iter_free(iter->trans, child);
+ iter->child_idx = U8_MAX;
+ }
+}
+
+static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
+ unsigned long ip)
+{
+ struct btree_trans *trans = iter->trans;
+ struct btree_iter *child = btree_iter_child(iter);
+
+ if (!child) {
+ child = btree_trans_iter_alloc(trans);
+ child->ip_allocated = ip;
+ iter->child_idx = child->idx;
+
+ trans->iters_live |= 1ULL << child->idx;
+ trans->iters_touched |= 1ULL << child->idx;
+ }
+
+ return child;
+}
+
static inline void __bch2_trans_iter_free(struct btree_trans *trans,
unsigned idx)
{
+ btree_iter_child_free(&trans->iters[idx]);
+
__bch2_btree_iter_unlock(&trans->iters[idx]);
trans->iters_linked &= ~(1ULL << idx);
trans->iters_live &= ~(1ULL << idx);
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
+ struct btree_iter *iter;
unsigned idx;
if (unlikely(trans->iters_linked ==
btree_trans_iter_alloc_fail(trans);
idx = __ffs64(~trans->iters_linked);
-
+ iter = &trans->iters[idx];
+
+ iter->trans = trans;
+ iter->idx = idx;
+ iter->child_idx = U8_MAX;
+ iter->flags = 0;
+ iter->nodes_locked = 0;
+ iter->nodes_intent_locked = 0;
trans->iters_linked |= 1ULL << idx;
- trans->iters[idx].idx = idx;
- trans->iters[idx].flags = 0;
- return &trans->iters[idx];
+ return iter;
}
-static inline void btree_iter_copy(struct btree_iter *dst,
- struct btree_iter *src)
+static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
- unsigned i, idx = dst->idx;
+ unsigned i;
- *dst = *src;
- dst->idx = idx;
- dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ __bch2_btree_iter_unlock(dst);
+ btree_iter_child_free(dst);
+
+ memcpy(&dst->flags, &src->flags,
+ sizeof(struct btree_iter) - offsetof(struct btree_iter, flags));
for (i = 0; i < BTREE_MAX_DEPTH; i++)
if (btree_node_locked(dst, i))
unsigned flags)
{
struct btree_iter *iter, *best = NULL;
+ struct bpos real_pos, pos_min = POS_MIN;
+
+ if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
+ btree_node_type_is_extents(btree_id) &&
+ !(flags & BTREE_ITER_NOT_EXTENTS) &&
+ !(flags & BTREE_ITER_ALL_SNAPSHOTS))
+ flags |= BTREE_ITER_IS_EXTENTS;
if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
!btree_type_has_snapshots(btree_id))
pos.snapshot = btree_type_has_snapshots(btree_id)
? U32_MAX : 0;
+ real_pos = pos;
+
+ if ((flags & BTREE_ITER_IS_EXTENTS) &&
+ bkey_cmp(pos, POS_MAX))
+ real_pos = bpos_nosnap_successor(pos);
+
trans_for_each_iter(trans, iter) {
if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
continue;
if (iter->btree_id != btree_id)
continue;
- if (best &&
- bkey_cmp(bpos_diff(best->real_pos, pos),
- bpos_diff(iter->real_pos, pos)) > 0)
- continue;
+ if (best) {
+ int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
+ bpos_diff(iter->real_pos, real_pos));
+
+ if (cmp < 0 ||
+ ((cmp == 0 && btree_iter_keep(trans, iter))))
+ continue;
+ }
best = iter;
}
trans->iters_live |= 1ULL << iter->idx;
trans->iters_touched |= 1ULL << iter->idx;
- if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
- btree_node_type_is_extents(btree_id) &&
- !(flags & BTREE_ITER_NOT_EXTENTS) &&
- !(flags & BTREE_ITER_ALL_SNAPSHOTS))
- flags |= BTREE_ITER_IS_EXTENTS;
-
iter->flags = flags;
iter->snapshot = pos.snapshot;
- locks_want = min(locks_want, BTREE_MAX_DEPTH);
+ /*
+ * If the iterator has locks_want greater than requested, we explicitly
+ * do not downgrade it here - on transaction restart because btree node
+ * split needs to upgrade locks, we might be putting/getting the
+ * iterator again. Downgrading iterators only happens via an explicit
+ * bch2_trans_downgrade().
+ */
+ locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > iter->locks_want) {
iter->locks_want = locks_want;
- btree_iter_get_locks(iter, true, false);
- } else if (locks_want < iter->locks_want) {
- __bch2_btree_iter_downgrade(iter, locks_want);
+ btree_iter_get_locks(iter, true, _THIS_IP_);
}
- while (iter->level < depth) {
+ while (iter->level != depth) {
btree_node_unlock(iter, iter->level);
iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
- iter->level++;
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ if (iter->level < depth)
+ iter->level++;
+ else
+ iter->level--;
}
- while (iter->level > depth)
- iter->l[--iter->level].b = BTREE_ITER_NO_NODE_INIT;
-
iter->min_depth = depth;
bch2_btree_iter_set_pos(iter, pos);
- btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
+ btree_iter_set_search_pos(iter, real_pos);
+
+ trace_trans_get_iter(_RET_IP_, trans->ip,
+ btree_id,
+ &real_pos, locks_want, iter->uptodate,
+ best ? &best->real_pos : &pos_min,
+ best ? best->locks_want : U8_MAX,
+ best ? best->uptodate : U8_MAX);
return iter;
}
return iter;
}
-static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
+void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
- if (size > trans->mem_bytes) {
+ size_t new_top = trans->mem_top + size;
+ void *p;
+
+ if (new_top > trans->mem_bytes) {
size_t old_bytes = trans->mem_bytes;
- size_t new_bytes = roundup_pow_of_two(size);
- void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ size_t new_bytes = roundup_pow_of_two(new_top);
+ void *new_mem;
+
+ WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+ new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_bytes = BTREE_TRANS_MEM_MAX;
+ kfree(trans->mem);
+ }
if (!new_mem)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
if (old_bytes) {
- trace_trans_restart_mem_realloced(trans->ip, new_bytes);
- return -EINTR;
+ trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
+ return ERR_PTR(-EINTR);
}
}
- return 0;
-}
-
-void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
-{
- void *p;
- int ret;
-
- ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
- if (ret)
- return ERR_PTR(ret);
-
p = trans->mem + trans->mem_top;
trans->mem_top += size;
+ memset(p, 0, size);
return p;
}
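/*
 * Illustrative sketch, not part of this patch: the allocation strategy of
 * bch2_trans_kmalloc() above as a standalone bump allocator. Allocations bump
 * an offset into one buffer; when the buffer is too small it is grown with
 * realloc, falling back to a single maximum-size buffer when that fails (the
 * mempool in the real code, modelled here with a plain malloc). If the buffer
 * had to be reallocated after allocations were already handed out, previously
 * returned pointers are stale, so the caller must restart - the kernel code
 * signals that with -EINTR, the sketch uses a flag.
 */
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define SKETCH_MEM_MAX (1U << 16)

struct sketch_trans_mem {
    char   *mem;
    size_t  bytes;      /* current buffer size */
    size_t  top;        /* bump offset */
};

static size_t sketch_roundup_pow2(size_t v)
{
    size_t r = 1;

    while (r < v)
        r <<= 1;
    return r;
}

/* Returns NULL on failure; sets *restart if old pointers were invalidated: */
static void *sketch_trans_alloc(struct sketch_trans_mem *t, size_t size, bool *restart)
{
    size_t new_top = t->top + size;

    *restart = false;

    if (new_top > t->bytes) {
        size_t old_bytes = t->bytes;
        size_t new_bytes = sketch_roundup_pow2(new_top);
        void *new_mem = realloc(t->mem, new_bytes);

        if (!new_mem && new_bytes <= SKETCH_MEM_MAX) {
            /* fall back to one maximum-size buffer: */
            new_mem = malloc(SKETCH_MEM_MAX);
            new_bytes = SKETCH_MEM_MAX;
            free(t->mem);
        }
        if (!new_mem)
            return NULL;

        t->mem   = new_mem;
        t->bytes = new_bytes;

        if (old_bytes) {
            *restart = true;    /* earlier allocations moved: restart */
            return NULL;
        }
    }

    void *p = t->mem + t->top;

    t->top += size;
    memset(p, 0, size);
    return p;
}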
{
struct btree_iter *iter;
- trans_for_each_iter(trans, iter)
+ trans_for_each_iter(trans, iter) {
iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
BTREE_ITER_SET_POS_AFTER_COMMIT);
+ iter->should_be_locked = false;
+ }
bch2_trans_unlink_iters(trans);
trans->iters_touched &= trans->iters_live;
+ trans->extra_journal_res = 0;
trans->nr_updates = 0;
- trans->nr_updates2 = 0;
trans->mem_top = 0;
trans->hooks = NULL;
if (!(flags & TRANS_RESET_NOUNLOCK))
bch2_trans_cond_resched(trans);
- if (!(flags & TRANS_RESET_NOTRAVERSE))
+ if (!(flags & TRANS_RESET_NOTRAVERSE) &&
+ trans->iters_linked)
bch2_btree_iter_traverse_all(trans);
}
trans->iters = p; p += iters_bytes;
trans->updates = p; p += updates_bytes;
- trans->updates2 = p; p += updates_bytes;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
unsigned expected_nr_iters,
size_t expected_mem_bytes)
+ __acquires(&c->btree_trans_barrier)
{
memset(trans, 0, sizeof(*trans));
trans->c = c;
if (expected_mem_bytes) {
trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
+
+ if (unlikely(!trans->mem)) {
+ trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
+ trans->mem_bytes = BTREE_TRANS_MEM_MAX;
+ }
}
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
}
int bch2_trans_exit(struct btree_trans *trans)
+ __releases(&c->btree_trans_barrier)
{
struct bch_fs *c = trans->c;
bch2_trans_unlock(trans);
#ifdef CONFIG_BCACHEFS_DEBUG
+ if (trans->iters_live) {
+ struct btree_iter *iter;
+
+ trans_for_each_iter(trans, iter)
+ btree_iter_child_free(iter);
+ }
+
if (trans->iters_live) {
struct btree_iter *iter;
bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
- kfree(trans->fs_usage_deltas);
- kfree(trans->mem);
+ if (trans->fs_usage_deltas) {
+ if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
+ REPLICAS_DELTA_LIST_MAX)
+ mempool_free(trans->fs_usage_deltas,
+ &trans->c->replicas_delta_pool);
+ else
+ kfree(trans->fs_usage_deltas);
+ }
+
+ if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
+ else
+ kfree(trans->mem);
#ifdef __KERNEL__
/*
*/
trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
#endif
+
if (trans->iters)
mempool_free(trans->iters, &trans->c->btree_iters_pool);
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
+ mempool_exit(&c->btree_trans_mem_pool);
mempool_exit(&c->btree_iters_pool);
cleanup_srcu_struct(&c->btree_trans_barrier);
}
return init_srcu_struct(&c->btree_trans_barrier) ?:
mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
sizeof(struct btree_iter) * nr +
- sizeof(struct btree_insert_entry) * nr +
- sizeof(struct btree_insert_entry) * nr);
+ sizeof(struct btree_insert_entry) * nr) ?:
+ mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
+ BTREE_TRANS_MEM_MAX);
}