#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
+static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *,
+ struct btree_iter_level *,
+ struct bkey *);
+
#define BTREE_ITER_NOT_END ((struct btree *) 1)
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
- return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
+ return l < BTREE_MAX_DEPTH &&
+ iter->l[l].b &&
+ iter->l[l].b != BTREE_ITER_NOT_END;
+}
+
+/* Returns < 0 if @k is before iter pos, > 0 if @k is after */
+static inline int __btree_iter_pos_cmp(struct btree_iter *iter,
+ const struct btree *b,
+ const struct bkey_packed *k,
+ bool interior_node)
+{
+ int cmp = bkey_cmp_left_packed(b, k, &iter->pos);
+
+ if (cmp)
+ return cmp;
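+ /*
+ * A deleted key at the search position counts as being before it, so
+ * iterators skip past whiteouts at iter->pos:
+ */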
+ if (bkey_deleted(k))
+ return -1;
+
+ /*
+ * Normally, for extents we want the first key strictly greater than
+ * the iterator position - with the exception that for interior nodes,
+ * we don't want to advance past the last key if the iterator position
+ * is POS_MAX:
+ */
+ if (iter->flags & BTREE_ITER_IS_EXTENTS &&
+ (!interior_node ||
+ bkey_cmp_left_packed_byval(b, k, POS_MAX)))
+ return -1;
+ return 1;
+}
+
+static inline int btree_iter_pos_cmp(struct btree_iter *iter,
+ const struct btree *b,
+ const struct bkey_packed *k)
+{
+ return __btree_iter_pos_cmp(iter, b, k, b->level != 0);
}
/* Btree node locking: */
{
struct btree_iter *linked;
- EBUG_ON(iter->nodes[b->level] != b);
- EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
-
- for_each_linked_btree_node(iter, b, linked)
- linked->lock_seq[b->level] += 2;
+ EBUG_ON(iter->l[b->level].b != b);
+ EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
- iter->lock_seq[b->level] += 2;
+ for_each_btree_iter_with_node(iter, b, linked)
+ linked->l[b->level].lock_seq += 2;
six_unlock_write(&b->lock);
}
-void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
+ struct bch_fs *c = iter->c;
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(iter->nodes[b->level] != b);
- EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
-
- if (six_trylock_write(&b->lock))
- return;
+ EBUG_ON(btree_node_read_locked(iter, b->level));
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[b->level] == b &&
+ if (linked->l[b->level].b == b &&
btree_node_read_locked(linked, b->level))
readers++;
- if (likely(!readers)) {
- six_lock_write(&b->lock);
- } else {
- /*
- * Must drop our read locks before calling six_lock_write() -
- * six_unlock() won't do wakeups until the reader count
- * goes to 0, and it's safe because we have the node intent
- * locked:
- */
- atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
- six_lock_write(&b->lock);
- atomic64_add(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
- }
+ /*
+ * Must drop our read locks before calling six_lock_write() -
+ * six_unlock() won't do wakeups until the reader count
+ * goes to 0, and it's safe because we have the node intent
+ * locked:
+ */
+ atomic64_sub(__SIX_VAL(read_lock, readers),
+ &b->lock.state.counter);
+ btree_node_lock_type(c, b, SIX_LOCK_write);
+ atomic64_add(__SIX_VAL(read_lock, readers),
+ &b->lock.state.counter);
}
-bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
- struct btree_iter *linked;
- struct btree *b = iter->nodes[level];
- enum btree_node_locked_type want = btree_lock_want(iter, level);
- enum btree_node_locked_type have = btree_node_locked_type(iter, level);
+ struct btree *b = btree_iter_node(iter, level);
+ int want = __btree_lock_want(iter, level);
- if (want == have)
- return true;
+ if (!b || b == BTREE_ITER_NOT_END)
+ return false;
+
+ if (race_fault())
+ return false;
+
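+ /*
+ * Try to relock using the lock sequence number we saved; failing that,
+ * if the node hasn't been modified since then, another linked iterator
+ * may still hold the lock and we can take another reference on it:
+ */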
+ if (!six_relock_type(&b->lock, want, iter->l[level].lock_seq) &&
+ !(iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+ btree_node_lock_increment(iter, b, level, want)))
+ return false;
+
+ mark_btree_node_locked(iter, level, want);
+ return true;
+}
+
+static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
+{
+ struct btree *b = iter->l[level].b;
+
+ EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
if (!is_btree_node(iter, level))
return false;
+ if (btree_node_intent_locked(iter, level))
+ return true;
+
if (race_fault())
return false;
- if (have != BTREE_NODE_UNLOCKED
- ? six_trylock_convert(&b->lock, have, want)
- : six_relock_type(&b->lock, want, iter->lock_seq[level]))
+ if (btree_node_locked(iter, level)
+ ? six_lock_tryupgrade(&b->lock)
+ : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
goto success;
- for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
- btree_node_locked_type(linked, level) == want &&
- iter->lock_seq[level] == b->lock.state.seq) {
- btree_node_unlock(iter, level);
- six_lock_increment(&b->lock, want);
- goto success;
- }
+ if (iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1 &&
+ btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_unlock(iter, level);
+ goto success;
+ }
return false;
success:
- mark_btree_node_unlocked(iter, level);
- mark_btree_node_locked(iter, level, want);
+ mark_btree_node_intent_locked(iter, level);
return true;
}
-/* Slowpath: */
-bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level,
- struct btree_iter *iter,
- enum six_lock_type type)
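+/*
+ * Relock (or upgrade to intent, if @upgrade) every level from iter->level up
+ * to iter->locks_want; if a level fails, it and everything below it is
+ * unlocked so that bch2_btree_iter_traverse() walks back down through them:
+ */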
+static inline bool btree_iter_get_locks(struct btree_iter *iter,
+ bool upgrade)
{
- struct btree_iter *linked;
+ unsigned l = iter->level;
+ int fail_idx = -1;
- /* Can't have children locked before ancestors: */
- EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));
-
- /*
- * Can't hold any read locks while we block taking an intent lock - see
- * below for reasoning, and we should have already dropped any read
- * locks in the current iterator
- */
- EBUG_ON(type == SIX_LOCK_intent &&
- iter->nodes_locked != iter->nodes_intent_locked);
+ do {
+ if (!btree_iter_node(iter, l))
+ break;
- for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
- btree_node_locked_type(linked, level) == type) {
- six_lock_increment(&b->lock, type);
- return true;
+ if (!(upgrade
+ ? bch2_btree_node_upgrade(iter, l)
+ : bch2_btree_node_relock(iter, l))) {
+ fail_idx = l;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
+ l++;
+ } while (l < iter->locks_want);
+
/*
- * Must lock btree nodes in key order - this case hapens when locking
- * the prev sibling in btree node merging:
+ * When we fail to get a lock, we have to ensure that any child nodes
+ * can't be relocked so bch2_btree_iter_traverse has to walk back up to
+ * the node that we failed to relock:
*/
- if (iter->nodes_locked &&
- __ffs(iter->nodes_locked) == level &&
- __btree_iter_cmp(iter->btree_id, pos, iter))
- return false;
+ while (fail_idx >= 0) {
+ btree_node_unlock(iter, fail_idx);
+ iter->l[fail_idx].b = BTREE_ITER_NOT_END;
+ --fail_idx;
+ }
+
+ if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
+ iter->uptodate = BTREE_ITER_NEED_PEEK;
+
+ bch2_btree_iter_verify_locks(iter);
+ return iter->uptodate < BTREE_ITER_NEED_RELOCK;
+}
- for_each_linked_btree_iter(iter, linked) {
+/* Slowpath: */
+bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
+ unsigned level,
+ struct btree_iter *iter,
+ enum six_lock_type type,
+ bool may_drop_locks)
+{
+ struct bch_fs *c = iter->c;
+ struct btree_iter *linked;
+ bool ret = true;
+
+ /* Check if it's safe to block: */
+ for_each_btree_iter(iter, linked) {
if (!linked->nodes_locked)
continue;
+ /* Must lock btree nodes in key order: */
+ if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
+ ret = false;
+
/*
* Can't block taking an intent lock if we have _any_ nodes read
* locked:
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- linked->locks_want = max_t(unsigned,
- linked->locks_want,
- iter->locks_want);
- return false;
+ if (may_drop_locks) {
+ linked->locks_want = max_t(unsigned,
+ linked->locks_want,
+ __fls(linked->nodes_locked) + 1);
+ btree_iter_get_locks(linked, true);
+ }
+ ret = false;
}
- /* We have to lock btree nodes in key order: */
- if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
- return false;
-
/*
* Interior nodes must be locked before their descendants: if
* another iterator has possible descendants locked of the node
*/
if (linked->btree_id == iter->btree_id &&
level > __fls(linked->nodes_locked)) {
- linked->locks_want = max_t(unsigned,
- linked->locks_want,
- iter->locks_want);
- return false;
+ if (may_drop_locks) {
+ linked->locks_want =
+ max(level + 1, max_t(unsigned,
+ linked->locks_want,
+ iter->locks_want));
+ btree_iter_get_locks(linked, true);
+ }
+ ret = false;
}
}
- six_lock_type(&b->lock, type);
- return true;
+ if (ret)
+ __btree_node_lock_type(c, b, type);
+ else
+ trans_restart();
+
+ return ret;
}
/* Btree iterator locking: */
-
-static void btree_iter_drop_extra_locks(struct btree_iter *iter)
+#ifdef CONFIG_BCACHEFS_DEBUG
+void __bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
unsigned l;
- while (iter->nodes_locked &&
- (l = __fls(iter->nodes_locked)) > iter->locks_want) {
- if (!btree_node_locked(iter, l))
- panic("l %u nodes_locked %u\n", l, iter->nodes_locked);
+ BUG_ON((iter->flags & BTREE_ITER_NOUNLOCK) &&
+ !btree_node_locked(iter, 0));
- if (l > iter->level) {
- btree_node_unlock(iter, l);
- } else if (btree_node_intent_locked(iter, l)) {
- six_lock_downgrade(&iter->nodes[l]->lock);
- iter->nodes_intent_locked ^= 1 << l;
- }
+ for (l = 0; btree_iter_node(iter, l); l++) {
+ if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
+ !btree_node_locked(iter, l))
+ continue;
+
+ BUG_ON(btree_lock_want(iter, l) !=
+ btree_node_locked_type(iter, l));
}
}
-bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
- unsigned new_locks_want)
+void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
struct btree_iter *linked;
- unsigned l;
- /* Drop locks we don't want anymore: */
- if (new_locks_want < iter->locks_want)
- for_each_linked_btree_iter(iter, linked)
- if (linked->locks_want > new_locks_want) {
- linked->locks_want = max_t(unsigned, 1,
- new_locks_want);
- btree_iter_drop_extra_locks(linked);
- }
+ for_each_btree_iter(iter, linked)
+ __bch2_btree_iter_verify_locks(linked);
+}
+#endif
+
+__flatten
+static bool __bch2_btree_iter_relock(struct btree_iter *iter)
+{
+ return iter->uptodate >= BTREE_ITER_NEED_RELOCK
+ ? btree_iter_get_locks(iter, false)
+ : true;
+}
+
+bool bch2_btree_iter_relock(struct btree_iter *iter)
+{
+ struct btree_iter *linked;
+ bool ret = true;
+
+ for_each_btree_iter(iter, linked)
+ ret &= __bch2_btree_iter_relock(linked);
+
+ return ret;
+}
+
+bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
+ unsigned new_locks_want)
+{
+ struct btree_iter *linked;
+
+ EBUG_ON(iter->locks_want >= new_locks_want);
iter->locks_want = new_locks_want;
- btree_iter_drop_extra_locks(iter);
- for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
- if (!bch2_btree_node_relock(iter, l))
- goto fail;
+ if (btree_iter_get_locks(iter, true))
+ return true;
- return true;
-fail:
/*
- * Just an optimization: ancestor nodes must be locked before child
- * nodes, so set locks_want on iterators that might lock ancestors
- * before us to avoid getting -EINTR later:
+ * Ancestor nodes must be locked before child nodes, so set locks_want
+ * on iterators that might lock ancestors before us to avoid getting
+ * -EINTR later:
*/
for_each_linked_btree_iter(iter, linked)
if (linked->btree_id == iter->btree_id &&
- btree_iter_cmp(linked, iter) <= 0)
- linked->locks_want = max_t(unsigned, linked->locks_want,
- new_locks_want);
+ btree_iter_cmp(linked, iter) <= 0 &&
+ linked->locks_want < new_locks_want) {
+ linked->locks_want = new_locks_want;
+ btree_iter_get_locks(linked, true);
+ }
+
return false;
}
-static int __bch2_btree_iter_unlock(struct btree_iter *iter)
+bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
+ unsigned new_locks_want)
{
- while (iter->nodes_locked)
- btree_node_unlock(iter, __ffs(iter->nodes_locked));
+ unsigned l = iter->level;
- return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
+ EBUG_ON(iter->locks_want >= new_locks_want);
+
+ iter->locks_want = new_locks_want;
+
+ do {
+ if (!btree_iter_node(iter, l))
+ break;
+
+ if (!bch2_btree_node_upgrade(iter, l)) {
+ iter->locks_want = l;
+ return false;
+ }
+
+ l++;
+ } while (l < iter->locks_want);
+
+ return true;
+}
+
+void __bch2_btree_iter_downgrade(struct btree_iter *iter,
+ unsigned downgrade_to)
+{
+ struct btree_iter *linked;
+ unsigned l;
+
+ /*
+ * We downgrade linked iterators as well because btree_iter_upgrade
+ * might have had to modify locks_want on linked iterators due to lock
+ * ordering:
+ */
+ for_each_btree_iter(iter, linked) {
+ unsigned new_locks_want = downgrade_to ?:
+ (linked->flags & BTREE_ITER_INTENT ? 1 : 0);
+
+ if (linked->locks_want <= new_locks_want)
+ continue;
+
+ linked->locks_want = new_locks_want;
+
+ while (linked->nodes_locked &&
+ (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
+ if (l > linked->level) {
+ btree_node_unlock(linked, l);
+ } else {
+ if (btree_node_intent_locked(linked, l)) {
+ six_lock_downgrade(&linked->l[l].b->lock);
+ linked->nodes_intent_locked ^= 1 << l;
+ }
+ break;
+ }
+ }
+ }
+
+ bch2_btree_iter_verify_locks(iter);
}
int bch2_btree_iter_unlock(struct btree_iter *iter)
{
struct btree_iter *linked;
- for_each_linked_btree_iter(iter, linked)
+ for_each_btree_iter(iter, linked)
__bch2_btree_iter_unlock(linked);
- return __bch2_btree_iter_unlock(iter);
+
+ return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}
/* Btree iterator: */
#ifdef CONFIG_BCACHEFS_DEBUG
static void __bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b)
+ struct btree *b)
{
- struct btree_node_iter *node_iter = &iter->node_iters[b->level];
- struct btree_node_iter tmp = *node_iter;
+ struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_node_iter tmp = l->iter;
struct bkey_packed *k;
- bch2_btree_node_iter_verify(node_iter, b);
+ if (iter->uptodate > BTREE_ITER_NEED_PEEK)
+ return;
+
+ bch2_btree_node_iter_verify(&l->iter, b);
/*
* For interior nodes, the iterator will have skipped past
* deleted keys:
+ *
+ * For extents, the iterator may have skipped past deleted keys (but not
+ * whiteouts)
*/
- k = b->level
- ? bch2_btree_node_iter_prev(&tmp, b)
+ k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
+ ? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
: bch2_btree_node_iter_prev_all(&tmp, b);
- if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
- iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch2_bkey_to_text(buf, sizeof(buf), &uk);
- panic("prev key should be before after pos:\n%s\n%llu:%llu\n",
+ bch2_bkey_to_text(&PBUF(buf), &uk);
+ panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
buf, iter->pos.inode, iter->pos.offset);
}
- k = bch2_btree_node_iter_peek_all(node_iter, b);
- if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
- iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ k = bch2_btree_node_iter_peek_all(&l->iter, b);
+ if (k && btree_iter_pos_cmp(iter, b, k) < 0) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch2_bkey_to_text(buf, sizeof(buf), &uk);
- panic("next key should be before iter pos:\n%llu:%llu\n%s\n",
+ bch2_bkey_to_text(&PBUF(buf), &uk);
+ panic("iter should be after current key:\n"
+ "iter pos %llu:%llu\n"
+ "cur key %s\n",
iter->pos.inode, iter->pos.offset, buf);
}
+
+ BUG_ON(iter->uptodate == BTREE_ITER_UPTODATE &&
+ (iter->flags & BTREE_ITER_TYPE) == BTREE_ITER_KEYS &&
+ !bkey_whiteout(&iter->k) &&
+ bch2_btree_node_iter_end(&l->iter));
}
void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- if (iter->nodes[b->level] == b)
- __bch2_btree_iter_verify(iter, b);
-
- for_each_linked_btree_node(iter, b, linked)
- __bch2_btree_iter_verify(iter, b);
+ for_each_btree_iter_with_node(iter, b, linked)
+ __bch2_btree_iter_verify(linked, b);
}
+#else
+
+static inline void __bch2_btree_iter_verify(struct btree_iter *iter,
+ struct btree *b) {}
+
#endif
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
struct btree_node_iter_set *set;
unsigned offset = __btree_node_key_to_offset(b, where);
int shift = new_u64s - clobber_u64s;
- unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;
+ unsigned old_end = t->end_offset - shift;
btree_node_iter_for_each(node_iter, set)
if (set->end == old_end)
/* didn't find the bset in the iterator - might have to readd it: */
if (new_u64s &&
- btree_iter_pos_cmp_packed(b, &iter->pos, where,
- iter->flags & BTREE_ITER_IS_EXTENTS))
+ btree_iter_pos_cmp(iter, b, where) > 0) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
bch2_btree_node_iter_push(node_iter, b, where, end);
+
+ if (!b->level &&
+ node_iter == &iter->l[0].iter)
+ bkey_disassemble(b,
+ bch2_btree_node_iter_peek_all(node_iter, b),
+ &iter->k);
+ }
return;
found:
- set->end = (int) set->end + shift;
+ set->end = t->end_offset;
/* Iterator hasn't gotten to the key that changed yet: */
if (set->k < offset)
return;
if (new_u64s &&
- btree_iter_pos_cmp_packed(b, &iter->pos, where,
- iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ btree_iter_pos_cmp(iter, b, where) > 0) {
set->k = offset;
- bch2_btree_node_iter_sort(node_iter, b);
} else if (set->k < offset + clobber_u64s) {
set->k = offset + new_u64s;
if (set->k == set->end)
- *set = node_iter->data[--node_iter->used];
- bch2_btree_node_iter_sort(node_iter, b);
+ bch2_btree_node_iter_set_drop(node_iter, set);
} else {
set->k = (int) set->k + shift;
+ goto iter_current_key_not_modified;
+ }
+
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
+ bch2_btree_node_iter_sort(node_iter, b);
+ if (!b->level && node_iter == &iter->l[0].iter) {
+ /*
+ * not legal to call bkey_debugcheck() here, because we're
+ * called midway through the update path after update has been
+ * marked but before deletes have actually happened:
+ */
+#if 0
+ __btree_iter_peek_all(iter, &iter->l[0], &iter->k);
+#endif
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_packed *k =
+ bch2_btree_node_iter_peek_all(&l->iter, l->b);
+
+ if (unlikely(!k))
+ iter->k.type = KEY_TYPE_deleted;
+ else
+ bkey_disassemble(l->b, k, &iter->k);
}
+iter_current_key_not_modified:
/*
* Interior nodes are special because iterators for interior nodes don't
* always point to the key for the child node the btree iterator points
* to.
*/
- if (b->level && new_u64s && !bkey_deleted(where) &&
- btree_iter_pos_cmp_packed(b, &iter->pos, where,
- iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ if (b->level && new_u64s &&
+ btree_iter_pos_cmp(iter, b, where) > 0) {
struct bset_tree *t;
struct bkey_packed *k;
k = bch2_bkey_prev_all(b, t,
bch2_btree_node_iter_bset_pos(node_iter, b, t));
if (k &&
- __btree_node_iter_cmp(node_iter, b,
- k, where) > 0) {
+ bkey_iter_cmp(b, k, where) > 0) {
struct btree_node_iter_set *set;
unsigned offset =
__btree_node_key_to_offset(b, bkey_next(k));
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bset_tree *t,
- struct bkey_packed *where,
- unsigned clobber_u64s,
- unsigned new_u64s)
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bkey_packed *where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
{
+ struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct btree_iter *linked;
- if (node_iter != &iter->node_iters[b->level])
+ if (node_iter != &iter->l[b->level].iter)
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
- if (iter->nodes[b->level] == b)
- __bch2_btree_node_iter_fix(iter, b,
- &iter->node_iters[b->level], t,
- where, clobber_u64s, new_u64s);
-
- for_each_linked_btree_node(iter, b, linked)
+ for_each_btree_iter_with_node(iter, b, linked)
__bch2_btree_node_iter_fix(linked, b,
- &linked->node_iters[b->level], t,
+ &linked->l[b->level].iter, t,
where, clobber_u64s, new_u64s);
-
- /* interior node iterators are... special... */
- if (!b->level)
- bch2_btree_iter_verify(iter, b);
}
-/* peek_all() doesn't skip deleted keys */
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
+static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
+ struct btree_iter_level *l,
+ struct bkey *u,
+ struct bkey_packed *k)
{
- struct btree *b = iter->nodes[iter->level];
- struct bkey_packed *k =
- bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
struct bkey_s_c ret;
- EBUG_ON(!btree_node_locked(iter, iter->level));
-
- if (!k)
+ if (unlikely(!k)) {
+ /*
+ * signal to bch2_btree_iter_peek_slot() that we're currently at
+ * a hole
+ */
+ u->type = KEY_TYPE_deleted;
return bkey_s_c_null;
+ }
- ret = bkey_disassemble(b, k, &iter->k);
+ ret = bkey_disassemble(l->b, k, u);
if (debug_check_bkeys(iter->c))
- bch2_bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
return ret;
}
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
+/* peek_all() doesn't skip deleted keys */
+static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
+ struct btree_iter_level *l,
+ struct bkey *u)
{
- struct btree *b = iter->nodes[iter->level];
- struct bkey_packed *k =
- bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
- struct bkey_s_c ret;
-
- EBUG_ON(!btree_node_locked(iter, iter->level));
+ return __btree_iter_unpack(iter, l, u,
+ bch2_btree_node_iter_peek_all(&l->iter, l->b));
+}
- if (!k)
- return bkey_s_c_null;
+static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
+ struct btree_iter_level *l)
+{
+ return __btree_iter_unpack(iter, l, &iter->k,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
+}
- ret = bkey_disassemble(b, k, &iter->k);
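+/*
+ * Advance the node iterator past any keys that compare before iter->pos;
+ * gives up and returns false after @max_advance steps (pass a negative
+ * @max_advance for no limit):
+ */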
+static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
+ struct btree_iter_level *l,
+ int max_advance)
+{
+ struct bkey_packed *k;
+ int nr_advanced = 0;
- if (debug_check_bkeys(iter->c))
- bch2_bkey_debugcheck(iter->c, b, ret);
+ while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
+ btree_iter_pos_cmp(iter, l->b, k) < 0) {
+ if (max_advance > 0 && nr_advanced >= max_advance)
+ return false;
- return ret;
-}
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+ nr_advanced++;
+ }
-static inline void __btree_iter_advance(struct btree_iter *iter)
-{
- bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
- iter->nodes[iter->level]);
+ return true;
}
/*
*/
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
+ struct btree_iter_level *l;
+ unsigned plevel;
bool parent_locked;
struct bkey_packed *k;
- if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
- !iter->nodes[b->level + 1])
+ if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
+ return;
+
+ plevel = b->level + 1;
+ if (!btree_iter_node(iter, plevel))
return;
- parent_locked = btree_node_locked(iter, b->level + 1);
+ parent_locked = btree_node_locked(iter, plevel);
- if (!bch2_btree_node_relock(iter, b->level + 1))
+ if (!bch2_btree_node_relock(iter, plevel))
return;
- k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
- iter->nodes[b->level + 1]);
+ l = &iter->l[plevel];
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
if (!k ||
bkey_deleted(k) ||
- bkey_cmp_left_packed(iter->nodes[b->level + 1],
- k, &b->key.k.p)) {
+ bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch2_bkey_to_text(buf, sizeof(buf), &uk);
+ bch2_bkey_to_text(&PBUF(buf), &uk);
panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
buf, b->key.k.p.inode, b->key.k.p.offset);
}
btree_node_unlock(iter, b->level + 1);
}
-static inline void __btree_iter_init(struct btree_iter *iter,
- struct btree *b)
+static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
+ struct btree *b)
{
- bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos,
- iter->flags & BTREE_ITER_IS_EXTENTS,
- btree_node_is_extents(b));
-
- /* Skip to first non whiteout: */
- if (b->level)
- bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
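+ /*
+ * b->key is never packed, so the key format (and hence the btree
+ * argument) isn't needed - it's safe to pass NULL here:
+ */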
+ return __btree_iter_pos_cmp(iter, NULL,
+ bkey_to_packed(&b->key), true) < 0;
}
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
{
return iter->btree_id == b->btree_id &&
bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
- btree_iter_pos_cmp(iter->pos, &b->key.k,
- iter->flags & BTREE_ITER_IS_EXTENTS);
+ !btree_iter_pos_after_node(iter, b);
+}
+
+static inline void __btree_iter_init(struct btree_iter *iter,
+ unsigned level)
+{
+ struct btree_iter_level *l = &iter->l[level];
+
+ bch2_btree_node_iter_init(&l->iter, l->b, &iter->pos);
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ btree_iter_advance_to_pos(iter, l, -1);
+
+ /* Skip to first non whiteout: */
+ if (level)
+ bch2_btree_node_iter_peek(&l->iter, l->b);
+
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
static inline void btree_iter_node_set(struct btree_iter *iter,
EBUG_ON(!btree_iter_pos_in_node(iter, b));
EBUG_ON(b->lock.state.seq & 1);
- iter->lock_seq[b->level] = b->lock.state.seq;
- iter->nodes[b->level] = b;
- __btree_iter_init(iter, b);
+ iter->l[b->level].lock_seq = b->lock.state.seq;
+ iter->l[b->level].b = b;
+ __btree_iter_init(iter, b->level);
}
/*
* A btree node is being replaced - update the iterator to point to the new
* node:
*/
-bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
+ enum btree_node_locked_type t;
struct btree_iter *linked;
- for_each_linked_btree_iter(iter, linked)
+ for_each_btree_iter(iter, linked)
if (btree_iter_pos_in_node(linked, b)) {
/*
* bch2_btree_iter_node_drop() has already been called -
*/
BUG_ON(btree_node_locked(linked, b->level));
- /*
- * If @linked wants this node read locked, we don't want
- * to actually take the read lock now because it's not
- * legal to hold read locks on other nodes while we take
- * write locks, so the journal can make forward
- * progress...
- *
- * Instead, btree_iter_node_set() sets things up so
- * bch2_btree_node_relock() will succeed:
- */
-
- if (btree_want_intent(linked, b->level)) {
- six_lock_increment(&b->lock, SIX_LOCK_intent);
- mark_btree_node_intent_locked(linked, b->level);
+ t = btree_lock_want(linked, b->level);
+ if (t != BTREE_NODE_UNLOCKED) {
+ six_lock_increment(&b->lock, t);
+ mark_btree_node_locked(linked, b->level, t);
}
btree_iter_node_set(linked, b);
}
- if (!btree_iter_pos_in_node(iter, b)) {
- six_unlock_intent(&b->lock);
- return false;
- }
-
- mark_btree_node_intent_locked(iter, b->level);
- btree_iter_node_set(iter, b);
- return true;
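+ /*
+ * Every iterator that needs @b took its own lock reference above, so
+ * drop the reference the caller passed in:
+ */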
+ six_unlock_intent(&b->lock);
}
-void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
unsigned level = b->level;
- for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b) {
- btree_node_unlock(linked, level);
- linked->nodes[level] = BTREE_ITER_NOT_END;
- }
-}
+ /* caller now responsible for unlocking @b */
-void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
-{
- unsigned level = b->level;
+ BUG_ON(iter->l[level].b != b);
+ BUG_ON(!btree_node_intent_locked(iter, level));
- if (iter->nodes[level] == b) {
- BUG_ON(b->lock.state.intent_lock != 1);
- btree_node_unlock(iter, level);
- iter->nodes[level] = BTREE_ITER_NOT_END;
- }
+ iter->l[level].b = BTREE_ITER_NOT_END;
+ mark_btree_node_unlocked(iter, level);
+
+ for_each_btree_iter(iter, linked)
+ if (linked->l[level].b == b) {
+ __btree_node_unlock(linked, level);
+ linked->l[level].b = BTREE_ITER_NOT_END;
+ }
}
/*
{
struct btree_iter *linked;
- for_each_linked_btree_node(iter, b, linked)
- __btree_iter_init(linked, b);
- __btree_iter_init(iter, b);
+ for_each_btree_iter_with_node(iter, b, linked)
+ __btree_iter_init(linked, b->level);
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
* that depth
*/
iter->level = depth_want;
- iter->nodes[iter->level] = NULL;
- return 0;
+ iter->l[iter->level].b = NULL;
+ return 1;
}
- lock_type = btree_lock_want(iter, iter->level);
+ lock_type = __btree_lock_want(iter, iter->level);
if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
- iter, lock_type)))
+ iter, lock_type, true)))
return -EINTR;
if (likely(b == c->btree_roots[iter->btree_id].b &&
b->level == iter->level &&
!race_fault())) {
for (i = 0; i < iter->level; i++)
- iter->nodes[i] = BTREE_ITER_NOT_END;
- iter->nodes[iter->level] = b;
+ iter->l[i].b = BTREE_ITER_NOT_END;
+ iter->l[iter->level].b = b;
mark_btree_node_locked(iter, iter->level, lock_type);
btree_iter_node_set(iter, b);
noinline
static void btree_iter_prefetch(struct btree_iter *iter)
{
- struct btree *b = iter->nodes[iter->level + 1];
- struct btree_node_iter node_iter = iter->node_iters[iter->level + 1];
+ struct btree_iter_level *l = &iter->l[iter->level];
+ struct btree_node_iter node_iter = l->iter;
struct bkey_packed *k;
BKEY_PADDED(k) tmp;
- unsigned nr = iter->level ? 1 : 8;
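+ /*
+ * Prefetch aggressively during recovery, before BCH_FS_STARTED is set;
+ * once the filesystem is up, only prefetch a couple of leaf nodes and
+ * no interior nodes:
+ */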
+ unsigned nr = test_bit(BCH_FS_STARTED, &iter->c->flags)
+ ? (iter->level > 1 ? 0 : 2)
+ : (iter->level > 1 ? 1 : 16);
+ bool was_locked = btree_node_locked(iter, iter->level);
while (nr) {
- bch2_btree_node_iter_advance(&node_iter, b);
- k = bch2_btree_node_iter_peek(&node_iter, b);
+ if (!bch2_btree_node_relock(iter, iter->level))
+ return;
+
+ bch2_btree_node_iter_advance(&node_iter, l->b);
+ k = bch2_btree_node_iter_peek(&node_iter, l->b);
if (!k)
break;
- bch2_bkey_unpack(b, &tmp.k, k);
- bch2_btree_node_prefetch(iter, &tmp.k, iter->level);
+ bch2_bkey_unpack(l->b, &tmp.k, k);
+ bch2_btree_node_prefetch(iter->c, iter, &tmp.k,
+ iter->level - 1);
}
+
+ if (!was_locked)
+ btree_node_unlock(iter, iter->level);
}
static inline int btree_iter_down(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[iter->level];
struct btree *b;
- struct bkey_s_c k = __btree_iter_peek(iter);
unsigned level = iter->level - 1;
- enum six_lock_type lock_type = btree_lock_want(iter, level);
+ enum six_lock_type lock_type = __btree_lock_want(iter, level);
BKEY_PADDED(k) tmp;
- bkey_reassemble(&tmp.k, k);
+ BUG_ON(!btree_node_locked(iter, iter->level));
+
+ bch2_bkey_unpack(l->b, &tmp.k,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
- b = bch2_btree_node_get(iter, &tmp.k, level, lock_type);
+ b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type, true);
if (unlikely(IS_ERR(b)))
return PTR_ERR(b);
- iter->level = level;
mark_btree_node_locked(iter, level, lock_type);
btree_iter_node_set(iter, b);
if (iter->flags & BTREE_ITER_PREFETCH)
btree_iter_prefetch(iter);
+ iter->level = level;
+
return 0;
}
closure_init_stack(&cl);
do {
- ret = bch2_btree_node_cannibalize_lock(c, &cl);
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
closure_sync(&cl);
} while (ret);
}
ret = btree_iter_linked(iter) ? -EINTR : 0;
out:
- bch2_btree_node_cannibalize_unlock(c);
+ bch2_btree_cache_cannibalize_unlock(c);
return ret;
io_error:
BUG_ON(ret != -EIO);
iter->flags |= BTREE_ITER_ERROR;
- iter->nodes[iter->level] = NULL;
+ iter->l[iter->level].b = BTREE_ITER_NOT_END;
goto out;
}
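+/*
+ * Walk up from iter->level, unlocking nodes and marking them
+ * BTREE_ITER_NOT_END, until we find a level that can be relocked (and, if
+ * @check_pos, whose node still covers iter->pos); returns that level:
+ */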
+static unsigned btree_iter_up_until_locked(struct btree_iter *iter,
+ bool check_pos)
+{
+ unsigned l = iter->level;
+
+ while (btree_iter_node(iter, l) &&
+ !(is_btree_node(iter, l) &&
+ bch2_btree_node_relock(iter, l) &&
+ (!check_pos ||
+ btree_iter_pos_in_node(iter, iter->l[l].b)))) {
+ btree_node_unlock(iter, l);
+ iter->l[l].b = BTREE_ITER_NOT_END;
+ l++;
+ }
+
+ return l;
+}
+
/*
* This is the main state machine for walking down the btree - walks down to a
* specified depth
{
unsigned depth_want = iter->level;
- /* make sure we have all the intent locks we need - ugh */
- if (unlikely(iter->nodes[iter->level] &&
- iter->level + 1 < iter->locks_want)) {
- unsigned i;
-
- for (i = iter->level + 1;
- i < iter->locks_want && iter->nodes[i];
- i++)
- if (!bch2_btree_node_relock(iter, i)) {
- while (iter->nodes[iter->level] &&
- iter->level + 1 < iter->locks_want)
- btree_iter_up(iter);
- break;
- }
- }
+ if (unlikely(iter->level >= BTREE_MAX_DEPTH))
+ return 0;
+
+ if (__bch2_btree_iter_relock(iter))
+ return 0;
/*
- * If the current node isn't locked, go up until we have a locked node
- * or run out of nodes:
+ * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
+ * here unnecessary
*/
- while (iter->nodes[iter->level] &&
- !(is_btree_node(iter, iter->level) &&
- bch2_btree_node_relock(iter, iter->level) &&
- btree_iter_pos_cmp(iter->pos,
- &iter->nodes[iter->level]->key.k,
- iter->flags & BTREE_ITER_IS_EXTENTS)))
- btree_iter_up(iter);
+ iter->level = btree_iter_up_until_locked(iter, true);
/*
* If we've got a btree node locked (i.e. we aren't about to relock the
* root) - advance its node iterator if necessary:
+ *
+ * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
*/
- if (iter->nodes[iter->level]) {
- struct bkey_s_c k;
-
- while ((k = __btree_iter_peek_all(iter)).k &&
- !btree_iter_pos_cmp(iter->pos, k.k,
- iter->flags & BTREE_ITER_IS_EXTENTS))
- __btree_iter_advance(iter);
- }
+ if (btree_iter_node(iter, iter->level))
+ btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
/*
* Note: iter->nodes[iter->level] may be temporarily NULL here - that
* btree_iter_lock_root() comes next and that it can't fail
*/
while (iter->level > depth_want) {
- int ret = iter->nodes[iter->level]
+ int ret = btree_iter_node(iter, iter->level)
? btree_iter_down(iter)
: btree_iter_lock_root(iter, depth_want);
if (unlikely(ret)) {
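+ /*
+ * ret == 1 means btree_iter_lock_root() found the btree is
+ * shallower than depth_want: iter->l[level].b was set to NULL
+ * and there's nothing left to traverse:
+ */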
+ if (ret == 1)
+ return 0;
+
iter->level = depth_want;
+ iter->l[iter->level].b = BTREE_ITER_NOT_END;
return ret;
}
}
+ iter->uptodate = BTREE_ITER_NEED_PEEK;
+
+ bch2_btree_iter_verify_locks(iter);
+ __bch2_btree_iter_verify(iter, iter->l[iter->level].b);
return 0;
}
{
int ret;
- if (unlikely(!iter->nodes[iter->level]))
- return 0;
-
- iter->flags &= ~BTREE_ITER_AT_END_OF_LEAF;
-
ret = __bch2_btree_iter_traverse(iter);
if (unlikely(ret))
ret = btree_iter_traverse_error(iter, ret);
+ BUG_ON(ret == -EINTR && !btree_iter_linked(iter));
+
return ret;
}
+static inline void bch2_btree_iter_checks(struct btree_iter *iter,
+ enum btree_iter_type type)
+{
+ EBUG_ON(iter->btree_id >= BTREE_ID_NR);
+ EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
+ (iter->btree_id == BTREE_ID_EXTENTS &&
+ type != BTREE_ITER_NODES));
+
+ bch2_btree_iter_verify_locks(iter);
+}
+
/* Iterate across nodes (leaf and interior nodes) */
struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
struct btree *b;
int ret;
- EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
+ bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+
+ if (iter->uptodate == BTREE_ITER_UPTODATE)
+ return iter->l[iter->level].b;
ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = btree_iter_node(iter, iter->level);
+ if (!b)
+ return NULL;
- if (b) {
- EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
- iter->pos = b->key.k.p;
- }
+ BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
+
+ iter->pos = b->key.k.p;
+ iter->uptodate = BTREE_ITER_UPTODATE;
return b;
}
struct btree *b;
int ret;
- EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
+ bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+
+ /* already got to end? */
+ if (!btree_iter_node(iter, iter->level))
+ return NULL;
btree_iter_up(iter);
- if (!iter->nodes[iter->level])
- return NULL;
+ if (!bch2_btree_node_relock(iter, iter->level))
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
- /* parent node usually won't be locked: redo traversal if necessary */
ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ /* got to end? */
+ b = btree_iter_node(iter, iter->level);
if (!b)
- return b;
+ return NULL;
if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
- /* Haven't gotten to the end of the parent node: */
+ /*
+ * Haven't gotten to the end of the parent node: go back down to
+ * the next child node
+ */
+
+ /*
+ * We don't really want to be unlocking here, but we can't
+ * directly tell btree_iter_traverse() "traverse to this level"
+ * except by setting iter->level, so we have to unlock so we
+ * don't screw up our lock invariants:
+ */
+ if (btree_node_read_locked(iter, iter->level))
+ btree_node_unlock(iter, iter->level);
/* ick: */
iter->pos = iter->btree_id == BTREE_ID_INODES
: bkey_successor(iter->pos);
iter->level = depth;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].b;
}
iter->pos = b->key.k.p;
+ iter->uptodate = BTREE_ITER_UPTODATE;
return b;
}
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- struct bkey_packed *k;
+ struct btree_iter_level *l = &iter->l[0];
EBUG_ON(iter->level != 0);
EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
EBUG_ON(!btree_node_locked(iter, 0));
- EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
+ EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);
- while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
- !btree_iter_pos_cmp_packed(b, &new_pos, k,
- iter->flags & BTREE_ITER_IS_EXTENTS))
- bch2_btree_node_iter_advance(node_iter, b);
+ iter->pos = new_pos;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
- if (!k &&
- !btree_iter_pos_cmp(new_pos, &b->key.k,
- iter->flags & BTREE_ITER_IS_EXTENTS))
- iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
+ btree_iter_advance_to_pos(iter, l, -1);
- iter->pos = new_pos;
+ if (bch2_btree_node_iter_end(&l->iter) &&
+ btree_iter_pos_after_node(iter, l->b))
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
- EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
+ int cmp = bkey_cmp(new_pos, iter->pos);
+ unsigned level;
+
+ if (!cmp)
+ return;
+
iter->pos = new_pos;
-}
-void bch2_btree_iter_advance_pos(struct btree_iter *iter)
-{
- /*
- * We use iter->k instead of iter->pos for extents: iter->pos will be
- * equal to the start of the extent we returned, but we need to advance
- * to the end of the extent we returned.
- */
- bch2_btree_iter_set_pos(iter,
- btree_type_successor(iter->btree_id, iter->k.p));
+ level = btree_iter_up_until_locked(iter, true);
+
+ if (btree_iter_node(iter, level)) {
+ /*
+ * We might have to skip over many keys, or just a few: try
+ * advancing the node iterator, and if we have to skip over too
+ * many keys just reinit it (or if we're rewinding, since that
+ * is expensive).
+ */
+ if (cmp < 0 ||
+ !btree_iter_advance_to_pos(iter, &iter->l[level], 8))
+ __btree_iter_init(iter, level);
+
+ /* Don't leave it locked if we're not supposed to: */
+ if (btree_lock_want(iter, level) == BTREE_NODE_UNLOCKED)
+ btree_node_unlock(iter, level);
+ }
+
+ if (level != iter->level)
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ else
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-/* XXX: expensive */
-void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
+static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
- /* incapable of rewinding across nodes: */
- BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_s_c ret = { .k = &iter->k };
+
+ if (!bkey_deleted(&iter->k)) {
+ EBUG_ON(bch2_btree_node_iter_end(&l->iter));
+ ret.v = bkeyp_val(&l->b->format,
+ __bch2_btree_node_iter_peek_all(&l->iter, l->b));
+ }
- iter->pos = pos;
- __btree_iter_init(iter, iter->nodes[iter->level]);
+ if (debug_check_bkeys(iter->c) &&
+ !bkey_deleted(ret.k))
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
+ return ret;
}
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
int ret;
- EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
- (iter->btree_id == BTREE_ID_EXTENTS));
+ bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+
+ if (iter->uptodate == BTREE_ITER_UPTODATE)
+ return btree_iter_peek_uptodate(iter);
while (1) {
ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ if (unlikely(ret))
return bkey_s_c_err(ret);
- }
- k = __btree_iter_peek(iter);
- if (likely(k.k)) {
- /*
- * iter->pos should always be equal to the key we just
- * returned - except extents can straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
- bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
- return k;
- }
+ k = __btree_iter_peek(iter, l);
+ if (likely(k.k))
+ break;
- iter->pos = iter->nodes[0]->key.k.p;
+ /* got to the end of the leaf, iterator needs to be traversed: */
+ iter->pos = l->b->key.k.p;
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
- if (!bkey_cmp(iter->pos, POS_MAX)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
- bch2_btree_iter_unlock(iter);
+ if (!bkey_cmp(iter->pos, POS_MAX))
return bkey_s_c_null;
- }
iter->pos = btree_type_successor(iter->btree_id, iter->pos);
}
+
+ /*
+ * iter->pos should always be equal to the key we just
+ * returned - except extents can straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter->pos = bkey_start_pos(k.k);
+
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+}
+
+static noinline
+struct bkey_s_c bch2_btree_iter_peek_next_leaf(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+
+ iter->pos = l->b->key.k.p;
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+
+ if (!bkey_cmp(iter->pos, POS_MAX))
+ return bkey_s_c_null;
+
+ iter->pos = btree_type_successor(iter->btree_id, iter->pos);
+
+ return bch2_btree_iter_peek(iter);
}
-struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_packed *p;
+ struct bkey_s_c k;
+
+ bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+
+ if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
+ k = bch2_btree_iter_peek(iter);
+ if (IS_ERR_OR_NULL(k.k))
+ return k;
+ }
+
+ do {
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+ p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ if (unlikely(!p))
+ return bch2_btree_iter_peek_next_leaf(iter);
+ } while (bkey_whiteout(p));
+
+ k = __btree_iter_unpack(iter, l, &iter->k, p);
+
+ EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0);
+ iter->pos = bkey_start_pos(k.k);
+ return k;
+}
+
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_packed *p;
struct bkey_s_c k;
- struct bkey n;
int ret;
- EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
- (iter->btree_id == BTREE_ID_EXTENTS));
+ bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+
+ if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
+ k = bch2_btree_iter_peek(iter);
+ if (IS_ERR(k.k))
+ return k;
+ }
while (1) {
+ p = bch2_btree_node_iter_prev(&l->iter, l->b);
+ if (likely(p))
+ break;
+
+ iter->pos = l->b->data->min_key;
+ if (!bkey_cmp(iter->pos, POS_MIN))
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter,
+ btree_type_predecessor(iter->btree_id, iter->pos));
+
ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ if (unlikely(ret))
return bkey_s_c_err(ret);
- }
- k = __btree_iter_peek_all(iter);
+ p = bch2_btree_node_iter_peek(&l->iter, l->b);
+ if (p)
+ break;
+ }
+
+ k = __btree_iter_unpack(iter, l, &iter->k, p);
+
+ EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+
+ iter->pos = bkey_start_pos(k.k);
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+}
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree_node_iter node_iter;
+ struct bkey_s_c k;
+ struct bkey n;
+ int ret;
+
recheck:
- if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
- /* hole */
- bkey_init(&n);
- n.p = iter->pos;
-
- if (iter->flags & BTREE_ITER_IS_EXTENTS) {
- if (n.p.offset == KEY_OFFSET_MAX) {
- iter->pos = bkey_successor(iter->pos);
- goto recheck;
- }
+ while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
+ bkey_deleted(k.k) &&
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) == 0)
+ bch2_btree_node_iter_advance(&l->iter, l->b);
- if (!k.k)
- k.k = &iter->nodes[0]->key.k;
+ /*
+ * iterator is now at the correct position for inserting at iter->pos,
+ * but we need to keep iterating until we find the first non whiteout so
+ * we know how big a hole we have, if any:
+ */
- bch2_key_resize(&n,
- min_t(u64, KEY_SIZE_MAX,
- (k.k->p.inode == n.p.inode
- ? bkey_start_offset(k.k)
- : KEY_OFFSET_MAX) -
- n.p.offset));
+ node_iter = l->iter;
+ if (k.k && bkey_whiteout(k.k))
+ k = __btree_iter_unpack(iter, l, &iter->k,
+ bch2_btree_node_iter_peek(&node_iter, l->b));
- EBUG_ON(!n.size);
- }
+ /*
+ * If we got to the end of the node, check if we need to traverse to the
+ * next node:
+ */
+ if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- iter->k = n;
- return (struct bkey_s_c) { &iter->k, NULL };
- } else if (!bkey_deleted(k.k)) {
- return k;
- } else {
- __btree_iter_advance(iter);
- }
+ goto recheck;
+ }
+
+ if (k.k &&
+ !bkey_whiteout(k.k) &&
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
+ /*
+ * if we skipped forward to find the first non whiteout and
+ * there _wasn't_ actually a hole, we want the iterator to be
+ * pointed at the key we found:
+ */
+ l->iter = node_iter;
+
+ EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0);
+ EBUG_ON(bkey_deleted(k.k));
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+ }
+
+ /* hole */
+
+ /* holes can't span inode numbers: */
+ if (iter->pos.offset == KEY_OFFSET_MAX) {
+ if (iter->pos.inode == KEY_INODE_MAX)
+ return bkey_s_c_null;
+
+ iter->pos = bkey_successor(iter->pos);
+ goto recheck;
+ }
+
+ if (!k.k)
+ k.k = &l->b->key.k;
+
+ bkey_init(&n);
+ n.p = iter->pos;
+ bch2_key_resize(&n,
+ min_t(u64, KEY_SIZE_MAX,
+ (k.k->p.inode == n.p.inode
+ ? bkey_start_offset(k.k)
+ : KEY_OFFSET_MAX) -
+ n.p.offset));
+
+ EBUG_ON(!n.size);
+
+ iter->k = n;
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return (struct bkey_s_c) { &iter->k, NULL };
+}
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_slot(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_s_c k;
+ int ret;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ return __bch2_btree_iter_peek_slot_extents(iter);
+
+recheck:
+ while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
+ bkey_deleted(k.k) &&
+ bkey_cmp(k.k->p, iter->pos) == 0)
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+
+ /*
+ * If we got to the end of the node, check if we need to traverse to the
+ * next node:
+ */
+ if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ goto recheck;
+ }
+
+ if (k.k &&
+ !bkey_deleted(k.k) &&
+ !bkey_cmp(iter->pos, k.k->p)) {
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+ } else {
+ /* hole */
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos;
+
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return (struct bkey_s_c) { &iter->k, NULL };
}
}
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+{
+ int ret;
+
+ bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);
+
+ if (iter->uptodate == BTREE_ITER_UPTODATE)
+ return btree_iter_peek_uptodate(iter);
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ return __bch2_btree_iter_peek_slot(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+{
+ bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);
+
+ iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
+
+ if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
+ /*
+ * XXX: when we just need to relock we should be able to avoid
+ * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
+ * for that to work
+ */
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+
+ return bch2_btree_iter_peek_slot(iter);
+ }
+
+ if (!bkey_deleted(&iter->k))
+ bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);
+
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
+ return __bch2_btree_iter_peek_slot(iter);
+}
+
void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned depth,
unsigned flags)
{
+ unsigned i;
+
+ EBUG_ON(depth >= BTREE_MAX_DEPTH);
+ EBUG_ON(locks_want > BTREE_MAX_DEPTH);
+
iter->c = c;
iter->pos = pos;
+ bkey_init(&iter->k);
+ iter->k.p = pos;
iter->flags = flags;
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
iter->btree_id = btree_id;
iter->level = depth;
- iter->locks_want = min(locks_want, BTREE_MAX_DEPTH);
+ iter->locks_want = locks_want;
iter->nodes_locked = 0;
iter->nodes_intent_locked = 0;
- memset(iter->nodes, 0, sizeof(iter->nodes));
- iter->nodes[iter->level] = BTREE_ITER_NOT_END;
+ for (i = 0; i < ARRAY_SIZE(iter->l); i++)
+ iter->l[i].b = NULL;
+ iter->l[iter->level].b = BTREE_ITER_NOT_END;
iter->next = iter;
prefetch(c->btree_roots[btree_id].b);
if (!btree_iter_linked(iter))
return;
- for_each_linked_btree_iter(iter, linked) {
-
+ for_each_linked_btree_iter(iter, linked)
if (linked->next == iter) {
linked->next = iter->next;
+ iter->next = iter;
return;
}
- }
BUG();
}
iter->next = new;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- unsigned nr_iters = 1;
+ unsigned nr_iters = 0;
- for_each_linked_btree_iter(iter, new)
- nr_iters++;
+ for_each_btree_iter(new, iter)
+ if (iter->btree_id == new->btree_id)
+ nr_iters++;
BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
}
void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
+ unsigned i;
+
__bch2_btree_iter_unlock(dst);
memcpy(dst, src, offsetof(struct btree_iter, next));
- dst->nodes_locked = dst->nodes_intent_locked = 0;
+
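+ /*
+ * The copy holds the same locks as @src: take an extra reference on
+ * each one:
+ */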
+ for (i = 0; i < BTREE_MAX_DEPTH; i++)
+ if (btree_node_locked(dst, i))
+ six_lock_increment(&dst->l[i].b->lock,
+ __btree_lock_want(dst, i));
+}
+
+/* new transactional stuff: */
+
+static void btree_trans_verify(struct btree_trans *trans)
+{
+ unsigned i;
+
+ for (i = 0; i < trans->nr_iters; i++) {
+ struct btree_iter *iter = &trans->iters[i];
+
+ BUG_ON(btree_iter_linked(iter) !=
+ ((trans->iters_linked & (1 << i)) &&
+ !is_power_of_2(trans->iters_linked)));
+ }
+}
+
+static inline unsigned btree_trans_iter_idx(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ ssize_t idx = iter - trans->iters;
+
+ BUG_ON(idx < 0 || idx >= trans->nr_iters);
+ BUG_ON(!(trans->iters_live & (1U << idx)));
+
+ return idx;
+}
+
+void bch2_trans_iter_put(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ ssize_t idx = btree_trans_iter_idx(trans, iter);
+
+ trans->iters_live &= ~(1U << idx);
+}
+
+void bch2_trans_iter_free(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ ssize_t idx = btree_trans_iter_idx(trans, iter);
+
+ trans->iters_live &= ~(1U << idx);
+ trans->iters_linked &= ~(1U << idx);
+ bch2_btree_iter_unlink(iter);
+}
+
+static int btree_trans_realloc_iters(struct btree_trans *trans)
+{
+ struct btree_iter *new_iters;
+ unsigned i;
+
+ bch2_trans_unlock(trans);
+
+ new_iters = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
+
+ memcpy(new_iters, trans->iters,
+ sizeof(struct btree_iter) * trans->nr_iters);
+ trans->iters = new_iters;
+
+ for (i = 0; i < trans->nr_iters; i++)
+ trans->iters[i].next = &trans->iters[i];
+
+ if (trans->iters_linked) {
+ unsigned first_linked = __ffs(trans->iters_linked);
+
+ for (i = first_linked + 1; i < trans->nr_iters; i++)
+ if (trans->iters_linked & (1 << i))
+ bch2_btree_iter_link(&trans->iters[first_linked],
+ &trans->iters[i]);
+ }
+
+ btree_trans_verify(trans);
+
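+ /*
+ * Iterator pointers the caller already holds still reference the old
+ * (on-stack) array, so the transaction has to be restarted:
+ */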
+ if (trans->iters_live) {
+ trans_restart();
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+void bch2_trans_preload_iters(struct btree_trans *trans)
+{
+ if (trans->iters == trans->iters_onstack)
+ btree_trans_realloc_iters(trans);
+}
+
+static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
+ unsigned btree_id,
+ unsigned flags, u64 iter_id)
+{
+ struct btree_iter *iter;
+ int idx;
+
+ BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
+
+ for (idx = 0; idx < trans->nr_iters; idx++)
+ if (trans->iter_ids[idx] == iter_id)
+ goto found;
+ idx = -1;
+found:
+ if (idx < 0) {
+ idx = ffz(trans->iters_linked);
+ if (idx < trans->nr_iters)
+ goto got_slot;
+
+ BUG_ON(trans->nr_iters == BTREE_ITER_MAX);
+
+ if (trans->iters == trans->iters_onstack &&
+ trans->nr_iters == ARRAY_SIZE(trans->iters_onstack)) {
+ int ret = btree_trans_realloc_iters(trans);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ idx = trans->nr_iters++;
+got_slot:
+ trans->iter_ids[idx] = iter_id;
+ iter = &trans->iters[idx];
+
+ bch2_btree_iter_init(iter, trans->c, btree_id, POS_MIN, flags);
+ } else {
+ iter = &trans->iters[idx];
+
+ iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+ iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+ }
+
+ BUG_ON(trans->iters_live & (1 << idx));
+ trans->iters_live |= 1 << idx;
+
+ if (trans->iters_linked &&
+ !(trans->iters_linked & (1 << idx)))
+ bch2_btree_iter_link(&trans->iters[__ffs(trans->iters_linked)],
+ iter);
+
+ trans->iters_linked |= 1 << idx;
+
+ btree_trans_verify(trans);
+
+ BUG_ON(iter->btree_id != btree_id);
+ BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
+
+ return iter;
+}
+
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bpos pos, unsigned flags,
+ u64 iter_id)
+{
+ struct btree_iter *iter =
+ __btree_trans_get_iter(trans, btree_id, flags, iter_id);
+
+ if (!IS_ERR(iter))
+ bch2_btree_iter_set_pos(iter, pos);
+ return iter;
+}
+
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
+ struct btree_iter *src,
+ u64 iter_id)
+{
+ struct btree_iter *iter =
+ __btree_trans_get_iter(trans, src->btree_id,
+ src->flags, iter_id);
+
+ if (!IS_ERR(iter))
+ bch2_btree_iter_copy(iter, src);
+ return iter;
+}
+
+void *bch2_trans_kmalloc(struct btree_trans *trans,
+ size_t size)
+{
+ void *ret;
+
+ if (trans->mem_top + size > trans->mem_bytes) {
+ size_t old_bytes = trans->mem_bytes;
+ size_t new_bytes = roundup_pow_of_two(trans->mem_top + size);
+ void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+
+ if (!new_mem)
+ return ERR_PTR(-ENOMEM);
+
+ trans->mem = new_mem;
+ trans->mem_bytes = new_bytes;
+
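+ /*
+ * krealloc() may have moved the buffer: pointers previously
+ * returned by bch2_trans_kmalloc() are stale now, so the
+ * transaction has to be restarted:
+ */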
+ if (old_bytes) {
+ trans_restart();
+ return ERR_PTR(-EINTR);
+ }
+ }
+
+ ret = trans->mem + trans->mem_top;
+ trans->mem_top += size;
+ return ret;
+}
+
+int bch2_trans_unlock(struct btree_trans *trans)
+{
+ unsigned iters = trans->iters_linked;
+ int ret = 0;
+
+ while (iters) {
+ unsigned idx = __ffs(iters);
+ struct btree_iter *iter = &trans->iters[idx];
+
+ if (iter->flags & BTREE_ITER_ERROR)
+ ret = -EIO;
+
+ __bch2_btree_iter_unlock(iter);
+ iters ^= 1 << idx;
+ }
+
+ return ret;
+}
+
+void __bch2_trans_begin(struct btree_trans *trans)
+{
+ unsigned idx;
+
+ btree_trans_verify(trans);
+
+ /*
+ * On transaction restart, the transaction isn't required to allocate
+ * all the same iterators it allocated on the last iteration:
+ *
+ * Unlink any iterators it didn't use this iteration, assuming it got
+ * further (allocated an iter with a higher idx) than where the iter
+ * was originally allocated:
+ */
+ while (trans->iters_linked &&
+ trans->iters_live &&
+ (idx = __fls(trans->iters_linked)) >
+ __fls(trans->iters_live)) {
+ trans->iters_linked ^= 1 << idx;
+ bch2_btree_iter_unlink(&trans->iters[idx]);
+ }
+
+ trans->iters_live = 0;
+ trans->nr_updates = 0;
+ trans->mem_top = 0;
+
+ btree_trans_verify(trans);
+}
+
+void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c)
+{
+ trans->c = c;
+ trans->nr_restarts = 0;
+ trans->nr_iters = 0;
+ trans->iters_live = 0;
+ trans->iters_linked = 0;
+ trans->nr_updates = 0;
+ trans->mem_top = 0;
+ trans->mem_bytes = 0;
+ trans->mem = NULL;
+ trans->iters = trans->iters_onstack;
+}
+
+int bch2_trans_exit(struct btree_trans *trans)
+{
+ int ret = bch2_trans_unlock(trans);
+
+ kfree(trans->mem);
+ if (trans->iters != trans->iters_onstack)
+ mempool_free(trans->iters, &trans->c->btree_iters_pool);
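+ /* poison the pointers, to catch use after bch2_trans_exit(): */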
+ trans->mem = (void *) 0x1;
+ trans->iters = (void *) 0x1;
+ return ret;
}