#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_locking.h"
+#include "btree_update.h"
#include "debug.h"
#include "extents.h"
+#include "journal.h"
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
-#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1)
-#define BTREE_ITER_NO_NODE_DROP ((struct btree *) 2)
-#define BTREE_ITER_NO_NODE_LOCK_ROOT ((struct btree *) 3)
-#define BTREE_ITER_NO_NODE_UP ((struct btree *) 4)
-#define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5)
-#define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6)
-#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7)
-
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
return l < BTREE_MAX_DEPTH &&
return pos;
}
+static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
+ struct btree *b)
+{
+ return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
+}
+
+static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
+ struct btree *b)
+{
+ return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
+}
+
+static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
+ struct btree *b)
+{
+ return iter->btree_id == b->c.btree_id &&
+ !btree_iter_pos_before_node(iter, b) &&
+ !btree_iter_pos_after_node(iter, b);
+}
+
/* Btree node locking: */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(!btree_node_intent_locked(iter, b->level));
+ EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
trans_for_each_iter(iter->trans, linked)
- if (linked->l[b->level].b == b &&
- btree_node_read_locked(linked, b->level))
+ if (linked->l[b->c.level].b == b &&
+ btree_node_read_locked(linked, b->c.level))
readers++;
/*
* locked:
*/
atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
atomic64_add(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
if (race_fault())
return false;
- if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) ||
+ if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
(btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter, b, level, want))) {
+ btree_node_lock_increment(iter->trans, b, level, want))) {
mark_btree_node_locked(iter, level, want);
return true;
} else {
return false;
if (btree_node_locked(iter, level)
- ? six_lock_tryupgrade(&b->lock)
- : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+ ? six_lock_tryupgrade(&b->c.lock)
+ : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
goto success;
if (btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(iter, level);
goto success;
}
? 0
: (unsigned long) iter->l[l].b,
is_btree_node(iter, l)
- ? iter->l[l].b->lock.state.seq
+ ? iter->l[l].b->c.lock.state.seq
: 0);
fail_idx = l;
/* Slowpath: */
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level,
- struct btree_iter *iter,
- enum six_lock_type type)
+ unsigned level, struct btree_iter *iter,
+ enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn,
+ void *p)
{
+ struct btree_trans *trans = iter->trans;
struct btree_iter *linked;
+ u64 start_time = local_clock();
bool ret = true;
/* Check if it's safe to block: */
- trans_for_each_iter(iter->trans, linked) {
+ trans_for_each_iter(trans, linked) {
if (!linked->nodes_locked)
continue;
- /*
- * Must lock btree nodes in key order:
- */
- if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
- ret = false;
-
/*
* Can't block taking an intent lock if we have _any_ nodes read
* locked:
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- if (!(iter->trans->nounlock)) {
+ if (!(trans->nounlock)) {
linked->locks_want = max_t(unsigned,
linked->locks_want,
__fls(linked->nodes_locked) + 1);
- btree_iter_get_locks(linked, true, false);
+ if (!btree_iter_get_locks(linked, true, false))
+ ret = false;
+ } else {
+ ret = false;
}
- ret = false;
}
/*
*/
if (linked->btree_id == iter->btree_id &&
level > __fls(linked->nodes_locked)) {
- if (!(iter->trans->nounlock)) {
+ if (!(trans->nounlock)) {
linked->locks_want =
max(level + 1, max_t(unsigned,
linked->locks_want,
iter->locks_want));
- btree_iter_get_locks(linked, true, false);
+ if (!btree_iter_get_locks(linked, true, false))
+ ret = false;
+ } else {
+ ret = false;
}
+ }
+
+ /* Must lock btree nodes in key order: */
+ if ((cmp_int(iter->btree_id, linked->btree_id) ?:
+ -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0)
+ ret = false;
+
+ if (iter->btree_id == linked->btree_id &&
+ btree_node_locked(linked, level) &&
+ bkey_cmp(pos, linked->l[level].b->key.k.p) <= 0)
ret = false;
+
+ /*
+ * Recheck if this is a node we already have locked - since one
+ * of the get_locks() calls might've successfully
+ * upgraded/relocked it:
+ */
+ if (linked->l[level].b == b &&
+ btree_node_locked_type(linked, level) >= type) {
+ six_lock_increment(&b->c.lock, type);
+ return true;
}
}
return false;
}
- __btree_node_lock_type(iter->trans->c, b, type);
+ if (six_trylock_type(&b->c.lock, type))
+ return true;
+
+ if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
+ return false;
+
+ bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
+ start_time);
return true;
}
/* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_btree_iter_verify_locks(struct btree_iter *iter)
+static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
unsigned l;
- for (l = 0; btree_iter_node(iter, l); l++) {
+ if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+ BUG_ON(iter->nodes_locked);
+ return;
+ }
+
+ for (l = 0; is_btree_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
continue;
{
struct btree_iter *iter;
- trans_for_each_iter(trans, iter)
+ trans_for_each_iter_all(trans, iter)
bch2_btree_iter_verify_locks(iter);
}
+#else
+static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
__flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
{
return btree_iter_get_locks(iter, false, trace);
}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
unsigned downgrade_to)
{
- struct btree_iter *linked;
- unsigned l;
-
- /*
- * We downgrade linked iterators as well because btree_iter_upgrade
- * might have had to modify locks_want on linked iterators due to lock
- * ordering:
- */
- trans_for_each_iter(iter->trans, linked) {
- unsigned new_locks_want = downgrade_to ?:
- (linked->flags & BTREE_ITER_INTENT ? 1 : 0);
-
- if (linked->locks_want <= new_locks_want)
- continue;
+ unsigned l, new_locks_want = downgrade_to ?:
+ (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
- linked->locks_want = new_locks_want;
+ if (iter->locks_want < downgrade_to) {
+ iter->locks_want = new_locks_want;
- while (linked->nodes_locked &&
- (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
- if (l > linked->level) {
- btree_node_unlock(linked, l);
+ while (iter->nodes_locked &&
+ (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+ if (l > iter->level) {
+ btree_node_unlock(iter, l);
} else {
- if (btree_node_intent_locked(linked, l)) {
- six_lock_downgrade(&linked->l[l].b->lock);
- linked->nodes_intent_locked ^= 1 << l;
+ if (btree_node_intent_locked(iter, l)) {
+ six_lock_downgrade(&iter->l[l].b->c.lock);
+ iter->nodes_intent_locked ^= 1 << l;
}
break;
}
bch2_btree_trans_verify_locks(iter->trans);
}
+void bch2_trans_downgrade(struct btree_trans *trans)
+{
+ struct btree_iter *iter;
+
+ trans_for_each_iter(trans, iter)
+ bch2_btree_iter_downgrade(iter);
+}
+
/* Btree transaction locking: */
bool bch2_trans_relock(struct btree_trans *trans)
#ifdef CONFIG_BCACHEFS_DEBUG
-static void __bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b)
+static void bch2_btree_iter_verify_level(struct btree_iter *iter,
+ unsigned level)
{
struct bpos pos = btree_iter_search_key(iter);
- struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_iter_level *l = &iter->l[level];
struct btree_node_iter tmp = l->iter;
- struct bkey_packed *k;
+ bool locked = btree_node_locked(iter, level);
+ struct bkey_packed *p, *k;
+ char buf1[100], buf2[100];
+ const char *msg;
if (!debug_check_iterators(iter->trans->c))
return;
- if (iter->uptodate > BTREE_ITER_NEED_PEEK)
+ BUG_ON(iter->level < iter->min_depth);
+
+ if (!btree_iter_node(iter, level))
+ return;
+
+ if (!bch2_btree_node_relock(iter, level))
return;
- bch2_btree_node_iter_verify(&l->iter, b);
+ /*
+ * Ideally this invariant would always be true, and hopefully in the
+ * future it will be, but for now set_pos_same_leaf() breaks it:
+ */
+ BUG_ON(iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
+ !btree_iter_pos_in_node(iter, l->b));
+
+ /*
+ * node iterators (BTREE_ITER_NODES) don't use the leaf node iterator:
+ */
+ if (btree_iter_type(iter) == BTREE_ITER_NODES &&
+ level <= iter->min_depth)
+ goto unlock;
+
+ bch2_btree_node_iter_verify(&l->iter, l->b);
/*
* For interior nodes, the iterator will have skipped past
* For extents, the iterator may have skipped past deleted keys (but not
* whiteouts)
*/
- k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
- ? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
- : bch2_btree_node_iter_prev_all(&tmp, b);
- if (k && bkey_iter_pos_cmp(b, k, &pos) >= 0) {
- char buf[100];
- struct bkey uk = bkey_unpack_key(b, k);
+ p = level || btree_node_type_is_extents(iter->btree_id)
+ ? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard)
+ : bch2_btree_node_iter_prev_all(&tmp, l->b);
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- bch2_bkey_to_text(&PBUF(buf), &uk);
- panic("iterator should be before prev key:\n%s\n%llu:%llu\n",
- buf, iter->pos.inode, iter->pos.offset);
+ if (p && bkey_iter_pos_cmp(l->b, p, &pos) >= 0) {
+ msg = "before";
+ goto err;
}
- k = bch2_btree_node_iter_peek_all(&l->iter, b);
- if (k && bkey_iter_pos_cmp(b, k, &pos) < 0) {
- char buf[100];
- struct bkey uk = bkey_unpack_key(b, k);
+ if (k && bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
+ msg = "after";
+ goto err;
+ }
+unlock:
+ if (!locked)
+ btree_node_unlock(iter, level);
+ return;
+err:
+ strcpy(buf1, "(none)");
+ strcpy(buf2, "(none)");
+
+ if (p) {
+ struct bkey uk = bkey_unpack_key(l->b, p);
+ bch2_bkey_to_text(&PBUF(buf1), &uk);
+ }
- bch2_bkey_to_text(&PBUF(buf), &uk);
- panic("iter should be after current key:\n"
- "iter pos %llu:%llu\n"
- "cur key %s\n",
- iter->pos.inode, iter->pos.offset, buf);
+ if (k) {
+ struct bkey uk = bkey_unpack_key(l->b, k);
+ bch2_bkey_to_text(&PBUF(buf2), &uk);
}
+
+ panic("iterator should be %s key at level %u:\n"
+ "iter pos %s %llu:%llu\n"
+ "prev key %s\n"
+ "cur key %s\n",
+ msg, level,
+ iter->flags & BTREE_ITER_IS_EXTENTS ? ">" : "=>",
+ iter->pos.inode, iter->pos.offset,
+ buf1, buf2);
}
-void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
+static void bch2_btree_iter_verify(struct btree_iter *iter)
{
- struct btree_iter *linked;
+ unsigned i;
- if (!debug_check_iterators(iter->trans->c))
+ bch2_btree_trans_verify_locks(iter->trans);
+
+ for (i = 0; i < BTREE_MAX_DEPTH; i++)
+ bch2_btree_iter_verify_level(iter, i);
+}
+
+void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
+{
+ struct btree_iter *iter;
+
+ if (!debug_check_iterators(trans->c))
return;
- trans_for_each_iter_with_node(iter->trans, b, linked)
- __bch2_btree_iter_verify(linked, b);
+ trans_for_each_iter_with_node(trans, b, iter)
+ bch2_btree_iter_verify_level(iter, b->c.level);
}
#else
-static inline void __bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b) {}
+static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
+static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
#endif
struct btree *b,
struct bkey_packed *where)
{
- struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_iter_level *l = &iter->l[b->c.level];
struct bpos pos = btree_iter_search_key(iter);
if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_iter_fix_key_modified(linked, b, where);
- __bch2_btree_iter_verify(linked, b);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
*/
if (!bch2_btree_node_iter_end(node_iter) &&
iter_current_key_modified &&
- (b->level ||
- (iter->flags & BTREE_ITER_IS_EXTENTS))) {
+ (b->c.level ||
+ btree_node_type_is_extents(iter->btree_id))) {
struct bset_tree *t;
struct bkey_packed *k, *k2, *p;
}
}
- if (!b->level &&
+ if (!b->c.level &&
node_iter == &iter->l[0].iter &&
- iter_current_key_modified) {
- struct bkey_packed *k =
- bch2_btree_node_iter_peek_all(node_iter, b);
-
- if (likely(k)) {
- bkey_disassemble(b, k, &iter->k);
- } else {
- /* XXX: for extents, calculate size of hole? */
- iter->k.type = KEY_TYPE_deleted;
- }
-
+ iter_current_key_modified)
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
- }
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct btree_iter *linked;
- if (node_iter != &iter->l[b->level].iter) {
+ if (node_iter != &iter->l[b->c.level].iter) {
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
- bch2_btree_node_iter_verify(node_iter, b);
+
+ if (debug_check_iterators(iter->trans->c))
+ bch2_btree_node_iter_verify(node_iter, b);
}
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_node_iter_fix(linked, b,
- &linked->l[b->level].iter, t,
+ &linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
- __bch2_btree_iter_verify(linked, b);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
return;
- plevel = b->level + 1;
+ plevel = b->c.level + 1;
if (!btree_iter_node(iter, plevel))
return;
}
if (!parent_locked)
- btree_node_unlock(iter, b->level + 1);
-}
-
-static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
- struct btree *b)
-{
- return bkey_cmp(iter->pos, b->data->min_key) < 0;
-}
-
-static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
- struct btree *b)
-{
- return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
-}
-
-static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
- struct btree *b)
-{
- return iter->btree_id == b->btree_id &&
- !btree_iter_pos_before_node(iter, b) &&
- !btree_iter_pos_after_node(iter, b);
+ btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
static inline void btree_iter_node_set(struct btree_iter *iter,
struct btree *b)
{
+ BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+
btree_iter_verify_new_node(iter, b);
EBUG_ON(!btree_iter_pos_in_node(iter, b));
- EBUG_ON(b->lock.state.seq & 1);
+ EBUG_ON(b->c.lock.state.seq & 1);
- iter->l[b->level].lock_seq = b->lock.state.seq;
- iter->l[b->level].b = b;
- __btree_iter_init(iter, b->level);
+ iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ iter->l[b->c.level].b = b;
+ __btree_iter_init(iter, b->c.level);
}
/*
struct btree_iter *linked;
trans_for_each_iter(iter->trans, linked)
- if (btree_iter_pos_in_node(linked, b)) {
+ if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
+ btree_iter_pos_in_node(linked, b)) {
/*
* bch2_btree_iter_node_drop() has already been called -
* the old node we're replacing has already been
* unlocked and the pointer invalidated
*/
- BUG_ON(btree_node_locked(linked, b->level));
+ BUG_ON(btree_node_locked(linked, b->c.level));
- t = btree_lock_want(linked, b->level);
+ t = btree_lock_want(linked, b->c.level);
if (t != BTREE_NODE_UNLOCKED) {
- six_lock_increment(&b->lock, t);
- mark_btree_node_locked(linked, b->level, t);
+ six_lock_increment(&b->c.lock, t);
+ mark_btree_node_locked(linked, b->c.level, t);
}
btree_iter_node_set(linked, b);
void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- unsigned level = b->level;
+ unsigned level = b->c.level;
trans_for_each_iter(iter->trans, linked)
if (linked->l[level].b == b) {
struct btree_iter *linked;
trans_for_each_iter_with_node(iter->trans, b, linked)
- __btree_iter_init(linked, b->level);
+ __btree_iter_init(linked, b->c.level);
+}
+
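+/*
+ * should_sleep_fn callback used when taking the root lock: abort the lock
+ * attempt if the btree root was replaced while we were waiting for it, so
+ * the caller fails with -EINTR instead of locking a stale root.
+ */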
+static int lock_root_check_fn(struct six_lock *lock, void *p)
+{
+ struct btree *b = container_of(lock, struct btree, c.lock);
+ struct btree **rootp = p;
+
+ return b == *rootp ? 0 : -1;
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
unsigned depth_want)
{
struct bch_fs *c = iter->trans->c;
- struct btree *b;
+ struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
enum six_lock_type lock_type;
unsigned i;
EBUG_ON(iter->nodes_locked);
while (1) {
- b = READ_ONCE(c->btree_roots[iter->btree_id].b);
- iter->level = READ_ONCE(b->level);
+ b = READ_ONCE(*rootp);
+ iter->level = READ_ONCE(b->c.level);
if (unlikely(iter->level < depth_want)) {
/*
lock_type = __btree_lock_want(iter, iter->level);
if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
- iter, lock_type)))
+ iter, lock_type,
+ lock_root_check_fn, rootp)))
return -EINTR;
- if (likely(b == c->btree_roots[iter->btree_id].b &&
- b->level == iter->level &&
+ if (likely(b == READ_ONCE(*rootp) &&
+ b->c.level == iter->level &&
!race_fault())) {
for (i = 0; i < iter->level; i++)
iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
return 0;
}
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
}
}
btree_node_unlock(iter, iter->level);
}
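+/*
+ * Stash the in-memory address of a child node in the parent's
+ * KEY_TYPE_btree_ptr_v2 key (bp->mem_ptr), relocking the parent level if
+ * necessary; called from btree_iter_down() when the cached pointer is
+ * missing or stale.
+ */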
+static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+ unsigned plevel, struct btree *b)
+{
+ struct btree_iter_level *l = &iter->l[plevel];
+ bool locked = btree_node_locked(iter, plevel);
+ struct bkey_packed *k;
+ struct bch_btree_ptr_v2 *bp;
+
+ if (!bch2_btree_node_relock(iter, plevel))
+ return;
+
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+ bp = (void *) bkeyp_val(&l->b->format, k);
+ bp->mem_ptr = (unsigned long)b;
+
+ if (!locked)
+ btree_node_unlock(iter, plevel);
+}
+
static __always_inline int btree_iter_down(struct btree_iter *iter)
{
struct bch_fs *c = iter->trans->c;
mark_btree_node_locked(iter, level, lock_type);
btree_iter_node_set(iter, b);
+ if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
+ unlikely(b != btree_node_mem_ptr(&tmp.k)))
+ btree_node_mem_ptr_set(iter, level + 1, b);
+
if (iter->flags & BTREE_ITER_PREFETCH)
btree_iter_prefetch(iter);
static int btree_iter_traverse_one(struct btree_iter *);
-static int __btree_iter_traverse_all(struct btree_trans *trans,
- struct btree_iter *orig_iter, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
u8 sorted[BTREE_ITER_MAX];
unsigned i, nr_sorted = 0;
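+ /* if we're already in traverse_all, don't recurse - return -EINTR: */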
+ if (trans->in_traverse_all)
+ return -EINTR;
+
+ trans->in_traverse_all = true;
+retry_all:
+ nr_sorted = 0;
+
trans_for_each_iter(trans, iter)
- sorted[nr_sorted++] = iter - trans->iters;
+ sorted[nr_sorted++] = iter->idx;
#define btree_iter_cmp_by_idx(_l, _r) \
btree_iter_cmp(&trans->iters[_l], &trans->iters[_r])
bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx
-
-retry_all:
bch2_trans_unlock(trans);
if (unlikely(ret == -ENOMEM)) {
if (unlikely(ret == -EIO)) {
trans->error = true;
- if (orig_iter) {
- orig_iter->flags |= BTREE_ITER_ERROR;
- orig_iter->l[orig_iter->level].b =
- BTREE_ITER_NO_NODE_ERROR;
- }
goto out;
}
/* Now, redo traversals in correct order: */
for (i = 0; i < nr_sorted; i++) {
- iter = &trans->iters[sorted[i]];
+ unsigned idx = sorted[i];
+
+ /*
+ * successfully traversing one iterator can cause another to be
+ * unlinked, in btree_key_cache_fill()
+ */
+ if (!(trans->iters_linked & (1ULL << idx)))
+ continue;
- ret = btree_iter_traverse_one(iter);
+ ret = btree_iter_traverse_one(&trans->iters[idx]);
if (ret)
goto retry_all;
}
- ret = hweight64(trans->iters_live) > 1 ? -EINTR : 0;
+ if (hweight64(trans->iters_live) > 1)
+ ret = -EINTR;
+ else
+ trans_for_each_iter(trans, iter)
+ if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
+ ret = -EINTR;
+ break;
+ }
out:
bch2_btree_cache_cannibalize_unlock(c);
+
+ trans->in_traverse_all = false;
return ret;
}
int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
- return __btree_iter_traverse_all(trans, NULL, 0);
+ return __btree_iter_traverse_all(trans, 0);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
{
unsigned depth_want = iter->level;
- if (unlikely(iter->level >= BTREE_MAX_DEPTH))
- return 0;
-
/*
* if we need interior nodes locked, call btree_iter_relock() to make
* sure we walk back up enough that we lock them:
iter->locks_want > 1)
bch2_btree_iter_relock(iter, false);
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+ return bch2_btree_iter_traverse_cached(iter);
+
if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
return 0;
+ if (unlikely(iter->level >= BTREE_MAX_DEPTH))
+ return 0;
+
/*
* XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
* here unnecessary
return 0;
iter->level = depth_want;
- iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN;
+
+ if (ret == -EIO) {
+ iter->flags |= BTREE_ITER_ERROR;
+ iter->l[iter->level].b =
+ BTREE_ITER_NO_NODE_ERROR;
+ } else {
+ iter->l[iter->level].b =
+ BTREE_ITER_NO_NODE_DOWN;
+ }
return ret;
}
}
iter->uptodate = BTREE_ITER_NEED_PEEK;
- bch2_btree_trans_verify_locks(iter->trans);
- if (btree_iter_node(iter, iter->level))
- __bch2_btree_iter_verify(iter, iter->l[iter->level].b);
+ bch2_btree_iter_verify(iter);
return 0;
}
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
int ret;
- ret = bch2_trans_cond_resched(iter->trans) ?:
+ ret = bch2_trans_cond_resched(trans) ?:
btree_iter_traverse_one(iter);
if (unlikely(ret))
- ret = __btree_iter_traverse_all(iter->trans, iter, ret);
+ ret = __btree_iter_traverse_all(trans, ret);
return ret;
}
enum btree_iter_type type)
{
EBUG_ON(iter->btree_id >= BTREE_ID_NR);
- EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
- (btree_node_type_is_extents(iter->btree_id) &&
- type != BTREE_ITER_NODES));
EBUG_ON(btree_iter_type(iter) != type);
- bch2_btree_trans_verify_locks(iter->trans);
+ BUG_ON(type == BTREE_ITER_KEYS &&
+ (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
+ bkey_cmp(iter->pos, iter->k.p) > 0));
+
+ bch2_btree_iter_verify_locks(iter);
+ bch2_btree_iter_verify_level(iter, iter->level);
}
/* Iterate across nodes (leaf and interior nodes) */
iter->pos = b->key.k.p;
iter->uptodate = BTREE_ITER_UPTODATE;
+ bch2_btree_iter_verify(iter);
+
return b;
}
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
struct btree *b;
int ret;
if (btree_node_read_locked(iter, iter->level))
btree_node_unlock(iter, iter->level);
- /* ick: */
- iter->pos = iter->btree_id == BTREE_ID_INODES
- ? btree_type_successor(iter->btree_id, iter->pos)
- : bkey_successor(iter->pos);
- iter->level = depth;
+ iter->pos = bkey_successor(iter->pos);
+ iter->level = iter->min_depth;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
iter->pos = b->key.k.p;
iter->uptodate = BTREE_ITER_UPTODATE;
+ bch2_btree_iter_verify(iter);
+
return b;
}
EBUG_ON(!btree_node_locked(iter, 0));
EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);
- iter->pos = new_pos;
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = new_pos;
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
btree_iter_advance_to_pos(iter, l, -1);
+ /*
+ * XXX:
+ * keeping a node locked that's outside (even just outside) iter->pos
+ * breaks __bch2_btree_node_lock(). This seems to only affect
+ * bch2_btree_node_get_sibling so for now it's fixed there, but we
+ * should try to get rid of this corner case.
+ *
+ * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
+ */
+
if (bch2_btree_node_iter_end(&l->iter) &&
btree_iter_pos_after_node(iter, l->b))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
-static unsigned btree_iter_pos_changed(struct btree_iter *iter, int cmp)
+static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
{
- unsigned l = btree_iter_up_until_good_node(iter, cmp);
+ unsigned l = iter->level;
+
+ if (!cmp)
+ goto out;
+
+ if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
+ btree_node_unlock(iter, 0);
+ iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ return;
+ }
+
+ l = btree_iter_up_until_good_node(iter, cmp);
if (btree_iter_node(iter, l)) {
/*
if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
btree_node_unlock(iter, l);
}
+out:
+ if (l != iter->level)
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ else
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+}
- return l;
+void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos,
+ bool strictly_greater)
+{
+ struct bpos old = btree_iter_search_key(iter);
+ int cmp;
+
+ iter->flags &= ~BTREE_ITER_IS_EXTENTS;
+ iter->flags |= strictly_greater ? BTREE_ITER_IS_EXTENTS : 0;
+
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = new_pos;
+
+ cmp = bkey_cmp(btree_iter_search_key(iter), old);
+
+ btree_iter_pos_changed(iter, cmp);
}
void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
int cmp = bkey_cmp(new_pos, iter->pos);
- unsigned l;
-
- if (!cmp)
- return;
-
- iter->pos = new_pos;
- l = btree_iter_pos_changed(iter, cmp);
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = new_pos;
- if (l != iter->level)
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- else
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+ btree_iter_pos_changed(iter, cmp);
}
static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
+ bool ret;
- iter->pos = l->b->key.k.p;
- iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = l->b->key.k.p;
- if (!bkey_cmp(iter->pos, POS_MAX)) {
- bkey_init(&iter->k);
- iter->k.p = POS_MAX;
- return false;
- }
+ ret = bkey_cmp(iter->pos, POS_MAX) != 0;
+ if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter->k.p = iter->pos = bkey_successor(iter->pos);
- iter->pos = btree_type_successor(iter->btree_id, iter->pos);
btree_iter_pos_changed(iter, 1);
- return true;
+ return ret;
}
static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
+ bool ret;
- iter->pos = l->b->data->min_key;
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = l->b->data->min_key;
iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
- if (!bkey_cmp(iter->pos, POS_MIN)) {
- bkey_init(&iter->k);
- iter->k.p = POS_MIN;
- return false;
+ ret = bkey_cmp(iter->pos, POS_MIN) != 0;
+ if (ret) {
+ iter->k.p = iter->pos = bkey_predecessor(iter->pos);
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ iter->k.p = iter->pos = bkey_predecessor(iter->pos);
}
- iter->pos = btree_type_predecessor(iter->btree_id, iter->pos);
btree_iter_pos_changed(iter, -1);
- return true;
+ return ret;
}
+/**
+ * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
+ * it currently points to
+ */
static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
- if (iter->uptodate == BTREE_ITER_UPTODATE)
+ if (iter->uptodate == BTREE_ITER_UPTODATE &&
+ !bkey_deleted(&iter->k))
return btree_iter_peek_uptodate(iter);
while (1) {
iter->pos = bkey_start_pos(k.k);
iter->uptodate = BTREE_ITER_UPTODATE;
+
+ bch2_btree_iter_verify_level(iter, 0);
return k;
}
* position
*/
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+{
+ if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter,
+ (iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? iter->k.p
+ : bkey_successor(iter->k.p));
+
+ return bch2_btree_iter_peek(iter);
+}
+
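+/*
+ * Peek at the first pending update in the transaction (trans->updates2) for
+ * this btree at or after the iterator's search key; returns bkey_s_c_null if
+ * there is none.
+ */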
+static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
+{
+ struct bpos pos = btree_iter_search_key(iter);
+ struct btree_trans *trans = iter->trans;
+ struct btree_insert_entry *i;
+
+ trans_for_each_update2(trans, i)
+ if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
+ bkey_cmp(pos, i->k->k.p)) <= 0)
+ break;
+
+ return i < trans->updates2 + trans->nr_updates2 &&
+ iter->btree_id == i->iter->btree_id
+ ? bkey_i_to_s_c(i->k)
+ : bkey_s_c_null;
+}
+
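+/*
+ * Merge the next key in the btree with the next pending update for this
+ * iterator, returning whichever sorts first. Updates past the end of the
+ * current leaf node aren't returned here; the caller advances to the next
+ * leaf first.
+ */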
+static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
- struct bkey_packed *p;
+ struct bkey_s_c k = __btree_iter_peek(iter, l);
+ struct bkey_s_c u = __btree_trans_updates_peek(iter);
+
+ if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
+ return k;
+ if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
+ iter->k = *u.k;
+ return u;
+ }
+ return bkey_s_c_null;
+}
+
+struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+{
struct bkey_s_c k;
+ int ret;
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
- if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
- if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
- return bkey_s_c_null;
+ while (1) {
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- /*
- * XXX: when we just need to relock we should be able to avoid
- * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
- * for that to work
- */
- iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ k = __bch2_btree_iter_peek_with_updates(iter);
- bch2_btree_iter_set_pos(iter,
- btree_type_successor(iter->btree_id, iter->k.p));
+ if (k.k && bkey_deleted(k.k)) {
+ bch2_btree_iter_set_pos(iter,
+ (iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? iter->k.p
+ : bkey_successor(iter->k.p));
+ continue;
+ }
- return bch2_btree_iter_peek(iter);
- }
+ if (likely(k.k))
+ break;
- if (unlikely(bkey_deleted(&iter->k))) {
- /*
- * we're currently pointed at a hole, because previously we were
- * iterating over slots:
- */
- return bch2_btree_iter_peek(iter);
+ if (!btree_iter_set_pos_to_next_leaf(iter))
+ return bkey_s_c_null;
}
- do {
- bch2_btree_node_iter_advance(&l->iter, l->b);
- p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- } while (likely(p) && bkey_whiteout(p));
+ /*
+ * iter->pos should always be equal to the key we just
+ * returned - except extents can straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter->pos = bkey_start_pos(k.k);
+
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
+}
- if (unlikely(!p))
- return btree_iter_set_pos_to_next_leaf(iter)
- ? bch2_btree_iter_peek(iter)
- : bkey_s_c_null;
+struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
+{
+ if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ return bkey_s_c_null;
- k = __btree_iter_unpack(iter, l, &iter->k, p);
+ bch2_btree_iter_set_pos(iter,
+ (iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? iter->k.p
+ : bkey_successor(iter->k.p));
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0);
- iter->pos = bkey_start_pos(k.k);
- return k;
+ return bch2_btree_iter_peek_with_updates(iter);
}
/**
*/
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
+ struct bpos pos = iter->pos;
struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
int ret;
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
- if (iter->uptodate == BTREE_ITER_UPTODATE)
+ if (iter->uptodate == BTREE_ITER_UPTODATE &&
+ !bkey_deleted(&iter->k))
return btree_iter_peek_uptodate(iter);
while (1) {
return bkey_s_c_err(ret);
k = __btree_iter_peek(iter, l);
- if (!k.k ||
- bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ if (!k.k || bkey_cmp(bkey_start_pos(k.k), pos) > 0)
k = __btree_iter_prev(iter, l);
if (likely(k.k))
return bkey_s_c_null;
}
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+ EBUG_ON(bkey_cmp(bkey_start_pos(k.k), pos) > 0);
iter->pos = bkey_start_pos(k.k);
iter->uptodate = BTREE_ITER_UPTODATE;
return k;
*/
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k;
+ struct bpos pos = bkey_start_pos(&iter->k);
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
- if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
- /*
- * XXX: when we just need to relock we should be able to avoid
- * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
- * for that to work
- */
- iter->pos = btree_type_predecessor(iter->btree_id,
- iter->pos);
- iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
-
- return bch2_btree_iter_peek_prev(iter);
- }
+ if (unlikely(!bkey_cmp(pos, POS_MIN)))
+ return bkey_s_c_null;
- k = __btree_iter_prev(iter, l);
- if (unlikely(!k.k))
- return btree_iter_set_pos_to_prev_leaf(iter)
- ? bch2_btree_iter_peek(iter)
- : bkey_s_c_null;
+ bch2_btree_iter_set_pos(iter, bkey_predecessor(pos));
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0);
- iter->pos = bkey_start_pos(k.k);
- return k;
+ return bch2_btree_iter_peek_prev(iter);
}
static inline struct bkey_s_c
struct bkey n;
int ret;
-recheck:
- btree_iter_advance_to_pos(iter, l, -1);
+ /* keys & holes can't span inode numbers: */
+ if (iter->pos.offset == KEY_OFFSET_MAX) {
+ if (iter->pos.inode == KEY_INODE_MAX)
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter, bkey_successor(iter->pos));
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+ }
/*
* iterator is now at the correct position for inserting at iter->pos,
if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
/*
- * If there wasn't actually a hole, want the iterator to be
- * pointed at the key we found:
- *
- * XXX: actually, we shouldn't be changing the iterator here:
- * the iterator needs to be correct for inserting at iter->pos,
- * and there may be whiteouts between iter->pos and what this
- * iterator points at:
+ * We're not setting iter->uptodate because the node iterator
+ * doesn't necessarily point at the key we're returning:
*/
- l->iter = node_iter;
EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0);
- iter->uptodate = BTREE_ITER_UPTODATE;
-
- __bch2_btree_iter_verify(iter, l->b);
+ bch2_btree_iter_verify_level(iter, 0);
return k;
}
- /*
- * If we got to the end of the node, check if we need to traverse to the
- * next node:
- */
- if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- goto recheck;
- }
-
/* hole */
- /* holes can't span inode numbers: */
- if (iter->pos.offset == KEY_OFFSET_MAX) {
- if (iter->pos.inode == KEY_INODE_MAX)
- return bkey_s_c_null;
-
- iter->pos = bkey_successor(iter->pos);
- goto recheck;
- }
-
if (!k.k)
k.k = &l->b->key.k;
iter->k = n;
iter->uptodate = BTREE_ITER_UPTODATE;
- __bch2_btree_iter_verify(iter, l->b);
+ bch2_btree_iter_verify_level(iter, 0);
return (struct bkey_s_c) { &iter->k, NULL };
}
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
int ret;
+ bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+
+ if (iter->uptodate == BTREE_ITER_UPTODATE)
+ return btree_iter_peek_uptodate(iter);
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
if (iter->flags & BTREE_ITER_IS_EXTENTS)
return __bch2_btree_iter_peek_slot_extents(iter);
-recheck:
- while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
- bkey_deleted(k.k) &&
- bkey_cmp(k.k->p, iter->pos) == 0)
- bch2_btree_node_iter_advance(&l->iter, l->b);
+ k = __btree_iter_peek_all(iter, l, &iter->k);
- /*
- * If we got to the end of the node, check if we need to traverse to the
- * next node:
- */
- if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- goto recheck;
- }
+ EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
- if (!k.k ||
- bkey_deleted(k.k) ||
- bkey_cmp(iter->pos, k.k->p)) {
+ if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
/* hole */
bkey_init(&iter->k);
iter->k.p = iter->pos;
}
iter->uptodate = BTREE_ITER_UPTODATE;
- __bch2_btree_iter_verify(iter, l->b);
+ bch2_btree_iter_verify_level(iter, 0);
return k;
}
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
- int ret;
-
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
-
- if (iter->uptodate == BTREE_ITER_UPTODATE)
- return btree_iter_peek_uptodate(iter);
+ if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ return bkey_s_c_null;
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ bch2_btree_iter_set_pos(iter,
+ (iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? iter->k.p
+ : bkey_successor(iter->k.p));
- return __bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(iter);
}
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
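+/*
+ * Return the key for a BTREE_ITER_CACHED iterator: after a successful
+ * traverse, iter->l[0].b points at the bkey_cached entry for iter->pos.
+ */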
+struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
{
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ struct bkey_cached *ck;
+ int ret;
- iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
+ bch2_btree_iter_checks(iter, BTREE_ITER_CACHED);
- if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
- /*
- * XXX: when we just need to relock we should be able to avoid
- * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
- * for that to work
- */
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
-
- return bch2_btree_iter_peek_slot(iter);
- }
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- if (!bkey_deleted(&iter->k))
- bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);
+ ck = (void *) iter->l[0].b;
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+ EBUG_ON(iter->btree_id != ck->key.btree_id ||
+ bkey_cmp(iter->pos, ck->key.pos));
+ BUG_ON(!ck->valid);
- return __bch2_btree_iter_peek_slot(iter);
+ return bkey_i_to_s_c(ck->k);
}
static inline void bch2_btree_iter_init(struct btree_trans *trans,
iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
iter->btree_id = btree_id;
iter->level = 0;
+ iter->min_depth = 0;
iter->locks_want = flags & BTREE_ITER_INTENT ? 1 : 0;
iter->nodes_locked = 0;
iter->nodes_intent_locked = 0;
for (i = 0; i < ARRAY_SIZE(iter->l); i++)
- iter->l[i].b = NULL;
- iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
+ iter->l[i].b = BTREE_ITER_NO_NODE_INIT;
prefetch(c->btree_roots[btree_id].b);
}
int bch2_trans_iter_put(struct btree_trans *trans,
struct btree_iter *iter)
{
- int ret = btree_iter_err(iter);
+ int ret;
+
+ if (IS_ERR_OR_NULL(iter))
+ return 0;
+
+ BUG_ON(trans->iters + iter->idx != iter);
+
+ ret = btree_iter_err(iter);
if (!(trans->iters_touched & (1ULL << iter->idx)) &&
!(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
int bch2_trans_iter_free(struct btree_trans *trans,
struct btree_iter *iter)
{
+ if (IS_ERR_OR_NULL(iter))
+ return 0;
+
trans->iters_touched &= ~(1ULL << iter->idx);
return bch2_trans_iter_put(trans, iter);
static int bch2_trans_realloc_iters(struct btree_trans *trans,
unsigned new_size)
{
- void *new_iters, *new_updates;
+ void *p, *new_iters, *new_updates, *new_updates2;
size_t iters_bytes;
size_t updates_bytes;
iters_bytes = sizeof(struct btree_iter) * new_size;
updates_bytes = sizeof(struct btree_insert_entry) * new_size;
- new_iters = kmalloc(iters_bytes + updates_bytes, GFP_NOFS);
- if (new_iters)
+ p = kmalloc(iters_bytes +
+ updates_bytes +
+ updates_bytes, GFP_NOFS);
+ if (p)
goto success;
- new_iters = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
+ p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
new_size = BTREE_ITER_MAX;
trans->used_mempool = true;
success:
- new_updates = new_iters + iters_bytes;
+ new_iters = p; p += iters_bytes;
+ new_updates = p; p += updates_bytes;
+ new_updates2 = p; p += updates_bytes;
memcpy(new_iters, trans->iters,
sizeof(struct btree_iter) * trans->nr_iters);
memcpy(new_updates, trans->updates,
sizeof(struct btree_insert_entry) * trans->nr_updates);
+ memcpy(new_updates2, trans->updates2,
+ sizeof(struct btree_insert_entry) * trans->nr_updates2);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
memset(trans->iters, POISON_FREE,
trans->iters = new_iters;
trans->updates = new_updates;
+ trans->updates2 = new_updates2;
trans->size = new_size;
if (trans->iters_live) {
struct btree_iter *iter;
trans_for_each_iter(trans, iter) {
- pr_err("iter: btree %s pos %llu:%llu%s%s%s",
+ pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
bch2_btree_ids[iter->btree_id],
iter->pos.inode,
iter->pos.offset,
(trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
(trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
- iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "");
+ iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
+ (void *) iter->ip_allocated);
}
panic("trans iter oveflow\n");
*dst = *src;
dst->idx = idx;
+ dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
for (i = 0; i < BTREE_MAX_DEPTH; i++)
if (btree_node_locked(dst, i))
- six_lock_increment(&dst->l[i].b->lock,
+ six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
iter = best;
}
- iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
- iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+ iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ iter->flags &= ~BTREE_ITER_USER_FLAGS;
+ iter->flags |= flags & BTREE_ITER_USER_FLAGS;
if (iter->flags & BTREE_ITER_INTENT)
bch2_btree_iter_upgrade(iter, 1);
return iter;
}
-struct btree_iter *bch2_trans_get_iter(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bpos pos, unsigned flags)
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bpos pos, unsigned flags)
{
struct btree_iter *iter =
__btree_trans_get_iter(trans, btree_id, pos, flags);
if (!IS_ERR(iter))
- bch2_btree_iter_set_pos(iter, pos);
+ __bch2_btree_iter_set_pos(iter, pos,
+ btree_node_type_is_extents(btree_id));
return iter;
}
iter->locks_want = locks_want;
iter->level = depth;
+ iter->min_depth = depth;
for (i = 0; i < ARRAY_SIZE(iter->l); i++)
iter->l[i].b = NULL;
return iter;
}
-struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
struct btree_iter *src)
{
struct btree_iter *iter;
trans->iters_live |= 1ULL << iter->idx;
/*
- * Don't mark it as touched, we don't need to preserve this iter since
- * it's cheap to copy it again:
+ * We don't need to preserve this iter since it's cheap to copy it
+ * again - this will cause trans_iter_put() to free it right away:
*/
trans->iters_touched &= ~(1ULL << iter->idx);
bch2_trans_unlink_iters(trans);
- if (flags & TRANS_RESET_ITERS)
- trans->iters_live = 0;
-
trans->iters_touched &= trans->iters_live;
trans->need_reset = 0;
trans->nr_updates = 0;
+ trans->nr_updates2 = 0;
+ trans->mem_top = 0;
- if (flags & TRANS_RESET_MEM)
- trans->mem_top = 0;
+ trans->extra_journal_entries = NULL;
+ trans->extra_journal_entry_u64s = 0;
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
{
memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
+ /*
+ * reallocating iterators currently completely breaks
+ * bch2_trans_iter_put():
+ */
+ expected_nr_iters = BTREE_ITER_MAX;
+
trans->c = c;
trans->ip = _RET_IP_;
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
+ trans->updates2 = trans->updates2_onstack;
trans->fs_usage_deltas = NULL;
if (expected_nr_iters > trans->size)
if (expected_mem_bytes)
bch2_trans_preload_mem(trans, expected_mem_bytes);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->pid = current->pid;
+ mutex_lock(&c->btree_trans_lock);
+ list_add(&trans->list, &c->btree_trans_list);
+ mutex_unlock(&c->btree_trans_lock);
+#endif
}
int bch2_trans_exit(struct btree_trans *trans)
{
bch2_trans_unlock(trans);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ mutex_lock(&trans->c->btree_trans_lock);
+ list_del(&trans->list);
+ mutex_unlock(&trans->c->btree_trans_lock);
+#endif
+
+ bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
+
kfree(trans->fs_usage_deltas);
kfree(trans->mem);
if (trans->used_mempool)
return trans->error ? -EIO : 0;
}
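+/*
+ * Debug helper: for each btree_trans on c->btree_trans_list, print the
+ * iterators that hold node locks and, if the transaction is currently
+ * blocked taking a lock, the node it's waiting on.
+ */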
+void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct btree_trans *trans;
+ struct btree_iter *iter;
+ struct btree *b;
+ unsigned l;
+
+ mutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip);
+
+ trans_for_each_iter(trans, iter) {
+ if (!iter->nodes_locked)
+ continue;
+
+ pr_buf(out, " iter %u %s:",
+ iter->idx,
+ bch2_btree_ids[iter->btree_id]);
+ bch2_bpos_to_text(out, iter->pos);
+ pr_buf(out, "\n");
+
+ for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+ if (btree_node_locked(iter, l)) {
+ b = iter->l[l].b;
+
+ pr_buf(out, " %px %s l=%u ",
+ b, btree_node_intent_locked(iter, l) ? "i" : "r", l);
+ bch2_bpos_to_text(out, b->key.k.p);
+ pr_buf(out, "\n");
+ }
+ }
+ }
+
+ b = READ_ONCE(trans->locking);
+ if (b) {
+ pr_buf(out, " locking iter %u l=%u %s:",
+ trans->locking_iter_idx,
+ trans->locking_level,
+ bch2_btree_ids[trans->locking_btree_id]);
+ bch2_bpos_to_text(out, trans->locking_pos);
+
+ pr_buf(out, " node %px l=%u %s:",
+ b, b->c.level,
+ bch2_btree_ids[b->c.btree_id]);
+ bch2_bpos_to_text(out, b->key.k.p);
+ pr_buf(out, "\n");
+ }
+ }
+ mutex_unlock(&c->btree_trans_lock);
+#endif
+}
+
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
mempool_exit(&c->btree_iters_pool);
{
unsigned nr = BTREE_ITER_MAX;
+ INIT_LIST_HEAD(&c->btree_trans_list);
+ mutex_init(&c->btree_trans_lock);
+
return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
sizeof(struct btree_iter) * nr +
sizeof(struct btree_insert_entry) * nr +
- sizeof(u8) * nr);
+ sizeof(struct btree_insert_entry) * nr);
}