X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_iter.c;h=bdbb90014b5c0a38197bee08e49145e707ba6468;hb=1388212eb85c1a904e7dcde539ebb0c2d125fcc9;hp=f4f73bfc85470f036d5c847fc78c580828b3007c;hpb=85ee972555948337bb1a58f0702a4da95db6758f;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index f4f73bf..bdbb900 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -1,149 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "bkey_methods.h" +#include "bkey_buf.h" #include "btree_cache.h" #include "btree_iter.h" +#include "btree_key_cache.h" #include "btree_locking.h" +#include "btree_update.h" #include "debug.h" +#include "error.h" #include "extents.h" +#include "journal.h" +#include "replicas.h" +#include "subvolume.h" #include #include -#define BTREE_ITER_NOT_END ((struct btree *) 1) +static void btree_trans_verify_sorted(struct btree_trans *); +static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int); -static inline bool is_btree_node(struct btree_iter *iter, unsigned l) +static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *); +static inline void btree_path_list_add(struct btree_trans *, struct btree_path *, + struct btree_path *); + +static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter) { - return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END; +#ifdef CONFIG_BCACHEFS_DEBUG + return iter->ip_allocated; +#else + return 0; +#endif } -/* Btree node locking: */ +static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *); /* - * Updates the saved lock sequence number, so that bch2_btree_node_relock() will - * succeed: + * Unlocks before scheduling + * Note: does not revalidate iterator */ -void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter) +static inline int bch2_trans_cond_resched(struct btree_trans *trans) +{ + if (need_resched() || race_fault()) { + bch2_trans_unlock(trans); + schedule(); + return bch2_trans_relock(trans) ? 0 : -EINTR; + } else { + return 0; + } +} + +static inline int __btree_path_cmp(const struct btree_path *l, + enum btree_id r_btree_id, + bool r_cached, + struct bpos r_pos, + unsigned r_level) +{ + return cmp_int(l->btree_id, r_btree_id) ?: + cmp_int((int) l->cached, (int) r_cached) ?: + bpos_cmp(l->pos, r_pos) ?: + -cmp_int(l->level, r_level); +} + +static inline int btree_path_cmp(const struct btree_path *l, + const struct btree_path *r) +{ + return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level); +} + +static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p) { - struct btree_iter *linked; + /* Are we iterating over keys in all snapshots? */ + if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) { + p = bpos_successor(p); + } else { + p = bpos_nosnap_successor(p); + p.snapshot = iter->snapshot; + } - EBUG_ON(iter->nodes[b->level] != b); - EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq); + return p; +} - for_each_linked_btree_node(iter, b, linked) - linked->lock_seq[b->level] += 2; +static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p) +{ + /* Are we iterating over keys in all snapshots? 
*/ + if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) { + p = bpos_predecessor(p); + } else { + p = bpos_nosnap_predecessor(p); + p.snapshot = iter->snapshot; + } - iter->lock_seq[b->level] += 2; + return p; +} - six_unlock_write(&b->lock); +static inline bool is_btree_node(struct btree_path *path, unsigned l) +{ + return l < BTREE_MAX_DEPTH && + (unsigned long) path->l[l].b >= 128; } -void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter) +static inline struct bpos btree_iter_search_key(struct btree_iter *iter) { - struct btree_iter *linked; - unsigned readers = 0; + struct bpos pos = iter->pos; - EBUG_ON(iter->nodes[b->level] != b); - EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq); + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + bkey_cmp(pos, POS_MAX)) + pos = bkey_successor(iter, pos); + return pos; +} - if (six_trylock_write(&b->lock)) - return; +static inline bool btree_path_pos_before_node(struct btree_path *path, + struct btree *b) +{ + return bpos_cmp(path->pos, b->data->min_key) < 0; +} + +static inline bool btree_path_pos_after_node(struct btree_path *path, + struct btree *b) +{ + return bpos_cmp(b->key.k.p, path->pos) < 0; +} + +static inline bool btree_path_pos_in_node(struct btree_path *path, + struct btree *b) +{ + return path->btree_id == b->c.btree_id && + !btree_path_pos_before_node(path, b) && + !btree_path_pos_after_node(path, b); +} + +/* Btree node locking: */ + +void bch2_btree_node_unlock_write(struct btree_trans *trans, + struct btree_path *path, struct btree *b) +{ + bch2_btree_node_unlock_write_inlined(trans, path, b); +} + +void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b) +{ + struct btree_path *linked; + unsigned readers = 0; - for_each_linked_btree_iter(iter, linked) - if (linked->nodes[b->level] == b && - btree_node_read_locked(linked, b->level)) + trans_for_each_path(trans, linked) + if (linked->l[b->c.level].b == b && + btree_node_read_locked(linked, b->c.level)) readers++; - if (likely(!readers)) { - six_lock_write(&b->lock); - } else { - /* - * Must drop our read locks before calling six_lock_write() - - * six_unlock() won't do wakeups until the reader count - * goes to 0, and it's safe because we have the node intent - * locked: - */ + /* + * Must drop our read locks before calling six_lock_write() - + * six_unlock() won't do wakeups until the reader count + * goes to 0, and it's safe because we have the node intent + * locked: + */ + if (!b->c.lock.readers) atomic64_sub(__SIX_VAL(read_lock, readers), - &b->lock.state.counter); - six_lock_write(&b->lock); + &b->c.lock.state.counter); + else + this_cpu_sub(*b->c.lock.readers, readers); + + btree_node_lock_type(trans->c, b, SIX_LOCK_write); + + if (!b->c.lock.readers) atomic64_add(__SIX_VAL(read_lock, readers), - &b->lock.state.counter); - } + &b->c.lock.state.counter); + else + this_cpu_add(*b->c.lock.readers, readers); } -bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level) +bool __bch2_btree_node_relock(struct btree_trans *trans, + struct btree_path *path, unsigned level) { - struct btree_iter *linked; - struct btree *b = iter->nodes[level]; - enum btree_node_locked_type want = btree_lock_want(iter, level); - enum btree_node_locked_type have = btree_node_locked_type(iter, level); + struct btree *b = btree_path_node(path, level); + int want = __btree_lock_want(path, level); + + if (!is_btree_node(path, level)) + return false; - if (want == have) + if (race_fault()) + return false; + + if (six_relock_type(&b->c.lock, want, 
path->l[level].lock_seq) || + (btree_node_lock_seq_matches(path, b, level) && + btree_node_lock_increment(trans, b, level, want))) { + mark_btree_node_locked(path, level, want); return true; + } else { + return false; + } +} + +bool bch2_btree_node_upgrade(struct btree_trans *trans, + struct btree_path *path, unsigned level) +{ + struct btree *b = path->l[level].b; - if (!is_btree_node(iter, level)) + if (!is_btree_node(path, level)) return false; + switch (btree_lock_want(path, level)) { + case BTREE_NODE_UNLOCKED: + BUG_ON(btree_node_locked(path, level)); + return true; + case BTREE_NODE_READ_LOCKED: + BUG_ON(btree_node_intent_locked(path, level)); + return bch2_btree_node_relock(trans, path, level); + case BTREE_NODE_INTENT_LOCKED: + break; + } + + if (btree_node_intent_locked(path, level)) + return true; + if (race_fault()) return false; - if (have != BTREE_NODE_UNLOCKED - ? six_trylock_convert(&b->lock, have, want) - : six_relock_type(&b->lock, want, iter->lock_seq[level])) + if (btree_node_locked(path, level) + ? six_lock_tryupgrade(&b->c.lock) + : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) goto success; - for_each_linked_btree_iter(iter, linked) - if (linked->nodes[level] == b && - btree_node_locked_type(linked, level) == want && - iter->lock_seq[level] == b->lock.state.seq) { - btree_node_unlock(iter, level); - six_lock_increment(&b->lock, want); - goto success; - } + if (btree_node_lock_seq_matches(path, b, level) && + btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) { + btree_node_unlock(path, level); + goto success; + } return false; success: - mark_btree_node_unlocked(iter, level); - mark_btree_node_locked(iter, level, want); + mark_btree_node_intent_locked(path, level); return true; } -/* Slowpath: */ -bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, - unsigned level, - struct btree_iter *iter, - enum six_lock_type type) +static inline bool btree_path_get_locks(struct btree_trans *trans, + struct btree_path *path, + bool upgrade, unsigned long trace_ip) { - struct btree_iter *linked; + unsigned l = path->level; + int fail_idx = -1; - /* Can't have children locked before ancestors: */ - EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked)); + do { + if (!btree_path_node(path, l)) + break; - /* - * Can't hold any read locks while we block taking an intent lock - see - * below for reasoning, and we should have already dropped any read - * locks in the current iterator - */ - EBUG_ON(type == SIX_LOCK_intent && - iter->nodes_locked != iter->nodes_intent_locked); + if (!(upgrade + ? 
bch2_btree_node_upgrade(trans, path, l) + : bch2_btree_node_relock(trans, path, l))) + fail_idx = l; - for_each_linked_btree_iter(iter, linked) - if (linked->nodes[level] == b && - btree_node_locked_type(linked, level) == type) { - six_lock_increment(&b->lock, type); - return true; - } + l++; + } while (l < path->locks_want); /* - * Must lock btree nodes in key order - this case hapens when locking - * the prev sibling in btree node merging: + * When we fail to get a lock, we have to ensure that any child nodes + * can't be relocked so bch2_btree_path_traverse has to walk back up to + * the node that we failed to relock: */ - if (iter->nodes_locked && - __ffs(iter->nodes_locked) == level && - __btree_iter_cmp(iter->btree_id, pos, iter)) - return false; + if (fail_idx >= 0) { + __bch2_btree_path_unlock(path); + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + + do { + path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS; + --fail_idx; + } while (fail_idx >= 0); + } + + if (path->uptodate == BTREE_ITER_NEED_RELOCK) + path->uptodate = BTREE_ITER_UPTODATE; + + bch2_trans_verify_locks(trans); + + return path->uptodate < BTREE_ITER_NEED_RELOCK; +} - for_each_linked_btree_iter(iter, linked) { +static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b, + bool cached) +{ + return !cached + ? container_of(_b, struct btree, c)->key.k.p + : container_of(_b, struct bkey_cached, c)->key.pos; +} + +/* Slowpath: */ +bool __bch2_btree_node_lock(struct btree_trans *trans, + struct btree_path *path, + struct btree *b, + struct bpos pos, unsigned level, + enum six_lock_type type, + six_lock_should_sleep_fn should_sleep_fn, void *p, + unsigned long ip) +{ + struct btree_path *linked, *deadlock_path = NULL; + u64 start_time = local_clock(); + unsigned reason = 9; + bool ret; + + /* Check if it's safe to block: */ + trans_for_each_path(trans, linked) { if (!linked->nodes_locked) continue; @@ -161,1069 +317,2663 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, */ if (type == SIX_LOCK_intent && linked->nodes_locked != linked->nodes_intent_locked) { - linked->locks_want = max_t(unsigned, - linked->locks_want, - iter->locks_want); - return false; + deadlock_path = linked; + reason = 1; } - /* We have to lock btree nodes in key order: */ - if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0) - return false; + if (linked->btree_id != path->btree_id) { + if (linked->btree_id > path->btree_id) { + deadlock_path = linked; + reason = 3; + } + continue; + } + + /* + * Within the same btree, cached paths come before non + * cached paths: + */ + if (linked->cached != path->cached) { + if (path->cached) { + deadlock_path = linked; + reason = 4; + } + continue; + } /* * Interior nodes must be locked before their descendants: if - * another iterator has possible descendants locked of the node + * another path has possible descendants locked of the node * we're about to lock, it must have the ancestors locked too: */ - if (linked->btree_id == iter->btree_id && - level > __fls(linked->nodes_locked)) { - linked->locks_want = max_t(unsigned, - linked->locks_want, - iter->locks_want); - return false; + if (level > __fls(linked->nodes_locked)) { + deadlock_path = linked; + reason = 5; + } + + /* Must lock btree nodes in key order: */ + if (btree_node_locked(linked, level) && + bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b, + linked->cached)) <= 0) { + deadlock_path = linked; + reason = 7; + BUG_ON(trans->in_traverse_all); } } - six_lock_type(&b->lock, type); - return true; + if 
(unlikely(deadlock_path)) { + trace_trans_restart_would_deadlock(trans->ip, ip, + trans->in_traverse_all, reason, + deadlock_path->btree_id, + deadlock_path->cached, + &deadlock_path->pos, + path->btree_id, + path->cached, + &pos); + btree_trans_restart(trans); + return false; + } + + if (six_trylock_type(&b->c.lock, type)) + return true; + + trans->locking_path_idx = path->idx; + trans->locking_pos = pos; + trans->locking_btree_id = path->btree_id; + trans->locking_level = level; + trans->locking = b; + + ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0; + + trans->locking = NULL; + + if (ret) + bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)], + start_time); + return ret; } /* Btree iterator locking: */ +#ifdef CONFIG_BCACHEFS_DEBUG -static void btree_iter_drop_extra_locks(struct btree_iter *iter) +static void bch2_btree_path_verify_locks(struct btree_path *path) { unsigned l; - while (iter->nodes_locked && - (l = __fls(iter->nodes_locked)) > iter->locks_want) { - if (!btree_node_locked(iter, l)) - panic("l %u nodes_locked %u\n", l, iter->nodes_locked); - - if (l > iter->level) { - btree_node_unlock(iter, l); - } else if (btree_node_intent_locked(iter, l)) { - six_lock_downgrade(&iter->nodes[l]->lock); - iter->nodes_intent_locked ^= 1 << l; - } + if (!path->nodes_locked) { + BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && + btree_path_node(path, path->level)); + return; } + + for (l = 0; btree_path_node(path, l); l++) + BUG_ON(btree_lock_want(path, l) != + btree_node_locked_type(path, l)); } -bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter, - unsigned new_locks_want) +void bch2_trans_verify_locks(struct btree_trans *trans) { - struct btree_iter *linked; - unsigned l; + struct btree_path *path; - /* Drop locks we don't want anymore: */ - if (new_locks_want < iter->locks_want) - for_each_linked_btree_iter(iter, linked) - if (linked->locks_want > new_locks_want) { - linked->locks_want = max_t(unsigned, 1, - new_locks_want); - btree_iter_drop_extra_locks(linked); - } + trans_for_each_path(trans, path) + bch2_btree_path_verify_locks(path); +} +#else +static inline void bch2_btree_path_verify_locks(struct btree_path *path) {} +#endif - iter->locks_want = new_locks_want; - btree_iter_drop_extra_locks(iter); +/* Btree path locking: */ - for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++) - if (!bch2_btree_node_relock(iter, l)) - goto fail; +/* + * Only for btree_cache.c - only relocks intent locks + */ +bool bch2_btree_path_relock_intent(struct btree_trans *trans, + struct btree_path *path) +{ + unsigned l; + + for (l = path->level; + l < path->locks_want && btree_path_node(path, l); + l++) { + if (!bch2_btree_node_relock(trans, path, l)) { + __bch2_btree_path_unlock(path); + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_trans_restart(trans); + return false; + } + } return true; -fail: - /* - * Just an optimization: ancestor nodes must be locked before child - * nodes, so set locks_want on iterators that might lock ancestors - * before us to avoid getting -EINTR later: - */ - for_each_linked_btree_iter(iter, linked) - if (linked->btree_id == iter->btree_id && - btree_iter_cmp(linked, iter) <= 0) - linked->locks_want = max_t(unsigned, linked->locks_want, - new_locks_want); - return false; } -static void __bch2_btree_iter_unlock(struct btree_iter *iter) +__flatten +static bool bch2_btree_path_relock(struct btree_trans *trans, + struct btree_path *path, unsigned long trace_ip) { - iter->flags &= ~BTREE_ITER_UPTODATE; + bool 
ret = btree_path_get_locks(trans, path, false, trace_ip); - while (iter->nodes_locked) - btree_node_unlock(iter, __ffs(iter->nodes_locked)); + if (!ret) + btree_trans_restart(trans); + return ret; } -int bch2_btree_iter_unlock(struct btree_iter *iter) +bool __bch2_btree_path_upgrade(struct btree_trans *trans, + struct btree_path *path, + unsigned new_locks_want) { - struct btree_iter *linked; + struct btree_path *linked; - for_each_linked_btree_iter(iter, linked) - __bch2_btree_iter_unlock(linked); - __bch2_btree_iter_unlock(iter); + EBUG_ON(path->locks_want >= new_locks_want); - return iter->flags & BTREE_ITER_ERROR ? -EIO : 0; -} + path->locks_want = new_locks_want; -/* Btree iterator: */ + if (btree_path_get_locks(trans, path, true, _THIS_IP_)) + return true; -#ifdef CONFIG_BCACHEFS_DEBUG + /* + * XXX: this is ugly - we'd prefer to not be mucking with other + * iterators in the btree_trans here. + * + * On failure to upgrade the iterator, setting iter->locks_want and + * calling get_locks() is sufficient to make bch2_btree_path_traverse() + * get the locks we want on transaction restart. + * + * But if this iterator was a clone, on transaction restart what we did + * to this iterator isn't going to be preserved. + * + * Possibly we could add an iterator field for the parent iterator when + * an iterator is a copy - for now, we'll just upgrade any other + * iterators with the same btree id. + * + * The code below used to be needed to ensure ancestor nodes get locked + * before interior nodes - now that's handled by + * bch2_btree_path_traverse_all(). + */ + trans_for_each_path(trans, linked) + if (linked != path && + linked->cached == path->cached && + linked->btree_id == path->btree_id && + linked->locks_want < new_locks_want) { + linked->locks_want = new_locks_want; + btree_path_get_locks(trans, linked, true, _THIS_IP_); + } + + return false; +} -static void __bch2_btree_iter_verify(struct btree_iter *iter, - struct btree *b) +void __bch2_btree_path_downgrade(struct btree_path *path, + unsigned new_locks_want) { - struct btree_node_iter *node_iter = &iter->node_iters[b->level]; - struct btree_node_iter tmp = *node_iter; - struct bkey_packed *k; + unsigned l; - bch2_btree_node_iter_verify(node_iter, b); + EBUG_ON(path->locks_want < new_locks_want); - /* - * For interior nodes, the iterator will have skipped past - * deleted keys: - */ - k = b->level - ? 
bch2_btree_node_iter_prev(&tmp, b) - : bch2_btree_node_iter_prev_all(&tmp, b); - if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k, - iter->flags & BTREE_ITER_IS_EXTENTS)) { - char buf[100]; - struct bkey uk = bkey_unpack_key(b, k); + path->locks_want = new_locks_want; - bch2_bkey_to_text(buf, sizeof(buf), &uk); - panic("prev key should be before after pos:\n%s\n%llu:%llu\n", - buf, iter->pos.inode, iter->pos.offset); + while (path->nodes_locked && + (l = __fls(path->nodes_locked)) >= path->locks_want) { + if (l > path->level) { + btree_node_unlock(path, l); + } else { + if (btree_node_intent_locked(path, l)) { + six_lock_downgrade(&path->l[l].b->c.lock); + path->nodes_intent_locked ^= 1 << l; + } + break; + } } - k = bch2_btree_node_iter_peek_all(node_iter, b); - if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k, - iter->flags & BTREE_ITER_IS_EXTENTS)) { - char buf[100]; - struct bkey uk = bkey_unpack_key(b, k); + bch2_btree_path_verify_locks(path); +} - bch2_bkey_to_text(buf, sizeof(buf), &uk); - panic("next key should be before iter pos:\n%llu:%llu\n%s\n", - iter->pos.inode, iter->pos.offset, buf); - } +void bch2_trans_downgrade(struct btree_trans *trans) +{ + struct btree_path *path; + + trans_for_each_path(trans, path) + bch2_btree_path_downgrade(path); } -void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b) +/* Btree transaction locking: */ + +bool bch2_trans_relock(struct btree_trans *trans) { - struct btree_iter *linked; + struct btree_path *path; - if (iter->nodes[b->level] == b) - __bch2_btree_iter_verify(iter, b); + if (unlikely(trans->restarted)) + return false; - for_each_linked_btree_node(iter, b, linked) - __bch2_btree_iter_verify(iter, b); + trans_for_each_path(trans, path) + if (path->should_be_locked && + !bch2_btree_path_relock(trans, path, _RET_IP_)) { + trace_trans_restart_relock(trans->ip, _RET_IP_, + path->btree_id, &path->pos); + BUG_ON(!trans->restarted); + return false; + } + return true; } -#endif +void bch2_trans_unlock(struct btree_trans *trans) +{ + struct btree_path *path; -static void __bch2_btree_node_iter_fix(struct btree_iter *iter, - struct btree *b, - struct btree_node_iter *node_iter, - struct bset_tree *t, - struct bkey_packed *where, - unsigned clobber_u64s, - unsigned new_u64s) + trans_for_each_path(trans, path) + __bch2_btree_path_unlock(path); + + BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key)); +} + +/* Btree iterator: */ + +#ifdef CONFIG_BCACHEFS_DEBUG + +static void bch2_btree_path_verify_cached(struct btree_trans *trans, + struct btree_path *path) { - const struct bkey_packed *end = btree_bkey_last(b, t); - struct btree_node_iter_set *set; - unsigned offset = __btree_node_key_to_offset(b, where); - int shift = new_u64s - clobber_u64s; - unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift; + struct bkey_cached *ck; + bool locked = btree_node_locked(path, 0); - btree_node_iter_for_each(node_iter, set) - if (set->end == old_end) - goto found; + if (!bch2_btree_node_relock(trans, path, 0)) + return; - /* didn't find the bset in the iterator - might have to readd it: */ - if (new_u64s && - btree_iter_pos_cmp_packed(b, &iter->pos, where, - iter->flags & BTREE_ITER_IS_EXTENTS)) - bch2_btree_node_iter_push(node_iter, b, where, end); - return; -found: - set->end = (int) set->end + shift; + ck = (void *) path->l[0].b; + BUG_ON(ck->key.btree_id != path->btree_id || + bkey_cmp(ck->key.pos, path->pos)); - /* Iterator hasn't gotten to the key that changed yet: */ - if (set->k < offset) + if (!locked) + 
btree_node_unlock(path, 0); +} + +static void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned level) +{ + struct btree_path_level *l; + struct btree_node_iter tmp; + bool locked; + struct bkey_packed *p, *k; + char buf1[100], buf2[100], buf3[100]; + const char *msg; + + if (!bch2_debug_check_iterators) return; - if (new_u64s && - btree_iter_pos_cmp_packed(b, &iter->pos, where, - iter->flags & BTREE_ITER_IS_EXTENTS)) { - set->k = offset; - bch2_btree_node_iter_sort(node_iter, b); - } else if (set->k < offset + clobber_u64s) { - set->k = offset + new_u64s; - if (set->k == set->end) - *set = node_iter->data[--node_iter->used]; - bch2_btree_node_iter_sort(node_iter, b); - } else { - set->k = (int) set->k + shift; + l = &path->l[level]; + tmp = l->iter; + locked = btree_node_locked(path, level); + + if (path->cached) { + if (!level) + bch2_btree_path_verify_cached(trans, path); + return; } + if (!btree_path_node(path, level)) + return; + + if (!bch2_btree_node_relock(trans, path, level)) + return; + + BUG_ON(!btree_path_pos_in_node(path, l->b)); + + bch2_btree_node_iter_verify(&l->iter, l->b); + /* - * Interior nodes are special because iterators for interior nodes don't - * obey the usual invariants regarding the iterator position: - * - * We may have whiteouts that compare greater than the iterator - * position, and logically should be in the iterator, but that we - * skipped past to find the first live key greater than the iterator - * position. This becomes an issue when we insert a new key that is - * greater than the current iterator position, but smaller than the - * whiteouts we've already skipped past - this happens in the course of - * a btree split. - * - * We have to rewind the iterator past to before those whiteouts here, - * else bkey_node_iter_prev() is not going to work and who knows what - * else would happen. And we have to do it manually, because here we've - * already done the insert and the iterator is currently inconsistent: - * - * We've got multiple competing invariants, here - we have to be careful - * about rewinding iterators for interior nodes, because they should - * always point to the key for the child node the btree iterator points - * to. + * For interior nodes, the iterator will have skipped past deleted keys: */ - if (b->level && new_u64s && !bkey_deleted(where) && - btree_iter_pos_cmp_packed(b, &iter->pos, where, - iter->flags & BTREE_ITER_IS_EXTENTS)) { - struct bset_tree *t; - struct bkey_packed *k; - - for_each_bset(b, t) { - if (bch2_bkey_to_bset(b, where) == t) - continue; + p = level + ? 
bch2_btree_node_iter_prev(&tmp, l->b) + : bch2_btree_node_iter_prev_all(&tmp, l->b); + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + + if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) { + msg = "before"; + goto err; + } - k = bch2_bkey_prev_all(b, t, - bch2_btree_node_iter_bset_pos(node_iter, b, t)); - if (k && - __btree_node_iter_cmp(node_iter, b, - k, where) > 0) { - struct btree_node_iter_set *set; - unsigned offset = - __btree_node_key_to_offset(b, bkey_next(k)); - - btree_node_iter_for_each(node_iter, set) - if (set->k == offset) { - set->k = __btree_node_key_to_offset(b, k); - bch2_btree_node_iter_sort(node_iter, b); - goto next_bset; - } - - bch2_btree_node_iter_push(node_iter, b, k, - btree_bkey_last(b, t)); - } -next_bset: - t = t; - } + if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { + msg = "after"; + goto err; } -} -void bch2_btree_node_iter_fix(struct btree_iter *iter, - struct btree *b, - struct btree_node_iter *node_iter, - struct bset_tree *t, - struct bkey_packed *where, - unsigned clobber_u64s, - unsigned new_u64s) -{ - struct btree_iter *linked; + if (!locked) + btree_node_unlock(path, level); + return; +err: + strcpy(buf2, "(none)"); + strcpy(buf3, "(none)"); - if (node_iter != &iter->node_iters[b->level]) - __bch2_btree_node_iter_fix(iter, b, node_iter, t, - where, clobber_u64s, new_u64s); + bch2_bpos_to_text(&PBUF(buf1), path->pos); - if (iter->nodes[b->level] == b) - __bch2_btree_node_iter_fix(iter, b, - &iter->node_iters[b->level], t, - where, clobber_u64s, new_u64s); + if (p) { + struct bkey uk = bkey_unpack_key(l->b, p); + bch2_bkey_to_text(&PBUF(buf2), &uk); + } - for_each_linked_btree_node(iter, b, linked) - __bch2_btree_node_iter_fix(linked, b, - &linked->node_iters[b->level], t, - where, clobber_u64s, new_u64s); + if (k) { + struct bkey uk = bkey_unpack_key(l->b, k); + bch2_bkey_to_text(&PBUF(buf3), &uk); + } - /* interior node iterators are... special... */ - if (!b->level) - bch2_btree_iter_verify(iter, b); + panic("path should be %s key at level %u:\n" + "path pos %s\n" + "prev key %s\n" + "cur key %s\n", + msg, level, buf1, buf2, buf3); } -/* peek_all() doesn't skip deleted keys */ -static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) +static void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) { - struct btree *b = iter->nodes[iter->level]; - struct bkey_packed *k = - bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b); - struct bkey_s_c ret; + struct bch_fs *c = trans->c; + unsigned i; - EBUG_ON(!btree_node_locked(iter, iter->level)); + EBUG_ON(path->btree_id >= BTREE_ID_NR); - if (!k) - return bkey_s_c_null; + for (i = 0; i < (!path->cached ? 
BTREE_MAX_DEPTH : 1); i++) { + if (!path->l[i].b) { + BUG_ON(!path->cached && + c->btree_roots[path->btree_id].b->c.level > i); + break; + } - ret = bkey_disassemble(b, k, &iter->k); + bch2_btree_path_verify_level(trans, path, i); + } - if (debug_check_bkeys(iter->c)) - bch2_bkey_debugcheck(iter->c, b, ret); + bch2_btree_path_verify_locks(path); +} - return ret; +void bch2_trans_verify_paths(struct btree_trans *trans) +{ + struct btree_path *path; + + trans_for_each_path(trans, path) + bch2_btree_path_verify(trans, path); } -static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) +static void bch2_btree_iter_verify(struct btree_iter *iter) { - struct btree *b = iter->nodes[iter->level]; - struct bkey_packed *k = - bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b); - struct bkey_s_c ret; + struct btree_trans *trans = iter->trans; - EBUG_ON(!btree_node_locked(iter, iter->level)); + BUG_ON(iter->btree_id >= BTREE_ID_NR); - if (!k) - return bkey_s_c_null; + BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached); - ret = bkey_disassemble(b, k, &iter->k); + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); - if (debug_check_bkeys(iter->c)) - bch2_bkey_debugcheck(iter->c, b, ret); + BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); - return ret; + BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + !btree_type_has_snapshots(iter->btree_id)); + + bch2_btree_path_verify(trans, iter->path); } -static inline void __btree_iter_advance(struct btree_iter *iter) +static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) { - bch2_btree_node_iter_advance(&iter->node_iters[iter->level], - iter->nodes[iter->level]); + BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + !iter->pos.snapshot); + + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); + + BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 || + bkey_cmp(iter->pos, iter->k.p) > 0); } -/* - * Verify that iterator for parent node points to child node: - */ -static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) +static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { - bool parent_locked; - struct bkey_packed *k; + struct btree_trans *trans = iter->trans; + struct btree_iter copy; + struct bkey_s_c prev; + int ret = 0; - if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) || - !iter->nodes[b->level + 1]) - return; + if (!bch2_debug_check_iterators) + return 0; - parent_locked = btree_node_locked(iter, b->level + 1); + if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) + return 0; - if (!bch2_btree_node_relock(iter, b->level + 1)) + if (bkey_err(k) || !k.k) + return 0; + + BUG_ON(!bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)); + + bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos, + BTREE_ITER_ALL_SNAPSHOTS); + prev = bch2_btree_iter_prev(©); + if (!prev.k) + goto out; + + ret = bkey_err(prev); + if (ret) + goto out; + + if (!bkey_cmp(prev.k->p, k.k->p) && + bch2_snapshot_is_ancestor(trans->c, iter->snapshot, + prev.k->p.snapshot) > 0) { + char buf1[100], buf2[200]; + + bch2_bkey_to_text(&PBUF(buf1), k.k); + bch2_bkey_to_text(&PBUF(buf2), prev.k); + + panic("iter snap %u\n" + "k %s\n" + "prev %s\n", + iter->snapshot, + buf1, buf2); + } +out: + bch2_trans_iter_exit(trans, ©); + return ret; +} + +void bch2_assert_pos_locked(struct btree_trans 
*trans, enum btree_id id, + struct bpos pos, bool key_cache) +{ + struct btree_path *path; + unsigned idx; + char buf[100]; + + trans_for_each_path_inorder(trans, path, idx) { + int cmp = cmp_int(path->btree_id, id) ?: + cmp_int(path->cached, key_cache); + + if (cmp > 0) + break; + if (cmp < 0) + continue; + + if (!(path->nodes_locked & 1) || + !path->should_be_locked) + continue; + + if (!key_cache) { + if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 && + bkey_cmp(pos, path->l[0].b->key.k.p) <= 0) + return; + } else { + if (!bkey_cmp(pos, path->pos)) + return; + } + } + + bch2_dump_trans_paths_updates(trans); + panic("not locked: %s %s%s\n", + bch2_btree_ids[id], + (bch2_bpos_to_text(&PBUF(buf), pos), buf), + key_cache ? " cached" : ""); +} + +#else + +static inline void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned l) {} +static inline void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) {} +static inline void bch2_btree_iter_verify(struct btree_iter *iter) {} +static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {} +static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; } + +#endif + +/* Btree path: fixups after btree updates */ + +static void btree_node_iter_set_set_pos(struct btree_node_iter *iter, + struct btree *b, + struct bset_tree *t, + struct bkey_packed *k) +{ + struct btree_node_iter_set *set; + + btree_node_iter_for_each(iter, set) + if (set->end == t->end_offset) { + set->k = __btree_node_key_to_offset(b, k); + bch2_btree_node_iter_sort(iter, b); + return; + } + + bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t)); +} + +static void __bch2_btree_path_fix_key_modified(struct btree_path *path, + struct btree *b, + struct bkey_packed *where) +{ + struct btree_path_level *l = &path->l[b->c.level]; + + if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b)) + return; + + if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0) + bch2_btree_node_iter_advance(&l->iter, l->b); +} + +void bch2_btree_path_fix_key_modified(struct btree_trans *trans, + struct btree *b, + struct bkey_packed *where) +{ + struct btree_path *path; + + trans_for_each_path_with_node(trans, b, path) { + __bch2_btree_path_fix_key_modified(path, b, where); + bch2_btree_path_verify_level(trans, path, b->c.level); + } +} + +static void __bch2_btree_node_iter_fix(struct btree_path *path, + struct btree *b, + struct btree_node_iter *node_iter, + struct bset_tree *t, + struct bkey_packed *where, + unsigned clobber_u64s, + unsigned new_u64s) +{ + const struct bkey_packed *end = btree_bkey_last(b, t); + struct btree_node_iter_set *set; + unsigned offset = __btree_node_key_to_offset(b, where); + int shift = new_u64s - clobber_u64s; + unsigned old_end = t->end_offset - shift; + unsigned orig_iter_pos = node_iter->data[0].k; + bool iter_current_key_modified = + orig_iter_pos >= offset && + orig_iter_pos <= offset + clobber_u64s; + + btree_node_iter_for_each(node_iter, set) + if (set->end == old_end) + goto found; + + /* didn't find the bset in the iterator - might have to readd it: */ + if (new_u64s && + bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { + bch2_btree_node_iter_push(node_iter, b, where, end); + goto fixup_done; + } else { + /* Iterator is after key that changed */ + return; + } +found: + set->end = t->end_offset; + + /* Iterator hasn't gotten to the key that changed yet: */ + if (set->k < offset) + return; + + if (new_u64s && + bkey_iter_pos_cmp(b, 
where, &path->pos) >= 0) { + set->k = offset; + } else if (set->k < offset + clobber_u64s) { + set->k = offset + new_u64s; + if (set->k == set->end) + bch2_btree_node_iter_set_drop(node_iter, set); + } else { + /* Iterator is after key that changed */ + set->k = (int) set->k + shift; + return; + } + + bch2_btree_node_iter_sort(node_iter, b); +fixup_done: + if (node_iter->data[0].k != orig_iter_pos) + iter_current_key_modified = true; + + /* + * When a new key is added, and the node iterator now points to that + * key, the iterator might have skipped past deleted keys that should + * come after the key the iterator now points to. We have to rewind to + * before those deleted keys - otherwise + * bch2_btree_node_iter_prev_all() breaks: + */ + if (!bch2_btree_node_iter_end(node_iter) && + iter_current_key_modified && + b->c.level) { + struct bset_tree *t; + struct bkey_packed *k, *k2, *p; + + k = bch2_btree_node_iter_peek_all(node_iter, b); + + for_each_bset(b, t) { + bool set_pos = false; + + if (node_iter->data[0].end == t->end_offset) + continue; + + k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t); + + while ((p = bch2_bkey_prev_all(b, t, k2)) && + bkey_iter_cmp(b, k, p) < 0) { + k2 = p; + set_pos = true; + } + + if (set_pos) + btree_node_iter_set_set_pos(node_iter, + b, t, k2); + } + } +} + +void bch2_btree_node_iter_fix(struct btree_trans *trans, + struct btree_path *path, + struct btree *b, + struct btree_node_iter *node_iter, + struct bkey_packed *where, + unsigned clobber_u64s, + unsigned new_u64s) +{ + struct bset_tree *t = bch2_bkey_to_bset(b, where); + struct btree_path *linked; + + if (node_iter != &path->l[b->c.level].iter) { + __bch2_btree_node_iter_fix(path, b, node_iter, t, + where, clobber_u64s, new_u64s); + + if (bch2_debug_check_iterators) + bch2_btree_node_iter_verify(node_iter, b); + } + + trans_for_each_path_with_node(trans, b, linked) { + __bch2_btree_node_iter_fix(linked, b, + &linked->l[b->c.level].iter, t, + where, clobber_u64s, new_u64s); + bch2_btree_path_verify_level(trans, linked, b->c.level); + } +} + +/* Btree path level: pointer to a particular btree node and node iter */ + +static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c, + struct btree_path_level *l, + struct bkey *u, + struct bkey_packed *k) +{ + struct bkey_s_c ret; + + if (unlikely(!k)) { + /* + * signal to bch2_btree_iter_peek_slot() that we're currently at + * a hole + */ + u->type = KEY_TYPE_deleted; + return bkey_s_c_null; + } + + ret = bkey_disassemble(l->b, k, u); + + /* + * XXX: bch2_btree_bset_insert_key() generates invalid keys when we + * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key + * being overwritten but doesn't change k->size. But this is ok, because + * those keys are never written out, we just have to avoid a spurious + * assertion here: + */ + if (bch2_debug_check_bkeys && !bkey_deleted(ret.k)) + bch2_bkey_debugcheck(c, l->b, ret); + + return ret; +} + +static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c, + struct btree_path_level *l, + struct bkey *u) +{ + return __btree_iter_unpack(c, l, u, + bch2_btree_node_iter_peek_all(&l->iter, l->b)); +} + +static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) +{ + struct bkey_s_c k = __btree_iter_unpack(c, l, u, + bch2_btree_node_iter_peek(&l->iter, l->b)); + + path->pos = k.k ? 
k.k->p : l->b->key.k.p; + return k; +} + +static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) +{ + struct bkey_s_c k = __btree_iter_unpack(c, l, u, + bch2_btree_node_iter_prev(&l->iter, l->b)); + + path->pos = k.k ? k.k->p : l->b->data->min_key; + return k; +} + +static inline bool btree_path_advance_to_pos(struct btree_path *path, + struct btree_path_level *l, + int max_advance) +{ + struct bkey_packed *k; + int nr_advanced = 0; + + while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) && + bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { + if (max_advance > 0 && nr_advanced >= max_advance) + return false; + + bch2_btree_node_iter_advance(&l->iter, l->b); + nr_advanced++; + } + + return true; +} + +/* + * Verify that iterator for parent node points to child node: + */ +static void btree_path_verify_new_node(struct btree_trans *trans, + struct btree_path *path, struct btree *b) +{ + struct btree_path_level *l; + unsigned plevel; + bool parent_locked; + struct bkey_packed *k; + + if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) + return; + + plevel = b->c.level + 1; + if (!btree_path_node(path, plevel)) return; - k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1], - iter->nodes[b->level + 1]); + parent_locked = btree_node_locked(path, plevel); + + if (!bch2_btree_node_relock(trans, path, plevel)) + return; + + l = &path->l[plevel]; + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); if (!k || bkey_deleted(k) || - bkey_cmp_left_packed(iter->nodes[b->level + 1], - k, &b->key.k.p)) { - char buf[100]; + bkey_cmp_left_packed(l->b, k, &b->key.k.p)) { + char buf1[100]; + char buf2[100]; + char buf3[100]; + char buf4[100]; struct bkey uk = bkey_unpack_key(b, k); - bch2_bkey_to_text(buf, sizeof(buf), &uk); - panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n", - buf, b->key.k.p.inode, b->key.k.p.offset); + bch2_dump_btree_node(trans->c, l->b); + bch2_bpos_to_text(&PBUF(buf1), path->pos); + bch2_bkey_to_text(&PBUF(buf2), &uk); + bch2_bpos_to_text(&PBUF(buf3), b->data->min_key); + bch2_bpos_to_text(&PBUF(buf3), b->data->max_key); + panic("parent iter doesn't point to new node:\n" + "iter pos %s %s\n" + "iter key %s\n" + "new node %s-%s\n", + bch2_btree_ids[path->btree_id], buf1, + buf2, buf3, buf4); } if (!parent_locked) - btree_node_unlock(iter, b->level + 1); + btree_node_unlock(path, plevel); } -static inline void __btree_iter_init(struct btree_iter *iter, - struct btree *b) +static inline void __btree_path_level_init(struct btree_path *path, + unsigned level) { - bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos, - iter->flags & BTREE_ITER_IS_EXTENTS, - btree_node_is_extents(b)); + struct btree_path_level *l = &path->l[level]; - /* Skip to first non whiteout: */ - if (b->level) - bch2_btree_node_iter_peek(&iter->node_iters[b->level], b); -} + bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); -static inline bool btree_iter_pos_in_node(struct btree_iter *iter, - struct btree *b) -{ - return iter->btree_id == b->btree_id && - bkey_cmp(iter->pos, b->data->min_key) >= 0 && - btree_iter_pos_cmp(iter->pos, &b->key.k, - iter->flags & BTREE_ITER_IS_EXTENTS); + /* + * Iterators to interior nodes should always be pointed at the first non + * whiteout: + */ + if (level) + bch2_btree_node_iter_peek(&l->iter, l->b); } -static inline void btree_iter_node_set(struct btree_iter *iter, - struct btree *b) +static inline void btree_path_level_init(struct btree_trans *trans, + struct 
btree_path *path, + struct btree *b) { - btree_iter_verify_new_node(iter, b); + BUG_ON(path->cached); - EBUG_ON(!btree_iter_pos_in_node(iter, b)); - EBUG_ON(b->lock.state.seq & 1); + btree_path_verify_new_node(trans, path, b); - iter->lock_seq[b->level] = b->lock.state.seq; - iter->nodes[b->level] = b; - __btree_iter_init(iter, b); + EBUG_ON(!btree_path_pos_in_node(path, b)); + EBUG_ON(b->c.lock.state.seq & 1); + + path->l[b->c.level].lock_seq = b->c.lock.state.seq; + path->l[b->c.level].b = b; + __btree_path_level_init(path, b->c.level); } +/* Btree path: fixups after btree node updates: */ + /* * A btree node is being replaced - update the iterator to point to the new * node: */ -bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) +void bch2_trans_node_add(struct btree_trans *trans, struct btree *b) { - struct btree_iter *linked; + struct btree_path *path; + + trans_for_each_path(trans, path) + if (!path->cached && + btree_path_pos_in_node(path, b)) { + enum btree_node_locked_type t = + btree_lock_want(path, b->c.level); + + if (path->nodes_locked && + t != BTREE_NODE_UNLOCKED) { + btree_node_unlock(path, b->c.level); + six_lock_increment(&b->c.lock, t); + mark_btree_node_locked(path, b->c.level, t); + } - for_each_linked_btree_iter(iter, linked) - if (btree_iter_pos_in_node(linked, b)) { - /* - * bch2_btree_iter_node_drop() has already been called - - * the old node we're replacing has already been - * unlocked and the pointer invalidated - */ - BUG_ON(btree_node_locked(linked, b->level)); + btree_path_level_init(trans, path, b); + } +} + +/* + * A btree node has been modified in such a way as to invalidate iterators - fix + * them: + */ +void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b) +{ + struct btree_path *path; + + trans_for_each_path_with_node(trans, b, path) + __btree_path_level_init(path, b->c.level); +} + +/* Btree path: traverse, set_pos: */ + +static int lock_root_check_fn(struct six_lock *lock, void *p) +{ + struct btree *b = container_of(lock, struct btree, c.lock); + struct btree **rootp = p; + + return b == *rootp ? 0 : -1; +} +static inline int btree_path_lock_root(struct btree_trans *trans, + struct btree_path *path, + unsigned depth_want, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; + struct btree *b, **rootp = &c->btree_roots[path->btree_id].b; + enum six_lock_type lock_type; + unsigned i; + + EBUG_ON(path->nodes_locked); + + while (1) { + b = READ_ONCE(*rootp); + path->level = READ_ONCE(b->c.level); + + if (unlikely(path->level < depth_want)) { /* - * If @linked wants this node read locked, we don't want - * to actually take the read lock now because it's not - * legal to hold read locks on other nodes while we take - * write locks, so the journal can make forward - * progress... 
- * - * Instead, btree_iter_node_set() sets things up so - * bch2_btree_node_relock() will succeed: + * the root is at a lower depth than the depth we want: + * got to the end of the btree, or we're walking nodes + * greater than some depth and there are no nodes >= + * that depth */ + path->level = depth_want; + for (i = path->level; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; + return 1; + } - if (btree_want_intent(linked, b->level)) { - six_lock_increment(&b->lock, SIX_LOCK_intent); - mark_btree_node_intent_locked(linked, b->level); - } + lock_type = __btree_lock_want(path, path->level); + if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX, + path->level, lock_type, + lock_root_check_fn, rootp, + trace_ip))) { + if (trans->restarted) + return -EINTR; + continue; + } - btree_iter_node_set(linked, b); + if (likely(b == READ_ONCE(*rootp) && + b->c.level == path->level && + !race_fault())) { + for (i = 0; i < path->level; i++) + path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT; + path->l[path->level].b = b; + for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; + + mark_btree_node_locked(path, path->level, lock_type); + btree_path_level_init(trans, path, b); + return 0; } - if (!btree_iter_pos_in_node(iter, b)) { - six_unlock_intent(&b->lock); - return false; + six_unlock_type(&b->c.lock, lock_type); } +} - mark_btree_node_intent_locked(iter, b->level); - btree_iter_node_set(iter, b); - return true; +noinline +static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree_node_iter node_iter = l->iter; + struct bkey_packed *k; + struct bkey_buf tmp; + unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) + ? (path->level > 1 ? 0 : 2) + : (path->level > 1 ? 
1 : 16); + bool was_locked = btree_node_locked(path, path->level); + int ret = 0; + + bch2_bkey_buf_init(&tmp); + + while (nr && !ret) { + if (!bch2_btree_node_relock(trans, path, path->level)) + break; + + bch2_btree_node_iter_advance(&node_iter, l->b); + k = bch2_btree_node_iter_peek(&node_iter, l->b); + if (!k) + break; + + bch2_bkey_buf_unpack(&tmp, c, l->b, k); + ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id, + path->level - 1); + } + + if (!was_locked) + btree_node_unlock(path, path->level); + + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + +static noinline void btree_node_mem_ptr_set(struct btree_trans *trans, + struct btree_path *path, + unsigned plevel, struct btree *b) +{ + struct btree_path_level *l = &path->l[plevel]; + bool locked = btree_node_locked(path, plevel); + struct bkey_packed *k; + struct bch_btree_ptr_v2 *bp; + + if (!bch2_btree_node_relock(trans, path, plevel)) + return; + + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + BUG_ON(k->type != KEY_TYPE_btree_ptr_v2); + + bp = (void *) bkeyp_val(&l->b->format, k); + bp->mem_ptr = (unsigned long)b; + + if (!locked) + btree_node_unlock(path, plevel); +} + +static __always_inline int btree_path_down(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree *b; + unsigned level = path->level - 1; + enum six_lock_type lock_type = __btree_lock_want(path, level); + struct bkey_buf tmp; + int ret; + + EBUG_ON(!btree_node_locked(path, path->level)); + + bch2_bkey_buf_init(&tmp); + bch2_bkey_buf_unpack(&tmp, c, l->b, + bch2_btree_node_iter_peek(&l->iter, l->b)); + + b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip); + ret = PTR_ERR_OR_ZERO(b); + if (unlikely(ret)) + goto err; + + mark_btree_node_locked(path, level, lock_type); + btree_path_level_init(trans, path, b); + + if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 && + unlikely(b != btree_node_mem_ptr(tmp.k))) + btree_node_mem_ptr_set(trans, path, level + 1, b); + + if (flags & BTREE_ITER_PREFETCH) + ret = btree_path_prefetch(trans, path); + + if (btree_node_read_locked(path, level + 1)) + btree_node_unlock(path, level + 1); + path->level = level; + + bch2_btree_path_verify_locks(path); +err: + bch2_bkey_buf_exit(&tmp, c); + return ret; } -void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b) -{ - struct btree_iter *linked; +static int btree_path_traverse_one(struct btree_trans *, struct btree_path *, + unsigned, unsigned long); + +static int __btree_path_traverse_all(struct btree_trans *trans, int ret, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; + struct btree_path *path; + int i; + + if (trans->in_traverse_all) + return -EINTR; + + trans->in_traverse_all = true; +retry_all: + trans->restarted = false; + + trans_for_each_path(trans, path) + path->should_be_locked = false; + + btree_trans_verify_sorted(trans); + + for (i = trans->nr_sorted - 2; i >= 0; --i) { + struct btree_path *path1 = trans->paths + trans->sorted[i]; + struct btree_path *path2 = trans->paths + trans->sorted[i + 1]; + + if (path1->btree_id == path2->btree_id && + path1->locks_want < path2->locks_want) + __bch2_btree_path_upgrade(trans, path1, path2->locks_want); + else if (!path1->locks_want && path2->locks_want) + __bch2_btree_path_upgrade(trans, path1, 1); + } + + bch2_trans_unlock(trans); + cond_resched(); + + if (unlikely(ret == -ENOMEM)) { + struct closure cl; + + 
closure_init_stack(&cl); + + do { + ret = bch2_btree_cache_cannibalize_lock(c, &cl); + closure_sync(&cl); + } while (ret); + } + + if (unlikely(ret == -EIO)) + goto out; + + BUG_ON(ret && ret != -EINTR); + + /* Now, redo traversals in correct order: */ + i = 0; + while (i < trans->nr_sorted) { + path = trans->paths + trans->sorted[i]; + + EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx))); + + ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_); + if (ret) + goto retry_all; + + EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx))); + + if (path->nodes_locked || + !btree_path_node(path, path->level)) + i++; + } + + /* + * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock() + * and relock(), relock() won't relock since path->should_be_locked + * isn't set yet, which is all fine + */ + trans_for_each_path(trans, path) + BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE); +out: + bch2_btree_cache_cannibalize_unlock(c); + + trans->in_traverse_all = false; + + trace_trans_traverse_all(trans->ip, trace_ip); + return ret; +} + +static int bch2_btree_path_traverse_all(struct btree_trans *trans) +{ + return __btree_path_traverse_all(trans, 0, _RET_IP_); +} + +static inline bool btree_path_good_node(struct btree_trans *trans, + struct btree_path *path, + unsigned l, int check_pos) +{ + if (!is_btree_node(path, l) || + !bch2_btree_node_relock(trans, path, l)) + return false; + + if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b)) + return false; + if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b)) + return false; + return true; +} + +static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans, + struct btree_path *path, + int check_pos) +{ + unsigned i, l = path->level; + + while (btree_path_node(path, l) && + !btree_path_good_node(trans, path, l, check_pos)) { + btree_node_unlock(path, l); + path->l[l].b = BTREE_ITER_NO_NODE_UP; + l++; + } + + /* If we need intent locks, take them too: */ + for (i = l + 1; + i < path->locks_want && btree_path_node(path, i); + i++) + if (!bch2_btree_node_relock(trans, path, i)) + while (l <= i) { + btree_node_unlock(path, l); + path->l[l].b = BTREE_ITER_NO_NODE_UP; + l++; + } + + return l; +} + +/* + * This is the main state machine for walking down the btree - walks down to a + * specified depth + * + * Returns 0 on success, -EIO on error (error reading in a btree node). + * + * On error, caller (peek_node()/peek_key()) must return NULL; the error is + * stashed in the iterator and returned from bch2_trans_exit(). + */ +static int btree_path_traverse_one(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) +{ + unsigned depth_want = path->level; + int ret = 0; + + if (unlikely(trans->restarted)) { + ret = -EINTR; + goto out; + } + + /* + * Ensure we obey path->should_be_locked: if it's set, we can't unlock + * and re-traverse the path without a transaction restart: + */ + if (path->should_be_locked) { + ret = bch2_btree_path_relock(trans, path, trace_ip) ? 
0 : -EINTR; + goto out; + } + + if (path->cached) { + ret = bch2_btree_path_traverse_cached(trans, path, flags); + goto out; + } + + if (unlikely(path->level >= BTREE_MAX_DEPTH)) + goto out; + + path->level = btree_path_up_until_good_node(trans, path, 0); + + /* + * Note: path->nodes[path->level] may be temporarily NULL here - that + * would indicate to other code that we got to the end of the btree, + * here it indicates that relocking the root failed - it's critical that + * btree_path_lock_root() comes next and that it can't fail + */ + while (path->level > depth_want) { + ret = btree_path_node(path, path->level) + ? btree_path_down(trans, path, flags, trace_ip) + : btree_path_lock_root(trans, path, depth_want, trace_ip); + if (unlikely(ret)) { + if (ret == 1) { + /* + * No nodes at this level - got to the end of + * the btree: + */ + ret = 0; + goto out; + } + + __bch2_btree_path_unlock(path); + path->level = depth_want; + + if (ret == -EIO) + path->l[path->level].b = + BTREE_ITER_NO_NODE_ERROR; + else + path->l[path->level].b = + BTREE_ITER_NO_NODE_DOWN; + goto out; + } + } + + path->uptodate = BTREE_ITER_UPTODATE; +out: + BUG_ON((ret == -EINTR) != !!trans->restarted); + bch2_btree_path_verify(trans, path); + return ret; +} + +static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long); + +int __must_check bch2_btree_path_traverse(struct btree_trans *trans, + struct btree_path *path, unsigned flags) +{ + if (path->uptodate < BTREE_ITER_NEED_RELOCK) + return 0; + + return bch2_trans_cond_resched(trans) ?: + btree_path_traverse_one(trans, path, flags, _RET_IP_); +} + +static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst, + struct btree_path *src) +{ + unsigned i; + + memcpy(&dst->pos, &src->pos, + sizeof(struct btree_path) - offsetof(struct btree_path, pos)); + + for (i = 0; i < BTREE_MAX_DEPTH; i++) + if (btree_node_locked(dst, i)) + six_lock_increment(&dst->l[i].b->c.lock, + __btree_lock_want(dst, i)); + + btree_path_check_sort(trans, dst, 0); +} + +static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src, + bool intent) +{ + struct btree_path *new = btree_path_alloc(trans, src); + + btree_path_copy(trans, new, src); + __btree_path_get(new, intent); + return new; +} + +inline struct btree_path * __must_check +bch2_btree_path_make_mut(struct btree_trans *trans, + struct btree_path *path, bool intent, + unsigned long ip) +{ + if (path->ref > 1 || path->preserve) { + __btree_path_put(path, intent); + path = btree_path_clone(trans, path, intent); + path->preserve = false; +#ifdef CONFIG_BCACHEFS_DEBUG + path->ip_allocated = ip; +#endif + btree_trans_verify_sorted(trans); + } + + return path; +} + +static struct btree_path * __must_check +btree_path_set_pos(struct btree_trans *trans, + struct btree_path *path, struct bpos new_pos, + bool intent, unsigned long ip) +{ + int cmp = bpos_cmp(new_pos, path->pos); + unsigned l = path->level; + + EBUG_ON(trans->restarted); + EBUG_ON(!path->ref); + + if (!cmp) + return path; + + path = bch2_btree_path_make_mut(trans, path, intent, ip); + + path->pos = new_pos; + path->should_be_locked = false; + + btree_path_check_sort(trans, path, cmp); + + if (unlikely(path->cached)) { + btree_node_unlock(path, 0); + path->l[0].b = BTREE_ITER_NO_NODE_CACHED; + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + goto out; + } + + l = btree_path_up_until_good_node(trans, path, cmp); + + if (btree_path_node(path, l)) { + /* + * We might have to skip over many keys, or just a few: try 
+ * advancing the node iterator, and if we have to skip over too + * many keys just reinit it (or if we're rewinding, since that + * is expensive). + */ + if (cmp < 0 || + !btree_path_advance_to_pos(path, &path->l[l], 8)) + __btree_path_level_init(path, l); + } + + if (l != path->level) { + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + __bch2_btree_path_unlock(path); + } +out: + bch2_btree_path_verify(trans, path); + return path; +} + +/* Btree path: main interface: */ + +static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path) +{ + struct btree_path *next; + + next = prev_btree_path(trans, path); + if (next && !btree_path_cmp(next, path)) + return next; + + next = next_btree_path(trans, path); + if (next && !btree_path_cmp(next, path)) + return next; + + return NULL; +} + +static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path) +{ + struct btree_path *next; + + next = prev_btree_path(trans, path); + if (next && next->level == path->level && path_l(next)->b == path_l(path)->b) + return next; + + next = next_btree_path(trans, path); + if (next && next->level == path->level && path_l(next)->b == path_l(path)->b) + return next; + + return NULL; +} + +static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path) +{ + __bch2_btree_path_unlock(path); + btree_path_list_remove(trans, path); + trans->paths_allocated &= ~(1ULL << path->idx); +} + +void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent) +{ + struct btree_path *dup; + + EBUG_ON(trans->paths + path->idx != path); + EBUG_ON(!path->ref); + + if (!__btree_path_put(path, intent)) + return; + + /* + * Perhaps instead we should check for duplicate paths in traverse_all: + */ + if (path->preserve && + (dup = have_path_at_pos(trans, path))) { + dup->preserve = true; + path->preserve = false; + goto free; + } + + if (!path->preserve && + (dup = have_node_at_pos(trans, path))) + goto free; + return; +free: + if (path->should_be_locked && + !btree_node_locked(dup, path->level)) + return; + + dup->should_be_locked |= path->should_be_locked; + __bch2_path_free(trans, path); +} + +noinline __cold +void bch2_dump_trans_paths_updates(struct btree_trans *trans) +{ + struct btree_path *path; + struct btree_insert_entry *i; + unsigned idx; + char buf1[300], buf2[300]; + + btree_trans_verify_sorted(trans); + + trans_for_each_path_inorder(trans, path, idx) + printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n", + path->idx, path->ref, path->intent_ref, + path->should_be_locked ? " S" : "", + path->preserve ? 
" P" : "", + bch2_btree_ids[path->btree_id], + (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1), + path->nodes_locked, +#ifdef CONFIG_BCACHEFS_DEBUG + (void *) path->ip_allocated +#else + NULL +#endif + ); + + trans_for_each_update(trans, i) { + struct bkey u; + struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u); + + printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s", + bch2_btree_ids[i->btree_id], + (void *) i->ip_allocated, + (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1), + (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2)); + } +} + +static struct btree_path *btree_path_alloc(struct btree_trans *trans, + struct btree_path *pos) +{ + struct btree_path *path; + unsigned idx; + + if (unlikely(trans->paths_allocated == + ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) { + bch2_dump_trans_paths_updates(trans); + panic("trans path oveflow\n"); + } + + idx = __ffs64(~trans->paths_allocated); + trans->paths_allocated |= 1ULL << idx; + + path = &trans->paths[idx]; + + path->idx = idx; + path->ref = 0; + path->intent_ref = 0; + path->nodes_locked = 0; + path->nodes_intent_locked = 0; + + btree_path_list_add(trans, pos, path); + return path; +} + +struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached, + enum btree_id btree_id, struct bpos pos, + unsigned locks_want, unsigned level, + bool intent, unsigned long ip) +{ + struct btree_path *path, *path_pos = NULL; + int i; + + BUG_ON(trans->restarted); + + trans_for_each_path_inorder(trans, path, i) { + if (__btree_path_cmp(path, + btree_id, + cached, + pos, + level) > 0) + break; + + path_pos = path; + } + + if (path_pos && + path_pos->cached == cached && + path_pos->btree_id == btree_id && + path_pos->level == level) { + __btree_path_get(path_pos, intent); + path = btree_path_set_pos(trans, path_pos, pos, intent, ip); + path->preserve = true; + } else { + path = btree_path_alloc(trans, path_pos); + path_pos = NULL; + + __btree_path_get(path, intent); + path->pos = pos; + path->btree_id = btree_id; + path->cached = cached; + path->preserve = true; + path->uptodate = BTREE_ITER_NEED_TRAVERSE; + path->should_be_locked = false; + path->level = level; + path->locks_want = locks_want; + path->nodes_locked = 0; + path->nodes_intent_locked = 0; + for (i = 0; i < ARRAY_SIZE(path->l); i++) + path->l[i].b = BTREE_ITER_NO_NODE_INIT; +#ifdef CONFIG_BCACHEFS_DEBUG + path->ip_allocated = ip; +#endif + btree_trans_verify_sorted(trans); + } + + if (path->intent_ref) + locks_want = max(locks_want, level + 1); + + /* + * If the path has locks_want greater than requested, we don't downgrade + * it here - on transaction restart because btree node split needs to + * upgrade locks, we might be putting/getting the iterator again. + * Downgrading iterators only happens via bch2_trans_downgrade(), after + * a successful transaction commit. + */ + + locks_want = min(locks_want, BTREE_MAX_DEPTH); + if (locks_want > path->locks_want) { + path->locks_want = locks_want; + btree_path_get_locks(trans, path, true, _THIS_IP_); + } + + return path; +} + +inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u) +{ + + struct bkey_s_c k; + + BUG_ON(path->uptodate != BTREE_ITER_UPTODATE); + + if (!path->cached) { + struct btree_path_level *l = path_l(path); + struct bkey_packed *_k = + bch2_btree_node_iter_peek_all(&l->iter, l->b); + + k = _k ? 
bkey_disassemble(l->b, _k, u) : bkey_s_c_null; + + EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0); + + if (!k.k || bpos_cmp(path->pos, k.k->p)) + goto hole; + } else { + struct bkey_cached *ck = (void *) path->l[0].b; + + EBUG_ON(path->btree_id != ck->key.btree_id || + bkey_cmp(path->pos, ck->key.pos)); + + /* BTREE_ITER_CACHED_NOFILL? */ + if (unlikely(!ck->valid)) + goto hole; + + k = bkey_i_to_s_c(ck->k); + } + + return k; +hole: + bkey_init(u); + u->p = path->pos; + return (struct bkey_s_c) { u, NULL }; +} + +/* Btree iterators: */ + +int __must_check +__bch2_btree_iter_traverse(struct btree_iter *iter) +{ + return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); +} + +int __must_check +bch2_btree_iter_traverse(struct btree_iter *iter) +{ + int ret; + + iter->path = btree_path_set_pos(iter->trans, iter->path, + btree_iter_search_key(iter), + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); + if (ret) + return ret; + + iter->path->should_be_locked = true; + return 0; +} + +/* Iterate across nodes (leaf and interior nodes) */ + +struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct btree *b = NULL; + int ret; + + EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (ret) + goto err; + + b = btree_path_node(iter->path, iter->path->level); + if (!b) + goto out; + + BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0); + + bkey_init(&iter->k); + iter->k.p = iter->pos = b->key.k.p; + + iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + iter->path->should_be_locked = true; + BUG_ON(iter->path->uptodate); +out: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + + return b; +err: + b = ERR_PTR(ret); + goto out; +} + +struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct btree_path *path = iter->path; + struct btree *b = NULL; + unsigned l; + int ret; + + BUG_ON(trans->restarted); + EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); + + /* already at end? */ + if (!btree_path_node(path, path->level)) + return NULL; + + /* got to end? 
*/ + if (!btree_path_node(path, path->level + 1)) { + btree_node_unlock(path, path->level); + path->l[path->level].b = BTREE_ITER_NO_NODE_UP; + path->level++; + return NULL; + } + + if (!bch2_btree_node_relock(trans, path, path->level + 1)) { + __bch2_btree_path_unlock(path); + path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS; + path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS; + btree_trans_restart(trans); + ret = -EINTR; + goto err; + } + + b = btree_path_node(path, path->level + 1); + + if (!bpos_cmp(iter->pos, b->key.k.p)) { + btree_node_unlock(path, path->level); + path->l[path->level].b = BTREE_ITER_NO_NODE_UP; + path->level++; + } else { + /* + * Haven't gotten to the end of the parent node: go back down to + * the next child node + */ + path = iter->path = + btree_path_set_pos(trans, path, bpos_successor(iter->pos), + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + path->level = iter->min_depth; + + for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++) + if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED) + btree_node_unlock(path, l); + + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + bch2_btree_iter_verify(iter); + + ret = bch2_btree_path_traverse(trans, path, iter->flags); + if (ret) + goto err; + + b = path->l[path->level].b; + } + + bkey_init(&iter->k); + iter->k.p = iter->pos = b->key.k.p; + + iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + iter->path->should_be_locked = true; + BUG_ON(iter->path->uptodate); +out: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + + return b; +err: + b = ERR_PTR(ret); + goto out; +} + +/* Iterate across keys (in leaf nodes only) */ + +inline bool bch2_btree_iter_advance(struct btree_iter *iter) +{ + struct bpos pos = iter->k.p; + bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_cmp(pos, SPOS_MAX) + : bkey_cmp(pos, SPOS_MAX)) != 0; + + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_successor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; +} + +inline bool bch2_btree_iter_rewind(struct btree_iter *iter) +{ + struct bpos pos = bkey_start_pos(&iter->k); + bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_cmp(pos, POS_MIN) + : bkey_cmp(pos, POS_MIN)) != 0; + + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_predecessor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; +} + +/** + * bch2_btree_iter_peek: returns first key greater than or equal to iterator's + * current position + */ +struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct bpos search_key = btree_iter_search_key(iter); + struct bkey_i *next_update; + struct bkey_s_c k; + int ret, cmp; + + EBUG_ON(iter->path->cached || iter->path->level); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); + + while (1) { + iter->path = btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + /* ensure that iter->k is consistent with iter->pos: */ + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); + goto out; + } + + next_update = iter->flags & BTREE_ITER_WITH_UPDATES + ? 
btree_trans_peek_updates(trans, iter->btree_id, search_key) + : NULL; + k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k); + + /* * In the btree, deleted keys sort before non deleted: */ + if (k.k && bkey_deleted(k.k) && + (!next_update || + bpos_cmp(k.k->p, next_update->k.p) <= 0)) { + search_key = k.k->p; + continue; + } + + if (next_update && + bpos_cmp(next_update->k.p, + k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) { + iter->k = next_update->k; + k = bkey_i_to_s_c(next_update); + } + + if (likely(k.k)) { + /* + * We can never have a key in a leaf node at POS_MAX, so + * we don't have to check these successor() calls: + */ + if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + !bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)) { + search_key = bpos_successor(k.k->p); + continue; + } + + if (bkey_whiteout(k.k) && + !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) { + search_key = bkey_successor(iter, k.k->p); + continue; + } + + break; + } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) { + /* Advance to next leaf node: */ + search_key = bpos_successor(iter->path->l[0].b->key.k.p); + } else { + /* End of btree: */ + bch2_btree_iter_set_pos(iter, SPOS_MAX); + k = bkey_s_c_null; + goto out; + } + } + + /* + * iter->pos should be mononotically increasing, and always be equal to + * the key we just returned - except extents can straddle iter->pos: + */ + if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) + iter->pos = k.k->p; + else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) + iter->pos = bkey_start_pos(k.k); + + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + iter->pos.snapshot = iter->snapshot; + + cmp = bpos_cmp(k.k->p, iter->path->pos); + if (cmp) { + iter->path = bch2_btree_path_make_mut(trans, iter->path, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + iter->path->pos = k.k->p; + btree_path_check_sort(trans, iter->path, cmp); + } +out: + iter->path->should_be_locked = true; + + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + ret = bch2_btree_iter_verify_ret(iter, k); + if (unlikely(ret)) + return bkey_s_c_err(ret); - for_each_linked_btree_iter(iter, linked) - bch2_btree_iter_node_drop(linked, b); + return k; } -void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) +/** + * bch2_btree_iter_next: returns first key greater than iterator's current + * position + */ +struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) { - unsigned level = b->level; + if (!bch2_btree_iter_advance(iter)) + return bkey_s_c_null; - if (iter->nodes[level] == b) { - iter->flags &= ~BTREE_ITER_UPTODATE; - btree_node_unlock(iter, level); - iter->nodes[level] = BTREE_ITER_NOT_END; - } + return bch2_btree_iter_peek(iter); } -/* - * A btree node has been modified in such a way as to invalidate iterators - fix - * them: +/** + * bch2_btree_iter_peek_prev: returns first key less than or equal to + * iterator's current position */ -void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b) +struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) { - struct btree_iter *linked; - - for_each_linked_btree_node(iter, b, linked) - __btree_iter_init(linked, b); - __btree_iter_init(iter, b); -} + struct btree_trans *trans = iter->trans; + struct bpos search_key = iter->pos; + struct btree_path *saved_path = NULL; + struct bkey_s_c k; + struct bkey saved_k; + const struct bch_val *saved_v; + int ret; -static inline int btree_iter_lock_root(struct btree_iter *iter, 
- unsigned depth_want) -{ - struct bch_fs *c = iter->c; - struct btree *b; - enum six_lock_type lock_type; - unsigned i; + EBUG_ON(iter->path->cached || iter->path->level); + EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); - EBUG_ON(iter->nodes_locked); + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + search_key.snapshot = U32_MAX; while (1) { - b = READ_ONCE(c->btree_roots[iter->btree_id].b); - iter->level = READ_ONCE(b->level); + iter->path = btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); - if (unlikely(iter->level < depth_want)) { - /* - * the root is at a lower depth than the depth we want: - * got to the end of the btree, or we're walking nodes - * greater than some depth and there are no nodes >= - * that depth - */ - iter->level = depth_want; - iter->nodes[iter->level] = NULL; - return 0; + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + /* ensure that iter->k is consistent with iter->pos: */ + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); + goto out; } - lock_type = btree_lock_want(iter, iter->level); - if (unlikely(!btree_node_lock(b, POS_MAX, iter->level, - iter, lock_type))) - return -EINTR; + k = btree_path_level_peek(trans->c, iter->path, + &iter->path->l[0], &iter->k); + if (!k.k || + ((iter->flags & BTREE_ITER_IS_EXTENTS) + ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0 + : bpos_cmp(k.k->p, search_key) > 0)) + k = btree_path_level_prev(trans->c, iter->path, + &iter->path->l[0], &iter->k); - if (likely(b == c->btree_roots[iter->btree_id].b && - b->level == iter->level && - !race_fault())) { - for (i = 0; i < iter->level; i++) - iter->nodes[i] = BTREE_ITER_NOT_END; - iter->nodes[iter->level] = b; + btree_path_check_sort(trans, iter->path, 0); - mark_btree_node_locked(iter, iter->level, lock_type); - btree_iter_node_set(iter, b); - return 0; + if (likely(k.k)) { + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) { + if (k.k->p.snapshot == iter->snapshot) + goto got_key; + + /* + * If we have a saved candidate, and we're no + * longer at the same _key_ (not pos), return + * that candidate + */ + if (saved_path && bkey_cmp(k.k->p, saved_k.p)) { + bch2_path_put(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + iter->path = saved_path; + saved_path = NULL; + iter->k = saved_k; + k.v = saved_v; + goto got_key; + } - } + if (bch2_snapshot_is_ancestor(iter->trans->c, + iter->snapshot, + k.k->p.snapshot)) { + if (saved_path) + bch2_path_put(trans, saved_path, + iter->flags & BTREE_ITER_INTENT); + saved_path = btree_path_clone(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + saved_k = *k.k; + saved_v = k.v; + } + + search_key = bpos_predecessor(k.k->p); + continue; + } +got_key: + if (bkey_whiteout(k.k) && + !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) { + search_key = bkey_predecessor(iter, k.k->p); + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + search_key.snapshot = U32_MAX; + continue; + } - six_unlock_type(&b->lock, lock_type); + break; + } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) { + /* Advance to previous leaf node: */ + search_key = bpos_predecessor(iter->path->l[0].b->data->min_key); + } else { + /* Start of btree: */ + bch2_btree_iter_set_pos(iter, POS_MIN); + k = bkey_s_c_null; + goto out; + } } -} -noinline -static void btree_iter_prefetch(struct btree_iter *iter) -{ - struct btree *b = iter->nodes[iter->level + 1]; - 
struct btree_node_iter node_iter = iter->node_iters[iter->level + 1]; - struct bkey_packed *k; - BKEY_PADDED(k) tmp; - unsigned nr = iter->level ? 1 : 8; - bool was_locked = btree_node_locked(iter, iter->level + 1); + EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0); - while (nr) { - if (!bch2_btree_node_relock(iter, iter->level + 1)) - return; + /* Extents can straddle iter->pos: */ + if (bkey_cmp(k.k->p, iter->pos) < 0) + iter->pos = k.k->p; - bch2_btree_node_iter_advance(&node_iter, b); - k = bch2_btree_node_iter_peek(&node_iter, b); - if (!k) - break; + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + iter->pos.snapshot = iter->snapshot; +out: + if (saved_path) + bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT); + iter->path->should_be_locked = true; - bch2_bkey_unpack(b, &tmp.k, k); - bch2_btree_node_prefetch(iter->c, &tmp.k, - iter->level, iter->btree_id); - } + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); - if (!was_locked) - btree_node_unlock(iter, iter->level + 1); + return k; } -static inline int btree_iter_down(struct btree_iter *iter) +/** + * bch2_btree_iter_prev: returns first key less than iterator's current + * position + */ +struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) { - struct btree *b; - struct bkey_s_c k = __btree_iter_peek(iter); - unsigned level = iter->level - 1; - enum six_lock_type lock_type = btree_lock_want(iter, level); - BKEY_PADDED(k) tmp; - - bkey_reassemble(&tmp.k, k); + if (!bch2_btree_iter_rewind(iter)) + return bkey_s_c_null; - b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type); - if (unlikely(IS_ERR(b))) - return PTR_ERR(b); + return bch2_btree_iter_peek_prev(iter); +} - iter->level = level; - mark_btree_node_locked(iter, level, lock_type); - btree_iter_node_set(iter, b); +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct bpos search_key; + struct bkey_s_c k; + int ret; - if (iter->flags & BTREE_ITER_PREFETCH) - btree_iter_prefetch(iter); + EBUG_ON(iter->path->level); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); - return 0; -} + /* extents can't span inode numbers: */ + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + unlikely(iter->pos.offset == KEY_OFFSET_MAX)) { + if (iter->pos.inode == KEY_INODE_MAX) + return bkey_s_c_null; -static void btree_iter_up(struct btree_iter *iter) -{ - btree_node_unlock(iter, iter->level++); -} + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); + } -int __must_check __bch2_btree_iter_traverse(struct btree_iter *); + search_key = btree_iter_search_key(iter); + iter->path = btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); -static int btree_iter_traverse_error(struct btree_iter *iter, int ret) -{ - struct bch_fs *c = iter->c; - struct btree_iter *linked, *sorted_iters, **i; -retry_all: - bch2_btree_iter_unlock(iter); + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) + return bkey_s_c_err(ret); - if (ret != -ENOMEM && ret != -EINTR) - goto io_error; + if ((iter->flags & BTREE_ITER_CACHED) || + !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) { + struct bkey_i *next_update; - if (ret == -ENOMEM) { - struct closure cl; + next_update = iter->flags & BTREE_ITER_WITH_UPDATES + ? 
btree_trans_peek_updates(trans, iter->btree_id, search_key) + : NULL; - closure_init_stack(&cl); + if (next_update && + !bpos_cmp(next_update->k.p, iter->pos)) { + iter->k = next_update->k; + k = bkey_i_to_s_c(next_update); + } else { + k = bch2_btree_path_peek_slot(iter->path, &iter->k); + } + } else { + struct bpos next; - do { - ret = bch2_btree_node_cannibalize_lock(c, &cl); - closure_sync(&cl); - } while (ret); - } + if (iter->flags & BTREE_ITER_INTENT) { + struct btree_iter iter2; - /* - * Linked iters are normally a circular singly linked list - break cycle - * while we sort them: - */ - linked = iter->next; - iter->next = NULL; - sorted_iters = NULL; + bch2_trans_copy_iter(&iter2, iter); + k = bch2_btree_iter_peek(&iter2); - while (linked) { - iter = linked; - linked = linked->next; + if (k.k && !bkey_err(k)) { + iter->k = iter2.k; + k.k = &iter->k; + } + bch2_trans_iter_exit(trans, &iter2); + } else { + struct bpos pos = iter->pos; - i = &sorted_iters; - while (*i && btree_iter_cmp(iter, *i) > 0) - i = &(*i)->next; + k = bch2_btree_iter_peek(iter); + iter->pos = pos; + } - iter->next = *i; - *i = iter; - } + if (unlikely(bkey_err(k))) + return k; - /* Make list circular again: */ - iter = sorted_iters; - while (iter->next) - iter = iter->next; - iter->next = sorted_iters; + next = k.k ? bkey_start_pos(k.k) : POS_MAX; - /* Now, redo traversals in correct order: */ + if (bkey_cmp(iter->pos, next) < 0) { + bkey_init(&iter->k); + iter->k.p = iter->pos; + bch2_key_resize(&iter->k, + min_t(u64, KEY_SIZE_MAX, + (next.inode == iter->pos.inode + ? next.offset + : KEY_OFFSET_MAX) - + iter->pos.offset)); - iter = sorted_iters; - do { -retry: - ret = __bch2_btree_iter_traverse(iter); - if (unlikely(ret)) { - if (ret == -EINTR) - goto retry; - goto retry_all; + k = (struct bkey_s_c) { &iter->k, NULL }; + EBUG_ON(!k.k->size); } + } - iter = iter->next; - } while (iter != sorted_iters); + iter->path->should_be_locked = true; - ret = btree_iter_linked(iter) ? -EINTR : 0; -out: - bch2_btree_node_cannibalize_unlock(c); - return ret; -io_error: - BUG_ON(ret != -EIO); + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + ret = bch2_btree_iter_verify_ret(iter, k); + if (unlikely(ret)) + return bkey_s_c_err(ret); - iter->flags |= BTREE_ITER_ERROR; - iter->nodes[iter->level] = NULL; - goto out; + return k; } -/* - * This is the main state machine for walking down the btree - walks down to a - * specified depth - * - * Returns 0 on success, -EIO on error (error reading in a btree node). - * - * On error, caller (peek_node()/peek_key()) must return NULL; the error is - * stashed in the iterator and returned from bch2_btree_iter_unlock(). 
- */ -int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) { - unsigned depth_want = iter->level; + if (!bch2_btree_iter_advance(iter)) + return bkey_s_c_null; - if (unlikely(!iter->nodes[iter->level])) - return 0; + return bch2_btree_iter_peek_slot(iter); +} - iter->flags &= ~(BTREE_ITER_UPTODATE|BTREE_ITER_AT_END_OF_LEAF); - - /* make sure we have all the intent locks we need - ugh */ - if (unlikely(iter->nodes[iter->level] && - iter->level + 1 < iter->locks_want)) { - unsigned i; - - for (i = iter->level + 1; - i < iter->locks_want && iter->nodes[i]; - i++) - if (!bch2_btree_node_relock(iter, i)) { - while (iter->nodes[iter->level] && - iter->level + 1 < iter->locks_want) - btree_iter_up(iter); - break; - } - } +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) +{ + if (!bch2_btree_iter_rewind(iter)) + return bkey_s_c_null; - /* - * If the current node isn't locked, go up until we have a locked node - * or run out of nodes: - */ - while (iter->nodes[iter->level] && - !(is_btree_node(iter, iter->level) && - bch2_btree_node_relock(iter, iter->level) && - btree_iter_pos_cmp(iter->pos, - &iter->nodes[iter->level]->key.k, - iter->flags & BTREE_ITER_IS_EXTENTS))) - btree_iter_up(iter); + return bch2_btree_iter_peek_slot(iter); +} - /* - * If we've got a btree node locked (i.e. we aren't about to relock the - * root) - advance its node iterator if necessary: - */ - if (iter->nodes[iter->level]) { - struct bkey_s_c k; +/* new transactional stuff: */ - while ((k = __btree_iter_peek_all(iter)).k && - !btree_iter_pos_cmp(iter->pos, k.k, - iter->flags & BTREE_ITER_IS_EXTENTS)) - __btree_iter_advance(iter); - } +static inline void btree_path_verify_sorted_ref(struct btree_trans *trans, + struct btree_path *path) +{ + EBUG_ON(path->sorted_idx >= trans->nr_sorted); + EBUG_ON(trans->sorted[path->sorted_idx] != path->idx); + EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx))); +} - /* - * Note: iter->nodes[iter->level] may be temporarily NULL here - that - * would indicate to other code that we got to the end of the btree, - * here it indicates that relocking the root failed - it's critical that - * btree_iter_lock_root() comes next and that it can't fail - */ - while (iter->level > depth_want) { - int ret = iter->nodes[iter->level] - ? 
btree_iter_down(iter) - : btree_iter_lock_root(iter, depth_want); - if (unlikely(ret)) { - iter->level = depth_want; - iter->nodes[iter->level] = BTREE_ITER_NOT_END; - return ret; - } - } +static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + unsigned i; - return 0; + for (i = 0; i < trans->nr_sorted; i++) + btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]); +#endif } -int __must_check bch2_btree_iter_traverse(struct btree_iter *iter) +static void btree_trans_verify_sorted(struct btree_trans *trans) { - int ret; - - ret = __bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - ret = btree_iter_traverse_error(iter, ret); +#ifdef CONFIG_BCACHEFS_DEBUG + struct btree_path *path, *prev = NULL; + unsigned i; - return ret; + trans_for_each_path_inorder(trans, path, i) { + BUG_ON(prev && btree_path_cmp(prev, path) > 0); + prev = path; + } +#endif } -/* Iterate across nodes (leaf and interior nodes) */ - -struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) +static inline void btree_path_swap(struct btree_trans *trans, + struct btree_path *l, struct btree_path *r) { - struct btree *b; - int ret; + swap(l->sorted_idx, r->sorted_idx); + swap(trans->sorted[l->sorted_idx], + trans->sorted[r->sorted_idx]); - EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS); + btree_path_verify_sorted_ref(trans, l); + btree_path_verify_sorted_ref(trans, r); +} - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; +static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path, + int cmp) +{ + struct btree_path *n; - b = iter->nodes[iter->level]; + if (cmp <= 0) { + n = prev_btree_path(trans, path); + if (n && btree_path_cmp(n, path) > 0) { + do { + btree_path_swap(trans, n, path); + n = prev_btree_path(trans, path); + } while (n && btree_path_cmp(n, path) > 0); - if (b) { - EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0); - iter->pos = b->key.k.p; + goto out; + } } - return b; + if (cmp >= 0) { + n = next_btree_path(trans, path); + if (n && btree_path_cmp(path, n) > 0) { + do { + btree_path_swap(trans, path, n); + n = next_btree_path(trans, path); + } while (n && btree_path_cmp(path, n) > 0); + } + } +out: + btree_trans_verify_sorted(trans); } -struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth) +static inline void btree_path_list_remove(struct btree_trans *trans, + struct btree_path *path) { - struct btree *b; - int ret; + unsigned i; - EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS); + EBUG_ON(path->sorted_idx >= trans->nr_sorted); - btree_iter_up(iter); + array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx); - if (!iter->nodes[iter->level]) - return NULL; + for (i = path->sorted_idx; i < trans->nr_sorted; i++) + trans->paths[trans->sorted[i]].sorted_idx = i; - /* parent node usually won't be locked: redo traversal if necessary */ - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; + path->sorted_idx = U8_MAX; - b = iter->nodes[iter->level]; - if (!b) - return b; + btree_trans_verify_sorted_refs(trans); +} - if (bkey_cmp(iter->pos, b->key.k.p) < 0) { - /* Haven't gotten to the end of the parent node: */ +static inline void btree_path_list_add(struct btree_trans *trans, + struct btree_path *pos, + struct btree_path *path) +{ + unsigned i; - /* ick: */ - iter->pos = iter->btree_id == BTREE_ID_INODES - ? 
btree_type_successor(iter->btree_id, iter->pos) - : bkey_successor(iter->pos); - iter->level = depth; + btree_trans_verify_sorted_refs(trans); - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; + path->sorted_idx = pos ? pos->sorted_idx + 1 : 0; - b = iter->nodes[iter->level]; - } + array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx); - iter->pos = b->key.k.p; + for (i = path->sorted_idx; i < trans->nr_sorted; i++) + trans->paths[trans->sorted[i]].sorted_idx = i; - return b; + btree_trans_verify_sorted_refs(trans); } -/* Iterate across keys (in leaf nodes only) */ - -void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) +void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) { - struct btree *b = iter->nodes[0]; - struct btree_node_iter *node_iter = &iter->node_iters[0]; - struct bkey_packed *k; + if (iter->path) + bch2_path_put(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + iter->path = NULL; +} - EBUG_ON(iter->level != 0); - EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); - EBUG_ON(!btree_node_locked(iter, 0)); - EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0); +static void __bch2_trans_iter_init(struct btree_trans *trans, + struct btree_iter *iter, + unsigned btree_id, struct bpos pos, + unsigned locks_want, + unsigned depth, + unsigned flags, + unsigned long ip) +{ + EBUG_ON(trans->restarted); + + if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) && + btree_node_type_is_extents(btree_id)) + flags |= BTREE_ITER_IS_EXTENTS; + + if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) && + !btree_type_has_snapshots(btree_id)) + flags &= ~BTREE_ITER_ALL_SNAPSHOTS; + + if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) && + btree_type_has_snapshots(btree_id)) + flags |= BTREE_ITER_FILTER_SNAPSHOTS; + + iter->trans = trans; + iter->path = NULL; + iter->btree_id = btree_id; + iter->min_depth = depth; + iter->flags = flags; + iter->snapshot = pos.snapshot; + iter->pos = pos; + iter->k.type = KEY_TYPE_deleted; + iter->k.p = pos; + iter->k.size = 0; +#ifdef CONFIG_BCACHEFS_DEBUG + iter->ip_allocated = ip; +#endif - while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) && - !btree_iter_pos_cmp_packed(b, &new_pos, k, - iter->flags & BTREE_ITER_IS_EXTENTS)) - bch2_btree_node_iter_advance(node_iter, b); + iter->path = bch2_path_get(trans, + flags & BTREE_ITER_CACHED, + btree_id, + iter->pos, + locks_want, + depth, + flags & BTREE_ITER_INTENT, ip); +} - if (!k && - !btree_iter_pos_cmp(new_pos, &b->key.k, - iter->flags & BTREE_ITER_IS_EXTENTS)) - iter->flags |= BTREE_ITER_AT_END_OF_LEAF; +void bch2_trans_iter_init(struct btree_trans *trans, + struct btree_iter *iter, + unsigned btree_id, struct bpos pos, + unsigned flags) +{ + __bch2_trans_iter_init(trans, iter, btree_id, pos, + 0, 0, flags, _RET_IP_); +} - iter->pos = new_pos; - iter->flags &= ~BTREE_ITER_UPTODATE; +void bch2_trans_node_iter_init(struct btree_trans *trans, + struct btree_iter *iter, + enum btree_id btree_id, + struct bpos pos, + unsigned locks_want, + unsigned depth, + unsigned flags) +{ + __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth, + BTREE_ITER_NOT_EXTENTS| + __BTREE_ITER_ALL_SNAPSHOTS| + BTREE_ITER_ALL_SNAPSHOTS| + flags, _RET_IP_); + BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH)); + BUG_ON(iter->path->level != depth); + BUG_ON(iter->min_depth != depth); } -void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) +void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter 
*src) { - EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */ - iter->pos = new_pos; - iter->flags &= ~BTREE_ITER_UPTODATE; + *dst = *src; + if (src->path) + __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT); } -void bch2_btree_iter_advance_pos(struct btree_iter *iter) +void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size) { - if (iter->flags & BTREE_ITER_UPTODATE && - !(iter->flags & BTREE_ITER_WITH_HOLES)) { - struct bkey_s_c k; + size_t new_top = trans->mem_top + size; + void *p; - __btree_iter_advance(iter); - k = __btree_iter_peek(iter); - if (likely(k.k)) { - iter->pos = bkey_start_pos(k.k); - return; + if (new_top > trans->mem_bytes) { + size_t old_bytes = trans->mem_bytes; + size_t new_bytes = roundup_pow_of_two(new_top); + void *new_mem; + + WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); + + new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS); + if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) { + new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL); + new_bytes = BTREE_TRANS_MEM_MAX; + kfree(trans->mem); + } + + if (!new_mem) + return ERR_PTR(-ENOMEM); + + trans->mem = new_mem; + trans->mem_bytes = new_bytes; + + if (old_bytes) { + trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes); + btree_trans_restart(trans); + return ERR_PTR(-EINTR); } } - /* - * We use iter->k instead of iter->pos for extents: iter->pos will be - * equal to the start of the extent we returned, but we need to advance - * to the end of the extent we returned. - */ - bch2_btree_iter_set_pos(iter, - btree_type_successor(iter->btree_id, iter->k.p)); + p = trans->mem + trans->mem_top; + trans->mem_top += size; + memset(p, 0, size); + return p; } -/* XXX: expensive */ -void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos) +/** + * bch2_trans_begin() - reset a transaction after a interrupted attempt + * @trans: transaction to reset + * + * While iterating over nodes or updating nodes a attempt to lock a btree + * node may return EINTR when the trylock fails. When this occurs + * bch2_trans_begin() should be called and the transaction retried. 
+ */ +void bch2_trans_begin(struct btree_trans *trans) { - /* incapable of rewinding across nodes: */ - BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0); + struct btree_insert_entry *i; + struct btree_path *path; + + trans_for_each_update(trans, i) + __btree_path_put(i->path, true); + + memset(&trans->journal_res, 0, sizeof(trans->journal_res)); + trans->extra_journal_res = 0; + trans->nr_updates = 0; + trans->mem_top = 0; + + trans->hooks = NULL; + trans->extra_journal_entries = NULL; + trans->extra_journal_entry_u64s = 0; + + if (trans->fs_usage_deltas) { + trans->fs_usage_deltas->used = 0; + memset(&trans->fs_usage_deltas->memset_start, 0, + (void *) &trans->fs_usage_deltas->memset_end - + (void *) &trans->fs_usage_deltas->memset_start); + } - iter->pos = pos; - iter->flags &= ~BTREE_ITER_UPTODATE; - __btree_iter_init(iter, iter->nodes[iter->level]); + trans_for_each_path(trans, path) { + path->should_be_locked = false; + + /* + * XXX: we probably shouldn't be doing this if the transaction + * was restarted, but currently we still overflow transaction + * iterators if we do that + */ + if (!path->ref && !path->preserve) + __bch2_path_free(trans, path); + else if (!path->ref) + path->preserve = false; + } + + bch2_trans_cond_resched(trans); + + if (trans->restarted) + bch2_btree_path_traverse_all(trans); + + trans->restarted = false; } -struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) +static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c) { - struct bkey_s_c k; - int ret; + size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX; + size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX; + void *p = NULL; - EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) != - (iter->btree_id == BTREE_ID_EXTENTS)); + BUG_ON(trans->used_mempool); - if (iter->flags & BTREE_ITER_UPTODATE) { - struct btree *b = iter->nodes[0]; - struct bkey_packed *k = - __bch2_btree_node_iter_peek_all(&iter->node_iters[0], b); - struct bkey_s_c ret = { - .k = &iter->k, - .v = bkeyp_val(&b->format, k) - }; +#ifdef __KERNEL__ + p = this_cpu_xchg(c->btree_paths_bufs->path , NULL); +#endif + if (!p) + p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS); - EBUG_ON(!btree_node_locked(iter, 0)); + trans->paths = p; p += paths_bytes; + trans->updates = p; p += updates_bytes; +} - if (debug_check_bkeys(iter->c)) - bch2_bkey_debugcheck(iter->c, b, ret); - return ret; - } +void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, + unsigned expected_nr_iters, + size_t expected_mem_bytes) + __acquires(&c->btree_trans_barrier) +{ + memset(trans, 0, sizeof(*trans)); + trans->c = c; + trans->ip = _RET_IP_; - while (1) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) { - iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); - return bkey_s_c_err(ret); - } + bch2_trans_alloc_paths(trans, c); - k = __btree_iter_peek(iter); - if (likely(k.k)) { - /* - * iter->pos should always be equal to the key we just - * returned - except extents can straddle iter->pos: - */ - if (!(iter->flags & BTREE_ITER_IS_EXTENTS) || - bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) - iter->pos = bkey_start_pos(k.k); + if (expected_mem_bytes) { + trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes); + trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL); - iter->flags |= BTREE_ITER_UPTODATE; - return k; + if (!unlikely(trans->mem)) { + trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL); + trans->mem_bytes = BTREE_TRANS_MEM_MAX; } + } 
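/*
 * (Editor's note: the sketch below is illustrative only and is not part of
 * this patch.  Because growing the transaction's memory buffer restarts the
 * transaction, callers of bch2_trans_kmalloc() must be prepared for an
 * ERR_PTR(-EINTR) return and retry from the top of their retry loop; the
 * helper name below is hypothetical:
 *
 *	static int example_reserve_key(struct btree_trans *trans, struct bpos pos)
 *	{
 *		struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new));
 *
 *		if (IS_ERR(new))
 *			return PTR_ERR(new);	// -EINTR: restart the transaction
 *
 *		bkey_init(&new->k);
 *		new->k.p = pos;
 *		return 0;
 *	}
 */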
- iter->pos = iter->nodes[0]->key.k.p; - - if (!bkey_cmp(iter->pos, POS_MAX)) { - iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); - bch2_btree_iter_unlock(iter); - return bkey_s_c_null; - } + trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); - iter->pos = btree_type_successor(iter->btree_id, iter->pos); - } + trans->pid = current->pid; + mutex_lock(&c->btree_trans_lock); + list_add(&trans->list, &c->btree_trans_list); + mutex_unlock(&c->btree_trans_lock); } -struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter) +static void check_btree_paths_leaked(struct btree_trans *trans) { - struct bkey_s_c k; - struct bkey n; - int ret; +#ifdef CONFIG_BCACHEFS_DEBUG + struct bch_fs *c = trans->c; + struct btree_path *path; - EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) != - (iter->btree_id == BTREE_ID_EXTENTS)); + trans_for_each_path(trans, path) + if (path->ref) + goto leaked; + return; +leaked: + bch_err(c, "btree paths leaked from %pS!", (void *) trans->ip); + trans_for_each_path(trans, path) + if (path->ref) + printk(KERN_ERR " btree %s %pS\n", + bch2_btree_ids[path->btree_id], + (void *) path->ip_allocated); + /* Be noisy about this: */ + bch2_fatal_error(c); +#endif +} - iter->flags &= ~BTREE_ITER_UPTODATE; +void bch2_trans_exit(struct btree_trans *trans) + __releases(&c->btree_trans_barrier) +{ + struct btree_insert_entry *i; + struct bch_fs *c = trans->c; - while (1) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) { - iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); - return bkey_s_c_err(ret); - } + bch2_trans_unlock(trans); - k = __btree_iter_peek_all(iter); -recheck: - if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) { - /* hole */ - bkey_init(&n); - n.p = iter->pos; - - if (iter->flags & BTREE_ITER_IS_EXTENTS) { - if (n.p.offset == KEY_OFFSET_MAX) { - iter->pos = bkey_successor(iter->pos); - goto recheck; - } + trans_for_each_update(trans, i) + __btree_path_put(i->path, true); + trans->nr_updates = 0; - if (!k.k) - k.k = &iter->nodes[0]->key.k; + check_btree_paths_leaked(trans); - bch2_key_resize(&n, - min_t(u64, KEY_SIZE_MAX, - (k.k->p.inode == n.p.inode - ? 
bkey_start_offset(k.k) - : KEY_OFFSET_MAX) - - n.p.offset)); + mutex_lock(&c->btree_trans_lock); + list_del(&trans->list); + mutex_unlock(&c->btree_trans_lock); - EBUG_ON(!n.size); - } + srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); - iter->k = n; - return (struct bkey_s_c) { &iter->k, NULL }; - } else if (!bkey_deleted(k.k)) { - return k; - } else { - __btree_iter_advance(iter); - } + bch2_journal_preres_put(&c->journal, &trans->journal_preres); + + if (trans->fs_usage_deltas) { + if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) == + REPLICAS_DELTA_LIST_MAX) + mempool_free(trans->fs_usage_deltas, + &c->replicas_delta_pool); + else + kfree(trans->fs_usage_deltas); } + + if (trans->mem_bytes == BTREE_TRANS_MEM_MAX) + mempool_free(trans->mem, &c->btree_trans_mem_pool); + else + kfree(trans->mem); + +#ifdef __KERNEL__ + /* + * Userspace doesn't have a real percpu implementation: + */ + trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths); +#endif + + if (trans->paths) + mempool_free(trans->paths, &c->btree_paths_pool); + + trans->mem = (void *) 0x1; + trans->paths = (void *) 0x1; } -void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, - enum btree_id btree_id, struct bpos pos, - unsigned locks_want, unsigned depth, - unsigned flags) +static void __maybe_unused +bch2_btree_path_node_to_text(struct printbuf *out, + struct btree_bkey_cached_common *_b, + bool cached) { - EBUG_ON(depth >= BTREE_MAX_DEPTH); - EBUG_ON(locks_want > BTREE_MAX_DEPTH); + pr_buf(out, " l=%u %s:", + _b->level, bch2_btree_ids[_b->btree_id]); + bch2_bpos_to_text(out, btree_node_pos(_b, cached)); +} - iter->c = c; - iter->pos = pos; - iter->flags = flags; - iter->btree_id = btree_id; - iter->level = depth; - iter->locks_want = locks_want; - iter->nodes_locked = 0; - iter->nodes_intent_locked = 0; - memset(iter->nodes, 0, sizeof(iter->nodes)); - iter->nodes[iter->level] = BTREE_ITER_NOT_END; - iter->next = iter; +static bool trans_has_locks(struct btree_trans *trans) +{ + struct btree_path *path; - prefetch(c->btree_roots[btree_id].b); + trans_for_each_path(trans, path) + if (path->nodes_locked) + return true; + return false; } -void bch2_btree_iter_unlink(struct btree_iter *iter) +void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) { - struct btree_iter *linked; + struct btree_trans *trans; + struct btree_path *path; + struct btree *b; + unsigned l; - __bch2_btree_iter_unlock(iter); + mutex_lock(&c->btree_trans_lock); + list_for_each_entry(trans, &c->btree_trans_list, list) { + if (!trans_has_locks(trans)) + continue; - if (!btree_iter_linked(iter)) - return; + pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip); - for_each_linked_btree_iter(iter, linked) { + trans_for_each_path(trans, path) { + if (!path->nodes_locked) + continue; - if (linked->next == iter) { - linked->next = iter->next; - return; + pr_buf(out, " path %u %c l=%u %s:", + path->idx, + path->cached ? 'c' : 'b', + path->level, + bch2_btree_ids[path->btree_id]); + bch2_bpos_to_text(out, path->pos); + pr_buf(out, "\n"); + + for (l = 0; l < BTREE_MAX_DEPTH; l++) { + if (btree_node_locked(path, l)) { + pr_buf(out, " %s l=%u ", + btree_node_intent_locked(path, l) ? 
"i" : "r", l); + bch2_btree_path_node_to_text(out, + (void *) path->l[l].b, + path->cached); + pr_buf(out, "\n"); + } + } } - } - BUG(); + b = READ_ONCE(trans->locking); + if (b) { + path = &trans->paths[trans->locking_path_idx]; + pr_buf(out, " locking path %u %c l=%u %s:", + trans->locking_path_idx, + path->cached ? 'c' : 'b', + trans->locking_level, + bch2_btree_ids[trans->locking_btree_id]); + bch2_bpos_to_text(out, trans->locking_pos); + + pr_buf(out, " node "); + bch2_btree_path_node_to_text(out, + (void *) b, path->cached); + pr_buf(out, "\n"); + } + } + mutex_unlock(&c->btree_trans_lock); } -void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new) +void bch2_fs_btree_iter_exit(struct bch_fs *c) { - BUG_ON(btree_iter_linked(new)); - - new->next = iter->next; - iter->next = new; - - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { - unsigned nr_iters = 1; - - for_each_linked_btree_iter(iter, new) - nr_iters++; - - BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE); - } + mempool_exit(&c->btree_trans_mem_pool); + mempool_exit(&c->btree_paths_pool); + cleanup_srcu_struct(&c->btree_trans_barrier); } -void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src) +int bch2_fs_btree_iter_init(struct bch_fs *c) { - __bch2_btree_iter_unlock(dst); - memcpy(dst, src, offsetof(struct btree_iter, next)); - dst->nodes_locked = dst->nodes_intent_locked = 0; + unsigned nr = BTREE_ITER_MAX; + + INIT_LIST_HEAD(&c->btree_trans_list); + mutex_init(&c->btree_trans_lock); + + return init_srcu_struct(&c->btree_trans_barrier) ?: + mempool_init_kmalloc_pool(&c->btree_paths_pool, 1, + sizeof(struct btree_path) * nr + + sizeof(struct btree_insert_entry) * nr) ?: + mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1, + BTREE_TRANS_MEM_MAX); }