X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_iter.c;h=365794dc4dcd66c4b18a1519987e01ed6628d25d;hb=6b1f79d5df9f2735192ed1a40c711cf131d4f43e;hp=8955555d6603267fd491d15919ac9259957f7eca;hpb=f96ba8e0aac91f2650270e9639359243cb9ac2d1;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index 8955555..365794d 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -2,513 +2,458 @@ #include "bcachefs.h" #include "bkey_methods.h" +#include "bkey_buf.h" #include "btree_cache.h" #include "btree_iter.h" +#include "btree_key_cache.h" #include "btree_locking.h" +#include "btree_update.h" #include "debug.h" +#include "error.h" #include "extents.h" +#include "journal.h" +#include "recovery.h" +#include "replicas.h" +#include "subvolume.h" +#include "trace.h" +#include #include -#include -static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *, - struct btree_iter_level *, - struct bkey *); +static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *); +static inline void btree_path_list_add(struct btree_trans *, struct btree_path *, + struct btree_path *); -#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1) -#define BTREE_ITER_NO_NODE_DROP ((struct btree *) 2) -#define BTREE_ITER_NO_NODE_LOCK_ROOT ((struct btree *) 3) -#define BTREE_ITER_NO_NODE_UP ((struct btree *) 4) -#define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5) -#define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6) -#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7) - -static inline bool is_btree_node(struct btree_iter *iter, unsigned l) +static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter) { - return l < BTREE_MAX_DEPTH && - (unsigned long) iter->l[l].b >= 128; +#ifdef TRACK_PATH_ALLOCATED + return iter->ip_allocated; +#else + return 0; +#endif } -/* Returns < 0 if @k is before iter pos, > 0 if @k is after */ -static inline int __btree_iter_pos_cmp(struct btree_iter *iter, - const struct btree *b, - const struct bkey_packed *k, - bool interior_node) -{ - int cmp = bkey_cmp_left_packed(b, k, &iter->pos); +static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *); - if (cmp) - return cmp; - if (bkey_deleted(k)) - return -1; +/* + * Unlocks before scheduling + * Note: does not revalidate iterator + */ +static inline int bch2_trans_cond_resched(struct btree_trans *trans) +{ + if (need_resched() || race_fault()) { + bch2_trans_unlock(trans); + schedule(); + return bch2_trans_relock(trans); + } else { + return 0; + } +} +static inline int __btree_path_cmp(const struct btree_path *l, + enum btree_id r_btree_id, + bool r_cached, + struct bpos r_pos, + unsigned r_level) +{ /* - * Normally, for extents we want the first key strictly greater than - * the iterator position - with the exception that for interior nodes, - * we don't want to advance past the last key if the iterator position - * is POS_MAX: + * Must match lock ordering as defined by __bch2_btree_node_lock: */ - if (iter->flags & BTREE_ITER_IS_EXTENTS && - (!interior_node || - bkey_cmp_left_packed_byval(b, k, POS_MAX))) - return -1; - return 1; + return cmp_int(l->btree_id, r_btree_id) ?: + cmp_int((int) l->cached, (int) r_cached) ?: + bpos_cmp(l->pos, r_pos) ?: + -cmp_int(l->level, r_level); } -static inline int btree_iter_pos_cmp(struct btree_iter *iter, - const struct btree *b, - const struct bkey_packed *k) +static inline int btree_path_cmp(const struct btree_path *l, + const struct btree_path *r) { - return 
__btree_iter_pos_cmp(iter, b, k, b->level != 0); + return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level); } -/* Btree node locking: */ - -/* - * Updates the saved lock sequence number, so that bch2_btree_node_relock() will - * succeed: - */ -void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter) +static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p) { - struct btree_iter *linked; - - EBUG_ON(iter->l[b->level].b != b); - EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq); - - trans_for_each_iter_with_node(iter->trans, b, linked) - linked->l[b->level].lock_seq += 2; + /* Are we iterating over keys in all snapshots? */ + if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) { + p = bpos_successor(p); + } else { + p = bpos_nosnap_successor(p); + p.snapshot = iter->snapshot; + } - six_unlock_write(&b->lock); + return p; } -void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter) +static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p) { - struct btree_iter *linked; - unsigned readers = 0; - - EBUG_ON(btree_node_read_locked(iter, b->level)); - - trans_for_each_iter(iter->trans, linked) - if (linked->l[b->level].b == b && - btree_node_read_locked(linked, b->level)) - readers++; + /* Are we iterating over keys in all snapshots? */ + if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) { + p = bpos_predecessor(p); + } else { + p = bpos_nosnap_predecessor(p); + p.snapshot = iter->snapshot; + } - /* - * Must drop our read locks before calling six_lock_write() - - * six_unlock() won't do wakeups until the reader count - * goes to 0, and it's safe because we have the node intent - * locked: - */ - atomic64_sub(__SIX_VAL(read_lock, readers), - &b->lock.state.counter); - btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write); - atomic64_add(__SIX_VAL(read_lock, readers), - &b->lock.state.counter); + return p; } -bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level) +static inline struct bpos btree_iter_search_key(struct btree_iter *iter) { - struct btree *b = btree_iter_node(iter, level); - int want = __btree_lock_want(iter, level); + struct bpos pos = iter->pos; - if (!is_btree_node(iter, level)) - return false; - - if (race_fault()) - return false; + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + !bkey_eq(pos, POS_MAX)) + pos = bkey_successor(iter, pos); + return pos; +} - if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) || - (btree_node_lock_seq_matches(iter, b, level) && - btree_node_lock_increment(iter, b, level, want))) { - mark_btree_node_locked(iter, level, want); - return true; - } else { - return false; - } +static inline bool btree_path_pos_before_node(struct btree_path *path, + struct btree *b) +{ + return bpos_lt(path->pos, b->data->min_key); } -static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level) +static inline bool btree_path_pos_after_node(struct btree_path *path, + struct btree *b) { - struct btree *b = iter->l[level].b; + return bpos_gt(path->pos, b->key.k.p); +} - EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED); +static inline bool btree_path_pos_in_node(struct btree_path *path, + struct btree *b) +{ + return path->btree_id == b->c.btree_id && + !btree_path_pos_before_node(path, b) && + !btree_path_pos_after_node(path, b); +} - if (!is_btree_node(iter, level)) - return false; +/* Btree iterator: */ - if (btree_node_intent_locked(iter, level)) - return true; +#ifdef CONFIG_BCACHEFS_DEBUG - if (race_fault()) - return 
false; +static void bch2_btree_path_verify_cached(struct btree_trans *trans, + struct btree_path *path) +{ + struct bkey_cached *ck; + bool locked = btree_node_locked(path, 0); - if (btree_node_locked(iter, level) - ? six_lock_tryupgrade(&b->lock) - : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq)) - goto success; + if (!bch2_btree_node_relock(trans, path, 0)) + return; - if (btree_node_lock_seq_matches(iter, b, level) && - btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) { - btree_node_unlock(iter, level); - goto success; - } + ck = (void *) path->l[0].b; + BUG_ON(ck->key.btree_id != path->btree_id || + !bkey_eq(ck->key.pos, path->pos)); - return false; -success: - mark_btree_node_intent_locked(iter, level); - return true; + if (!locked) + btree_node_unlock(trans, path, 0); } -static inline bool btree_iter_get_locks(struct btree_iter *iter, - bool upgrade, bool trace) +static void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned level) { - unsigned l = iter->level; - int fail_idx = -1; - - do { - if (!btree_iter_node(iter, l)) - break; + struct btree_path_level *l; + struct btree_node_iter tmp; + bool locked; + struct bkey_packed *p, *k; + struct printbuf buf1 = PRINTBUF; + struct printbuf buf2 = PRINTBUF; + struct printbuf buf3 = PRINTBUF; + const char *msg; - if (!(upgrade - ? bch2_btree_node_upgrade(iter, l) - : bch2_btree_node_relock(iter, l))) { - if (trace) - (upgrade - ? trace_node_upgrade_fail - : trace_node_relock_fail)(l, iter->l[l].lock_seq, - is_btree_node(iter, l) - ? 0 - : (unsigned long) iter->l[l].b, - is_btree_node(iter, l) - ? iter->l[l].b->lock.state.seq - : 0); - - fail_idx = l; - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - } + if (!bch2_debug_check_iterators) + return; - l++; - } while (l < iter->locks_want); + l = &path->l[level]; + tmp = l->iter; + locked = btree_node_locked(path, level); - /* - * When we fail to get a lock, we have to ensure that any child nodes - * can't be relocked so bch2_btree_iter_traverse has to walk back up to - * the node that we failed to relock: - */ - while (fail_idx >= 0) { - btree_node_unlock(iter, fail_idx); - iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS; - --fail_idx; + if (path->cached) { + if (!level) + bch2_btree_path_verify_cached(trans, path); + return; } - if (iter->uptodate == BTREE_ITER_NEED_RELOCK) - iter->uptodate = BTREE_ITER_NEED_PEEK; - - bch2_btree_trans_verify_locks(iter->trans); - - return iter->uptodate < BTREE_ITER_NEED_RELOCK; -} + if (!btree_path_node(path, level)) + return; -/* Slowpath: */ -bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, - unsigned level, - struct btree_iter *iter, - enum six_lock_type type) -{ - struct btree_iter *linked; - bool ret = true; + if (!bch2_btree_node_relock_notrace(trans, path, level)) + return; - /* Check if it's safe to block: */ - trans_for_each_iter(iter->trans, linked) { - if (!linked->nodes_locked) - continue; + BUG_ON(!btree_path_pos_in_node(path, l->b)); - /* * Must lock btree nodes in key order: */ - if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0) - ret = false; + bch2_btree_node_iter_verify(&l->iter, l->b); - /* - * Can't block taking an intent lock if we have _any_ nodes read - * locked: - * - * - Our read lock blocks another thread with an intent lock on - * the same node from getting a write lock, and thus from - * dropping its intent lock - * - * - And the other thread may have multiple nodes intent locked: - * both the node we want to intent 
lock, and the node we - * already have read locked - deadlock: - */ - if (type == SIX_LOCK_intent && - linked->nodes_locked != linked->nodes_intent_locked) { - if (!(iter->trans->nounlock)) { - linked->locks_want = max_t(unsigned, - linked->locks_want, - __fls(linked->nodes_locked) + 1); - btree_iter_get_locks(linked, true, false); - } - ret = false; - } + /* + * For interior nodes, the iterator will have skipped past deleted keys: + */ + p = level + ? bch2_btree_node_iter_prev(&tmp, l->b) + : bch2_btree_node_iter_prev_all(&tmp, l->b); + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); - /* - * Interior nodes must be locked before their descendants: if - * another iterator has possible descendants locked of the node - * we're about to lock, it must have the ancestors locked too: - */ - if (linked->btree_id == iter->btree_id && - level > __fls(linked->nodes_locked)) { - if (!(iter->trans->nounlock)) { - linked->locks_want = - max(level + 1, max_t(unsigned, - linked->locks_want, - iter->locks_want)); - btree_iter_get_locks(linked, true, false); - } - ret = false; - } + if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) { + msg = "before"; + goto err; } - if (unlikely(!ret)) { - trace_trans_restart_would_deadlock(iter->trans->ip); - return false; + if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { + msg = "after"; + goto err; } - __btree_node_lock_type(iter->trans->c, b, type); - return true; -} + if (!locked) + btree_node_unlock(trans, path, level); + return; +err: + bch2_bpos_to_text(&buf1, path->pos); -/* Btree iterator locking: */ + if (p) { + struct bkey uk = bkey_unpack_key(l->b, p); -#ifdef CONFIG_BCACHEFS_DEBUG -void bch2_btree_iter_verify_locks(struct btree_iter *iter) -{ - unsigned l; + bch2_bkey_to_text(&buf2, &uk); + } else { + prt_printf(&buf2, "(none)"); + } - for (l = 0; btree_iter_node(iter, l); l++) { - if (iter->uptodate >= BTREE_ITER_NEED_RELOCK && - !btree_node_locked(iter, l)) - continue; + if (k) { + struct bkey uk = bkey_unpack_key(l->b, k); - BUG_ON(btree_lock_want(iter, l) != - btree_node_locked_type(iter, l)); + bch2_bkey_to_text(&buf3, &uk); + } else { + prt_printf(&buf3, "(none)"); } + + panic("path should be %s key at level %u:\n" + "path pos %s\n" + "prev key %s\n" + "cur key %s\n", + msg, level, buf1.buf, buf2.buf, buf3.buf); } -void bch2_btree_trans_verify_locks(struct btree_trans *trans) +static void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) { - struct btree_iter *iter; + struct bch_fs *c = trans->c; + unsigned i; + + EBUG_ON(path->btree_id >= BTREE_ID_NR); + + for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) { + if (!path->l[i].b) { + BUG_ON(!path->cached && + c->btree_roots[path->btree_id].b->c.level > i); + break; + } + + bch2_btree_path_verify_level(trans, path, i); + } - trans_for_each_iter(trans, iter) - bch2_btree_iter_verify_locks(iter); + bch2_btree_path_verify_locks(path); } -#endif -__flatten -static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace) +void bch2_trans_verify_paths(struct btree_trans *trans) { - return iter->uptodate >= BTREE_ITER_NEED_RELOCK - ? 
btree_iter_get_locks(iter, false, trace) - : true; + struct btree_path *path; + + trans_for_each_path(trans, path) + bch2_btree_path_verify(trans, path); } -bool __bch2_btree_iter_upgrade(struct btree_iter *iter, - unsigned new_locks_want) +static void bch2_btree_iter_verify(struct btree_iter *iter) { - struct btree_iter *linked; + struct btree_trans *trans = iter->trans; - EBUG_ON(iter->locks_want >= new_locks_want); + BUG_ON(iter->btree_id >= BTREE_ID_NR); - iter->locks_want = new_locks_want; + BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached); - if (btree_iter_get_locks(iter, true, true)) - return true; + BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); - /* - * Ancestor nodes must be locked before child nodes, so set locks_want - * on iterators that might lock ancestors before us to avoid getting - * -EINTR later: - */ - trans_for_each_iter(iter->trans, linked) - if (linked != iter && - linked->btree_id == iter->btree_id && - linked->locks_want < new_locks_want) { - linked->locks_want = new_locks_want; - btree_iter_get_locks(linked, true, false); - } + BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + !btree_type_has_snapshots(iter->btree_id)); - return false; + if (iter->update_path) + bch2_btree_path_verify(trans, iter->update_path); + bch2_btree_path_verify(trans, iter->path); } -bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter, - unsigned new_locks_want) +static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) { - unsigned l = iter->level; - - EBUG_ON(iter->locks_want >= new_locks_want); - - iter->locks_want = new_locks_want; - - do { - if (!btree_iter_node(iter, l)) - break; - - if (!bch2_btree_node_upgrade(iter, l)) { - iter->locks_want = l; - return false; - } + BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + !iter->pos.snapshot); - l++; - } while (l < iter->locks_want); + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); - return true; + BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || + bkey_gt(iter->pos, iter->k.p)); } -void __bch2_btree_iter_downgrade(struct btree_iter *iter, - unsigned downgrade_to) +static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { - struct btree_iter *linked; - unsigned l; + struct btree_trans *trans = iter->trans; + struct btree_iter copy; + struct bkey_s_c prev; + int ret = 0; - /* - * We downgrade linked iterators as well because btree_iter_upgrade - * might have had to modify locks_want on linked iterators due to lock - * ordering: - */ - trans_for_each_iter(iter->trans, linked) { - unsigned new_locks_want = downgrade_to ?: - (linked->flags & BTREE_ITER_INTENT ? 
1 : 0); + if (!bch2_debug_check_iterators) + return 0; - if (linked->locks_want <= new_locks_want) - continue; + if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) + return 0; - linked->locks_want = new_locks_want; + if (bkey_err(k) || !k.k) + return 0; - while (linked->nodes_locked && - (l = __fls(linked->nodes_locked)) >= linked->locks_want) { - if (l > linked->level) { - btree_node_unlock(linked, l); - } else { - if (btree_node_intent_locked(linked, l)) { - six_lock_downgrade(&linked->l[l].b->lock); - linked->nodes_intent_locked ^= 1 << l; - } - break; - } - } - } + BUG_ON(!bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)); - bch2_btree_trans_verify_locks(iter->trans); -} + bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos, + BTREE_ITER_NOPRESERVE| + BTREE_ITER_ALL_SNAPSHOTS); + prev = bch2_btree_iter_prev(©); + if (!prev.k) + goto out; -/* Btree transaction locking: */ + ret = bkey_err(prev); + if (ret) + goto out; -bool bch2_trans_relock(struct btree_trans *trans) -{ - struct btree_iter *iter; - bool ret = true; + if (bkey_eq(prev.k->p, k.k->p) && + bch2_snapshot_is_ancestor(trans->c, iter->snapshot, + prev.k->p.snapshot) > 0) { + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; - trans_for_each_iter(trans, iter) - if (iter->uptodate == BTREE_ITER_NEED_RELOCK) - ret &= bch2_btree_iter_relock(iter, true); + bch2_bkey_to_text(&buf1, k.k); + bch2_bkey_to_text(&buf2, prev.k); + panic("iter snap %u\n" + "k %s\n" + "prev %s\n", + iter->snapshot, + buf1.buf, buf2.buf); + } +out: + bch2_trans_iter_exit(trans, ©); return ret; } -void bch2_trans_unlock(struct btree_trans *trans) +void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, + struct bpos pos, bool key_cache) { - struct btree_iter *iter; + struct btree_path *path; + unsigned idx; + struct printbuf buf = PRINTBUF; - trans_for_each_iter(trans, iter) - __bch2_btree_iter_unlock(iter); -} + btree_trans_sort_paths(trans); -/* Btree iterator: */ + trans_for_each_path_inorder(trans, path, idx) { + int cmp = cmp_int(path->btree_id, id) ?: + cmp_int(path->cached, key_cache); -#ifdef CONFIG_BCACHEFS_DEBUG + if (cmp > 0) + break; + if (cmp < 0) + continue; -static void __bch2_btree_iter_verify(struct btree_iter *iter, - struct btree *b) -{ - struct btree_iter_level *l = &iter->l[b->level]; - struct btree_node_iter tmp = l->iter; - struct bkey_packed *k; + if (!btree_node_locked(path, 0) || + !path->should_be_locked) + continue; - if (!debug_check_iterators(iter->trans->c)) - return; + if (!key_cache) { + if (bkey_ge(pos, path->l[0].b->data->min_key) && + bkey_le(pos, path->l[0].b->key.k.p)) + return; + } else { + if (bkey_eq(pos, path->pos)) + return; + } + } - if (iter->uptodate > BTREE_ITER_NEED_PEEK) - return; + bch2_dump_trans_paths_updates(trans); + bch2_bpos_to_text(&buf, pos); - bch2_btree_node_iter_verify(&l->iter, b); + panic("not locked: %s %s%s\n", + bch2_btree_ids[id], buf.buf, + key_cache ? " cached" : ""); +} - /* - * For interior nodes, the iterator will have skipped past - * deleted keys: - * - * For extents, the iterator may have skipped past deleted keys (but not - * whiteouts) - */ - k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS - ? 
bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard) - : bch2_btree_node_iter_prev_all(&tmp, b); - if (k && btree_iter_pos_cmp(iter, b, k) > 0) { - char buf[100]; - struct bkey uk = bkey_unpack_key(b, k); +#else - bch2_bkey_to_text(&PBUF(buf), &uk); - panic("prev key should be before iter pos:\n%s\n%llu:%llu\n", - buf, iter->pos.inode, iter->pos.offset); - } +static inline void bch2_btree_path_verify_level(struct btree_trans *trans, + struct btree_path *path, unsigned l) {} +static inline void bch2_btree_path_verify(struct btree_trans *trans, + struct btree_path *path) {} +static inline void bch2_btree_iter_verify(struct btree_iter *iter) {} +static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {} +static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; } + +#endif - k = bch2_btree_node_iter_peek_all(&l->iter, b); - if (k && btree_iter_pos_cmp(iter, b, k) < 0) { - char buf[100]; - struct bkey uk = bkey_unpack_key(b, k); +/* Btree path: fixups after btree updates */ - bch2_bkey_to_text(&PBUF(buf), &uk); - panic("iter should be after current key:\n" - "iter pos %llu:%llu\n" - "cur key %s\n", - iter->pos.inode, iter->pos.offset, buf); - } +static void btree_node_iter_set_set_pos(struct btree_node_iter *iter, + struct btree *b, + struct bset_tree *t, + struct bkey_packed *k) +{ + struct btree_node_iter_set *set; + + btree_node_iter_for_each(iter, set) + if (set->end == t->end_offset) { + set->k = __btree_node_key_to_offset(b, k); + bch2_btree_node_iter_sort(iter, b); + return; + } - BUG_ON(iter->uptodate == BTREE_ITER_UPTODATE && - (iter->flags & BTREE_ITER_TYPE) == BTREE_ITER_KEYS && - !bkey_whiteout(&iter->k) && - bch2_btree_node_iter_end(&l->iter)); + bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t)); } -void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b) +static void __bch2_btree_path_fix_key_modified(struct btree_path *path, + struct btree *b, + struct bkey_packed *where) { - struct btree_iter *linked; + struct btree_path_level *l = &path->l[b->c.level]; - if (!debug_check_iterators(iter->trans->c)) + if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b)) return; - trans_for_each_iter_with_node(iter->trans, b, linked) - __bch2_btree_iter_verify(linked, b); + if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0) + bch2_btree_node_iter_advance(&l->iter, l->b); } -#else - -static inline void __bch2_btree_iter_verify(struct btree_iter *iter, - struct btree *b) {} +void bch2_btree_path_fix_key_modified(struct btree_trans *trans, + struct btree *b, + struct bkey_packed *where) +{ + struct btree_path *path; -#endif + trans_for_each_path_with_node(trans, b, path) { + __bch2_btree_path_fix_key_modified(path, b, where); + bch2_btree_path_verify_level(trans, path, b->c.level); + } +} -static void __bch2_btree_node_iter_fix(struct btree_iter *iter, - struct btree *b, - struct btree_node_iter *node_iter, - struct bset_tree *t, - struct bkey_packed *where, - unsigned clobber_u64s, - unsigned new_u64s) +static void __bch2_btree_node_iter_fix(struct btree_path *path, + struct btree *b, + struct btree_node_iter *node_iter, + struct bset_tree *t, + struct bkey_packed *where, + unsigned clobber_u64s, + unsigned new_u64s) { const struct bkey_packed *end = btree_bkey_last(b, t); struct btree_node_iter_set *set; unsigned offset = __btree_node_key_to_offset(b, where); int shift = new_u64s - clobber_u64s; unsigned old_end = t->end_offset - shift; + unsigned orig_iter_pos = node_iter->data[0].k; + bool 
iter_current_key_modified = + orig_iter_pos >= offset && + orig_iter_pos <= offset + clobber_u64s; btree_node_iter_for_each(node_iter, set) if (set->end == old_end) @@ -516,18 +461,13 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, /* didn't find the bset in the iterator - might have to readd it: */ if (new_u64s && - btree_iter_pos_cmp(iter, b, where) > 0) { - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); - + bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { bch2_btree_node_iter_push(node_iter, b, where, end); - - if (!b->level && - node_iter == &iter->l[0].iter) - bkey_disassemble(b, - bch2_btree_node_iter_peek_all(node_iter, b), - &iter->k); + goto fixup_done; + } else { + /* Iterator is after key that changed */ + return; } - return; found: set->end = t->end_offset; @@ -536,122 +476,93 @@ found: return; if (new_u64s && - btree_iter_pos_cmp(iter, b, where) > 0) { + bkey_iter_pos_cmp(b, where, &path->pos) >= 0) { set->k = offset; } else if (set->k < offset + clobber_u64s) { set->k = offset + new_u64s; if (set->k == set->end) bch2_btree_node_iter_set_drop(node_iter, set); } else { + /* Iterator is after key that changed */ set->k = (int) set->k + shift; - goto iter_current_key_not_modified; + return; } - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); - bch2_btree_node_iter_sort(node_iter, b); - if (!b->level && node_iter == &iter->l[0].iter) { - /* - * not legal to call bkey_debugcheck() here, because we're - * called midway through the update path after update has been - * marked but before deletes have actually happened: - */ -#if 0 - __btree_iter_peek_all(iter, &iter->l[0], &iter->k); -#endif - struct btree_iter_level *l = &iter->l[0]; - struct bkey_packed *k = - bch2_btree_node_iter_peek_all(&l->iter, l->b); - - if (unlikely(!k)) - iter->k.type = KEY_TYPE_deleted; - else - bkey_disassemble(l->b, k, &iter->k); - } -iter_current_key_not_modified: +fixup_done: + if (node_iter->data[0].k != orig_iter_pos) + iter_current_key_modified = true; /* - * Interior nodes are special because iterators for interior nodes don't - * obey the usual invariants regarding the iterator position: - * - * We may have whiteouts that compare greater than the iterator - * position, and logically should be in the iterator, but that we - * skipped past to find the first live key greater than the iterator - * position. This becomes an issue when we insert a new key that is - * greater than the current iterator position, but smaller than the - * whiteouts we've already skipped past - this happens in the course of - * a btree split. - * - * We have to rewind the iterator past to before those whiteouts here, - * else bkey_node_iter_prev() is not going to work and who knows what - * else would happen. And we have to do it manually, because here we've - * already done the insert and the iterator is currently inconsistent: - * - * We've got multiple competing invariants, here - we have to be careful - * about rewinding iterators for interior nodes, because they should - * always point to the key for the child node the btree iterator points - * to. + * When a new key is added, and the node iterator now points to that + * key, the iterator might have skipped past deleted keys that should + * come after the key the iterator now points to. 
We have to rewind to + * before those deleted keys - otherwise + * bch2_btree_node_iter_prev_all() breaks: */ - if (b->level && new_u64s && - btree_iter_pos_cmp(iter, b, where) > 0) { + if (!bch2_btree_node_iter_end(node_iter) && + iter_current_key_modified && + b->c.level) { struct bset_tree *t; - struct bkey_packed *k; + struct bkey_packed *k, *k2, *p; + + k = bch2_btree_node_iter_peek_all(node_iter, b); for_each_bset(b, t) { - if (bch2_bkey_to_bset(b, where) == t) + bool set_pos = false; + + if (node_iter->data[0].end == t->end_offset) continue; - k = bch2_bkey_prev_all(b, t, - bch2_btree_node_iter_bset_pos(node_iter, b, t)); - if (k && - bkey_iter_cmp(b, k, where) > 0) { - struct btree_node_iter_set *set; - unsigned offset = - __btree_node_key_to_offset(b, bkey_next(k)); - - btree_node_iter_for_each(node_iter, set) - if (set->k == offset) { - set->k = __btree_node_key_to_offset(b, k); - bch2_btree_node_iter_sort(node_iter, b); - goto next_bset; - } - - bch2_btree_node_iter_push(node_iter, b, k, - btree_bkey_last(b, t)); + k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t); + + while ((p = bch2_bkey_prev_all(b, t, k2)) && + bkey_iter_cmp(b, k, p) < 0) { + k2 = p; + set_pos = true; } -next_bset: - t = t; + + if (set_pos) + btree_node_iter_set_set_pos(node_iter, + b, t, k2); } } } -void bch2_btree_node_iter_fix(struct btree_iter *iter, +void bch2_btree_node_iter_fix(struct btree_trans *trans, + struct btree_path *path, struct btree *b, struct btree_node_iter *node_iter, struct bkey_packed *where, unsigned clobber_u64s, unsigned new_u64s) { - struct bset_tree *t = bch2_bkey_to_bset(b, where); - struct btree_iter *linked; + struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where); + struct btree_path *linked; + + if (node_iter != &path->l[b->c.level].iter) { + __bch2_btree_node_iter_fix(path, b, node_iter, t, + where, clobber_u64s, new_u64s); - if (node_iter != &iter->l[b->level].iter) - __bch2_btree_node_iter_fix(iter, b, node_iter, t, - where, clobber_u64s, new_u64s); + if (bch2_debug_check_iterators) + bch2_btree_node_iter_verify(node_iter, b); + } - trans_for_each_iter_with_node(iter->trans, b, linked) + trans_for_each_path_with_node(trans, b, linked) { __bch2_btree_node_iter_fix(linked, b, - &linked->l[b->level].iter, t, - where, clobber_u64s, new_u64s); + &linked->l[b->c.level].iter, t, + where, clobber_u64s, new_u64s); + bch2_btree_path_verify_level(trans, linked, b->c.level); + } } -static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter, - struct btree_iter_level *l, +/* Btree path level: pointer to a particular btree node and node iter */ + +static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c, + struct btree_path_level *l, struct bkey *u, struct bkey_packed *k) { - struct bkey_s_c ret; - if (unlikely(!k)) { /* * signal to bch2_btree_iter_peek_slot() that we're currently at @@ -661,39 +572,54 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter, return bkey_s_c_null; } - ret = bkey_disassemble(l->b, k, u); - - if (debug_check_bkeys(iter->trans->c)) - bch2_bkey_debugcheck(iter->trans->c, l->b, ret); - - return ret; + return bkey_disassemble(l->b, k, u); } -/* peek_all() doesn't skip deleted keys */ -static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter, - struct btree_iter_level *l, - struct bkey *u) +static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c, + struct btree_path_level *l, + struct bkey *u) { - return __btree_iter_unpack(iter, l, u, + return __btree_iter_unpack(c, l, u, 
bch2_btree_node_iter_peek_all(&l->iter, l->b)); } -static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, - struct btree_iter_level *l) +static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) { - return __btree_iter_unpack(iter, l, &iter->k, + struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u, bch2_btree_node_iter_peek(&l->iter, l->b)); + + path->pos = k.k ? k.k->p : l->b->key.k.p; + trans->paths_sorted = false; + bch2_btree_path_verify_level(trans, path, l - path->l); + return k; +} + +static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans, + struct btree_path *path, + struct btree_path_level *l, + struct bkey *u) +{ + struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u, + bch2_btree_node_iter_prev(&l->iter, l->b)); + + path->pos = k.k ? k.k->p : l->b->data->min_key; + trans->paths_sorted = false; + bch2_btree_path_verify_level(trans, path, l - path->l); + return k; } -static inline bool btree_iter_advance_to_pos(struct btree_iter *iter, - struct btree_iter_level *l, +static inline bool btree_path_advance_to_pos(struct btree_path *path, + struct btree_path_level *l, int max_advance) { struct bkey_packed *k; int nr_advanced = 0; while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) && - btree_iter_pos_cmp(iter, l->b, k) < 0) { + bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) { if (max_advance > 0 && nr_advanced >= max_advance) return false; @@ -704,281 +630,363 @@ static inline bool btree_iter_advance_to_pos(struct btree_iter *iter, return true; } -/* - * Verify that iterator for parent node points to child node: - */ -static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) +static inline void __btree_path_level_init(struct btree_path *path, + unsigned level) { - struct btree_iter_level *l; - unsigned plevel; - bool parent_locked; - struct bkey_packed *k; - - if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - return; - - plevel = b->level + 1; - if (!btree_iter_node(iter, plevel)) - return; - - parent_locked = btree_node_locked(iter, plevel); - - if (!bch2_btree_node_relock(iter, plevel)) - return; - - l = &iter->l[plevel]; - k = bch2_btree_node_iter_peek_all(&l->iter, l->b); - if (!k || - bkey_deleted(k) || - bkey_cmp_left_packed(l->b, k, &b->key.k.p)) { - char buf[100]; - struct bkey uk = bkey_unpack_key(b, k); - - bch2_bkey_to_text(&PBUF(buf), &uk); - panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n", - buf, b->key.k.p.inode, b->key.k.p.offset); - } - - if (!parent_locked) - btree_node_unlock(iter, b->level + 1); -} + struct btree_path_level *l = &path->l[level]; -static inline bool btree_iter_pos_after_node(struct btree_iter *iter, - struct btree *b) -{ - return __btree_iter_pos_cmp(iter, NULL, - bkey_to_packed(&b->key), true) < 0; -} + bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); -static inline bool btree_iter_pos_in_node(struct btree_iter *iter, - struct btree *b) -{ - return iter->btree_id == b->btree_id && - bkey_cmp(iter->pos, b->data->min_key) >= 0 && - !btree_iter_pos_after_node(iter, b); + /* + * Iterators to interior nodes should always be pointed at the first non + * whiteout: + */ + if (level) + bch2_btree_node_iter_peek(&l->iter, l->b); } -static inline void __btree_iter_init(struct btree_iter *iter, - unsigned level) +void bch2_btree_path_level_init(struct btree_trans *trans, + struct btree_path *path, + struct btree *b) { - struct btree_iter_level *l = &iter->l[level]; - - 
bch2_btree_node_iter_init(&l->iter, l->b, &iter->pos); - - if (iter->flags & BTREE_ITER_IS_EXTENTS) - btree_iter_advance_to_pos(iter, l, -1); + BUG_ON(path->cached); - /* Skip to first non whiteout: */ - if (level) - bch2_btree_node_iter_peek(&l->iter, l->b); + EBUG_ON(!btree_path_pos_in_node(path, b)); + EBUG_ON(b->c.lock.state.seq & 1); - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); + path->l[b->c.level].lock_seq = b->c.lock.state.seq; + path->l[b->c.level].b = b; + __btree_path_level_init(path, b->c.level); } -static inline void btree_iter_node_set(struct btree_iter *iter, - struct btree *b) -{ - btree_iter_verify_new_node(iter, b); - - EBUG_ON(!btree_iter_pos_in_node(iter, b)); - EBUG_ON(b->lock.state.seq & 1); +/* Btree path: fixups after btree node updates: */ - iter->l[b->level].lock_seq = b->lock.state.seq; - iter->l[b->level].b = b; - __btree_iter_init(iter, b->level); +static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b) +{ + struct bch_fs *c = trans->c; + struct btree_insert_entry *i; + + trans_for_each_update(trans, i) + if (!i->cached && + i->level == b->c.level && + i->btree_id == b->c.btree_id && + bpos_cmp(i->k->k.p, b->data->min_key) >= 0 && + bpos_cmp(i->k->k.p, b->data->max_key) <= 0) { + i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v; + + if (unlikely(trans->journal_replay_not_finished)) { + struct bkey_i *j_k = + bch2_journal_keys_peek_slot(c, i->btree_id, i->level, + i->k->k.p); + + if (j_k) { + i->old_k = j_k->k; + i->old_v = &j_k->v; + } + } + } } /* * A btree node is being replaced - update the iterator to point to the new * node: */ -void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) +void bch2_trans_node_add(struct btree_trans *trans, struct btree *b) { - enum btree_node_locked_type t; - struct btree_iter *linked; + struct btree_path *path; - trans_for_each_iter(iter->trans, linked) - if (btree_iter_pos_in_node(linked, b)) { - /* - * bch2_btree_iter_node_drop() has already been called - - * the old node we're replacing has already been - * unlocked and the pointer invalidated - */ - BUG_ON(btree_node_locked(linked, b->level)); + trans_for_each_path(trans, path) + if (path->uptodate == BTREE_ITER_UPTODATE && + !path->cached && + btree_path_pos_in_node(path, b)) { + enum btree_node_locked_type t = + btree_lock_want(path, b->c.level); - t = btree_lock_want(linked, b->level); if (t != BTREE_NODE_UNLOCKED) { - six_lock_increment(&b->lock, t); - mark_btree_node_locked(linked, b->level, t); + btree_node_unlock(trans, path, b->c.level); + six_lock_increment(&b->c.lock, t); + mark_btree_node_locked(trans, path, b->c.level, t); } - btree_iter_node_set(linked, b); + bch2_btree_path_level_init(trans, path, b); } - six_unlock_intent(&b->lock); -} - -void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) -{ - struct btree_iter *linked; - unsigned level = b->level; - - trans_for_each_iter(iter->trans, linked) - if (linked->l[level].b == b) { - __btree_node_unlock(linked, level); - linked->l[level].b = BTREE_ITER_NO_NODE_DROP; - } + bch2_trans_revalidate_updates_in_node(trans, b); } /* * A btree node has been modified in such a way as to invalidate iterators - fix * them: */ -void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b) +void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b) { - struct btree_iter *linked; + struct btree_path *path; - trans_for_each_iter_with_node(iter->trans, b, linked) - __btree_iter_init(linked, b->level); + 
trans_for_each_path_with_node(trans, b, path) + __btree_path_level_init(path, b->c.level); + + bch2_trans_revalidate_updates_in_node(trans, b); } -static inline int btree_iter_lock_root(struct btree_iter *iter, - unsigned depth_want) +/* Btree path: traverse, set_pos: */ + +static inline int btree_path_lock_root(struct btree_trans *trans, + struct btree_path *path, + unsigned depth_want, + unsigned long trace_ip) { - struct bch_fs *c = iter->trans->c; - struct btree *b; + struct bch_fs *c = trans->c; + struct btree *b, **rootp = &c->btree_roots[path->btree_id].b; enum six_lock_type lock_type; unsigned i; + int ret; - EBUG_ON(iter->nodes_locked); + EBUG_ON(path->nodes_locked); while (1) { - b = READ_ONCE(c->btree_roots[iter->btree_id].b); - iter->level = READ_ONCE(b->level); + b = READ_ONCE(*rootp); + path->level = READ_ONCE(b->c.level); - if (unlikely(iter->level < depth_want)) { + if (unlikely(path->level < depth_want)) { /* * the root is at a lower depth than the depth we want: * got to the end of the btree, or we're walking nodes * greater than some depth and there are no nodes >= * that depth */ - iter->level = depth_want; - for (i = iter->level; i < BTREE_MAX_DEPTH; i++) - iter->l[i].b = NULL; + path->level = depth_want; + for (i = path->level; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; return 1; } - lock_type = __btree_lock_want(iter, iter->level); - if (unlikely(!btree_node_lock(b, POS_MAX, iter->level, - iter, lock_type))) - return -EINTR; + lock_type = __btree_lock_want(path, path->level); + ret = btree_node_lock(trans, path, &b->c, + path->level, lock_type, trace_ip); + if (unlikely(ret)) { + if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed)) + continue; + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ret; + BUG(); + } - if (likely(b == c->btree_roots[iter->btree_id].b && - b->level == iter->level && + if (likely(b == READ_ONCE(*rootp) && + b->c.level == path->level && !race_fault())) { - for (i = 0; i < iter->level; i++) - iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT; - iter->l[iter->level].b = b; - for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++) - iter->l[i].b = NULL; - - mark_btree_node_locked(iter, iter->level, lock_type); - btree_iter_node_set(iter, b); + for (i = 0; i < path->level; i++) + path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root); + path->l[path->level].b = b; + for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++) + path->l[i].b = NULL; + + mark_btree_node_locked(trans, path, path->level, lock_type); + bch2_btree_path_level_init(trans, path, b); return 0; } - six_unlock_type(&b->lock, lock_type); + six_unlock_type(&b->c.lock, lock_type); } } noinline -static void btree_iter_prefetch(struct btree_iter *iter) +static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path) { - struct bch_fs *c = iter->trans->c; - struct btree_iter_level *l = &iter->l[iter->level]; + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); struct btree_node_iter node_iter = l->iter; struct bkey_packed *k; - BKEY_PADDED(k) tmp; + struct bkey_buf tmp; unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) - ? (iter->level > 1 ? 0 : 2) - : (iter->level > 1 ? 1 : 16); - bool was_locked = btree_node_locked(iter, iter->level); + ? (path->level > 1 ? 0 : 2) + : (path->level > 1 ? 
1 : 16); + bool was_locked = btree_node_locked(path, path->level); + int ret = 0; - while (nr) { - if (!bch2_btree_node_relock(iter, iter->level)) - return; + bch2_bkey_buf_init(&tmp); + + while (nr-- && !ret) { + if (!bch2_btree_node_relock(trans, path, path->level)) + break; bch2_btree_node_iter_advance(&node_iter, l->b); k = bch2_btree_node_iter_peek(&node_iter, l->b); if (!k) break; - bch2_bkey_unpack(l->b, &tmp.k, k); - bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1); + bch2_bkey_buf_unpack(&tmp, c, l->b, k); + ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, + path->level - 1); } if (!was_locked) - btree_node_unlock(iter, iter->level); + btree_node_unlock(trans, path, path->level); + + bch2_bkey_buf_exit(&tmp, c); + return ret; } -static inline int btree_iter_down(struct btree_iter *iter) +static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path, + struct btree_and_journal_iter *jiter) { - struct bch_fs *c = iter->trans->c; - struct btree_iter_level *l = &iter->l[iter->level]; - struct btree *b; - unsigned level = iter->level - 1; - enum six_lock_type lock_type = __btree_lock_want(iter, level); - BKEY_PADDED(k) tmp; - - BUG_ON(!btree_node_locked(iter, iter->level)); + struct bch_fs *c = trans->c; + struct bkey_s_c k; + struct bkey_buf tmp; + unsigned nr = test_bit(BCH_FS_STARTED, &c->flags) + ? (path->level > 1 ? 0 : 2) + : (path->level > 1 ? 1 : 16); + bool was_locked = btree_node_locked(path, path->level); + int ret = 0; - bch2_bkey_unpack(l->b, &tmp.k, - bch2_btree_node_iter_peek(&l->iter, l->b)); + bch2_bkey_buf_init(&tmp); - b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type); - if (unlikely(IS_ERR(b))) - return PTR_ERR(b); + while (nr-- && !ret) { + if (!bch2_btree_node_relock(trans, path, path->level)) + break; - mark_btree_node_locked(iter, level, lock_type); - btree_iter_node_set(iter, b); + bch2_btree_and_journal_iter_advance(jiter); + k = bch2_btree_and_journal_iter_peek(jiter); + if (!k.k) + break; - if (iter->flags & BTREE_ITER_PREFETCH) - btree_iter_prefetch(iter); + bch2_bkey_buf_reassemble(&tmp, c, k); + ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, + path->level - 1); + } - iter->level = level; + if (!was_locked) + btree_node_unlock(trans, path, path->level); - return 0; + bch2_bkey_buf_exit(&tmp, c); + return ret; } -static void btree_iter_up(struct btree_iter *iter) +static noinline void btree_node_mem_ptr_set(struct btree_trans *trans, + struct btree_path *path, + unsigned plevel, struct btree *b) { - btree_node_unlock(iter, iter->level++); + struct btree_path_level *l = &path->l[plevel]; + bool locked = btree_node_locked(path, plevel); + struct bkey_packed *k; + struct bch_btree_ptr_v2 *bp; + + if (!bch2_btree_node_relock(trans, path, plevel)) + return; + + k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + BUG_ON(k->type != KEY_TYPE_btree_ptr_v2); + + bp = (void *) bkeyp_val(&l->b->format, k); + bp->mem_ptr = (unsigned long)b; + + if (!locked) + btree_node_unlock(trans, path, plevel); } -int __must_check __bch2_btree_iter_traverse(struct btree_iter *); +static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + struct bkey_buf *out) +{ + struct bch_fs *c = trans->c; + struct btree_path_level *l = path_l(path); + struct btree_and_journal_iter jiter; + struct bkey_s_c k; + int ret = 0; + + __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos); + + k = 
bch2_btree_and_journal_iter_peek(&jiter); + + bch2_bkey_buf_reassemble(out, c, k); + + if (flags & BTREE_ITER_PREFETCH) + ret = btree_path_prefetch_j(trans, path, &jiter); + + bch2_btree_and_journal_iter_exit(&jiter); + return ret; +} -static int __btree_iter_traverse_all(struct btree_trans *trans, - struct btree_iter *orig_iter, int ret) +static __always_inline int btree_path_down(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) { struct bch_fs *c = trans->c; - struct btree_iter *iter; - u8 sorted[BTREE_ITER_MAX]; - unsigned i, nr_sorted = 0; + struct btree_path_level *l = path_l(path); + struct btree *b; + unsigned level = path->level - 1; + enum six_lock_type lock_type = __btree_lock_want(path, level); + struct bkey_buf tmp; + int ret; + + EBUG_ON(!btree_node_locked(path, path->level)); + + bch2_bkey_buf_init(&tmp); + + if (unlikely(trans->journal_replay_not_finished)) { + ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp); + if (ret) + goto err; + } else { + bch2_bkey_buf_unpack(&tmp, c, l->b, + bch2_btree_node_iter_peek(&l->iter, l->b)); + + if (flags & BTREE_ITER_PREFETCH) { + ret = btree_path_prefetch(trans, path); + if (ret) + goto err; + } + } + + b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip); + ret = PTR_ERR_OR_ZERO(b); + if (unlikely(ret)) + goto err; + + if (likely(!trans->journal_replay_not_finished && + tmp.k->k.type == KEY_TYPE_btree_ptr_v2) && + unlikely(b != btree_node_mem_ptr(tmp.k))) + btree_node_mem_ptr_set(trans, path, level + 1, b); + + if (btree_node_read_locked(path, level + 1)) + btree_node_unlock(trans, path, level + 1); - trans_for_each_iter(trans, iter) - sorted[nr_sorted++] = iter - trans->iters; + mark_btree_node_locked(trans, path, level, lock_type); + path->level = level; + bch2_btree_path_level_init(trans, path, b); -#define btree_iter_cmp_by_idx(_l, _r) \ - btree_iter_cmp(&trans->iters[_l], &trans->iters[_r]) + bch2_btree_path_verify_locks(path); +err: + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + + +static int bch2_btree_path_traverse_all(struct btree_trans *trans) +{ + struct bch_fs *c = trans->c; + struct btree_path *path; + unsigned long trace_ip = _RET_IP_; + int i, ret = 0; - bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx); -#undef btree_iter_cmp_by_idx + if (trans->in_traverse_all) + return -BCH_ERR_transaction_restart_in_traverse_all; + trans->in_traverse_all = true; retry_all: + trans->restarted = 0; + trans->last_restarted_ip = 0; + + trans_for_each_path(trans, path) + path->should_be_locked = false; + + btree_trans_sort_paths(trans); + bch2_trans_unlock(trans); + cond_resched(); - if (unlikely(ret == -ENOMEM)) { + if (unlikely(trans->memory_allocation_failure)) { struct closure cl; closure_init_stack(&cl); @@ -989,56 +997,113 @@ retry_all: } while (ret); } - if (unlikely(ret == -EIO)) { - trans->error = true; - orig_iter->flags |= BTREE_ITER_ERROR; - orig_iter->l[orig_iter->level].b = BTREE_ITER_NO_NODE_ERROR; - goto out; - } - - BUG_ON(ret && ret != -EINTR); - /* Now, redo traversals in correct order: */ - for (i = 0; i < nr_sorted; i++) { - iter = &trans->iters[sorted[i]]; - - do { - ret = __bch2_btree_iter_traverse(iter); - } while (ret == -EINTR); + i = 0; + while (i < trans->nr_sorted) { + path = trans->paths + trans->sorted[i]; - if (ret) - goto retry_all; + /* + * Traversing a path can cause another path to be added at about + * the same position: + */ + if (path->uptodate) { + __btree_path_get(path, false); + ret = 
bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
+			__btree_path_put(path, false);
+
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+			    bch2_err_matches(ret, ENOMEM))
+				goto retry_all;
+			if (ret)
+				goto err;
+		} else {
+			i++;
+		}
 	}
 
-	ret = hweight64(trans->iters_live) > 1 ? -EINTR : 0;
-out:
+	/*
+	 * We used to assert that all paths had been traversed here
+	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
+	 * path->should_be_locked is not set yet, we might have unlocked and
+	 * then failed to relock a path - that's fine.
+	 */
+err:
 	bch2_btree_cache_cannibalize_unlock(c);
+
+	trans->in_traverse_all = false;
+
+	trace_and_count(c, trans_traverse_all, trans, trace_ip);
 	return ret;
 }
 
-int bch2_btree_iter_traverse_all(struct btree_trans *trans)
+static inline bool btree_path_check_pos_in_node(struct btree_path *path,
+						unsigned l, int check_pos)
 {
-	return __btree_iter_traverse_all(trans, NULL, 0);
+	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
+		return false;
+	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
+		return false;
+	return true;
+}
+
+static inline bool btree_path_good_node(struct btree_trans *trans,
+					struct btree_path *path,
+					unsigned l, int check_pos)
+{
+	return is_btree_node(path, l) &&
+		bch2_btree_node_relock(trans, path, l) &&
+		btree_path_check_pos_in_node(path, l, check_pos);
 }
 
-static unsigned btree_iter_up_until_locked(struct btree_iter *iter,
-					   bool check_pos)
+static void btree_path_set_level_down(struct btree_trans *trans,
+				      struct btree_path *path,
+				      unsigned new_level)
 {
-	unsigned l = iter->level;
+	unsigned l;
 
-	while (btree_iter_node(iter, l) &&
-	       (!is_btree_node(iter, l) ||
-		!bch2_btree_node_relock(iter, l) ||
-		(check_pos &&
-		 !btree_iter_pos_in_node(iter, iter->l[l].b)))) {
-		btree_node_unlock(iter, l);
-		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
-		l++;
-	}
+	path->level = new_level;
+
+	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
+		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
+			btree_node_unlock(trans, path, l);
+
+	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+	bch2_btree_path_verify(trans, path);
+}
+
+static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
+							 struct btree_path *path,
+							 int check_pos)
+{
+	unsigned i, l = path->level;
+again:
+	while (btree_path_node(path, l) &&
+	       !btree_path_good_node(trans, path, l, check_pos))
+		__btree_path_set_level_up(trans, path, l++);
+
+	/* If we need intent locks, take them too: */
+	for (i = l + 1;
+	     i < path->locks_want && btree_path_node(path, i);
+	     i++)
+		if (!bch2_btree_node_relock(trans, path, i)) {
+			while (l <= i)
+				__btree_path_set_level_up(trans, path, l++);
+			goto again;
+		}
 
 	return l;
 }
 
+static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
+						     struct btree_path *path,
+						     int check_pos)
+{
+	return likely(btree_node_locked(path, path->level) &&
+		      btree_path_check_pos_in_node(path, path->level, check_pos))
+		? path->level
+		: __btree_path_up_until_good_node(trans, path, check_pos);
+}
+
 /*
  * This is the main state machine for walking down the btree - walks down to a
  * specified depth
@@ -1048,928 +1113,2088 @@ static unsigned btree_iter_up_until_locked(struct btree_iter *iter,
  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
  * stashed in the iterator and returned from bch2_trans_exit(). 
*/ -int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) +int bch2_btree_path_traverse_one(struct btree_trans *trans, + struct btree_path *path, + unsigned flags, + unsigned long trace_ip) { - unsigned depth_want = iter->level; - - if (unlikely(iter->level >= BTREE_MAX_DEPTH)) - return 0; + unsigned depth_want = path->level; + int ret = -((int) trans->restarted); - if (bch2_btree_iter_relock(iter, false)) - return 0; + if (unlikely(ret)) + goto out; /* - * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos - * here unnecessary + * Ensure we obey path->should_be_locked: if it's set, we can't unlock + * and re-traverse the path without a transaction restart: */ - iter->level = btree_iter_up_until_locked(iter, true); + if (path->should_be_locked) { + ret = bch2_btree_path_relock(trans, path, trace_ip); + goto out; + } - /* - * If we've got a btree node locked (i.e. we aren't about to relock the - * root) - advance its node iterator if necessary: - * - * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary - */ - if (btree_iter_node(iter, iter->level)) - btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1); + if (path->cached) { + ret = bch2_btree_path_traverse_cached(trans, path, flags); + goto out; + } + + if (unlikely(path->level >= BTREE_MAX_DEPTH)) + goto out; + + path->level = btree_path_up_until_good_node(trans, path, 0); + + EBUG_ON(btree_path_node(path, path->level) && + !btree_node_locked(path, path->level)); /* - * Note: iter->nodes[iter->level] may be temporarily NULL here - that + * Note: path->nodes[path->level] may be temporarily NULL here - that * would indicate to other code that we got to the end of the btree, * here it indicates that relocking the root failed - it's critical that - * btree_iter_lock_root() comes next and that it can't fail + * btree_path_lock_root() comes next and that it can't fail */ - while (iter->level > depth_want) { - int ret = btree_iter_node(iter, iter->level) - ? btree_iter_down(iter) - : btree_iter_lock_root(iter, depth_want); + while (path->level > depth_want) { + ret = btree_path_node(path, path->level) + ? 
btree_path_down(trans, path, flags, trace_ip) + : btree_path_lock_root(trans, path, depth_want, trace_ip); if (unlikely(ret)) { - if (ret == 1) - return 0; + if (ret == 1) { + /* + * No nodes at this level - got to the end of + * the btree: + */ + ret = 0; + goto out; + } - iter->level = depth_want; - iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN; - return ret; + __bch2_btree_path_unlock(trans, path); + path->level = depth_want; + path->l[path->level].b = ERR_PTR(ret); + goto out; } } - iter->uptodate = BTREE_ITER_NEED_PEEK; - - bch2_btree_trans_verify_locks(iter->trans); - __bch2_btree_iter_verify(iter, iter->l[iter->level].b); - return 0; + path->uptodate = BTREE_ITER_UPTODATE; +out: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted) + panic("ret %s (%i) trans->restarted %s (%i)\n", + bch2_err_str(ret), ret, + bch2_err_str(trans->restarted), trans->restarted); + bch2_btree_path_verify(trans, path); + return ret; } -int __must_check bch2_btree_iter_traverse(struct btree_iter *iter) +static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst, + struct btree_path *src) { - int ret; + unsigned i, offset = offsetof(struct btree_path, pos); - ret = bch2_trans_cond_resched(iter->trans) ?: - __bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - ret = __btree_iter_traverse_all(iter->trans, iter, ret); + memcpy((void *) dst + offset, + (void *) src + offset, + sizeof(struct btree_path) - offset); - return ret; + for (i = 0; i < BTREE_MAX_DEPTH; i++) { + unsigned t = btree_node_locked_type(dst, i); + + if (t != BTREE_NODE_UNLOCKED) + six_lock_increment(&dst->l[i].b->c.lock, t); + } } -static inline void bch2_btree_iter_checks(struct btree_iter *iter, - enum btree_iter_type type) +static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src, + bool intent) { - EBUG_ON(iter->btree_id >= BTREE_ID_NR); - EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) != - (btree_node_type_is_extents(iter->btree_id) && - type != BTREE_ITER_NODES)); + struct btree_path *new = btree_path_alloc(trans, src); - bch2_btree_trans_verify_locks(iter->trans); + btree_path_copy(trans, new, src); + __btree_path_get(new, intent); + return new; } -/* Iterate across nodes (leaf and interior nodes) */ - -struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) +__flatten +struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans, + struct btree_path *path, bool intent, + unsigned long ip) { - struct btree *b; - int ret; - - bch2_btree_iter_checks(iter, BTREE_ITER_NODES); - - if (iter->uptodate == BTREE_ITER_UPTODATE) - return iter->l[iter->level].b; - - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; - - b = btree_iter_node(iter, iter->level); - if (!b) - return NULL; - - BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0); - - iter->pos = b->key.k.p; - iter->uptodate = BTREE_ITER_UPTODATE; - - return b; + __btree_path_put(path, intent); + path = btree_path_clone(trans, path, intent); + path->preserve = false; + return path; } -struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth) +struct btree_path * __must_check +__bch2_btree_path_set_pos(struct btree_trans *trans, + struct btree_path *path, struct bpos new_pos, + bool intent, unsigned long ip, int cmp) { - struct btree *b; - int ret; - - bch2_btree_iter_checks(iter, BTREE_ITER_NODES); + unsigned level = path->level; - /* already got to end? 
*/ - if (!btree_iter_node(iter, iter->level)) - return NULL; + bch2_trans_verify_not_in_restart(trans); + EBUG_ON(!path->ref); - bch2_trans_cond_resched(iter->trans); + path = bch2_btree_path_make_mut(trans, path, intent, ip); - btree_iter_up(iter); + path->pos = new_pos; + trans->paths_sorted = false; - if (!bch2_btree_node_relock(iter, iter->level)) - btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK); + if (unlikely(path->cached)) { + btree_node_unlock(trans, path, 0); + path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up); + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + goto out; + } - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; + level = btree_path_up_until_good_node(trans, path, cmp); - /* got to end? */ - b = btree_iter_node(iter, iter->level); - if (!b) - return NULL; + if (btree_path_node(path, level)) { + struct btree_path_level *l = &path->l[level]; - if (bkey_cmp(iter->pos, b->key.k.p) < 0) { + BUG_ON(!btree_node_locked(path, level)); /* - * Haven't gotten to the end of the parent node: go back down to - * the next child node + * We might have to skip over many keys, or just a few: try + * advancing the node iterator, and if we have to skip over too + * many keys just reinit it (or if we're rewinding, since that + * is expensive). */ + if (cmp < 0 || + !btree_path_advance_to_pos(path, l, 8)) + bch2_btree_node_iter_init(&l->iter, l->b, &path->pos); /* - * We don't really want to be unlocking here except we can't - * directly tell btree_iter_traverse() "traverse to this level" - * except by setting iter->level, so we have to unlock so we - * don't screw up our lock invariants: + * Iterators to interior nodes should always be pointed at the first non + * whiteout: */ - if (btree_node_read_locked(iter, iter->level)) - btree_node_unlock(iter, iter->level); + if (unlikely(level)) + bch2_btree_node_iter_peek(&l->iter, l->b); + } - /* ick: */ - iter->pos = iter->btree_id == BTREE_ID_INODES - ? 
btree_type_successor(iter->btree_id, iter->pos) - : bkey_successor(iter->pos); - iter->level = depth; + if (unlikely(level != path->level)) { + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + __bch2_btree_path_unlock(trans, path); + } +out: + bch2_btree_path_verify(trans, path); + return path; +} - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - ret = bch2_btree_iter_traverse(iter); - if (ret) - return NULL; +/* Btree path: main interface: */ - b = iter->l[iter->level].b; - } +static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path) +{ + struct btree_path *sib; - iter->pos = b->key.k.p; - iter->uptodate = BTREE_ITER_UPTODATE; + sib = prev_btree_path(trans, path); + if (sib && !btree_path_cmp(sib, path)) + return sib; - return b; -} + sib = next_btree_path(trans, path); + if (sib && !btree_path_cmp(sib, path)) + return sib; -/* Iterate across keys (in leaf nodes only) */ + return NULL; +} -void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) +static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path) { - struct btree_iter_level *l = &iter->l[0]; + struct btree_path *sib; - EBUG_ON(iter->level != 0); - EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); - EBUG_ON(!btree_node_locked(iter, 0)); - EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0); + sib = prev_btree_path(trans, path); + if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b) + return sib; - iter->pos = new_pos; - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); + sib = next_btree_path(trans, path); + if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b) + return sib; - btree_iter_advance_to_pos(iter, l, -1); + return NULL; +} - if (bch2_btree_node_iter_end(&l->iter) && - btree_iter_pos_after_node(iter, l->b)) - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); +static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path) +{ + __bch2_btree_path_unlock(trans, path); + btree_path_list_remove(trans, path); + trans->paths_allocated &= ~(1ULL << path->idx); } -void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) +void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent) { - int cmp = bkey_cmp(new_pos, iter->pos); - unsigned level; + struct btree_path *dup; - if (!cmp) + EBUG_ON(trans->paths + path->idx != path); + EBUG_ON(!path->ref); + + if (!__btree_path_put(path, intent)) return; - iter->pos = new_pos; + dup = path->preserve + ? have_path_at_pos(trans, path) + : have_node_at_pos(trans, path); - level = btree_iter_up_until_locked(iter, true); + if (!dup && !(!path->preserve && !is_btree_node(path, path->level))) + return; - if (btree_iter_node(iter, level)) { - /* - * We might have to skip over many keys, or just a few: try - * advancing the node iterator, and if we have to skip over too - * many keys just reinit it (or if we're rewinding, since that - * is expensive). 
- */ - if (cmp < 0 || - !btree_iter_advance_to_pos(iter, &iter->l[level], 8)) - __btree_iter_init(iter, level); + if (path->should_be_locked && + !trans->restarted && + (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_))) + return; - /* Don't leave it locked if we're not supposed to: */ - if (btree_lock_want(iter, level) == BTREE_NODE_UNLOCKED) - btree_node_unlock(iter, level); + if (dup) { + dup->preserve |= path->preserve; + dup->should_be_locked |= path->should_be_locked; } - if (level != iter->level) - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - else - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); + __bch2_path_free(trans, path); } -static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter) +static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path, + bool intent) { - struct btree_iter_level *l = &iter->l[0]; - struct bkey_s_c ret = { .k = &iter->k }; + EBUG_ON(trans->paths + path->idx != path); + EBUG_ON(!path->ref); - if (!bkey_deleted(&iter->k)) { - EBUG_ON(bch2_btree_node_iter_end(&l->iter)); - ret.v = bkeyp_val(&l->b->format, - __bch2_btree_node_iter_peek_all(&l->iter, l->b)); - } + if (!__btree_path_put(path, intent)) + return; - if (debug_check_bkeys(iter->trans->c) && - !bkey_deleted(ret.k)) - bch2_bkey_debugcheck(iter->trans->c, l->b, ret); - return ret; + __bch2_path_free(trans, path); } -struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) +void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count) { - struct btree_iter_level *l = &iter->l[0]; - struct bkey_s_c k; - int ret; + panic("trans->restart_count %u, should be %u, last restarted by %pS\n", + trans->restart_count, restart_count, + (void *) trans->last_begin_ip); +} - bch2_btree_iter_checks(iter, BTREE_ITER_KEYS); +void bch2_trans_in_restart_error(struct btree_trans *trans) +{ + panic("in transaction restart: %s, last restarted by %pS\n", + bch2_err_str(trans->restarted), + (void *) trans->last_restarted_ip); +} - if (iter->uptodate == BTREE_ITER_UPTODATE) - return btree_iter_peek_uptodate(iter); +noinline __cold +void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans) +{ + struct btree_insert_entry *i; + struct btree_write_buffered_key *wb; - while (1) { - if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); - } + prt_printf(buf, "transaction updates for %s journal seq %llu", + trans->fn, trans->journal_res.seq); + prt_newline(buf); + printbuf_indent_add(buf, 2); - k = __btree_iter_peek(iter, l); - if (likely(k.k)) - break; + trans_for_each_update(trans, i) { + struct bkey_s_c old = { &i->old_k, i->old_v }; - /* got to the end of the leaf, iterator needs to be traversed: */ - iter->pos = l->b->key.k.p; - iter->uptodate = BTREE_ITER_NEED_TRAVERSE; + prt_printf(buf, "update: btree=%s cached=%u %pS", + bch2_btree_ids[i->btree_id], + i->cached, + (void *) i->ip_allocated); + prt_newline(buf); - if (!bkey_cmp(iter->pos, POS_MAX)) - return bkey_s_c_null; + prt_printf(buf, " old "); + bch2_bkey_val_to_text(buf, trans->c, old); + prt_newline(buf); - iter->pos = btree_type_successor(iter->btree_id, iter->pos); + prt_printf(buf, " new "); + bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k)); + prt_newline(buf); } - /* - * iter->pos should always be equal to the key we just - * returned - except extents can straddle iter->pos: - */ - if (!(iter->flags & BTREE_ITER_IS_EXTENTS) || - bkey_cmp(bkey_start_pos(k.k), 
iter->pos) > 0) - iter->pos = bkey_start_pos(k.k); + trans_for_each_wb_update(trans, wb) { + prt_printf(buf, "update: btree=%s wb=1 %pS", + bch2_btree_ids[wb->btree], + (void *) i->ip_allocated); + prt_newline(buf); - iter->uptodate = BTREE_ITER_UPTODATE; - return k; + prt_printf(buf, " new "); + bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k)); + prt_newline(buf); + } + + printbuf_indent_sub(buf, 2); } -static noinline -struct bkey_s_c bch2_btree_iter_peek_next_leaf(struct btree_iter *iter) +noinline __cold +void bch2_dump_trans_updates(struct btree_trans *trans) { - struct btree_iter_level *l = &iter->l[0]; - - iter->pos = l->b->key.k.p; - iter->uptodate = BTREE_ITER_NEED_TRAVERSE; - - if (!bkey_cmp(iter->pos, POS_MAX)) - return bkey_s_c_null; - - iter->pos = btree_type_successor(iter->btree_id, iter->pos); + struct printbuf buf = PRINTBUF; - return bch2_btree_iter_peek(iter); + bch2_trans_updates_to_text(&buf, trans); + bch2_print_string_as_lines(KERN_ERR, buf.buf); + printbuf_exit(&buf); } -struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) +noinline __cold +void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path) { - struct btree_iter_level *l = &iter->l[0]; - struct bkey_packed *p; - struct bkey_s_c k; - - bch2_btree_iter_checks(iter, BTREE_ITER_KEYS); + prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ", + path->idx, path->ref, path->intent_ref, + path->preserve ? 'P' : ' ', + path->should_be_locked ? 'S' : ' ', + bch2_btree_ids[path->btree_id], + path->level); + bch2_bpos_to_text(out, path->pos); - iter->pos = btree_type_successor(iter->btree_id, iter->k.p); - - if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) { - /* - * XXX: when we just need to relock we should be able to avoid - * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK - * for that to work - */ - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); + prt_printf(out, " locks %u", path->nodes_locked); +#ifdef TRACK_PATH_ALLOCATED + prt_printf(out, " %pS", (void *) path->ip_allocated); +#endif + prt_newline(out); +} - return bch2_btree_iter_peek(iter); - } +noinline __cold +void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans, + bool nosort) +{ + struct btree_path *path; + unsigned idx; - do { - bch2_btree_node_iter_advance(&l->iter, l->b); - p = bch2_btree_node_iter_peek_all(&l->iter, l->b); - if (unlikely(!p)) - return bch2_btree_iter_peek_next_leaf(iter); - } while (bkey_whiteout(p)); + if (!nosort) + btree_trans_sort_paths(trans); - k = __btree_iter_unpack(iter, l, &iter->k, p); + trans_for_each_path_inorder(trans, path, idx) + bch2_btree_path_to_text(out, path); +} - EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0); - iter->pos = bkey_start_pos(k.k); - return k; +noinline __cold +void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans) +{ + __bch2_trans_paths_to_text(out, trans, false); } -struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) +noinline __cold +void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort) { - struct btree_iter_level *l = &iter->l[0]; - struct bkey_packed *p; - struct bkey_s_c k; - int ret; + struct printbuf buf = PRINTBUF; - bch2_btree_iter_checks(iter, BTREE_ITER_KEYS); + __bch2_trans_paths_to_text(&buf, trans, nosort); + bch2_trans_updates_to_text(&buf, trans); - if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) { - k = bch2_btree_iter_peek(iter); - if (IS_ERR(k.k)) - return k; - } + bch2_print_string_as_lines(KERN_ERR, buf.buf); + 
printbuf_exit(&buf); +} - while (1) { - p = bch2_btree_node_iter_prev(&l->iter, l->b); - if (likely(p)) - break; +noinline __cold +void bch2_dump_trans_paths_updates(struct btree_trans *trans) +{ + __bch2_dump_trans_paths_updates(trans, false); +} - iter->pos = l->b->data->min_key; - if (!bkey_cmp(iter->pos, POS_MIN)) - return bkey_s_c_null; +noinline __cold +static void bch2_trans_update_max_paths(struct btree_trans *trans) +{ + struct btree_transaction_stats *s = btree_trans_stats(trans); + struct printbuf buf = PRINTBUF; - bch2_btree_iter_set_pos(iter, - btree_type_predecessor(iter->btree_id, iter->pos)); + if (!s) + return; - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + bch2_trans_paths_to_text(&buf, trans); - p = bch2_btree_node_iter_peek(&l->iter, l->b); - if (p) - break; + if (!buf.allocation_failure) { + mutex_lock(&s->lock); + if (s->nr_max_paths < hweight64(trans->paths_allocated)) { + s->nr_max_paths = trans->nr_max_paths = + hweight64(trans->paths_allocated); + swap(s->max_paths_text, buf.buf); + } + mutex_unlock(&s->lock); } - k = __btree_iter_unpack(iter, l, &iter->k, p); + printbuf_exit(&buf); - EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0); + trans->nr_max_paths = hweight64(trans->paths_allocated); +} - iter->pos = bkey_start_pos(k.k); - iter->uptodate = BTREE_ITER_UPTODATE; - return k; +static noinline void btree_path_overflow(struct btree_trans *trans) +{ + bch2_dump_trans_paths_updates(trans); + panic("trans path oveflow\n"); } -static inline struct bkey_s_c -__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter) +static inline struct btree_path *btree_path_alloc(struct btree_trans *trans, + struct btree_path *pos) { - struct btree_iter_level *l = &iter->l[0]; - struct btree_node_iter node_iter; - struct bkey_s_c k; - struct bkey n; - int ret; + struct btree_path *path; + unsigned idx; -recheck: - while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k && - bkey_deleted(k.k) && - bkey_cmp(bkey_start_pos(k.k), iter->pos) == 0) - bch2_btree_node_iter_advance(&l->iter, l->b); + if (unlikely(trans->paths_allocated == + ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) + btree_path_overflow(trans); + + idx = __ffs64(~trans->paths_allocated); /* - * iterator is now at the correct position for inserting at iter->pos, - * but we need to keep iterating until we find the first non whiteout so - * we know how big a hole we have, if any: + * Do this before marking the new path as allocated, since it won't be + * initialized yet: */ + if (unlikely(idx > trans->nr_max_paths)) + bch2_trans_update_max_paths(trans); - node_iter = l->iter; - if (k.k && bkey_whiteout(k.k)) - k = __btree_iter_unpack(iter, l, &iter->k, - bch2_btree_node_iter_peek(&node_iter, l->b)); + trans->paths_allocated |= 1ULL << idx; - /* - * If we got to the end of the node, check if we need to traverse to the - * next node: - */ - if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) { - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + path = &trans->paths[idx]; + path->idx = idx; + path->ref = 0; + path->intent_ref = 0; + path->nodes_locked = 0; - goto recheck; - } + btree_path_list_add(trans, pos, path); + trans->paths_sorted = false; + return path; +} - if (k.k && - !bkey_whiteout(k.k) && - bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) { - /* - * if we skipped forward to find the first non whiteout and - * there _wasn't_ actually a hole, we want the iterator to 
be - * pointed at the key we found: - */ - l->iter = node_iter; +struct btree_path *bch2_path_get(struct btree_trans *trans, + enum btree_id btree_id, struct bpos pos, + unsigned locks_want, unsigned level, + unsigned flags, unsigned long ip) +{ + struct btree_path *path, *path_pos = NULL; + bool cached = flags & BTREE_ITER_CACHED; + bool intent = flags & BTREE_ITER_INTENT; + int i; - EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0); - EBUG_ON(bkey_deleted(k.k)); - iter->uptodate = BTREE_ITER_UPTODATE; - return k; - } + bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_locks(trans); - /* hole */ + btree_trans_sort_paths(trans); - /* holes can't span inode numbers: */ - if (iter->pos.offset == KEY_OFFSET_MAX) { - if (iter->pos.inode == KEY_INODE_MAX) - return bkey_s_c_null; + trans_for_each_path_inorder(trans, path, i) { + if (__btree_path_cmp(path, + btree_id, + cached, + pos, + level) > 0) + break; - iter->pos = bkey_successor(iter->pos); - goto recheck; + path_pos = path; } - if (!k.k) - k.k = &l->b->key.k; + if (path_pos && + path_pos->cached == cached && + path_pos->btree_id == btree_id && + path_pos->level == level) { + __btree_path_get(path_pos, intent); + path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip); + } else { + path = btree_path_alloc(trans, path_pos); + path_pos = NULL; + + __btree_path_get(path, intent); + path->pos = pos; + path->btree_id = btree_id; + path->cached = cached; + path->uptodate = BTREE_ITER_NEED_TRAVERSE; + path->should_be_locked = false; + path->level = level; + path->locks_want = locks_want; + path->nodes_locked = 0; + for (i = 0; i < ARRAY_SIZE(path->l); i++) + path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init); +#ifdef TRACK_PATH_ALLOCATED + path->ip_allocated = ip; +#endif + trans->paths_sorted = false; + } + + if (!(flags & BTREE_ITER_NOPRESERVE)) + path->preserve = true; - bkey_init(&n); - n.p = iter->pos; - bch2_key_resize(&n, - min_t(u64, KEY_SIZE_MAX, - (k.k->p.inode == n.p.inode - ? bkey_start_offset(k.k) - : KEY_OFFSET_MAX) - - n.p.offset)); + if (path->intent_ref) + locks_want = max(locks_want, level + 1); - EBUG_ON(!n.size); + /* + * If the path has locks_want greater than requested, we don't downgrade + * it here - on transaction restart because btree node split needs to + * upgrade locks, we might be putting/getting the iterator again. + * Downgrading iterators only happens via bch2_trans_downgrade(), after + * a successful transaction commit. 
+ */ - iter->k = n; - iter->uptodate = BTREE_ITER_UPTODATE; - return (struct bkey_s_c) { &iter->k, NULL }; + locks_want = min(locks_want, BTREE_MAX_DEPTH); + if (locks_want > path->locks_want) + bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want); + + return path; } -static inline struct bkey_s_c -__bch2_btree_iter_peek_slot(struct btree_iter *iter) +struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u) { - struct btree_iter_level *l = &iter->l[0]; + + struct btree_path_level *l = path_l(path); + struct bkey_packed *_k; struct bkey_s_c k; - int ret; - if (iter->flags & BTREE_ITER_IS_EXTENTS) - return __bch2_btree_iter_peek_slot_extents(iter); + if (unlikely(!l->b)) + return bkey_s_c_null; -recheck: - while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k && - bkey_deleted(k.k) && - bkey_cmp(k.k->p, iter->pos) == 0) - bch2_btree_node_iter_advance(&l->iter, l->b); + EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE); + EBUG_ON(!btree_node_locked(path, path->level)); - /* - * If we got to the end of the node, check if we need to traverse to the - * next node: - */ - if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) { - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + if (!path->cached) { + _k = bch2_btree_node_iter_peek_all(&l->iter, l->b); + k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null; - goto recheck; - } + EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos)); - if (k.k && - !bkey_deleted(k.k) && - !bkey_cmp(iter->pos, k.k->p)) { - iter->uptodate = BTREE_ITER_UPTODATE; - return k; + if (!k.k || !bpos_eq(path->pos, k.k->p)) + goto hole; } else { - /* hole */ - bkey_init(&iter->k); - iter->k.p = iter->pos; + struct bkey_cached *ck = (void *) path->l[0].b; - iter->uptodate = BTREE_ITER_UPTODATE; - return (struct bkey_s_c) { &iter->k, NULL }; - } + EBUG_ON(ck && + (path->btree_id != ck->key.btree_id || + !bkey_eq(path->pos, ck->key.pos))); + if (!ck || !ck->valid) + return bkey_s_c_null; + + *u = ck->k->k; + k = bkey_i_to_s_c(ck->k); + } + + return k; +hole: + bkey_init(u); + u->p = path->pos; + return (struct bkey_s_c) { u, NULL }; } -struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) +/* Btree iterators: */ + +int __must_check +__bch2_btree_iter_traverse(struct btree_iter *iter) +{ + return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); +} + +int __must_check +bch2_btree_iter_traverse(struct btree_iter *iter) { int ret; - bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS); + iter->path = bch2_btree_path_set_pos(iter->trans, iter->path, + btree_iter_search_key(iter), + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); - if (iter->uptodate == BTREE_ITER_UPTODATE) - return btree_iter_peek_uptodate(iter); + ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); + if (ret) + return ret; - if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); - } + btree_path_set_should_be_locked(iter->path); + return 0; +} - return __bch2_btree_iter_peek_slot(iter); +/* Iterate across nodes (leaf and interior nodes) */ + +struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct btree *b = NULL; + int ret; + + EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (ret) + 
goto err; + + b = btree_path_node(iter->path, iter->path->level); + if (!b) + goto out; + + BUG_ON(bpos_lt(b->key.k.p, iter->pos)); + + bkey_init(&iter->k); + iter->k.p = iter->pos = b->key.k.p; + + iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + btree_path_set_should_be_locked(iter->path); +out: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + + return b; +err: + b = ERR_PTR(ret); + goto out; } -struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter) { - bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS); + struct btree *b; + + while (b = bch2_btree_iter_peek_node(iter), + bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart)) + bch2_trans_begin(iter->trans); + + return b; +} + +struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct btree_path *path = iter->path; + struct btree *b = NULL; + int ret; + + bch2_trans_verify_not_in_restart(trans); + EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); + + /* already at end? */ + if (!btree_path_node(path, path->level)) + return NULL; + + /* got to end? */ + if (!btree_path_node(path, path->level + 1)) { + btree_path_set_level_up(trans, path); + return NULL; + } + + if (!bch2_btree_node_relock(trans, path, path->level + 1)) { + __bch2_btree_path_unlock(trans, path); + path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); + path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); + btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path); + ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock); + goto err; + } - iter->pos = btree_type_successor(iter->btree_id, iter->k.p); + b = btree_path_node(path, path->level + 1); - if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) { + if (bpos_eq(iter->pos, b->key.k.p)) { + __btree_path_set_level_up(trans, path, path->level++); + } else { /* - * XXX: when we just need to relock we should be able to avoid - * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK - * for that to work + * Haven't gotten to the end of the parent node: go back down to + * the next child node */ - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); + path = iter->path = + bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos), + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); - return bch2_btree_iter_peek_slot(iter); + btree_path_set_level_down(trans, path, iter->min_depth); + + ret = bch2_btree_path_traverse(trans, path, iter->flags); + if (ret) + goto err; + + b = path->l[path->level].b; } - if (!bkey_deleted(&iter->k)) - bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b); + bkey_init(&iter->k); + iter->k.p = iter->pos = b->key.k.p; - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); + iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + btree_path_set_should_be_locked(iter->path); + BUG_ON(iter->path->uptodate); +out: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); - return __bch2_btree_iter_peek_slot(iter); + return b; +err: + b = ERR_PTR(ret); + goto out; } -static inline void bch2_btree_iter_init(struct btree_trans *trans, - struct btree_iter *iter, enum btree_id 
btree_id, - struct bpos pos, unsigned flags) -{ - struct bch_fs *c = trans->c; - unsigned i; +/* Iterate across keys (in leaf nodes only) */ - if (btree_node_type_is_extents(btree_id) && - !(flags & BTREE_ITER_NODES)) - flags |= BTREE_ITER_IS_EXTENTS; +inline bool bch2_btree_iter_advance(struct btree_iter *iter) +{ + if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) { + struct bpos pos = iter->k.p; + bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_eq(pos, SPOS_MAX) + : bkey_eq(pos, SPOS_MAX)); - iter->trans = trans; - iter->pos = pos; - bkey_init(&iter->k); - iter->k.p = pos; - iter->flags = flags; - iter->uptodate = BTREE_ITER_NEED_TRAVERSE; - iter->btree_id = btree_id; - iter->level = 0; - iter->locks_want = flags & BTREE_ITER_INTENT ? 1 : 0; - iter->nodes_locked = 0; - iter->nodes_intent_locked = 0; - for (i = 0; i < ARRAY_SIZE(iter->l); i++) - iter->l[i].b = NULL; - iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT; + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_successor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; + } else { + if (!btree_path_node(iter->path, iter->path->level)) + return true; - prefetch(c->btree_roots[btree_id].b); + iter->advanced = true; + return false; + } } -/* new transactional stuff: */ +inline bool bch2_btree_iter_rewind(struct btree_iter *iter) +{ + struct bpos pos = bkey_start_pos(&iter->k); + bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS + ? bpos_eq(pos, POS_MIN) + : bkey_eq(pos, POS_MIN)); -int bch2_trans_iter_put(struct btree_trans *trans, - struct btree_iter *iter) + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_predecessor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; +} + +static noinline +struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter) { - int ret = btree_iter_err(iter); + struct btree_insert_entry *i; + struct bkey_i *ret = NULL; + + trans_for_each_update(iter->trans, i) { + if (i->btree_id < iter->btree_id) + continue; + if (i->btree_id > iter->btree_id) + break; + if (bpos_lt(i->k->k.p, iter->path->pos)) + continue; + if (i->key_cache_already_flushed) + continue; + if (!ret || bpos_lt(i->k->k.p, ret->k.p)) + ret = i->k; + } - trans->iters_live &= ~(1ULL << iter->idx); return ret; } -static inline void __bch2_trans_iter_free(struct btree_trans *trans, - unsigned idx) +static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter) { - __bch2_btree_iter_unlock(&trans->iters[idx]); - trans->iters_linked &= ~(1ULL << idx); - trans->iters_live &= ~(1ULL << idx); - trans->iters_touched &= ~(1ULL << idx); - trans->iters_unlink_on_restart &= ~(1ULL << idx); - trans->iters_unlink_on_commit &= ~(1ULL << idx); + return iter->flags & BTREE_ITER_WITH_UPDATES + ? __bch2_btree_trans_peek_updates(iter) + : NULL; } -int bch2_trans_iter_free(struct btree_trans *trans, - struct btree_iter *iter) +struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans, + struct btree_iter *iter, + struct bpos end_pos) { - int ret = btree_iter_err(iter); + struct bkey_i *k; - __bch2_trans_iter_free(trans, iter->idx); - return ret; + if (bpos_lt(iter->path->pos, iter->journal_pos)) + iter->journal_idx = 0; + + k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, + iter->path->level, + iter->path->pos, + end_pos, + &iter->journal_idx); + + iter->journal_pos = k ? 
k->k.p : end_pos; + return k; } -int bch2_trans_iter_free_on_commit(struct btree_trans *trans, - struct btree_iter *iter) +static noinline +struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans, + struct btree_iter *iter) { - int ret = btree_iter_err(iter); + struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos); - trans->iters_unlink_on_commit |= 1ULL << iter->idx; - return ret; + if (k) { + iter->k = k->k; + return bkey_i_to_s_c(k); + } else { + return bkey_s_c_null; + } } -static int bch2_trans_realloc_iters(struct btree_trans *trans, - unsigned new_size) +static noinline +struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k) { - void *new_iters, *new_updates; + struct bkey_i *next_journal = + bch2_btree_journal_peek(trans, iter, + k.k ? k.k->p : path_l(iter->path)->b->key.k.p); - new_size = roundup_pow_of_two(new_size); + if (next_journal) { + iter->k = next_journal->k; + k = bkey_i_to_s_c(next_journal); + } - BUG_ON(new_size > BTREE_ITER_MAX); + return k; +} - if (new_size <= trans->size) - return 0; +/* + * Checks btree key cache for key at iter->pos and returns it if present, or + * bkey_s_c_null: + */ +static noinline +struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos) +{ + struct btree_trans *trans = iter->trans; + struct bch_fs *c = trans->c; + struct bkey u; + struct bkey_s_c k; + int ret; - BUG_ON(trans->used_mempool); + if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) && + bpos_eq(iter->pos, pos)) + return bkey_s_c_null; - bch2_trans_unlock(trans); + if (!bch2_btree_key_cache_find(c, iter->btree_id, pos)) + return bkey_s_c_null; - new_iters = kmalloc(sizeof(struct btree_iter) * new_size + - sizeof(struct btree_insert_entry) * (new_size + 4), - GFP_NOFS); - if (new_iters) - goto success; + if (!iter->key_cache_path) + iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos, + iter->flags & BTREE_ITER_INTENT, 0, + iter->flags|BTREE_ITER_CACHED| + BTREE_ITER_CACHED_NOFILL, + _THIS_IP_); - new_iters = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS); - new_size = BTREE_ITER_MAX; + iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); - trans->used_mempool = true; -success: - new_updates = new_iters + sizeof(struct btree_iter) * new_size; + ret = bch2_btree_path_traverse(trans, iter->key_cache_path, + iter->flags|BTREE_ITER_CACHED) ?: + bch2_btree_path_relock(trans, iter->path, _THIS_IP_); + if (unlikely(ret)) + return bkey_s_c_err(ret); - memcpy(new_iters, trans->iters, - sizeof(struct btree_iter) * trans->nr_iters); - memcpy(new_updates, trans->updates, - sizeof(struct btree_insert_entry) * trans->nr_updates); + btree_path_set_should_be_locked(iter->key_cache_path); - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - memset(trans->iters, POISON_FREE, - sizeof(struct btree_iter) * trans->nr_iters + - sizeof(struct btree_insert_entry) * trans->nr_iters); + k = bch2_btree_path_peek_slot(iter->key_cache_path, &u); + if (k.k && !bkey_err(k)) { + iter->k = u; + k.k = &iter->k; + } + return k; +} - if (trans->iters != trans->iters_onstack) - kfree(trans->iters); +static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key) +{ + struct btree_trans *trans = iter->trans; + struct bkey_i *next_update; + struct bkey_s_c k, k2; + int ret; - trans->iters = new_iters; - trans->updates = new_updates; - trans->size = new_size; + 
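+	/*
+	 * Rough sketch of the loop below: return the first key >= search_key,
+	 * merging keys from the btree node with pending in-transaction updates
+	 * (BTREE_ITER_WITH_UPDATES), journal keys (BTREE_ITER_WITH_JOURNAL) and
+	 * the key cache (BTREE_ITER_WITH_KEY_CACHE); deleted keys/whiteouts
+	 * advance the search key and the loop retries.
+	 */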
EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); - if (trans->iters_live) { - trace_trans_restart_iters_realloced(trans->ip, trans->size); - return -EINTR; + while (1) { + struct btree_path_level *l; + + iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + /* ensure that iter->k is consistent with iter->pos: */ + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); + goto out; + } + + l = path_l(iter->path); + + if (unlikely(!l->b)) { + /* No btree nodes at requested level: */ + bch2_btree_iter_set_pos(iter, SPOS_MAX); + k = bkey_s_c_null; + goto out; + } + + btree_path_set_should_be_locked(iter->path); + + k = btree_path_level_peek_all(trans->c, l, &iter->k); + + if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) && + k.k && + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { + k = k2; + ret = bkey_err(k); + if (ret) { + bch2_btree_iter_set_pos(iter, iter->pos); + goto out; + } + } + + if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL)) + k = btree_trans_peek_journal(trans, iter, k); + + next_update = btree_trans_peek_updates(iter); + + if (next_update && + bpos_le(next_update->k.p, + k.k ? k.k->p : l->b->key.k.p)) { + iter->k = next_update->k; + k = bkey_i_to_s_c(next_update); + } + + if (k.k && bkey_deleted(k.k)) { + /* + * If we've got a whiteout, and it's after the search + * key, advance the search key to the whiteout instead + * of just after the whiteout - it might be a btree + * whiteout, with a real key at the same position, since + * in the btree deleted keys sort before non deleted. + */ + search_key = !bpos_eq(search_key, k.k->p) + ? 
k.k->p + : bpos_successor(k.k->p); + continue; + } + + if (likely(k.k)) { + break; + } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) { + /* Advance to next leaf node: */ + search_key = bpos_successor(l->b->key.k.p); + } else { + /* End of btree: */ + bch2_btree_iter_set_pos(iter, SPOS_MAX); + k = bkey_s_c_null; + goto out; + } } +out: + bch2_btree_iter_verify(iter); - return 0; + return k; } -static int btree_trans_iter_alloc(struct btree_trans *trans) +/** + * bch2_btree_iter_peek: returns first key greater than or equal to iterator's + * current position + */ +struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end) { - unsigned idx = __ffs64(~trans->iters_linked); + struct btree_trans *trans = iter->trans; + struct bpos search_key = btree_iter_search_key(iter); + struct bkey_s_c k; + struct bpos iter_pos; + int ret; - if (idx < trans->nr_iters) - goto got_slot; + EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS); + EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX)); - if (trans->nr_iters == trans->size) { - int ret = bch2_trans_realloc_iters(trans, trans->size * 2); - if (ret) - return ret; + if (iter->update_path) { + bch2_path_put_nokeep(trans, iter->update_path, + iter->flags & BTREE_ITER_INTENT); + iter->update_path = NULL; + } + + bch2_btree_iter_verify_entry_exit(iter); + + while (1) { + k = __bch2_btree_iter_peek(iter, search_key); + if (unlikely(!k.k)) + goto end; + if (unlikely(bkey_err(k))) + goto out_no_locked; + + /* + * iter->pos should be mononotically increasing, and always be + * equal to the key we just returned - except extents can + * straddle iter->pos: + */ + if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) + iter_pos = k.k->p; + else + iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k)); + + if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS) + ? 
bkey_gt(iter_pos, end) + : bkey_ge(iter_pos, end))) + goto end; + + if (iter->update_path && + !bkey_eq(iter->update_path->pos, k.k->p)) { + bch2_path_put_nokeep(trans, iter->update_path, + iter->flags & BTREE_ITER_INTENT); + iter->update_path = NULL; + } + + if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + (iter->flags & BTREE_ITER_INTENT) && + !(iter->flags & BTREE_ITER_IS_EXTENTS) && + !iter->update_path) { + struct bpos pos = k.k->p; + + if (pos.snapshot < iter->snapshot) { + search_key = bpos_successor(k.k->p); + continue; + } + + pos.snapshot = iter->snapshot; + + /* + * advance, same as on exit for iter->path, but only up + * to snapshot + */ + __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT); + iter->update_path = iter->path; + + iter->update_path = bch2_btree_path_set_pos(trans, + iter->update_path, pos, + iter->flags & BTREE_ITER_INTENT, + _THIS_IP_); + ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + } + + /* + * We can never have a key in a leaf node at POS_MAX, so + * we don't have to check these successor() calls: + */ + if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && + !bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)) { + search_key = bpos_successor(k.k->p); + continue; + } + + if (bkey_whiteout(k.k) && + !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) { + search_key = bkey_successor(iter, k.k->p); + continue; + } + + break; + } + + iter->pos = iter_pos; + + iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + btree_path_set_should_be_locked(iter->path); +out_no_locked: + if (iter->update_path) { + ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_); + if (unlikely(ret)) + k = bkey_s_c_err(ret); + else + btree_path_set_should_be_locked(iter->update_path); + } + + if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) + iter->pos.snapshot = iter->snapshot; + + ret = bch2_btree_iter_verify_ret(iter, k); + if (unlikely(ret)) { + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); } - idx = trans->nr_iters++; - BUG_ON(trans->nr_iters > trans->size); + bch2_btree_iter_verify_entry_exit(iter); - trans->iters[idx].idx = idx; -got_slot: - BUG_ON(trans->iters_linked & (1ULL << idx)); - trans->iters_linked |= 1ULL << idx; - return idx; + return k; +end: + bch2_btree_iter_set_pos(iter, end); + k = bkey_s_c_null; + goto out_no_locked; } -static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans, - unsigned btree_id, struct bpos pos, - unsigned flags, u64 iter_id) +/** + * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal + * to iterator's current position, returning keys from every level of the btree. + * For keys at different levels of the btree that compare equal, the key from + * the lower level (leaf) is returned first. 
+ */ +struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter) { - struct btree_iter *iter; - int idx; + struct btree_trans *trans = iter->trans; + struct bkey_s_c k; + int ret; + + EBUG_ON(iter->path->cached); + bch2_btree_iter_verify(iter); + BUG_ON(iter->path->level < iter->min_depth); + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); + EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS)); - BUG_ON(trans->nr_iters > BTREE_ITER_MAX); + while (1) { + iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); - for (idx = 0; idx < trans->nr_iters; idx++) { - if (!(trans->iters_linked & (1ULL << idx))) + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + /* ensure that iter->k is consistent with iter->pos: */ + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); + goto out_no_locked; + } + + /* Already at end? */ + if (!btree_path_node(iter->path, iter->path->level)) { + k = bkey_s_c_null; + goto out_no_locked; + } + + k = btree_path_level_peek_all(trans->c, + &iter->path->l[iter->path->level], &iter->k); + + /* Check if we should go up to the parent node: */ + if (!k.k || + (iter->advanced && + bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) { + iter->pos = path_l(iter->path)->b->key.k.p; + btree_path_set_level_up(trans, iter->path); + iter->advanced = false; continue; + } - iter = &trans->iters[idx]; - if (iter_id - ? iter->id == iter_id - : (iter->btree_id == btree_id && - !bkey_cmp(iter->pos, pos))) - goto found; + /* + * Check if we should go back down to a leaf: + * If we're not in a leaf node, we only return the current key + * if it exactly matches iter->pos - otherwise we first have to + * go back to the leaf: + */ + if (iter->path->level != iter->min_depth && + (iter->advanced || + !k.k || + !bpos_eq(iter->pos, k.k->p))) { + btree_path_set_level_down(trans, iter->path, iter->min_depth); + iter->pos = bpos_successor(iter->pos); + iter->advanced = false; + continue; + } + + /* Check if we should go to the next key: */ + if (iter->path->level == iter->min_depth && + iter->advanced && + k.k && + bpos_eq(iter->pos, k.k->p)) { + iter->pos = bpos_successor(iter->pos); + iter->advanced = false; + continue; + } + + if (iter->advanced && + iter->path->level == iter->min_depth && + !bpos_eq(k.k->p, iter->pos)) + iter->advanced = false; + + BUG_ON(iter->advanced); + BUG_ON(!k.k); + break; } - idx = -1; -found: - if (idx < 0) { - idx = btree_trans_iter_alloc(trans); - if (idx < 0) - return ERR_PTR(idx); - iter = &trans->iters[idx]; - iter->id = iter_id; + iter->pos = k.k->p; + btree_path_set_should_be_locked(iter->path); +out_no_locked: + bch2_btree_iter_verify(iter); - bch2_btree_iter_init(trans, iter, btree_id, pos, flags); - } else { - iter = &trans->iters[idx]; + return k; +} + +/** + * bch2_btree_iter_next: returns first key greater than iterator's current + * position + */ +struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) +{ + if (!bch2_btree_iter_advance(iter)) + return bkey_s_c_null; + + return bch2_btree_iter_peek(iter); +} + +/** + * bch2_btree_iter_peek_prev: returns first key less than or equal to + * iterator's current position + */ +struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) +{ + struct btree_trans *trans = iter->trans; + struct bpos search_key = iter->pos; + struct btree_path *saved_path = NULL; + struct bkey_s_c k; + struct bkey saved_k; + const struct bch_val *saved_v; + int ret; + + 
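+	/*
+	 * Sketch of the loop below: walk backwards from the search key; with
+	 * BTREE_ITER_FILTER_SNAPSHOTS, keys in unrelated snapshots are skipped,
+	 * and the best candidate seen so far (a key in an ancestor snapshot) is
+	 * remembered in saved_path/saved_k/saved_v and returned once the scan
+	 * moves past that key's position.
+	 */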
EBUG_ON(iter->path->cached || iter->path->level); + EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES); + + if (iter->flags & BTREE_ITER_WITH_JOURNAL) + return bkey_s_c_err(-EIO); + + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); + + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + search_key.snapshot = U32_MAX; - iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH); - iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH); + while (1) { + iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + /* ensure that iter->k is consistent with iter->pos: */ + bch2_btree_iter_set_pos(iter, iter->pos); + k = bkey_s_c_err(ret); + goto out_no_locked; + } + + k = btree_path_level_peek(trans, iter->path, + &iter->path->l[0], &iter->k); + if (!k.k || + ((iter->flags & BTREE_ITER_IS_EXTENTS) + ? bpos_ge(bkey_start_pos(k.k), search_key) + : bpos_gt(k.k->p, search_key))) + k = btree_path_level_prev(trans, iter->path, + &iter->path->l[0], &iter->k); + + if (likely(k.k)) { + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) { + if (k.k->p.snapshot == iter->snapshot) + goto got_key; + + /* + * If we have a saved candidate, and we're no + * longer at the same _key_ (not pos), return + * that candidate + */ + if (saved_path && !bkey_eq(k.k->p, saved_k.p)) { + bch2_path_put_nokeep(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + iter->path = saved_path; + saved_path = NULL; + iter->k = saved_k; + k.v = saved_v; + goto got_key; + } + + if (bch2_snapshot_is_ancestor(iter->trans->c, + iter->snapshot, + k.k->p.snapshot)) { + if (saved_path) + bch2_path_put_nokeep(trans, saved_path, + iter->flags & BTREE_ITER_INTENT); + saved_path = btree_path_clone(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + saved_k = *k.k; + saved_v = k.v; + } + + search_key = bpos_predecessor(k.k->p); + continue; + } +got_key: + if (bkey_whiteout(k.k) && + !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) { + search_key = bkey_predecessor(iter, k.k->p); + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + search_key.snapshot = U32_MAX; + continue; + } + + break; + } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) { + /* Advance to previous leaf node: */ + search_key = bpos_predecessor(iter->path->l[0].b->data->min_key); + } else { + /* Start of btree: */ + bch2_btree_iter_set_pos(iter, POS_MIN); + k = bkey_s_c_null; + goto out_no_locked; + } } - BUG_ON(iter->btree_id != btree_id); - BUG_ON(trans->iters_live & (1ULL << idx)); - trans->iters_live |= 1ULL << idx; - trans->iters_touched |= 1ULL << idx; + EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos)); + + /* Extents can straddle iter->pos: */ + if (bkey_lt(k.k->p, iter->pos)) + iter->pos = k.k->p; + + if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) + iter->pos.snapshot = iter->snapshot; + + btree_path_set_should_be_locked(iter->path); +out_no_locked: + if (saved_path) + bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT); - BUG_ON(iter->btree_id != btree_id); - BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE); + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); - return iter; + return k; } -struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, - enum btree_id btree_id, - struct bpos pos, unsigned flags, - u64 iter_id) +/** + * bch2_btree_iter_prev: returns first key less than iterator's current + * position + 
*/ +struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) { - struct btree_iter *iter = - __btree_trans_get_iter(trans, btree_id, pos, flags, iter_id); + if (!bch2_btree_iter_rewind(iter)) + return bkey_s_c_null; - if (!IS_ERR(iter)) - bch2_btree_iter_set_pos(iter, pos); - return iter; + return bch2_btree_iter_peek_prev(iter); } -struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans, - enum btree_id btree_id, - struct bpos pos, - unsigned locks_want, - unsigned depth, - unsigned flags) +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) { - struct btree_iter *iter = - __btree_trans_get_iter(trans, btree_id, pos, - flags|BTREE_ITER_NODES, 0); - unsigned i; + struct btree_trans *trans = iter->trans; + struct bpos search_key; + struct bkey_s_c k; + int ret; + + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); + EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS); + EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE)); + + /* extents can't span inode numbers: */ + if ((iter->flags & BTREE_ITER_IS_EXTENTS) && + unlikely(iter->pos.offset == KEY_OFFSET_MAX)) { + if (iter->pos.inode == KEY_INODE_MAX) + return bkey_s_c_null; + + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); + } + + search_key = btree_iter_search_key(iter); + iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); + + ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + + if ((iter->flags & BTREE_ITER_CACHED) || + !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) { + struct bkey_i *next_update; + + if ((next_update = btree_trans_peek_updates(iter)) && + bpos_eq(next_update->k.p, iter->pos)) { + iter->k = next_update->k; + k = bkey_i_to_s_c(next_update); + goto out; + } + + if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) && + (k = btree_trans_peek_slot_journal(trans, iter)).k) + goto out; + + if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) && + (k = btree_trans_peek_key_cache(iter, iter->pos)).k) { + if (!bkey_err(k)) + iter->k = *k.k; + /* We're not returning a key from iter->path: */ + goto out_no_locked; + } + + k = bch2_btree_path_peek_slot(iter->path, &iter->k); + if (unlikely(!k.k)) + goto out_no_locked; + } else { + struct bpos next; + struct bpos end = iter->pos; + + if (iter->flags & BTREE_ITER_IS_EXTENTS) + end.offset = U64_MAX; - BUG_ON(IS_ERR(iter)); - BUG_ON(bkey_cmp(iter->pos, pos)); + EBUG_ON(iter->path->level); - iter->locks_want = locks_want; - iter->level = depth; + if (iter->flags & BTREE_ITER_INTENT) { + struct btree_iter iter2; - for (i = 0; i < ARRAY_SIZE(iter->l); i++) - iter->l[i].b = NULL; - iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT; + bch2_trans_copy_iter(&iter2, iter); + k = bch2_btree_iter_peek_upto(&iter2, end); - return iter; + if (k.k && !bkey_err(k)) { + iter->k = iter2.k; + k.k = &iter->k; + } + bch2_trans_iter_exit(trans, &iter2); + } else { + struct bpos pos = iter->pos; + + k = bch2_btree_iter_peek_upto(iter, end); + if (unlikely(bkey_err(k))) + bch2_btree_iter_set_pos(iter, pos); + else + iter->pos = pos; + } + + if (unlikely(bkey_err(k))) + goto out_no_locked; + + next = k.k ? 
bkey_start_pos(k.k) : POS_MAX; + + if (bkey_lt(iter->pos, next)) { + bkey_init(&iter->k); + iter->k.p = iter->pos; + + if (iter->flags & BTREE_ITER_IS_EXTENTS) { + bch2_key_resize(&iter->k, + min_t(u64, KEY_SIZE_MAX, + (next.inode == iter->pos.inode + ? next.offset + : KEY_OFFSET_MAX) - + iter->pos.offset)); + EBUG_ON(!iter->k.size); + } + + k = (struct bkey_s_c) { &iter->k, NULL }; + } + } +out: + btree_path_set_should_be_locked(iter->path); +out_no_locked: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); + ret = bch2_btree_iter_verify_ret(iter, k); + if (unlikely(ret)) + return bkey_s_c_err(ret); + + return k; } -struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans, - struct btree_iter *src) +struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) { - struct btree_iter *iter; - int i, idx; + if (!bch2_btree_iter_advance(iter)) + return bkey_s_c_null; - idx = btree_trans_iter_alloc(trans); - if (idx < 0) - return ERR_PTR(idx); + return bch2_btree_iter_peek_slot(iter); +} - trans->iters_live |= 1ULL << idx; - trans->iters_touched |= 1ULL << idx; - trans->iters_unlink_on_restart |= 1ULL << idx; +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) +{ + if (!bch2_btree_iter_rewind(iter)) + return bkey_s_c_null; - iter = &trans->iters[idx]; + return bch2_btree_iter_peek_slot(iter); +} - memcpy(&iter->trans, - &src->trans, - (void *) &iter[1] - (void *) &iter->trans); +struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter) +{ + struct bkey_s_c k; - for (i = 0; i < BTREE_MAX_DEPTH; i++) - if (btree_node_locked(iter, i)) - six_lock_increment(&iter->l[i].b->lock, - __btree_lock_want(iter, i)); + while (btree_trans_too_many_iters(iter->trans) || + (k = bch2_btree_iter_peek_type(iter, iter->flags), + bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart))) + bch2_trans_begin(iter->trans); - return &trans->iters[idx]; + return k; } -static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size) +/* new transactional stuff: */ + +#ifdef CONFIG_BCACHEFS_DEBUG +static void btree_trans_verify_sorted_refs(struct btree_trans *trans) { - if (size > trans->mem_bytes) { - size_t old_bytes = trans->mem_bytes; - size_t new_bytes = roundup_pow_of_two(size); - void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS); + struct btree_path *path; + unsigned i; - if (!new_mem) - return -ENOMEM; + BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated)); - trans->mem = new_mem; - trans->mem_bytes = new_bytes; + trans_for_each_path(trans, path) { + BUG_ON(path->sorted_idx >= trans->nr_sorted); + BUG_ON(trans->sorted[path->sorted_idx] != path->idx); + } + + for (i = 0; i < trans->nr_sorted; i++) { + unsigned idx = trans->sorted[i]; - if (old_bytes) { - trace_trans_restart_mem_realloced(trans->ip, new_bytes); - return -EINTR; + EBUG_ON(!(trans->paths_allocated & (1ULL << idx))); + BUG_ON(trans->paths[idx].sorted_idx != i); + } +} + +static void btree_trans_verify_sorted(struct btree_trans *trans) +{ + struct btree_path *path, *prev = NULL; + unsigned i; + + if (!bch2_debug_check_iterators) + return; + + trans_for_each_path_inorder(trans, path, i) { + if (prev && btree_path_cmp(prev, path) > 0) { + __bch2_dump_trans_paths_updates(trans, true); + panic("trans paths out of order!\n"); } + prev = path; } +} +#else +static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {} +static inline void btree_trans_verify_sorted(struct btree_trans *trans) {} +#endif - return 0; +void 
__bch2_btree_trans_sort_paths(struct btree_trans *trans) +{ + int i, l = 0, r = trans->nr_sorted, inc = 1; + bool swapped; + + btree_trans_verify_sorted_refs(trans); + + if (trans->paths_sorted) + goto out; + + /* + * Cocktail shaker sort: this is efficient because iterators will be + * mostly sorted. + */ + do { + swapped = false; + + for (i = inc > 0 ? l : r - 2; + i + 1 < r && i >= l; + i += inc) { + if (btree_path_cmp(trans->paths + trans->sorted[i], + trans->paths + trans->sorted[i + 1]) > 0) { + swap(trans->sorted[i], trans->sorted[i + 1]); + trans->paths[trans->sorted[i]].sorted_idx = i; + trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1; + swapped = true; + } + } + + if (inc > 0) + --r; + else + l++; + inc = -inc; + } while (swapped); + + trans->paths_sorted = true; +out: + btree_trans_verify_sorted(trans); +} + +static inline void btree_path_list_remove(struct btree_trans *trans, + struct btree_path *path) +{ + unsigned i; + + EBUG_ON(path->sorted_idx >= trans->nr_sorted); +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + trans->nr_sorted--; + memmove_u64s_down_small(trans->sorted + path->sorted_idx, + trans->sorted + path->sorted_idx + 1, + DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8)); +#else + array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx); +#endif + for (i = path->sorted_idx; i < trans->nr_sorted; i++) + trans->paths[trans->sorted[i]].sorted_idx = i; + + path->sorted_idx = U8_MAX; +} + +static inline void btree_path_list_add(struct btree_trans *trans, + struct btree_path *pos, + struct btree_path *path) +{ + unsigned i; + + path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1, + trans->sorted + path->sorted_idx, + DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8)); + trans->nr_sorted++; + trans->sorted[path->sorted_idx] = path->idx; +#else + array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx); +#endif + + for (i = path->sorted_idx; i < trans->nr_sorted; i++) + trans->paths[trans->sorted[i]].sorted_idx = i; + + btree_trans_verify_sorted_refs(trans); +} + +void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) +{ + if (iter->update_path) + bch2_path_put_nokeep(trans, iter->update_path, + iter->flags & BTREE_ITER_INTENT); + if (iter->path) + bch2_path_put(trans, iter->path, + iter->flags & BTREE_ITER_INTENT); + if (iter->key_cache_path) + bch2_path_put(trans, iter->key_cache_path, + iter->flags & BTREE_ITER_INTENT); + iter->path = NULL; + iter->update_path = NULL; + iter->key_cache_path = NULL; +} + +static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans, + struct btree_iter *iter, + unsigned btree_id, struct bpos pos, + unsigned flags) +{ + bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, + bch2_btree_iter_flags(trans, btree_id, flags), + _RET_IP_); } -void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size) +void bch2_trans_iter_init_outlined(struct btree_trans *trans, + struct btree_iter *iter, + unsigned btree_id, struct bpos pos, + unsigned flags) { + bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, + bch2_btree_iter_flags(trans, btree_id, flags), + _RET_IP_); +} + +void bch2_trans_node_iter_init(struct btree_trans *trans, + struct btree_iter *iter, + enum btree_id btree_id, + struct bpos pos, + unsigned locks_want, + unsigned depth, + unsigned flags) +{ + flags |= BTREE_ITER_NOT_EXTENTS; + flags |= 
__BTREE_ITER_ALL_SNAPSHOTS; + flags |= BTREE_ITER_ALL_SNAPSHOTS; + + bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth, + __bch2_btree_iter_flags(trans, btree_id, flags), + _RET_IP_); + + iter->min_depth = depth; + + BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH)); + BUG_ON(iter->path->level != depth); + BUG_ON(iter->min_depth != depth); +} + +void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src) +{ + *dst = *src; + if (src->path) + __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT); + if (src->update_path) + __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT); + dst->key_cache_path = NULL; +} + +void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size) +{ + unsigned new_top = trans->mem_top + size; + size_t old_bytes = trans->mem_bytes; + size_t new_bytes = roundup_pow_of_two(new_top); + void *new_mem; void *p; - int ret; - ret = bch2_trans_preload_mem(trans, trans->mem_top + size); - if (ret) - return ERR_PTR(ret); + trans->mem_max = max(trans->mem_max, new_top); + + WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); + + new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS); + if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) { + new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL); + new_bytes = BTREE_TRANS_MEM_MAX; + kfree(trans->mem); + } + + if (!new_mem) + return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc); + + trans->mem = new_mem; + trans->mem_bytes = new_bytes; + + if (old_bytes) { + trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced)); + } p = trans->mem + trans->mem_top; trans->mem_top += size; + memset(p, 0, size); return p; } -inline void bch2_trans_unlink_iters(struct btree_trans *trans, u64 iters) +static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans) +{ + struct bch_fs *c = trans->c; + struct btree_path *path; + + trans_for_each_path(trans, path) + if (path->cached && !btree_node_locked(path, 0)) + path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset); + + srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); + trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); + trans->srcu_lock_time = jiffies; +} + +/** + * bch2_trans_begin() - reset a transaction after a interrupted attempt + * @trans: transaction to reset + * + * While iterating over nodes or updating nodes a attempt to lock a btree node + * may return BCH_ERR_transaction_restart when the trylock fails. When this + * occurs bch2_trans_begin() should be called and the transaction retried. 
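+ *
+ * A typical retry loop looks roughly like this (illustrative sketch only;
+ * do_stuff() stands in for the caller's own work):
+ *
+ *	do {
+ *		bch2_trans_begin(trans);
+ *		k = bch2_btree_iter_peek(&iter);
+ *		ret = bkey_err(k) ?: do_stuff(trans, k);
+ *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));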
+ */
+u32 bch2_trans_begin(struct btree_trans *trans)
 {
-	iters &= trans->iters_linked;
-	iters &= ~trans->iters_live;
+	struct btree_path *path;
+
+	bch2_trans_reset_updates(trans);
+
+	trans->restart_count++;
+	trans->mem_top = 0;
+
+	trans_for_each_path(trans, path) {
+		path->should_be_locked = false;
+
+		/*
+		 * If the transaction wasn't restarted, we're presuming to be
+		 * doing something new: don't keep iterators except the ones
+		 * that are in use - except for the subvolumes btree:
+		 */
+		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
+			path->preserve = false;
+
+		/*
+		 * XXX: we probably shouldn't be doing this if the transaction
+		 * was restarted, but currently we still overflow transaction
+		 * iterators if we do that
+		 */
+		if (!path->ref && !path->preserve)
+			__bch2_path_free(trans, path);
+		else
+			path->preserve = false;
+	}
 
-	while (iters) {
-		unsigned idx = __ffs64(iters);
+	if (!trans->restarted &&
+	    (need_resched() ||
+	     local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
+		bch2_trans_unlock(trans);
+		cond_resched();
+		bch2_trans_relock(trans);
+	}
+
+	if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
+		bch2_trans_reset_srcu_lock(trans);
 
-		iters &= ~(1ULL << idx);
-		__bch2_trans_iter_free(trans, idx);
+	trans->last_begin_ip = _RET_IP_;
+	if (trans->restarted) {
+		bch2_btree_path_traverse_all(trans);
+		trans->notrace_relock_fail = false;
 	}
+
+	trans->last_begin_time = local_clock();
+	return trans->restart_count;
 }
 
-void bch2_trans_begin(struct btree_trans *trans)
+static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
 {
-	u64 iters_to_unlink;
+	size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
+	size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
+	void *p = NULL;
 
-	/*
-	 * On transaction restart, the transaction isn't required to allocate
-	 * all the same iterators it on the last iteration:
-	 *
-	 * Unlink any iterators it didn't use this iteration, assuming it got
-	 * further (allocated an iter with a higher idx) than where the iter
-	 * was originally allocated:
-	 */
-	iters_to_unlink = ~trans->iters_live &
-		((1ULL << fls64(trans->iters_live)) - 1);
+	BUG_ON(trans->used_mempool);
 
-	iters_to_unlink |= trans->iters_unlink_on_restart;
-	iters_to_unlink |= trans->iters_unlink_on_commit;
+#ifdef __KERNEL__
+	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
+#endif
+	if (!p)
+		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
 
-	trans->iters_live = 0;
+	trans->paths = p; p += paths_bytes;
+	trans->updates = p; p += updates_bytes;
+}
 
-	bch2_trans_unlink_iters(trans, iters_to_unlink);
+const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
 
-	trans->iters_touched = 0;
-	trans->iters_unlink_on_restart = 0;
-	trans->iters_unlink_on_commit = 0;
-	trans->nr_updates = 0;
-	trans->mem_top = 0;
+unsigned bch2_trans_get_fn_idx(const char *fn)
+{
+	unsigned i;
 
-	bch2_btree_iter_traverse_all(trans);
+	for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
+		if (!bch2_btree_transaction_fns[i] ||
+		    bch2_btree_transaction_fns[i] == fn) {
+			bch2_btree_transaction_fns[i] = fn;
+			return i;
+		}
+
+	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
+	return i;
 }
 
-void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
-		     unsigned expected_nr_iters,
-		     size_t expected_mem_bytes)
+void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
+	__acquires(&c->btree_trans_barrier)
 {
-	memset(trans, 0, offsetof(struct 
btree_trans, iters_onstack)); + struct btree_transaction_stats *s; + + bch2_assert_btree_nodes_not_locked(); + memset(trans, 0, sizeof(*trans)); trans->c = c; - trans->ip = _RET_IP_; - trans->size = ARRAY_SIZE(trans->iters_onstack); - trans->iters = trans->iters_onstack; - trans->updates = trans->updates_onstack; - trans->fs_usage_deltas = NULL; + trans->fn = fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns) + ? bch2_btree_transaction_fns[fn_idx] : NULL; + trans->last_begin_time = local_clock(); + trans->fn_idx = fn_idx; + trans->locking_wait.task = current; + trans->journal_replay_not_finished = + !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags); + closure_init_stack(&trans->ref); + + bch2_trans_alloc_paths(trans, c); + + s = btree_trans_stats(trans); + if (s && s->max_mem) { + unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem); + + trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL); + + if (!unlikely(trans->mem)) { + trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL); + trans->mem_bytes = BTREE_TRANS_MEM_MAX; + } else { + trans->mem_bytes = expected_mem_bytes; + } + } + + if (s) { + trans->nr_max_paths = s->nr_max_paths; + trans->wb_updates_size = s->wb_updates_size; + } - if (expected_nr_iters > trans->size) - bch2_trans_realloc_iters(trans, expected_nr_iters); + trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); + trans->srcu_lock_time = jiffies; + + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) { + struct btree_trans *pos; + + mutex_lock(&c->btree_trans_lock); + list_for_each_entry(pos, &c->btree_trans_list, list) { + /* + * We'd much prefer to be stricter here and completely + * disallow multiple btree_trans in the same thread - + * but the data move path calls bch2_write when we + * already have a btree_trans initialized. 
+ */ + BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid && + bch2_trans_locked(pos)); + + if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) { + list_add_tail(&trans->list, &pos->list); + goto list_add_done; + } + } + list_add_tail(&trans->list, &c->btree_trans_list); +list_add_done: + mutex_unlock(&c->btree_trans_lock); + } +} + +static void check_btree_paths_leaked(struct btree_trans *trans) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + struct bch_fs *c = trans->c; + struct btree_path *path; - if (expected_mem_bytes) - bch2_trans_preload_mem(trans, expected_mem_bytes); + trans_for_each_path(trans, path) + if (path->ref) + goto leaked; + return; +leaked: + bch_err(c, "btree paths leaked from %s!", trans->fn); + trans_for_each_path(trans, path) + if (path->ref) + printk(KERN_ERR " btree %s %pS\n", + bch2_btree_ids[path->btree_id], + (void *) path->ip_allocated); + /* Be noisy about this: */ + bch2_fatal_error(c); +#endif } -int bch2_trans_exit(struct btree_trans *trans) +void bch2_trans_exit(struct btree_trans *trans) + __releases(&c->btree_trans_barrier) { + struct btree_insert_entry *i; + struct bch_fs *c = trans->c; + struct btree_transaction_stats *s = btree_trans_stats(trans); + bch2_trans_unlock(trans); - kfree(trans->fs_usage_deltas); - kfree(trans->mem); - if (trans->used_mempool) - mempool_free(trans->iters, &trans->c->btree_iters_pool); - else if (trans->iters != trans->iters_onstack) - kfree(trans->iters); + closure_sync(&trans->ref); + + if (s) + s->max_mem = max(s->max_mem, trans->mem_max); + + trans_for_each_update(trans, i) + __btree_path_put(i->path, true); + trans->nr_updates = 0; + + check_btree_paths_leaked(trans); + + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) { + mutex_lock(&c->btree_trans_lock); + list_del(&trans->list); + mutex_unlock(&c->btree_trans_lock); + } + + srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); + + bch2_journal_preres_put(&c->journal, &trans->journal_preres); + + kfree(trans->extra_journal_entries.data); + + if (trans->fs_usage_deltas) { + if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) == + REPLICAS_DELTA_LIST_MAX) + mempool_free(trans->fs_usage_deltas, + &c->replicas_delta_pool); + else + kfree(trans->fs_usage_deltas); + } + + if (trans->mem_bytes == BTREE_TRANS_MEM_MAX) + mempool_free(trans->mem, &c->btree_trans_mem_pool); + else + kfree(trans->mem); + +#ifdef __KERNEL__ + /* + * Userspace doesn't have a real percpu implementation: + */ + trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths); +#endif + + if (trans->paths) + mempool_free(trans->paths, &c->btree_paths_pool); + trans->mem = (void *) 0x1; - trans->iters = (void *) 0x1; + trans->paths = (void *) 0x1; +} + +static void __maybe_unused +bch2_btree_bkey_cached_common_to_text(struct printbuf *out, + struct btree_bkey_cached_common *b) +{ + struct six_lock_count c = six_lock_counts(&b->lock); + struct task_struct *owner; + pid_t pid; + + rcu_read_lock(); + owner = READ_ONCE(b->lock.owner); + pid = owner ? owner->pid : 0; + rcu_read_unlock(); - return trans->error ? -EIO : 0; + prt_tab(out); + prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 
'c' : 'b', + b->level, bch2_btree_ids[b->btree_id]); + bch2_bpos_to_text(out, btree_node_pos(b)); + + prt_tab(out); + prt_printf(out, " locks %u:%u:%u held by pid %u", + c.n[0], c.n[1], c.n[2], pid); +} + +void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) +{ + struct btree_path *path; + struct btree_bkey_cached_common *b; + static char lock_types[] = { 'r', 'i', 'w' }; + unsigned l; + + if (!out->nr_tabstops) { + printbuf_tabstop_push(out, 16); + printbuf_tabstop_push(out, 32); + } + + prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn); + + trans_for_each_path(trans, path) { + if (!path->nodes_locked) + continue; + + prt_printf(out, " path %u %c l=%u %s:", + path->idx, + path->cached ? 'c' : 'b', + path->level, + bch2_btree_ids[path->btree_id]); + bch2_bpos_to_text(out, path->pos); + prt_newline(out); + + for (l = 0; l < BTREE_MAX_DEPTH; l++) { + if (btree_node_locked(path, l) && + !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) { + prt_printf(out, " %c l=%u ", + lock_types[btree_node_locked_type(path, l)], l); + bch2_btree_bkey_cached_common_to_text(out, b); + prt_newline(out); + } + } + } + + b = READ_ONCE(trans->locking); + if (b) { + prt_printf(out, " blocked for %lluus on", + div_u64(local_clock() - trans->locking_wait.start_time, + 1000)); + prt_newline(out); + prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]); + bch2_btree_bkey_cached_common_to_text(out, b); + prt_newline(out); + } +} + +void bch2_fs_btree_iter_exit(struct bch_fs *c) +{ + struct btree_transaction_stats *s; + + for (s = c->btree_transaction_stats; + s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); + s++) { + kfree(s->max_paths_text); + bch2_time_stats_exit(&s->lock_hold_times); + } + + if (c->btree_trans_barrier_initialized) + cleanup_srcu_struct(&c->btree_trans_barrier); + mempool_exit(&c->btree_trans_mem_pool); + mempool_exit(&c->btree_paths_pool); +} + +int bch2_fs_btree_iter_init(struct bch_fs *c) +{ + struct btree_transaction_stats *s; + unsigned nr = BTREE_ITER_MAX; + int ret; + + for (s = c->btree_transaction_stats; + s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); + s++) { + bch2_time_stats_init(&s->lock_hold_times); + mutex_init(&s->lock); + } + + INIT_LIST_HEAD(&c->btree_trans_list); + mutex_init(&c->btree_trans_lock); + + ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1, + sizeof(struct btree_path) * nr + + sizeof(struct btree_insert_entry) * nr) ?: + mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1, + BTREE_TRANS_MEM_MAX) ?: + init_srcu_struct(&c->btree_trans_barrier); + if (!ret) + c->btree_trans_barrier_initialized = true; + return ret; }
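The path-sorting code near the top of this hunk notes that cocktail shaker sort is a good fit because the transaction's paths are already mostly sorted. As a standalone illustration of that technique on a plain int array (a minimal sketch in ordinary C, not bcachefs code), the same alternate-direction scan looks like this:

#include <stdbool.h>
#include <stddef.h>

/*
 * Cocktail shaker sort: alternate forward and backward passes, shrinking the
 * unsorted window from both ends; nearly-sorted input needs very few passes.
 */
static void cocktail_sort(int *a, size_t n)
{
	size_t l = 0, r = n;
	bool swapped = true;

	while (swapped && l + 1 < r) {
		size_t i;

		swapped = false;

		/* forward pass bubbles the largest element to the right end */
		for (i = l; i + 1 < r; i++)
			if (a[i] > a[i + 1]) {
				int tmp = a[i]; a[i] = a[i + 1]; a[i + 1] = tmp;
				swapped = true;
			}
		r--;

		/* backward pass bubbles the smallest element to the left end */
		for (i = r - 1; i > l; i--)
			if (a[i - 1] > a[i]) {
				int tmp = a[i - 1]; a[i - 1] = a[i]; a[i] = tmp;
				swapped = true;
			}
		l++;
	}
}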
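__bch2_trans_kmalloc() above is the grow path of a per-transaction bump allocator over trans->mem: once the buffer has to be reallocated, pointers handed out earlier are invalid, so it returns an error pointer carrying a transaction-restart error and lets the whole transaction retry with the larger buffer already in place. A caller-side sketch, assuming the usual bch2_trans_kmalloc() inline wrapper that falls back to the __bch2_trans_kmalloc() shown above; struct my_scratch and alloc_scratch() are hypothetical, not bcachefs APIs:

/* Hypothetical caller-side scratch buffer, for illustration only: */
struct my_scratch {
	u64	nr;
	u64	entries[16];
};

/*
 * Allocate from the transaction's bump allocator.  On a restart error the
 * error pointer is simply propagated; the caller's retry loop re-runs this
 * after bch2_trans_begin(), by which point trans->mem is already big enough.
 */
static int alloc_scratch(struct btree_trans *trans, struct my_scratch **out)
{
	struct my_scratch *s = bch2_trans_kmalloc(trans, sizeof(*s));

	if (IS_ERR(s))
		return PTR_ERR(s);

	*out = s;
	return 0;
}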
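The bch2_trans_begin() comment above describes the restart protocol: when any step fails with a transaction-restart error, call bch2_trans_begin() and run the whole sequence again. A minimal sketch of that loop, assuming an already-initialized btree_trans; do_transaction_step() is a hypothetical caller-supplied function returning a bcachefs error code:

/*
 * Retry loop sketch: bch2_trans_begin() resets the transaction, then the
 * caller's work is re-run until it stops failing with a restart error.
 * bch2_err_matches() is the usual way bcachefs code tests for the
 * BCH_ERR_transaction_restart error class.
 */
static int run_until_done(struct btree_trans *trans)
{
	int ret;

	do {
		bch2_trans_begin(trans);

		ret = do_transaction_step(trans);
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}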