X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_iter.c;h=425c9ad779e326b7b3ea32c01f54046bd0b73891;hb=ff247cc54875d28a82245371f174e15eb304c367;hp=6fab76c3220c55f64687d93299ac942d941cb84d;hpb=04159ffa875ec3088d00408253db95669470b64c;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 6fab76c..425c9ad 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -2,18 +2,53 @@
 #include "bcachefs.h"
 #include "bkey_methods.h"
+#include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
 #include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update.h"
 #include "debug.h"
+#include "error.h"
 #include "extents.h"
 #include "journal.h"
+#include "replicas.h"
 
 #include <linux/prefetch.h>
 #include <trace/events/bcachefs.h>
 
+static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
+
+static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
+{
+	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
+
+	/* Are we iterating over keys in all snapshots? */
+	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+		p = bpos_successor(p);
+	} else {
+		p = bpos_nosnap_successor(p);
+		p.snapshot = iter->snapshot;
+	}
+
+	return p;
+}
+
+static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
+{
+	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
+
+	/* Are we iterating over keys in all snapshots? */
+	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+		p = bpos_predecessor(p);
+	} else {
+		p = bpos_nosnap_predecessor(p);
+		p.snapshot = iter->snapshot;
+	}
+
+	return p;
+}
+
 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 {
 	return l < BTREE_MAX_DEPTH &&
@@ -26,20 +61,20 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 
 	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
 	    bkey_cmp(pos, POS_MAX))
-		pos = bkey_successor(pos);
+		pos = bkey_successor(iter, pos);
 
 	return pos;
 }
 
 static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
 					      struct btree *b)
 {
-	return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
+	return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
}
 
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 					     struct btree *b)
 {
-	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
+	return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
 }
 
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
@@ -197,13 +232,14 @@ static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
 
 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 			    unsigned level, struct btree_iter *iter,
 			    enum six_lock_type type,
-			    six_lock_should_sleep_fn should_sleep_fn,
-			    void *p)
+			    six_lock_should_sleep_fn should_sleep_fn, void *p,
+			    unsigned long ip)
 {
 	struct btree_trans *trans = iter->trans;
-	struct btree_iter *linked;
+	struct btree_iter *linked, *deadlock_iter = NULL;
 	u64 start_time = local_clock();
-	bool ret = true;
+	unsigned reason = 9;
+	bool ret;
 
 	/* Check if it's safe to block: */
 	trans_for_each_iter(trans, linked) {
@@ -224,15 +260,33 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 		 */
 		if (type == SIX_LOCK_intent &&
 		    linked->nodes_locked != linked->nodes_intent_locked) {
-			if (!(trans->nounlock)) {
-				linked->locks_want = max_t(unsigned,
-						linked->locks_want,
-						__fls(linked->nodes_locked) + 1);
-				if (!btree_iter_get_locks(linked, true, false))
-					ret = false;
-			} else {
-				ret = false;
+			linked->locks_want = max_t(unsigned,
+					linked->locks_want,
+					__fls(linked->nodes_locked) + 1);
+			if 
(!btree_iter_get_locks(linked, true, false)) { + deadlock_iter = linked; + reason = 1; + } + } + + if (linked->btree_id != iter->btree_id) { + if (linked->btree_id > iter->btree_id) { + deadlock_iter = linked; + reason = 3; } + continue; + } + + /* + * Within the same btree, cached iterators come before non + * cached iterators: + */ + if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) { + if (btree_iter_is_cached(iter)) { + deadlock_iter = linked; + reason = 4; + } + continue; } /* @@ -240,30 +294,24 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, * another iterator has possible descendants locked of the node * we're about to lock, it must have the ancestors locked too: */ - if (linked->btree_id == iter->btree_id && - level > __fls(linked->nodes_locked)) { - if (!(trans->nounlock)) { - linked->locks_want = - max(level + 1, max_t(unsigned, - linked->locks_want, - iter->locks_want)); - if (!btree_iter_get_locks(linked, true, false)) - ret = false; - } else { - ret = false; + if (level > __fls(linked->nodes_locked)) { + linked->locks_want = + max(level + 1, max_t(unsigned, + linked->locks_want, + iter->locks_want)); + if (!btree_iter_get_locks(linked, true, false)) { + deadlock_iter = linked; + reason = 5; } } /* Must lock btree nodes in key order: */ - if ((cmp_int(iter->btree_id, linked->btree_id) ?: - -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0) - ret = false; - - if (iter->btree_id == linked->btree_id && - btree_node_locked(linked, level) && - bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b, - btree_iter_type(linked))) <= 0) - ret = false; + if (btree_node_locked(linked, level) && + bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b, + btree_iter_type(linked))) <= 0) { + deadlock_iter = linked; + reason = 7; + } /* * Recheck if this is a node we already have locked - since one @@ -277,20 +325,36 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, } } - if (unlikely(!ret)) { - trace_trans_restart_would_deadlock(iter->trans->ip); + if (unlikely(deadlock_iter)) { + trace_trans_restart_would_deadlock(iter->trans->ip, ip, + reason, + deadlock_iter->btree_id, + btree_iter_type(deadlock_iter), + iter->btree_id, + btree_iter_type(iter)); return false; } if (six_trylock_type(&b->c.lock, type)) return true; - if (six_lock_type(&b->c.lock, type, should_sleep_fn, p)) - return false; +#ifdef CONFIG_BCACHEFS_DEBUG + trans->locking_iter_idx = iter->idx; + trans->locking_pos = pos; + trans->locking_btree_id = iter->btree_id; + trans->locking_level = level; + trans->locking = b; +#endif - bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)], - start_time); - return true; + ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0; + +#ifdef CONFIG_BCACHEFS_DEBUG + trans->locking = NULL; +#endif + if (ret) + bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)], + start_time); + return ret; } /* Btree iterator locking: */ @@ -319,7 +383,7 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans) { struct btree_iter *iter; - trans_for_each_iter_all(trans, iter) + trans_for_each_iter(trans, iter) bch2_btree_iter_verify_locks(iter); } #else @@ -360,50 +424,25 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter, return false; } -bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter, - unsigned new_locks_want) +void __bch2_btree_iter_downgrade(struct btree_iter *iter, + unsigned new_locks_want) { - unsigned l = iter->level; + unsigned l; - EBUG_ON(iter->locks_want >= new_locks_want); + 
EBUG_ON(iter->locks_want < new_locks_want); iter->locks_want = new_locks_want; - do { - if (!btree_iter_node(iter, l)) - break; - - if (!bch2_btree_node_upgrade(iter, l)) { - iter->locks_want = l; - return false; - } - - l++; - } while (l < iter->locks_want); - - return true; -} - -void __bch2_btree_iter_downgrade(struct btree_iter *iter, - unsigned downgrade_to) -{ - unsigned l, new_locks_want = downgrade_to ?: - (iter->flags & BTREE_ITER_INTENT ? 1 : 0); - - if (iter->locks_want < downgrade_to) { - iter->locks_want = new_locks_want; - - while (iter->nodes_locked && - (l = __fls(iter->nodes_locked)) >= iter->locks_want) { - if (l > iter->level) { - btree_node_unlock(iter, l); - } else { - if (btree_node_intent_locked(iter, l)) { - six_lock_downgrade(&iter->l[l].b->c.lock); - iter->nodes_intent_locked ^= 1 << l; - } - break; + while (iter->nodes_locked && + (l = __fls(iter->nodes_locked)) >= iter->locks_want) { + if (l > iter->level) { + btree_node_unlock(iter, l); + } else { + if (btree_node_intent_locked(iter, l)) { + six_lock_downgrade(&iter->l[l].b->c.lock); + iter->nodes_intent_locked ^= 1 << l; } + break; } } @@ -423,13 +462,12 @@ void bch2_trans_downgrade(struct btree_trans *trans) bool bch2_trans_relock(struct btree_trans *trans) { struct btree_iter *iter; - bool ret = true; trans_for_each_iter(trans, iter) - if (iter->uptodate == BTREE_ITER_NEED_RELOCK) - ret &= bch2_btree_iter_relock(iter, true); - - return ret; + if (btree_iter_keep(trans, iter) && + !bch2_btree_iter_relock(iter, true)) + return false; + return true; } void bch2_trans_unlock(struct btree_trans *trans) @@ -463,17 +501,20 @@ static void bch2_btree_iter_verify_cached(struct btree_iter *iter) static void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned level) { - struct bpos pos = btree_iter_search_key(iter); - struct btree_iter_level *l = &iter->l[level]; - struct btree_node_iter tmp = l->iter; - bool locked = btree_node_locked(iter, level); + struct btree_iter_level *l; + struct btree_node_iter tmp; + bool locked; struct bkey_packed *p, *k; - char buf1[100], buf2[100]; + char buf1[100], buf2[100], buf3[100]; const char *msg; - if (!debug_check_iterators(iter->trans->c)) + if (!bch2_debug_check_iterators) return; + l = &iter->l[level]; + tmp = l->iter; + locked = btree_node_locked(iter, level); + if (btree_iter_type(iter) == BTREE_ITER_CACHED) { if (!level) bch2_btree_iter_verify_cached(iter); @@ -488,12 +529,7 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter, if (!bch2_btree_node_relock(iter, level)) return; - /* - * Ideally this invariant would always be true, and hopefully in the - * future it will be, but for now set_pos_same_leaf() breaks it: - */ - BUG_ON(iter->uptodate < BTREE_ITER_NEED_TRAVERSE && - !btree_iter_pos_in_node(iter, l->b)); + BUG_ON(!btree_iter_pos_in_node(iter, l->b)); /* * node iterators don't use leaf node iterator: @@ -512,16 +548,16 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter, * whiteouts) */ p = level || btree_node_type_is_extents(iter->btree_id) - ? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard) + ? 
bch2_btree_node_iter_prev(&tmp, l->b) : bch2_btree_node_iter_prev_all(&tmp, l->b); k = bch2_btree_node_iter_peek_all(&l->iter, l->b); - if (p && bkey_iter_pos_cmp(l->b, p, &pos) >= 0) { + if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) { msg = "before"; goto err; } - if (k && bkey_iter_pos_cmp(l->b, k, &pos) < 0) { + if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) { msg = "after"; goto err; } @@ -530,44 +566,72 @@ unlock: btree_node_unlock(iter, level); return; err: - strcpy(buf1, "(none)"); strcpy(buf2, "(none)"); + strcpy(buf3, "(none)"); + + bch2_bpos_to_text(&PBUF(buf1), iter->real_pos); if (p) { struct bkey uk = bkey_unpack_key(l->b, p); - bch2_bkey_to_text(&PBUF(buf1), &uk); + bch2_bkey_to_text(&PBUF(buf2), &uk); } if (k) { struct bkey uk = bkey_unpack_key(l->b, k); - bch2_bkey_to_text(&PBUF(buf2), &uk); + bch2_bkey_to_text(&PBUF(buf3), &uk); } panic("iterator should be %s key at level %u:\n" - "iter pos %s %llu:%llu\n" + "iter pos %s\n" "prev key %s\n" "cur key %s\n", - msg, level, - iter->flags & BTREE_ITER_IS_EXTENTS ? ">" : "=>", - iter->pos.inode, iter->pos.offset, - buf1, buf2); + msg, level, buf1, buf2, buf3); } static void bch2_btree_iter_verify(struct btree_iter *iter) { + enum btree_iter_type type = btree_iter_type(iter); unsigned i; - bch2_btree_trans_verify_locks(iter->trans); + EBUG_ON(iter->btree_id >= BTREE_ID_NR); + + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); + + BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); + + BUG_ON(type == BTREE_ITER_NODES && + !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)); + + BUG_ON(type != BTREE_ITER_NODES && + (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + !btree_type_has_snapshots(iter->btree_id)); + + bch2_btree_iter_verify_locks(iter); for (i = 0; i < BTREE_MAX_DEPTH; i++) bch2_btree_iter_verify_level(iter, i); } +static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) +{ + enum btree_iter_type type = btree_iter_type(iter); + + BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) && + iter->pos.snapshot != iter->snapshot); + + BUG_ON((type == BTREE_ITER_KEYS || + type == BTREE_ITER_CACHED) && + (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 || + bkey_cmp(iter->pos, iter->k.p) > 0)); +} + void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b) { struct btree_iter *iter; - if (!debug_check_iterators(trans->c)) + if (!bch2_debug_check_iterators) return; trans_for_each_iter_with_node(trans, b, iter) @@ -578,6 +642,7 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b) static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {} static inline void bch2_btree_iter_verify(struct btree_iter *iter) {} +static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {} #endif @@ -603,12 +668,11 @@ static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter, struct bkey_packed *where) { struct btree_iter_level *l = &iter->l[b->c.level]; - struct bpos pos = btree_iter_search_key(iter); if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b)) return; - if (bkey_iter_pos_cmp(l->b, where, &pos) < 0) + if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0) bch2_btree_node_iter_advance(&l->iter, l->b); btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); @@ -643,7 +707,6 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, bool iter_current_key_modified = orig_iter_pos >= offset && orig_iter_pos <= offset + 
clobber_u64s; - struct bpos iter_pos = btree_iter_search_key(iter); btree_node_iter_for_each(node_iter, set) if (set->end == old_end) @@ -651,7 +714,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, /* didn't find the bset in the iterator - might have to readd it: */ if (new_u64s && - bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) { + bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) { bch2_btree_node_iter_push(node_iter, b, where, end); goto fixup_done; } else { @@ -666,7 +729,7 @@ found: return; if (new_u64s && - bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) { + bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) { set->k = offset; } else if (set->k < offset + clobber_u64s) { set->k = offset + new_u64s; @@ -739,7 +802,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter, __bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, new_u64s); - if (debug_check_iterators(iter->trans->c)) + if (bch2_debug_check_iterators) bch2_btree_node_iter_verify(node_iter, b); } @@ -769,45 +832,50 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter, ret = bkey_disassemble(l->b, k, u); - if (debug_check_bkeys(iter->trans->c)) + if (bch2_debug_check_bkeys) bch2_bkey_debugcheck(iter->trans->c, l->b, ret); return ret; } /* peek_all() doesn't skip deleted keys */ -static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter, - struct btree_iter_level *l, - struct bkey *u) +static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter, + struct btree_iter_level *l, + struct bkey *u) { return __btree_iter_unpack(iter, l, u, bch2_btree_node_iter_peek_all(&l->iter, l->b)); } -static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, - struct btree_iter_level *l) +static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter, + struct btree_iter_level *l) { - return __btree_iter_unpack(iter, l, &iter->k, + struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k, bch2_btree_node_iter_peek(&l->iter, l->b)); + + iter->real_pos = k.k ? k.k->p : l->b->key.k.p; + return k; } -static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter, - struct btree_iter_level *l) +static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter, + struct btree_iter_level *l) { - return __btree_iter_unpack(iter, l, &iter->k, + struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k, bch2_btree_node_iter_prev(&l->iter, l->b)); + + iter->real_pos = k.k ? 
k.k->p : l->b->data->min_key;
+	return k;
 }
 
 static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
 					     struct btree_iter_level *l,
 					     int max_advance)
 {
-	struct bpos pos = btree_iter_search_key(iter);
 	struct bkey_packed *k;
 	int nr_advanced = 0;
 
 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
-	       bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
+	       bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
 		if (max_advance > 0 && nr_advanced >= max_advance)
 			return false;
 
@@ -845,12 +913,23 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 
 	if (!k ||
 	    bkey_deleted(k) ||
 	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
-		char buf[100];
+		char buf1[100];
+		char buf2[100];
+		char buf3[100];
+		char buf4[100];
 		struct bkey uk = bkey_unpack_key(b, k);
 
-		bch2_bkey_to_text(&PBUF(buf), &uk);
-		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
-		      buf, b->key.k.p.inode, b->key.k.p.offset);
+		bch2_dump_btree_node(iter->trans->c, l->b);
+		bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
+		bch2_bkey_to_text(&PBUF(buf2), &uk);
+		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
+		bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
+		panic("parent iter doesn't point to new node:\n"
+		      "iter pos %s %s\n"
+		      "iter key %s\n"
+		      "new node %s-%s\n",
+		      bch2_btree_ids[iter->btree_id], buf1,
+		      buf2, buf3, buf4);
 	}
 
 	if (!parent_locked)
@@ -860,10 +939,16 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 
 static inline void __btree_iter_init(struct btree_iter *iter,
 				     unsigned level)
 {
-	struct bpos pos = btree_iter_search_key(iter);
 	struct btree_iter_level *l = &iter->l[level];
 
-	bch2_btree_node_iter_init(&l->iter, l->b, &pos);
+	bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);
+
+	/*
+	 * Iterators to interior nodes should always be pointed at the first non
+	 * whiteout:
+	 */
+	if (level)
+		bch2_btree_node_iter_peek(&l->iter, l->b);
 
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
@@ -919,7 +1004,7 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 
 	trans_for_each_iter(iter->trans, linked)
 		if (linked->l[level].b == b) {
-			__btree_node_unlock(linked, level);
+			btree_node_unlock(linked, level);
 			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
 		}
 }
@@ -945,7 +1030,8 @@ static int lock_root_check_fn(struct six_lock *lock, void *p)
 }
 
 static inline int btree_iter_lock_root(struct btree_iter *iter,
-				       unsigned depth_want)
+				       unsigned depth_want,
+				       unsigned long trace_ip)
 {
 	struct bch_fs *c = iter->trans->c;
 	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
@@ -974,7 +1060,8 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 		lock_type = __btree_lock_want(iter, iter->level);
 		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
 					      iter, lock_type,
-					      lock_root_check_fn, rootp)))
+					      lock_root_check_fn, rootp,
+					      trace_ip)))
 			return -EINTR;
 
 		if (likely(b == READ_ONCE(*rootp) &&
@@ -1002,27 +1089,32 @@ static void btree_iter_prefetch(struct btree_iter *iter)
 	struct btree_iter_level *l = &iter->l[iter->level];
 	struct btree_node_iter node_iter = l->iter;
 	struct bkey_packed *k;
-	BKEY_PADDED(k) tmp;
+	struct bkey_buf tmp;
 	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
 		? (iter->level > 1 ? 0 : 2)
 		: (iter->level > 1 ? 
1 : 16); bool was_locked = btree_node_locked(iter, iter->level); + bch2_bkey_buf_init(&tmp); + while (nr) { if (!bch2_btree_node_relock(iter, iter->level)) - return; + break; bch2_btree_node_iter_advance(&node_iter, l->b); k = bch2_btree_node_iter_peek(&node_iter, l->b); if (!k) break; - bch2_bkey_unpack(l->b, &tmp.k, k); - bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1); + bch2_bkey_buf_unpack(&tmp, c, l->b, k); + bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id, + iter->level - 1); } if (!was_locked) btree_node_unlock(iter, iter->level); + + bch2_bkey_buf_exit(&tmp, c); } static noinline void btree_node_mem_ptr_set(struct btree_iter *iter, @@ -1046,45 +1138,45 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter, btree_node_unlock(iter, plevel); } -static __always_inline int btree_iter_down(struct btree_iter *iter) +static __always_inline int btree_iter_down(struct btree_iter *iter, + unsigned long trace_ip) { struct bch_fs *c = iter->trans->c; struct btree_iter_level *l = &iter->l[iter->level]; struct btree *b; unsigned level = iter->level - 1; enum six_lock_type lock_type = __btree_lock_want(iter, level); - BKEY_PADDED(k) tmp; + struct bkey_buf tmp; + int ret; EBUG_ON(!btree_node_locked(iter, iter->level)); - bch2_bkey_unpack(l->b, &tmp.k, + bch2_bkey_buf_init(&tmp); + bch2_bkey_buf_unpack(&tmp, c, l->b, bch2_btree_node_iter_peek(&l->iter, l->b)); - b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type); - if (unlikely(IS_ERR(b))) - return PTR_ERR(b); + b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip); + ret = PTR_ERR_OR_ZERO(b); + if (unlikely(ret)) + goto err; mark_btree_node_locked(iter, level, lock_type); btree_iter_node_set(iter, b); - if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 && - unlikely(b != btree_node_mem_ptr(&tmp.k))) + if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 && + unlikely(b != btree_node_mem_ptr(tmp.k))) btree_node_mem_ptr_set(iter, level + 1, b); if (iter->flags & BTREE_ITER_PREFETCH) btree_iter_prefetch(iter); iter->level = level; - - return 0; -} - -static void btree_iter_up(struct btree_iter *iter) -{ - btree_node_unlock(iter, iter->level++); +err: + bch2_bkey_buf_exit(&tmp, c); + return ret; } -static int btree_iter_traverse_one(struct btree_iter *); +static int btree_iter_traverse_one(struct btree_iter *, unsigned long); static int __btree_iter_traverse_all(struct btree_trans *trans, int ret) { @@ -1104,11 +1196,12 @@ retry_all: sorted[nr_sorted++] = iter->idx; #define btree_iter_cmp_by_idx(_l, _r) \ - btree_iter_cmp(&trans->iters[_l], &trans->iters[_r]) + btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r]) bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx); #undef btree_iter_cmp_by_idx bch2_trans_unlock(trans); + cond_resched(); if (unlikely(ret == -ENOMEM)) { struct closure cl; @@ -1139,7 +1232,7 @@ retry_all: if (!(trans->iters_linked & (1ULL << idx))) continue; - ret = btree_iter_traverse_one(&trans->iters[idx]); + ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_); if (ret) goto retry_all; } @@ -1171,9 +1264,9 @@ static inline bool btree_iter_good_node(struct btree_iter *iter, !bch2_btree_node_relock(iter, l)) return false; - if (check_pos <= 0 && btree_iter_pos_before_node(iter, iter->l[l].b)) + if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b)) return false; - if (check_pos >= 0 && btree_iter_pos_after_node(iter, iter->l[l].b)) + if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b)) return false; return true; } @@ -1202,7 +1295,8 @@ static inline 
unsigned btree_iter_up_until_good_node(struct btree_iter *iter, * On error, caller (peek_node()/peek_key()) must return NULL; the error is * stashed in the iterator and returned from bch2_trans_exit(). */ -static int btree_iter_traverse_one(struct btree_iter *iter) +static int btree_iter_traverse_one(struct btree_iter *iter, + unsigned long trace_ip) { unsigned depth_want = iter->level; @@ -1223,24 +1317,8 @@ static int btree_iter_traverse_one(struct btree_iter *iter) if (unlikely(iter->level >= BTREE_MAX_DEPTH)) return 0; - /* - * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos - * here unnecessary - */ iter->level = btree_iter_up_until_good_node(iter, 0); - /* - * If we've got a btree node locked (i.e. we aren't about to relock the - * root) - advance its node iterator if necessary: - * - * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary - */ - if (is_btree_node(iter, iter->level)) { - BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b)); - - btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1); - } - /* * Note: iter->nodes[iter->level] may be temporarily NULL here - that * would indicate to other code that we got to the end of the btree, @@ -1249,8 +1327,8 @@ static int btree_iter_traverse_one(struct btree_iter *iter) */ while (iter->level > depth_want) { int ret = btree_iter_node(iter, iter->level) - ? btree_iter_down(iter) - : btree_iter_lock_root(iter, depth_want); + ? btree_iter_down(iter, trace_ip) + : btree_iter_lock_root(iter, depth_want, trace_ip); if (unlikely(ret)) { if (ret == 1) return 0; @@ -1275,32 +1353,41 @@ static int btree_iter_traverse_one(struct btree_iter *iter) return 0; } -int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) +static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) { struct btree_trans *trans = iter->trans; int ret; ret = bch2_trans_cond_resched(trans) ?: - btree_iter_traverse_one(iter); + btree_iter_traverse_one(iter, _RET_IP_); if (unlikely(ret)) ret = __btree_iter_traverse_all(trans, ret); return ret; } -static inline void bch2_btree_iter_checks(struct btree_iter *iter) +/* + * Note: + * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is + * for internal btree iterator users + * + * bch2_btree_iter_traverse sets iter->real_pos to iter->pos, + * btree_iter_traverse() does not: + */ +static inline int __must_check +btree_iter_traverse(struct btree_iter *iter) { - enum btree_iter_type type = btree_iter_type(iter); - - EBUG_ON(iter->btree_id >= BTREE_ID_NR); + return iter->uptodate >= BTREE_ITER_NEED_RELOCK + ? 
__bch2_btree_iter_traverse(iter) + : 0; +} - BUG_ON((type == BTREE_ITER_KEYS || - type == BTREE_ITER_CACHED) && - (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 || - bkey_cmp(iter->pos, iter->k.p) > 0)); +int __must_check +bch2_btree_iter_traverse(struct btree_iter *iter) +{ + btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); - bch2_btree_iter_verify_locks(iter); - bch2_btree_iter_verify_level(iter, iter->level); + return btree_iter_traverse(iter); } /* Iterate across nodes (leaf and interior nodes) */ @@ -1311,12 +1398,9 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES); - bch2_btree_iter_checks(iter); - - if (iter->uptodate == BTREE_ITER_UPTODATE) - return iter->l[iter->level].b; + bch2_btree_iter_verify(iter); - ret = bch2_btree_iter_traverse(iter); + ret = btree_iter_traverse(iter); if (ret) return NULL; @@ -1324,10 +1408,9 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) if (!b) return NULL; - BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0); + BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0); - iter->pos = b->key.k.p; - iter->uptodate = BTREE_ITER_UPTODATE; + iter->pos = iter->real_pos = b->key.k.p; bch2_btree_iter_verify(iter); @@ -1340,7 +1423,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES); - bch2_btree_iter_checks(iter); + bch2_btree_iter_verify(iter); /* already got to end? */ if (!btree_iter_node(iter, iter->level)) @@ -1348,12 +1431,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) bch2_trans_cond_resched(iter->trans); - btree_iter_up(iter); + btree_node_unlock(iter, iter->level); + iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP; + iter->level++; - if (!bch2_btree_node_relock(iter, iter->level)) - btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK); - - ret = bch2_btree_iter_traverse(iter); + btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); + ret = btree_iter_traverse(iter); if (ret) return NULL; @@ -1362,34 +1445,28 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) if (!b) return NULL; - if (bkey_cmp(iter->pos, b->key.k.p) < 0) { + if (bpos_cmp(iter->pos, b->key.k.p) < 0) { /* * Haven't gotten to the end of the parent node: go back down to * the next child node */ + btree_iter_set_search_pos(iter, bpos_successor(iter->pos)); - /* - * We don't really want to be unlocking here except we can't - * directly tell btree_iter_traverse() "traverse to this level" - * except by setting iter->level, so we have to unlock so we - * don't screw up our lock invariants: - */ - if (btree_node_read_locked(iter, iter->level)) - btree_node_unlock(iter, iter->level); - - iter->pos = bkey_successor(iter->pos); - iter->level = iter->min_depth; + /* Unlock to avoid screwing up our lock invariants: */ + btree_node_unlock(iter, iter->level); + iter->level = iter->min_depth; btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); - ret = bch2_btree_iter_traverse(iter); + bch2_btree_iter_verify(iter); + + ret = btree_iter_traverse(iter); if (ret) return NULL; b = iter->l[iter->level].b; } - iter->pos = b->key.k.p; - iter->uptodate = BTREE_ITER_UPTODATE; + iter->pos = iter->real_pos = b->key.k.p; bch2_btree_iter_verify(iter); @@ -1398,43 +1475,16 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) /* Iterate across keys (in leaf nodes only) */ -void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) -{ - struct btree_iter_level *l = 
&iter->l[0]; - - EBUG_ON(iter->level != 0); - EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); - EBUG_ON(!btree_node_locked(iter, 0)); - EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0); - - bkey_init(&iter->k); - iter->k.p = iter->pos = new_pos; - btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); - - btree_iter_advance_to_pos(iter, l, -1); - - /* - * XXX: - * keeping a node locked that's outside (even just outside) iter->pos - * breaks __bch2_btree_node_lock(). This seems to only affect - * bch2_btree_node_get_sibling so for now it's fixed there, but we - * should try to get rid of this corner case. - * - * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK) - */ - - if (bch2_btree_node_iter_end(&l->iter) && - btree_iter_pos_after_node(iter, l->b)) - btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); -} - -static void btree_iter_pos_changed(struct btree_iter *iter, int cmp) +static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos) { + int cmp = bpos_cmp(new_pos, iter->real_pos); unsigned l = iter->level; if (!cmp) goto out; + iter->real_pos = new_pos; + if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) { btree_node_unlock(iter, 0); iter->l[0].b = BTREE_ITER_NO_NODE_UP; @@ -1464,245 +1514,162 @@ out: btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE); else btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK); + + bch2_btree_iter_verify(iter); } -void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos, - bool strictly_greater) +inline bool bch2_btree_iter_advance(struct btree_iter *iter) { - struct bpos old = btree_iter_search_key(iter); - int cmp; - - iter->flags &= ~BTREE_ITER_IS_EXTENTS; - iter->flags |= strictly_greater ? BTREE_ITER_IS_EXTENTS : 0; + struct bpos pos = iter->k.p; + bool ret = bpos_cmp(pos, POS_MAX) != 0; - bkey_init(&iter->k); - iter->k.p = iter->pos = new_pos; - - cmp = bkey_cmp(btree_iter_search_key(iter), old); - - btree_iter_pos_changed(iter, cmp); + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_successor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; } -void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) +inline bool bch2_btree_iter_rewind(struct btree_iter *iter) { - int cmp = bkey_cmp(new_pos, iter->pos); - - bkey_init(&iter->k); - iter->k.p = iter->pos = new_pos; + struct bpos pos = bkey_start_pos(&iter->k); + bool ret = bpos_cmp(pos, POS_MIN) != 0; - btree_iter_pos_changed(iter, cmp); + if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) + pos = bkey_predecessor(iter, pos); + bch2_btree_iter_set_pos(iter, pos); + return ret; } static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter) { - struct btree_iter_level *l = &iter->l[0]; - bool ret; - - bkey_init(&iter->k); - iter->k.p = iter->pos = l->b->key.k.p; + struct bpos next_pos = iter->l[0].b->key.k.p; + bool ret = bpos_cmp(next_pos, POS_MAX) != 0; - ret = bkey_cmp(iter->pos, POS_MAX) != 0; - if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS)) - iter->k.p = iter->pos = bkey_successor(iter->pos); + /* + * Typically, we don't want to modify iter->pos here, since that + * indicates where we searched from - unless we got to the end of the + * btree, in that case we want iter->pos to reflect that: + */ + if (ret) + btree_iter_set_search_pos(iter, bpos_successor(next_pos)); + else + bch2_btree_iter_set_pos(iter, POS_MAX); - btree_iter_pos_changed(iter, 1); return ret; } static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter) { - struct btree_iter_level *l = 
&iter->l[0]; - bool ret; - - bkey_init(&iter->k); - iter->k.p = iter->pos = l->b->data->min_key; - iter->uptodate = BTREE_ITER_NEED_TRAVERSE; - - ret = bkey_cmp(iter->pos, POS_MIN) != 0; - if (ret) { - iter->k.p = iter->pos = bkey_predecessor(iter->pos); + struct bpos next_pos = iter->l[0].b->data->min_key; + bool ret = bpos_cmp(next_pos, POS_MIN) != 0; - if (iter->flags & BTREE_ITER_IS_EXTENTS) - iter->k.p = iter->pos = bkey_predecessor(iter->pos); - } + if (ret) + btree_iter_set_search_pos(iter, bpos_predecessor(next_pos)); + else + bch2_btree_iter_set_pos(iter, POS_MIN); - btree_iter_pos_changed(iter, -1); return ret; } -/** - * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key - * it currently points to - */ -static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter) +static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans, + enum btree_id btree_id, struct bpos pos) { - struct btree_iter_level *l = &iter->l[0]; - struct bkey_s_c ret = { .k = &iter->k }; - - if (!bkey_deleted(&iter->k)) { - struct bkey_packed *_k = - __bch2_btree_node_iter_peek_all(&l->iter, l->b); - - ret.v = bkeyp_val(&l->b->format, _k); - - if (debug_check_iterators(iter->trans->c)) { - struct bkey k = bkey_unpack_key(l->b, _k); + struct btree_insert_entry *i; - BUG_ON(memcmp(&k, &iter->k, sizeof(k))); + trans_for_each_update2(trans, i) + if ((cmp_int(btree_id, i->iter->btree_id) ?: + bkey_cmp(pos, i->k->k.p)) <= 0) { + if (btree_id == i->iter->btree_id) + return i->k; + break; } - if (debug_check_bkeys(iter->trans->c)) - bch2_bkey_debugcheck(iter->trans->c, l->b, ret); - } - - return ret; + return NULL; } -/** - * bch2_btree_iter_peek: returns first key greater than or equal to iterator's - * current position - */ -struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) +static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates) { - struct btree_iter_level *l = &iter->l[0]; + struct bpos search_key = btree_iter_search_key(iter); + struct bkey_i *next_update = with_updates + ? 
btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
+		: NULL;
 	struct bkey_s_c k;
 	int ret;
 
 	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
-	bch2_btree_iter_checks(iter);
+	bch2_btree_iter_verify(iter);
+	bch2_btree_iter_verify_entry_exit(iter);
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE &&
-	    !bkey_deleted(&iter->k))
-		return btree_iter_peek_uptodate(iter);
+	btree_iter_set_search_pos(iter, search_key);
 
 	while (1) {
-		ret = bch2_btree_iter_traverse(iter);
+		ret = btree_iter_traverse(iter);
 		if (unlikely(ret))
 			return bkey_s_c_err(ret);
 
-		k = __btree_iter_peek(iter, l);
-		if (likely(k.k))
+		k = btree_iter_level_peek(iter, &iter->l[0]);
+
+		if (next_update &&
+		    bpos_cmp(next_update->k.p, iter->real_pos) <= 0)
+			k = bkey_i_to_s_c(next_update);
+
+		if (likely(k.k)) {
+			if (bkey_deleted(k.k)) {
+				btree_iter_set_search_pos(iter,
+					bkey_successor(iter, k.k->p));
+				continue;
+			}
+
 			break;
+		}
 
 		if (!btree_iter_set_pos_to_next_leaf(iter))
 			return bkey_s_c_null;
 	}
 
 	/*
-	 * iter->pos should always be equal to the key we just
-	 * returned - except extents can straddle iter->pos:
+	 * iter->pos should be monotonically increasing, and always be equal to
+	 * the key we just returned - except extents can straddle iter->pos:
 	 */
-	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
-	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+	if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
 		iter->pos = bkey_start_pos(k.k);
 
-	iter->uptodate = BTREE_ITER_UPTODATE;
-
-	bch2_btree_iter_verify_level(iter, 0);
+	bch2_btree_iter_verify_entry_exit(iter);
+	bch2_btree_iter_verify(iter);
 	return k;
 }
 
+/**
+ * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
+ * current position
+ */
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+{
+	return __btree_iter_peek(iter, false);
+}
+
 /**
  * bch2_btree_iter_next: returns first key greater than iterator's current
  * position
  */
 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 {
-	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+	if (!bch2_btree_iter_advance(iter))
 		return bkey_s_c_null;
 
-	bch2_btree_iter_set_pos(iter,
-		(iter->flags & BTREE_ITER_IS_EXTENTS)
-		? iter->k.p
-		: bkey_successor(iter->k.p));
-
 	return bch2_btree_iter_peek(iter);
 }
 
-static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
-{
-	struct bpos pos = btree_iter_search_key(iter);
-	struct btree_trans *trans = iter->trans;
-	struct btree_insert_entry *i;
-
-	trans_for_each_update2(trans, i)
-		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
-		     bkey_cmp(pos, i->k->k.p)) <= 0)
-			break;
-
-	return i < trans->updates2 + trans->nr_updates2 &&
-		iter->btree_id == i->iter->btree_id
-		? 
bkey_i_to_s_c(i->k) - : bkey_s_c_null; -} - -static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter) -{ - struct btree_iter_level *l = &iter->l[0]; - struct bkey_s_c k = __btree_iter_peek(iter, l); - struct bkey_s_c u = __btree_trans_updates_peek(iter); - - if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0)) - return k; - if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) { - iter->k = *u.k; - return u; - } - return bkey_s_c_null; -} - struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter) { - struct bkey_s_c k; - int ret; - - EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); - bch2_btree_iter_checks(iter); - - while (1) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); - - k = __bch2_btree_iter_peek_with_updates(iter); - - if (k.k && bkey_deleted(k.k)) { - bch2_btree_iter_set_pos(iter, - (iter->flags & BTREE_ITER_IS_EXTENTS) - ? iter->k.p - : bkey_successor(iter->k.p)); - continue; - } - - if (likely(k.k)) - break; - - if (!btree_iter_set_pos_to_next_leaf(iter)) - return bkey_s_c_null; - } - - /* - * iter->pos should always be equal to the key we just - * returned - except extents can straddle iter->pos: - */ - if (!(iter->flags & BTREE_ITER_IS_EXTENTS) || - bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) - iter->pos = bkey_start_pos(k.k); - - iter->uptodate = BTREE_ITER_UPTODATE; - return k; + return __btree_iter_peek(iter, true); } struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter) { - if (unlikely(!bkey_cmp(iter->k.p, POS_MAX))) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - bch2_btree_iter_set_pos(iter, - (iter->flags & BTREE_ITER_IS_EXTENTS) - ? iter->k.p - : bkey_successor(iter->k.p)); - return bch2_btree_iter_peek_with_updates(iter); } @@ -1712,38 +1679,57 @@ struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter) */ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) { - struct bpos pos = iter->pos; struct btree_iter_level *l = &iter->l[0]; struct bkey_s_c k; int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); - bch2_btree_iter_checks(iter); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); - if (iter->uptodate == BTREE_ITER_UPTODATE && - !bkey_deleted(&iter->k)) - return btree_iter_peek_uptodate(iter); + btree_iter_set_search_pos(iter, iter->pos); while (1) { - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + ret = btree_iter_traverse(iter); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto no_key; + } - k = __btree_iter_peek(iter, l); - if (!k.k || bkey_cmp(bkey_start_pos(k.k), pos) > 0) - k = __btree_iter_prev(iter, l); + k = btree_iter_level_peek(iter, l); + if (!k.k || + ((iter->flags & BTREE_ITER_IS_EXTENTS) + ? 
bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0 + : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)) + k = btree_iter_level_prev(iter, l); if (likely(k.k)) break; - if (!btree_iter_set_pos_to_prev_leaf(iter)) - return bkey_s_c_null; + if (!btree_iter_set_pos_to_prev_leaf(iter)) { + k = bkey_s_c_null; + goto no_key; + } } - EBUG_ON(bkey_cmp(bkey_start_pos(k.k), pos) > 0); - iter->pos = bkey_start_pos(k.k); - iter->uptodate = BTREE_ITER_UPTODATE; + EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0); + + /* Extents can straddle iter->pos: */ + if (bkey_cmp(k.k->p, iter->pos) < 0) + iter->pos = k.k->p; +out: + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); return k; +no_key: + /* + * btree_iter_level_peek() may have set iter->k to a key we didn't want, and + * then we errored going to the previous leaf - make sure it's + * consistent with iter->pos: + */ + bkey_init(&iter->k); + iter->k.p = iter->pos; + goto out; } /** @@ -1752,81 +1738,52 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) */ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) { - struct bpos pos = bkey_start_pos(&iter->k); - - EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); - bch2_btree_iter_checks(iter); - - if (unlikely(!bkey_cmp(pos, POS_MIN))) + if (!bch2_btree_iter_rewind(iter)) return bkey_s_c_null; - bch2_btree_iter_set_pos(iter, bkey_predecessor(pos)); - return bch2_btree_iter_peek_prev(iter); } static inline struct bkey_s_c __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter) { - struct btree_iter_level *l = &iter->l[0]; - struct btree_node_iter node_iter; struct bkey_s_c k; - struct bkey n; - int ret; + struct bpos pos, next_start; /* keys & holes can't span inode numbers: */ if (iter->pos.offset == KEY_OFFSET_MAX) { if (iter->pos.inode == KEY_INODE_MAX) return bkey_s_c_null; - bch2_btree_iter_set_pos(iter, bkey_successor(iter->pos)); - - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos)); } - /* - * iterator is now at the correct position for inserting at iter->pos, - * but we need to keep iterating until we find the first non whiteout so - * we know how big a hole we have, if any: - */ - - node_iter = l->iter; - k = __btree_iter_unpack(iter, l, &iter->k, - bch2_btree_node_iter_peek(&node_iter, l->b)); - - if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) { - /* - * We're not setting iter->uptodate because the node iterator - * doesn't necessarily point at the key we're returning: - */ + pos = iter->pos; + k = bch2_btree_iter_peek(iter); + iter->pos = pos; - EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0); - bch2_btree_iter_verify_level(iter, 0); + if (bkey_err(k)) return k; - } - /* hole */ + if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) + return k; - if (!k.k) - k.k = &l->b->key.k; + next_start = k.k ? bkey_start_pos(k.k) : POS_MAX; - bkey_init(&n); - n.p = iter->pos; - bch2_key_resize(&n, + bkey_init(&iter->k); + iter->k.p = iter->pos; + bch2_key_resize(&iter->k, min_t(u64, KEY_SIZE_MAX, - (k.k->p.inode == n.p.inode - ? bkey_start_offset(k.k) + (next_start.inode == iter->pos.inode + ? 
next_start.offset : KEY_OFFSET_MAX) - - n.p.offset)); + iter->pos.offset)); - EBUG_ON(!n.size); + EBUG_ON(!iter->k.size); - iter->k = n; - iter->uptodate = BTREE_ITER_UPTODATE; + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); - bch2_btree_iter_verify_level(iter, 0); return (struct bkey_s_c) { &iter->k, NULL }; } @@ -1837,19 +1794,19 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS); - bch2_btree_iter_checks(iter); + bch2_btree_iter_verify(iter); + bch2_btree_iter_verify_entry_exit(iter); - if (iter->uptodate == BTREE_ITER_UPTODATE) - return btree_iter_peek_uptodate(iter); - - ret = bch2_btree_iter_traverse(iter); - if (unlikely(ret)) - return bkey_s_c_err(ret); + btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); if (iter->flags & BTREE_ITER_IS_EXTENTS) return __bch2_btree_iter_peek_slot_extents(iter); - k = __btree_iter_peek_all(iter, l, &iter->k); + ret = btree_iter_traverse(iter); + if (unlikely(ret)) + return bkey_s_c_err(ret); + + k = btree_iter_level_peek_all(iter, l, &iter->k); EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0); @@ -1860,20 +1817,23 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) k = (struct bkey_s_c) { &iter->k, NULL }; } - iter->uptodate = BTREE_ITER_UPTODATE; - bch2_btree_iter_verify_level(iter, 0); + bch2_btree_iter_verify_entry_exit(iter); + bch2_btree_iter_verify(iter); return k; } struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) { - if (unlikely(!bkey_cmp(iter->k.p, POS_MAX))) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - bch2_btree_iter_set_pos(iter, - (iter->flags & BTREE_ITER_IS_EXTENTS) - ? iter->k.p - : bkey_successor(iter->k.p)); + return bch2_btree_iter_peek_slot(iter); +} + +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) +{ + if (!bch2_btree_iter_rewind(iter)) + return bkey_s_c_null; return bch2_btree_iter_peek_slot(iter); } @@ -1884,9 +1844,9 @@ struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter) int ret; EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED); - bch2_btree_iter_checks(iter); + bch2_btree_iter_verify(iter); - ret = bch2_btree_iter_traverse(iter); + ret = btree_iter_traverse(iter); if (unlikely(ret)) return bkey_s_c_err(ret); @@ -1900,26 +1860,17 @@ struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter) } static inline void bch2_btree_iter_init(struct btree_trans *trans, - struct btree_iter *iter, enum btree_id btree_id, - struct bpos pos, unsigned flags) + struct btree_iter *iter, enum btree_id btree_id) { struct bch_fs *c = trans->c; unsigned i; - if (btree_node_type_is_extents(btree_id) && - !(flags & BTREE_ITER_NODES)) - flags |= BTREE_ITER_IS_EXTENTS; - iter->trans = trans; - iter->pos = pos; - bkey_init(&iter->k); - iter->k.p = pos; - iter->flags = flags; iter->uptodate = BTREE_ITER_NEED_TRAVERSE; iter->btree_id = btree_id; iter->level = 0; iter->min_depth = 0; - iter->locks_want = flags & BTREE_ITER_INTENT ? 
1 : 0; + iter->locks_want = 0; iter->nodes_locked = 0; iter->nodes_intent_locked = 0; for (i = 0; i < ARRAY_SIZE(iter->l); i++) @@ -1948,6 +1899,7 @@ int bch2_trans_iter_put(struct btree_trans *trans, return 0; BUG_ON(trans->iters + iter->idx != iter); + BUG_ON(!btree_iter_live(trans, iter)); ret = btree_iter_err(iter); @@ -1965,115 +1917,51 @@ int bch2_trans_iter_free(struct btree_trans *trans, if (IS_ERR_OR_NULL(iter)) return 0; - trans->iters_touched &= ~(1ULL << iter->idx); + set_btree_iter_dontneed(trans, iter); return bch2_trans_iter_put(trans, iter); } -static int bch2_trans_realloc_iters(struct btree_trans *trans, - unsigned new_size) +noinline __cold +static void btree_trans_iter_alloc_fail(struct btree_trans *trans) { - void *p, *new_iters, *new_updates, *new_updates2; - size_t iters_bytes; - size_t updates_bytes; - - new_size = roundup_pow_of_two(new_size); - - BUG_ON(new_size > BTREE_ITER_MAX); - - if (new_size <= trans->size) - return 0; - - BUG_ON(trans->used_mempool); - bch2_trans_unlock(trans); - - iters_bytes = sizeof(struct btree_iter) * new_size; - updates_bytes = sizeof(struct btree_insert_entry) * new_size; - - p = kmalloc(iters_bytes + - updates_bytes + - updates_bytes, GFP_NOFS); - if (p) - goto success; - - p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS); - new_size = BTREE_ITER_MAX; - - trans->used_mempool = true; -success: - new_iters = p; p += iters_bytes; - new_updates = p; p += updates_bytes; - new_updates2 = p; p += updates_bytes; - - memcpy(new_iters, trans->iters, - sizeof(struct btree_iter) * trans->nr_iters); - memcpy(new_updates, trans->updates, - sizeof(struct btree_insert_entry) * trans->nr_updates); - memcpy(new_updates2, trans->updates2, - sizeof(struct btree_insert_entry) * trans->nr_updates2); - - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - memset(trans->iters, POISON_FREE, - sizeof(struct btree_iter) * trans->nr_iters + - sizeof(struct btree_insert_entry) * trans->nr_iters); - - if (trans->iters != trans->iters_onstack) - kfree(trans->iters); - - trans->iters = new_iters; - trans->updates = new_updates; - trans->updates2 = new_updates2; - trans->size = new_size; + struct btree_iter *iter; + struct btree_insert_entry *i; + char buf[100]; - if (trans->iters_live) { - trace_trans_restart_iters_realloced(trans->ip, trans->size); - return -EINTR; + trans_for_each_iter(trans, iter) + printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n", + bch2_btree_ids[iter->btree_id], + (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf), + btree_iter_live(trans, iter) ? " live" : "", + (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "", + iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? 
" keep" : "", + (void *) iter->ip_allocated); + + trans_for_each_update(trans, i) { + char buf[300]; + + bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k)); + printk(KERN_ERR "update: btree %s %s\n", + bch2_btree_ids[i->iter->btree_id], buf); } - - return 0; + panic("trans iter oveflow\n"); } static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans) { - unsigned idx = __ffs64(~trans->iters_linked); + unsigned idx; - if (idx < trans->nr_iters) - goto got_slot; + if (unlikely(trans->iters_linked == + ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) + btree_trans_iter_alloc_fail(trans); - if (trans->nr_iters == trans->size) { - int ret; - - if (trans->nr_iters >= BTREE_ITER_MAX) { - struct btree_iter *iter; - - trans_for_each_iter(trans, iter) { - pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps", - bch2_btree_ids[iter->btree_id], - iter->pos.inode, - iter->pos.offset, - (trans->iters_live & (1ULL << iter->idx)) ? " live" : "", - (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "", - iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "", - (void *) iter->ip_allocated); - } - - panic("trans iter oveflow\n"); - } + idx = __ffs64(~trans->iters_linked); - ret = bch2_trans_realloc_iters(trans, trans->size * 2); - if (ret) - return ERR_PTR(ret); - } - - idx = trans->nr_iters++; - BUG_ON(trans->nr_iters > trans->size); - - trans->iters[idx].idx = idx; -got_slot: - BUG_ON(trans->iters_linked & (1ULL << idx)); - trans->iters_linked |= 1ULL << idx; - trans->iters[idx].flags = 0; + trans->iters_linked |= 1ULL << idx; + trans->iters[idx].idx = idx; + trans->iters[idx].flags = 0; return &trans->iters[idx]; } @@ -2095,21 +1983,21 @@ static inline void btree_iter_copy(struct btree_iter *dst, dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT; } -static inline struct bpos bpos_diff(struct bpos l, struct bpos r) -{ - if (bkey_cmp(l, r) > 0) - swap(l, r); - - return POS(r.inode - l.inode, r.offset - l.offset); -} - -static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans, - unsigned btree_id, struct bpos pos, - unsigned flags) +struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, + unsigned btree_id, struct bpos pos, + unsigned locks_want, + unsigned depth, + unsigned flags) { struct btree_iter *iter, *best = NULL; - BUG_ON(trans->nr_iters > BTREE_ITER_MAX); + if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES && + !btree_type_has_snapshots(btree_id)) + flags &= ~BTREE_ITER_ALL_SNAPSHOTS; + + if (!(flags & BTREE_ITER_ALL_SNAPSHOTS)) + pos.snapshot = btree_type_has_snapshots(btree_id) + ? 
U32_MAX : 0; trans_for_each_iter(trans, iter) { if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE)) @@ -2119,8 +2007,8 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans, continue; if (best && - bkey_cmp(bpos_diff(best->pos, pos), - bpos_diff(iter->pos, pos)) < 0) + bkey_cmp(bpos_diff(best->real_pos, pos), + bpos_diff(iter->real_pos, pos)) > 0) continue; best = iter; @@ -2128,52 +2016,50 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans, if (!best) { iter = btree_trans_iter_alloc(trans); - if (IS_ERR(iter)) - return iter; - - bch2_btree_iter_init(trans, iter, btree_id, pos, flags); - } else if ((trans->iters_live & (1ULL << best->idx)) || - (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) { + bch2_btree_iter_init(trans, iter, btree_id); + } else if (btree_iter_keep(trans, best)) { iter = btree_trans_iter_alloc(trans); - if (IS_ERR(iter)) - return iter; - btree_iter_copy(iter, best); } else { iter = best; } - iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT; - iter->flags &= ~BTREE_ITER_USER_FLAGS; - iter->flags |= flags & BTREE_ITER_USER_FLAGS; + trans->iters_live |= 1ULL << iter->idx; + trans->iters_touched |= 1ULL << iter->idx; - if (iter->flags & BTREE_ITER_INTENT) - bch2_btree_iter_upgrade(iter, 1); - else - bch2_btree_iter_downgrade(iter); + if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES && + btree_node_type_is_extents(btree_id) && + !(flags & BTREE_ITER_NOT_EXTENTS) && + !(flags & BTREE_ITER_ALL_SNAPSHOTS)) + flags |= BTREE_ITER_IS_EXTENTS; - BUG_ON(iter->btree_id != btree_id); - BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE); - BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT); - BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT); - BUG_ON(trans->iters_live & (1ULL << iter->idx)); + iter->flags = flags; - trans->iters_live |= 1ULL << iter->idx; - trans->iters_touched |= 1ULL << iter->idx; + iter->snapshot = pos.snapshot; - return iter; -} + locks_want = min(locks_want, BTREE_MAX_DEPTH); -struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, - enum btree_id btree_id, - struct bpos pos, unsigned flags) -{ - struct btree_iter *iter = - __btree_trans_get_iter(trans, btree_id, pos, flags); + if (locks_want > iter->locks_want) { + iter->locks_want = locks_want; + btree_iter_get_locks(iter, true, false); + } else if (locks_want < iter->locks_want) { + __bch2_btree_iter_downgrade(iter, locks_want); + } + + while (iter->level < depth) { + btree_node_unlock(iter, iter->level); + iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT; + iter->level++; + } + + while (iter->level > depth) + iter->l[--iter->level].b = BTREE_ITER_NO_NODE_INIT; + + iter->min_depth = depth; + + bch2_btree_iter_set_pos(iter, pos); + btree_iter_set_search_pos(iter, btree_iter_search_key(iter)); - if (!IS_ERR(iter)) - __bch2_btree_iter_set_pos(iter, pos, - btree_node_type_is_extents(btree_id)); return iter; } @@ -2185,20 +2071,18 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans, unsigned flags) { struct btree_iter *iter = - __btree_trans_get_iter(trans, btree_id, pos, - flags|BTREE_ITER_NODES); - unsigned i; + __bch2_trans_get_iter(trans, btree_id, pos, + locks_want, depth, + BTREE_ITER_NODES| + BTREE_ITER_NOT_EXTENTS| + BTREE_ITER_ALL_SNAPSHOTS| + flags); - BUG_ON(IS_ERR(iter)); BUG_ON(bkey_cmp(iter->pos, pos)); - - iter->locks_want = locks_want; - iter->level = depth; - iter->min_depth = depth; - - for (i = 0; i < ARRAY_SIZE(iter->l); i++) - iter->l[i].b = NULL; - iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT; + 
BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH)); + BUG_ON(iter->level != depth); + BUG_ON(iter->min_depth != depth); + iter->ip_allocated = _RET_IP_; return iter; } @@ -2209,9 +2093,6 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *iter; iter = btree_trans_iter_alloc(trans); - if (IS_ERR(iter)) - return iter; - btree_iter_copy(iter, src); trans->iters_live |= 1ULL << iter->idx; @@ -2219,7 +2100,7 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans, * We don't need to preserve this iter since it's cheap to copy it * again - this will cause trans_iter_put() to free it right away: */ - trans->iters_touched &= ~(1ULL << iter->idx); + set_btree_iter_dontneed(trans, iter); return iter; } @@ -2286,11 +2167,11 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags) trans->iters_touched &= trans->iters_live; - trans->need_reset = 0; trans->nr_updates = 0; trans->nr_updates2 = 0; trans->mem_top = 0; + trans->hooks = NULL; trans->extra_journal_entries = NULL; trans->extra_journal_entry_u64s = 0; @@ -2301,35 +2182,52 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags) (void *) &trans->fs_usage_deltas->memset_start); } + if (!(flags & TRANS_RESET_NOUNLOCK)) + bch2_trans_cond_resched(trans); + if (!(flags & TRANS_RESET_NOTRAVERSE)) bch2_btree_iter_traverse_all(trans); } +static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c) +{ + size_t iters_bytes = sizeof(struct btree_iter) * BTREE_ITER_MAX; + size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX; + void *p = NULL; + + BUG_ON(trans->used_mempool); + +#ifdef __KERNEL__ + p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL); +#endif + if (!p) + p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS); + + trans->iters = p; p += iters_bytes; + trans->updates = p; p += updates_bytes; + trans->updates2 = p; p += updates_bytes; +} + void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned expected_nr_iters, size_t expected_mem_bytes) { - memset(trans, 0, offsetof(struct btree_trans, iters_onstack)); + memset(trans, 0, sizeof(*trans)); + trans->c = c; + trans->ip = _RET_IP_; /* * reallocating iterators currently completely breaks - * bch2_trans_iter_put(): + * bch2_trans_iter_put(), we always allocate the max: */ - expected_nr_iters = BTREE_ITER_MAX; - - trans->c = c; - trans->ip = _RET_IP_; - trans->size = ARRAY_SIZE(trans->iters_onstack); - trans->iters = trans->iters_onstack; - trans->updates = trans->updates_onstack; - trans->updates2 = trans->updates2_onstack; - trans->fs_usage_deltas = NULL; + bch2_trans_alloc_iters(trans, c); - if (expected_nr_iters > trans->size) - bch2_trans_realloc_iters(trans, expected_nr_iters); + if (expected_mem_bytes) { + trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes); + trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL); + } - if (expected_mem_bytes) - bch2_trans_preload_mem(trans, expected_mem_bytes); + trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); #ifdef CONFIG_BCACHEFS_DEBUG trans->pid = current->pid; @@ -2341,37 +2239,74 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, int bch2_trans_exit(struct btree_trans *trans) { + struct bch_fs *c = trans->c; + bch2_trans_unlock(trans); #ifdef CONFIG_BCACHEFS_DEBUG + if (trans->iters_live) { + struct btree_iter *iter; + + bch_err(c, "btree iterators leaked!"); + trans_for_each_iter(trans, iter) + if (btree_iter_live(trans, iter)) + printk(KERN_ERR " btree %s 
allocated at %pS\n", + bch2_btree_ids[iter->btree_id], + (void *) iter->ip_allocated); + /* Be noisy about this: */ + bch2_fatal_error(c); + } + mutex_lock(&trans->c->btree_trans_lock); list_del(&trans->list); mutex_unlock(&trans->c->btree_trans_lock); #endif + srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); + bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres); kfree(trans->fs_usage_deltas); kfree(trans->mem); - if (trans->used_mempool) + +#ifdef __KERNEL__ + /* + * Userspace doesn't have a real percpu implementation: + */ + trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters); +#endif + if (trans->iters) mempool_free(trans->iters, &trans->c->btree_iters_pool); - else if (trans->iters != trans->iters_onstack) - kfree(trans->iters); + trans->mem = (void *) 0x1; trans->iters = (void *) 0x1; return trans->error ? -EIO : 0; } -static void bch2_btree_iter_node_to_text(struct printbuf *out, - struct btree_bkey_cached_common *_b, - enum btree_iter_type type) +static void __maybe_unused +bch2_btree_iter_node_to_text(struct printbuf *out, + struct btree_bkey_cached_common *_b, + enum btree_iter_type type) { - pr_buf(out, " %px l=%u %s:", - _b, _b->level, bch2_btree_ids[_b->btree_id]); + pr_buf(out, " l=%u %s:", + _b->level, bch2_btree_ids[_b->btree_id]); bch2_bpos_to_text(out, btree_node_pos(_b, type)); } +#ifdef CONFIG_BCACHEFS_DEBUG +static bool trans_has_btree_nodes_locked(struct btree_trans *trans) +{ + struct btree_iter *iter; + + trans_for_each_iter(trans, iter) + if (btree_iter_type(iter) != BTREE_ITER_CACHED && + iter->nodes_locked) + return true; + return false; +} +#endif + void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) { #ifdef CONFIG_BCACHEFS_DEBUG @@ -2382,14 +2317,18 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) mutex_lock(&c->btree_trans_lock); list_for_each_entry(trans, &c->btree_trans_list, list) { - pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip); + if (!trans_has_btree_nodes_locked(trans)) + continue; + + pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip); trans_for_each_iter(trans, iter) { if (!iter->nodes_locked) continue; - pr_buf(out, " iter %u %s:", + pr_buf(out, " iter %u %c %s:", iter->idx, + btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b', bch2_btree_ids[iter->btree_id]); bch2_bpos_to_text(out, iter->pos); pr_buf(out, "\n"); @@ -2408,17 +2347,18 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) b = READ_ONCE(trans->locking); if (b) { - pr_buf(out, " locking iter %u l=%u %s:", + iter = &trans->iters[trans->locking_iter_idx]; + pr_buf(out, " locking iter %u %c l=%u %s:", trans->locking_iter_idx, + btree_iter_type(iter) == BTREE_ITER_CACHED ? 
'c' : 'b', trans->locking_level, bch2_btree_ids[trans->locking_btree_id]); bch2_bpos_to_text(out, trans->locking_pos); - pr_buf(out, " node "); bch2_btree_iter_node_to_text(out, (void *) b, - btree_iter_type(&trans->iters[trans->locking_iter_idx])); + btree_iter_type(iter)); pr_buf(out, "\n"); } } @@ -2429,6 +2369,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) void bch2_fs_btree_iter_exit(struct bch_fs *c) { mempool_exit(&c->btree_iters_pool); + cleanup_srcu_struct(&c->btree_trans_barrier); } int bch2_fs_btree_iter_init(struct bch_fs *c) @@ -2438,7 +2379,8 @@ int bch2_fs_btree_iter_init(struct bch_fs *c) INIT_LIST_HEAD(&c->btree_trans_list); mutex_init(&c->btree_trans_lock); - return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, + return init_srcu_struct(&c->btree_trans_barrier) ?: + mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, sizeof(struct btree_iter) * nr + sizeof(struct btree_insert_entry) * nr + sizeof(struct btree_insert_entry) * nr);
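
The bkey_successor()/bkey_predecessor() helpers added at the top of this diff step an iterator's position while keeping snapshot handling correct: with BTREE_ITER_ALL_SNAPSHOTS they move through the full (inode, offset, snapshot) key space, otherwise they step in (inode, offset) space and re-pin the iterator's snapshot. A minimal standalone model of that ordering - the struct layout and helper names here are illustrative, not bcachefs's actual definitions:

#include <stdbool.h>
#include <stdint.h>

/* Simplified model: keys sort by (inode, offset, snapshot). */
struct bpos_model { uint64_t inode, offset; uint32_t snapshot; };

/* Next position in full (inode, offset, snapshot) order, with carry. */
static struct bpos_model bpos_successor_model(struct bpos_model p)
{
	if (++p.snapshot)
		return p;
	if (++p.offset)
		return p;
	p.inode++;
	return p;
}

/* Next position ignoring snapshots: reset snapshot, carry offset -> inode. */
static struct bpos_model bpos_nosnap_successor_model(struct bpos_model p)
{
	p.snapshot = 0;
	if (++p.offset)
		return p;
	p.inode++;
	return p;
}

/* Mirrors bkey_successor() above: pin the snapshot unless iterating all. */
static struct bpos_model key_successor_model(struct bpos_model p,
					     bool all_snapshots,
					     uint32_t iter_snapshot)
{
	if (all_snapshots)
		return bpos_successor_model(p);

	p = bpos_nosnap_successor_model(p);
	p.snapshot = iter_snapshot;
	return p;
}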
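__bch2_btree_node_lock() now records which linked iterator would deadlock (deadlock_iter) along with a numeric reason code, and passes both to trace_trans_restart_would_deadlock() before forcing a transaction restart. The ordering it enforces across a transaction's iterators is: ascending btree ID first, cached iterators before non-cached ones within a btree, then ascending node position. A sketch of that comparison as a standalone function, with hypothetical stand-in types rather than the real struct btree_iter:

#include <stddef.h>

/* Hypothetical stand-ins for the fields the lock-ordering check consults. */
enum iter_kind { ITER_CACHED, ITER_KEYS };

struct lock_order_key {
	int		btree_id;	/* locked in ascending order */
	enum iter_kind	kind;		/* cached iterators order before others */
	long long	node_pos;	/* ascending key order within a btree */
};

#define cmp_int(a, b)	(((a) > (b)) - ((a) < (b)))

/* < 0 means 'a' must take its locks before 'b' (GNU ?: as in kernel code) */
static int lock_order_cmp(const struct lock_order_key *a,
			  const struct lock_order_key *b)
{
	return cmp_int(a->btree_id, b->btree_id) ?:
	       cmp_int(a->kind != ITER_CACHED, b->kind != ITER_CACHED) ?:
	       cmp_int(a->node_pos, b->node_pos);
}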
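btree_iter_prefetch() and btree_iter_down() switch from a fixed-size BKEY_PADDED stack buffer to struct bkey_buf (bch2_bkey_buf_init/unpack/exit), which can spill to a heap allocation when an unpacked key is too large for the on-stack space. A rough sketch of that buffer pattern under simplified assumptions - the size and names are illustrative, not the real bkey_buf API:

#include <stdlib.h>

/* Illustrative on-stack size; the real code sizes this from the key format. */
#define ONSTACK_U64S	12

struct bkey_buf_sketch {
	unsigned long long	*k;		/* points at onstack or heap */
	unsigned long long	onstack[ONSTACK_U64S];
};

static void buf_init(struct bkey_buf_sketch *s)
{
	s->k = s->onstack;
}

/*
 * Make room for nr_u64s words before copying a key in; a real
 * implementation would also handle growing an existing heap buffer.
 */
static void buf_realloc(struct bkey_buf_sketch *s, unsigned nr_u64s)
{
	if (nr_u64s > ONSTACK_U64S && s->k == s->onstack)
		s->k = malloc(nr_u64s * sizeof(*s->k));
}

static void buf_exit(struct bkey_buf_sketch *s)
{
	if (s->k != s->onstack)
		free(s->k);
	s->k = NULL;
}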
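bch2_trans_init()/bch2_trans_exit() now always allocate the maximum-size iterator array and recycle it through a per-CPU slot with this_cpu_xchg(), falling back to the btree_iters_pool mempool when the slot is empty; the #ifdef __KERNEL__ guards exist because userspace builds of bcachefs-tools lack a real percpu implementation. A simplified single-threaded model of that recycling scheme, with malloc standing in for the mempool and a plain pointer standing in for the per-CPU slot:

#include <stdlib.h>

#define TRANS_BUF_BYTES	4096	/* illustrative: iters + updates + updates2 */

static void *cached_trans_buf;	/* stands in for the per-CPU slot */

/* Take the cached buffer if present, else fall back to a fresh allocation. */
static void *trans_buf_get(void)
{
	void *p = cached_trans_buf;

	cached_trans_buf = NULL;
	if (!p)
		p = malloc(TRANS_BUF_BYTES);
	return p;
}

/* Stash the buffer for the next transaction, freeing any displaced one. */
static void trans_buf_put(void *p)
{
	void *displaced = cached_trans_buf;

	cached_trans_buf = p;
	free(displaced);
}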