#include "bcachefs.h"
#include "bkey_methods.h"
+#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
+#include "error.h"
#include "extents.h"
#include "journal.h"
+#include "recovery.h"
+#include "replicas.h"
+#include "subvolume.h"
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
-static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
+static void btree_trans_verify_sorted(struct btree_trans *);
+inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
+
+static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
+static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
+ struct btree_path *);
+
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ return iter->ip_allocated;
+#else
+ return 0;
+#endif
+}
+
+static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
+
+/*
+ * Unlocks before scheduling
+ * Note: does not revalidate iterator
+ */
+static inline int bch2_trans_cond_resched(struct btree_trans *trans)
+{
+ if (need_resched() || race_fault()) {
+ bch2_trans_unlock(trans);
+ schedule();
+ return bch2_trans_relock(trans) ? 0 : -EINTR;
+ } else {
+ return 0;
+ }
+}
+
+static inline int __btree_path_cmp(const struct btree_path *l,
+ enum btree_id r_btree_id,
+ bool r_cached,
+ struct bpos r_pos,
+ unsigned r_level)
+{
+ /*
+ * Must match lock ordering as defined by __bch2_btree_node_lock:
+ */
+ return cmp_int(l->btree_id, r_btree_id) ?:
+ cmp_int((int) l->cached, (int) r_cached) ?:
+ bpos_cmp(l->pos, r_pos) ?:
+ -cmp_int(l->level, r_level);
+}
+
+static inline int btree_path_cmp(const struct btree_path *l,
+ const struct btree_path *r)
+{
+ return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
+}
+
+static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
+{
+ /* Are we iterating over keys in all snapshots? */
+ if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ p = bpos_successor(p);
+ } else {
+ p = bpos_nosnap_successor(p);
+ p.snapshot = iter->snapshot;
+ }
+
+ return p;
+}
+
+static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
+{
+ /* Are we iterating over keys in all snapshots? */
+ if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ p = bpos_predecessor(p);
+ } else {
+ p = bpos_nosnap_predecessor(p);
+ p.snapshot = iter->snapshot;
+ }
+
+ return p;
+}
+
+static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
return l < BTREE_MAX_DEPTH &&
- (unsigned long) iter->l[l].b >= 128;
+ (unsigned long) path->l[l].b >= 128;
}
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
bkey_cmp(pos, POS_MAX))
- pos = bkey_successor(pos);
+ pos = bkey_successor(iter, pos);
return pos;
}
-static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
+static inline bool btree_path_pos_before_node(struct btree_path *path,
struct btree *b)
{
- return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
+ return bpos_cmp(path->pos, b->data->min_key) < 0;
}
-static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
+static inline bool btree_path_pos_after_node(struct btree_path *path,
struct btree *b)
{
- return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
+ return bpos_cmp(b->key.k.p, path->pos) < 0;
}
-static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
+static inline bool btree_path_pos_in_node(struct btree_path *path,
struct btree *b)
{
- return iter->btree_id == b->c.btree_id &&
- !btree_iter_pos_before_node(iter, b) &&
- !btree_iter_pos_after_node(iter, b);
+ return path->btree_id == b->c.btree_id &&
+ !btree_path_pos_before_node(path, b) &&
+ !btree_path_pos_after_node(path, b);
}
/* Btree node locking: */
-void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+ struct btree_path *path, struct btree *b)
{
- bch2_btree_node_unlock_write_inlined(b, iter);
+ bch2_btree_node_unlock_write_inlined(trans, path, b);
}
-void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
- struct btree_iter *linked;
+ struct btree_path *linked;
unsigned readers = 0;
- EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
-
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_path(trans, linked)
if (linked->l[b->c.level].b == b &&
btree_node_read_locked(linked, b->c.level))
readers++;
* goes to 0, and it's safe because we have the node intent
* locked:
*/
- atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->c.lock.state.counter);
- btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
- atomic64_add(__SIX_VAL(read_lock, readers),
- &b->c.lock.state.counter);
+ if (!b->c.lock.readers)
+ atomic64_sub(__SIX_VAL(read_lock, readers),
+ &b->c.lock.state.counter);
+ else
+ this_cpu_sub(*b->c.lock.readers, readers);
+
+ six_lock_write(&b->c.lock, NULL, NULL);
+
+ if (!b->c.lock.readers)
+ atomic64_add(__SIX_VAL(read_lock, readers),
+ &b->c.lock.state.counter);
+ else
+ this_cpu_add(*b->c.lock.readers, readers);
}
-bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
{
- struct btree *b = btree_iter_node(iter, level);
- int want = __btree_lock_want(iter, level);
+ struct btree *b = btree_path_node(path, level);
+ int want = __btree_lock_want(path, level);
- if (!is_btree_node(iter, level))
- return false;
+ if (!is_btree_node(path, level))
+ goto fail;
if (race_fault())
- return false;
+ goto fail;
- if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
- (btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter->trans, b, level, want))) {
- mark_btree_node_locked(iter, level, want);
+ if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
+ (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, b, level, want))) {
+ mark_btree_node_locked(trans, path, level, want);
return true;
- } else {
- return false;
}
+fail:
+ trace_btree_node_relock_fail(trans->fn, _RET_IP_,
+ path->btree_id,
+ &path->pos,
+ (unsigned long) b,
+ path->l[level].lock_seq,
+ is_btree_node(path, level) ? b->c.lock.state.seq : 0);
+ return false;
}
-static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
+bool bch2_btree_node_upgrade(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
{
- struct btree *b = iter->l[level].b;
+ struct btree *b = path->l[level].b;
- EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
-
- if (!is_btree_node(iter, level))
+ if (!is_btree_node(path, level))
return false;
- if (btree_node_intent_locked(iter, level))
+ switch (btree_lock_want(path, level)) {
+ case BTREE_NODE_UNLOCKED:
+ BUG_ON(btree_node_locked(path, level));
+ return true;
+ case BTREE_NODE_READ_LOCKED:
+ BUG_ON(btree_node_intent_locked(path, level));
+ return bch2_btree_node_relock(trans, path, level);
+ case BTREE_NODE_INTENT_LOCKED:
+ break;
+ }
+
+ if (btree_node_intent_locked(path, level))
return true;
if (race_fault())
return false;
- if (btree_node_locked(iter, level)
+ if (btree_node_locked(path, level)
? six_lock_tryupgrade(&b->c.lock)
- : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+ : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
goto success;
- if (btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
- btree_node_unlock(iter, level);
+ if (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_unlock(path, level);
goto success;
}
return false;
success:
- mark_btree_node_intent_locked(iter, level);
+ mark_btree_node_intent_locked(trans, path, level);
return true;
}
-static inline bool btree_iter_get_locks(struct btree_iter *iter,
- bool upgrade, bool trace)
+static inline bool btree_path_get_locks(struct btree_trans *trans,
+ struct btree_path *path,
+ bool upgrade)
{
- unsigned l = iter->level;
+ unsigned l = path->level;
int fail_idx = -1;
do {
- if (!btree_iter_node(iter, l))
+ if (!btree_path_node(path, l))
break;
if (!(upgrade
- ? bch2_btree_node_upgrade(iter, l)
- : bch2_btree_node_relock(iter, l))) {
- if (trace)
- (upgrade
- ? trace_node_upgrade_fail
- : trace_node_relock_fail)(l, iter->l[l].lock_seq,
- is_btree_node(iter, l)
- ? 0
- : (unsigned long) iter->l[l].b,
- is_btree_node(iter, l)
- ? iter->l[l].b->c.lock.state.seq
- : 0);
-
+ ? bch2_btree_node_upgrade(trans, path, l)
+ : bch2_btree_node_relock(trans, path, l)))
fail_idx = l;
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- }
l++;
- } while (l < iter->locks_want);
+ } while (l < path->locks_want);
/*
* When we fail to get a lock, we have to ensure that any child nodes
- * can't be relocked so bch2_btree_iter_traverse has to walk back up to
+ * can't be relocked so bch2_btree_path_traverse has to walk back up to
* the node that we failed to relock:
*/
- while (fail_idx >= 0) {
- btree_node_unlock(iter, fail_idx);
- iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
- --fail_idx;
+ if (fail_idx >= 0) {
+ __bch2_btree_path_unlock(path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+
+ do {
+ path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+ --fail_idx;
+ } while (fail_idx >= 0);
}
- if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
- iter->uptodate = BTREE_ITER_NEED_PEEK;
+ if (path->uptodate == BTREE_ITER_NEED_RELOCK)
+ path->uptodate = BTREE_ITER_UPTODATE;
- bch2_btree_trans_verify_locks(iter->trans);
+ bch2_trans_verify_locks(trans);
- return iter->uptodate < BTREE_ITER_NEED_RELOCK;
+ return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
- enum btree_iter_type type)
+ bool cached)
{
- return type != BTREE_ITER_CACHED
+ return !cached
? container_of(_b, struct btree, c)->key.k.p
: container_of(_b, struct bkey_cached, c)->key.pos;
}
/* Slowpath: */
-bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level, struct btree_iter *iter,
+bool __bch2_btree_node_lock(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
- struct btree_trans *trans = iter->trans;
- struct btree_iter *linked, *deadlock_iter = NULL;
- u64 start_time = local_clock();
- unsigned reason = 9;
+ struct btree_path *linked;
+ unsigned reason;
/* Check if it's safe to block: */
- trans_for_each_iter(trans, linked) {
+ trans_for_each_path(trans, linked) {
if (!linked->nodes_locked)
continue;
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- if (!(trans->nounlock)) {
- linked->locks_want = max_t(unsigned,
- linked->locks_want,
- __fls(linked->nodes_locked) + 1);
- if (!btree_iter_get_locks(linked, true, false)) {
- deadlock_iter = linked;
- reason = 1;
- }
- } else {
- deadlock_iter = linked;
- reason = 2;
- }
+ reason = 1;
+ goto deadlock;
}
- if (linked->btree_id != iter->btree_id) {
- if (linked->btree_id > iter->btree_id) {
- deadlock_iter = linked;
- reason = 3;
- }
- continue;
+ if (linked->btree_id != path->btree_id) {
+ if (linked->btree_id < path->btree_id)
+ continue;
+
+ reason = 3;
+ goto deadlock;
}
/*
- * Within the same btree, cached iterators come before non
- * cached iterators:
+ * Within the same btree, non-cached paths come before cached
+ * paths:
*/
- if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
- if (btree_iter_is_cached(iter)) {
- deadlock_iter = linked;
- reason = 4;
- }
- continue;
+ if (linked->cached != path->cached) {
+ if (!linked->cached)
+ continue;
+
+ reason = 4;
+ goto deadlock;
}
/*
* Interior nodes must be locked before their descendants: if
- * another iterator has possible descendants locked of the node
+ * another path has possible descendants locked of the node
* we're about to lock, it must have the ancestors locked too:
*/
if (level > __fls(linked->nodes_locked)) {
- if (!(trans->nounlock)) {
- linked->locks_want =
- max(level + 1, max_t(unsigned,
- linked->locks_want,
- iter->locks_want));
- if (!btree_iter_get_locks(linked, true, false)) {
- deadlock_iter = linked;
- reason = 5;
- }
- } else {
- deadlock_iter = linked;
- reason = 6;
- }
+ reason = 5;
+ goto deadlock;
}
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
- bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
- btree_iter_type(linked))) <= 0) {
- deadlock_iter = linked;
+ bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+ linked->cached)) <= 0) {
+ BUG_ON(trans->in_traverse_all);
reason = 7;
- }
-
- /*
- * Recheck if this is a node we already have locked - since one
- * of the get_locks() calls might've successfully
- * upgraded/relocked it:
- */
- if (linked->l[level].b == b &&
- btree_node_locked_type(linked, level) >= type) {
- six_lock_increment(&b->c.lock, type);
- return true;
+ goto deadlock;
}
}
- if (unlikely(deadlock_iter)) {
- trace_trans_restart_would_deadlock(iter->trans->ip, ip,
- reason,
- deadlock_iter->btree_id,
- btree_iter_type(deadlock_iter),
- iter->btree_id,
- btree_iter_type(iter));
- return false;
- }
-
- if (six_trylock_type(&b->c.lock, type))
- return true;
-
- if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
- return false;
-
- bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
- start_time);
- return true;
+ return btree_node_lock_type(trans, path, b, pos, level,
+ type, should_sleep_fn, p);
+deadlock:
+ trace_trans_restart_would_deadlock(trans->fn, ip,
+ trans->in_traverse_all, reason,
+ linked->btree_id,
+ linked->cached,
+ &linked->pos,
+ path->btree_id,
+ path->cached,
+ &pos);
+ btree_trans_restart(trans);
+ return false;
}
/* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
+
+static void bch2_btree_path_verify_locks(struct btree_path *path)
{
unsigned l;
- if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
- BUG_ON(iter->nodes_locked);
+ if (!path->nodes_locked) {
+ BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+ btree_path_node(path, path->level));
return;
}
- for (l = 0; is_btree_node(iter, l); l++) {
- if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
- !btree_node_locked(iter, l))
- continue;
-
- BUG_ON(btree_lock_want(iter, l) !=
- btree_node_locked_type(iter, l));
- }
+ for (l = 0; btree_path_node(path, l); l++)
+ BUG_ON(btree_lock_want(path, l) !=
+ btree_node_locked_type(path, l));
}
-void bch2_btree_trans_verify_locks(struct btree_trans *trans)
+void bch2_trans_verify_locks(struct btree_trans *trans)
{
- struct btree_iter *iter;
+ struct btree_path *path;
- trans_for_each_iter_all(trans, iter)
- bch2_btree_iter_verify_locks(iter);
+ trans_for_each_path(trans, path)
+ bch2_btree_path_verify_locks(path);
}
#else
-static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
+static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
#endif
+/* Btree path locking: */
+
+/*
+ * Only for btree_cache.c - only relocks intent locks
+ */
+bool bch2_btree_path_relock_intent(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ unsigned l;
+
+ for (l = path->level;
+ l < path->locks_want && btree_path_node(path, l);
+ l++) {
+ if (!bch2_btree_node_relock(trans, path, l)) {
+ __bch2_btree_path_unlock(path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
+ path->btree_id, &path->pos);
+ btree_trans_restart(trans);
+ return false;
+ }
+ }
+
+ return true;
+}
+
__flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+static bool bch2_btree_path_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned long trace_ip)
{
- return btree_iter_get_locks(iter, false, trace);
+ bool ret = btree_path_get_locks(trans, path, false);
+
+ if (!ret) {
+ trace_trans_restart_relock_path(trans->fn, trace_ip,
+ path->btree_id, &path->pos);
+ btree_trans_restart(trans);
+ }
+ return ret;
}
-bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
+bool __bch2_btree_path_upgrade(struct btree_trans *trans,
+ struct btree_path *path,
unsigned new_locks_want)
{
- struct btree_iter *linked;
+ struct btree_path *linked;
- EBUG_ON(iter->locks_want >= new_locks_want);
+ EBUG_ON(path->locks_want >= new_locks_want);
- iter->locks_want = new_locks_want;
+ path->locks_want = new_locks_want;
- if (btree_iter_get_locks(iter, true, true))
+ if (btree_path_get_locks(trans, path, true))
return true;
/*
- * Ancestor nodes must be locked before child nodes, so set locks_want
- * on iterators that might lock ancestors before us to avoid getting
- * -EINTR later:
+ * XXX: this is ugly - we'd prefer to not be mucking with other
+ * iterators in the btree_trans here.
+ *
+ * On failure to upgrade the iterator, setting iter->locks_want and
+ * calling get_locks() is sufficient to make bch2_btree_path_traverse()
+ * get the locks we want on transaction restart.
+ *
+ * But if this iterator was a clone, on transaction restart what we did
+ * to this iterator isn't going to be preserved.
+ *
+ * Possibly we could add an iterator field for the parent iterator when
+ * an iterator is a copy - for now, we'll just upgrade any other
+ * iterators with the same btree id.
+ *
+ * The code below used to be needed to ensure ancestor nodes get locked
+ * before interior nodes - now that's handled by
+ * bch2_btree_path_traverse_all().
*/
- trans_for_each_iter(iter->trans, linked)
- if (linked != iter &&
- linked->btree_id == iter->btree_id &&
- linked->locks_want < new_locks_want) {
- linked->locks_want = new_locks_want;
- btree_iter_get_locks(linked, true, false);
- }
+ if (!path->cached && !trans->in_traverse_all)
+ trans_for_each_path(trans, linked)
+ if (linked != path &&
+ linked->cached == path->cached &&
+ linked->btree_id == path->btree_id &&
+ linked->locks_want < new_locks_want) {
+ linked->locks_want = new_locks_want;
+ btree_path_get_locks(trans, linked, true);
+ }
return false;
}
-bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
- unsigned new_locks_want)
+void __bch2_btree_path_downgrade(struct btree_path *path,
+ unsigned new_locks_want)
{
- unsigned l = iter->level;
-
- EBUG_ON(iter->locks_want >= new_locks_want);
-
- iter->locks_want = new_locks_want;
-
- do {
- if (!btree_iter_node(iter, l))
- break;
-
- if (!bch2_btree_node_upgrade(iter, l)) {
- iter->locks_want = l;
- return false;
- }
-
- l++;
- } while (l < iter->locks_want);
-
- return true;
-}
+ unsigned l;
-void __bch2_btree_iter_downgrade(struct btree_iter *iter,
- unsigned downgrade_to)
-{
- unsigned l, new_locks_want = downgrade_to ?:
- (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+ EBUG_ON(path->locks_want < new_locks_want);
- if (iter->locks_want < downgrade_to) {
- iter->locks_want = new_locks_want;
+ path->locks_want = new_locks_want;
- while (iter->nodes_locked &&
- (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
- if (l > iter->level) {
- btree_node_unlock(iter, l);
- } else {
- if (btree_node_intent_locked(iter, l)) {
- six_lock_downgrade(&iter->l[l].b->c.lock);
- iter->nodes_intent_locked ^= 1 << l;
- }
- break;
+ while (path->nodes_locked &&
+ (l = __fls(path->nodes_locked)) >= path->locks_want) {
+ if (l > path->level) {
+ btree_node_unlock(path, l);
+ } else {
+ if (btree_node_intent_locked(path, l)) {
+ six_lock_downgrade(&path->l[l].b->c.lock);
+ path->nodes_intent_locked ^= 1 << l;
}
+ break;
}
}
- bch2_btree_trans_verify_locks(iter->trans);
+ bch2_btree_path_verify_locks(path);
}
void bch2_trans_downgrade(struct btree_trans *trans)
{
- struct btree_iter *iter;
+ struct btree_path *path;
- trans_for_each_iter(trans, iter)
- bch2_btree_iter_downgrade(iter);
+ trans_for_each_path(trans, path)
+ bch2_btree_path_downgrade(path);
}
/* Btree transaction locking: */
bool bch2_trans_relock(struct btree_trans *trans)
{
- struct btree_iter *iter;
- bool ret = true;
+ struct btree_path *path;
- trans_for_each_iter(trans, iter)
- if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
- ret &= bch2_btree_iter_relock(iter, true);
+ if (unlikely(trans->restarted))
+ return false;
- return ret;
+ trans_for_each_path(trans, path)
+ if (path->should_be_locked &&
+ !bch2_btree_path_relock(trans, path, _RET_IP_)) {
+ trace_trans_restart_relock(trans->fn, _RET_IP_,
+ path->btree_id, &path->pos);
+ BUG_ON(!trans->restarted);
+ return false;
+ }
+ return true;
}
void bch2_trans_unlock(struct btree_trans *trans)
{
- struct btree_iter *iter;
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ __bch2_btree_path_unlock(path);
- trans_for_each_iter(trans, iter)
- __bch2_btree_iter_unlock(iter);
+ /*
+ * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
+ * btree nodes, it implements its own walking:
+ */
+ BUG_ON(!trans->is_initial_gc &&
+ lock_class_is_held(&bch2_btree_node_lock_key));
}
/* Btree iterator: */
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
+static void bch2_btree_path_verify_cached(struct btree_trans *trans,
+ struct btree_path *path)
{
struct bkey_cached *ck;
- bool locked = btree_node_locked(iter, 0);
+ bool locked = btree_node_locked(path, 0);
- if (!bch2_btree_node_relock(iter, 0))
+ if (!bch2_btree_node_relock(trans, path, 0))
return;
- ck = (void *) iter->l[0].b;
- BUG_ON(ck->key.btree_id != iter->btree_id ||
- bkey_cmp(ck->key.pos, iter->pos));
+ ck = (void *) path->l[0].b;
+ BUG_ON(ck->key.btree_id != path->btree_id ||
+ bkey_cmp(ck->key.pos, path->pos));
if (!locked)
- btree_node_unlock(iter, 0);
+ btree_node_unlock(path, 0);
}
-static void bch2_btree_iter_verify_level(struct btree_iter *iter,
- unsigned level)
+static void bch2_btree_path_verify_level(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
{
- struct bpos pos = btree_iter_search_key(iter);
- struct btree_iter_level *l = &iter->l[level];
- struct btree_node_iter tmp = l->iter;
- bool locked = btree_node_locked(iter, level);
+ struct btree_path_level *l;
+ struct btree_node_iter tmp;
+ bool locked;
struct bkey_packed *p, *k;
- char buf1[100], buf2[100];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
const char *msg;
if (!bch2_debug_check_iterators)
return;
- if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+ l = &path->l[level];
+ tmp = l->iter;
+ locked = btree_node_locked(path, level);
+
+ if (path->cached) {
if (!level)
- bch2_btree_iter_verify_cached(iter);
+ bch2_btree_path_verify_cached(trans, path);
return;
}
- BUG_ON(iter->level < iter->min_depth);
-
- if (!btree_iter_node(iter, level))
+ if (!btree_path_node(path, level))
return;
- if (!bch2_btree_node_relock(iter, level))
+ if (!bch2_btree_node_relock(trans, path, level))
return;
- /*
- * Ideally this invariant would always be true, and hopefully in the
- * future it will be, but for now set_pos_same_leaf() breaks it:
- */
- BUG_ON(iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
- !btree_iter_pos_in_node(iter, l->b));
-
- /*
- * node iterators don't use leaf node iterator:
- */
- if (btree_iter_type(iter) == BTREE_ITER_NODES &&
- level <= iter->min_depth)
- goto unlock;
+ BUG_ON(!btree_path_pos_in_node(path, l->b));
bch2_btree_node_iter_verify(&l->iter, l->b);
/*
- * For interior nodes, the iterator will have skipped past
- * deleted keys:
- *
- * For extents, the iterator may have skipped past deleted keys (but not
- * whiteouts)
+ * For interior nodes, the iterator will have skipped past deleted keys:
*/
- p = level || btree_node_type_is_extents(iter->btree_id)
- ? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard)
+ p = level
+ ? bch2_btree_node_iter_prev(&tmp, l->b)
: bch2_btree_node_iter_prev_all(&tmp, l->b);
k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- if (p && bkey_iter_pos_cmp(l->b, p, &pos) >= 0) {
+ if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
msg = "before";
goto err;
}
- if (k && bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
+ if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
msg = "after";
goto err;
}
-unlock:
+
if (!locked)
- btree_node_unlock(iter, level);
+ btree_node_unlock(path, level);
return;
err:
- strcpy(buf1, "(none)");
- strcpy(buf2, "(none)");
+ bch2_bpos_to_text(&buf1, path->pos);
if (p) {
struct bkey uk = bkey_unpack_key(l->b, p);
- bch2_bkey_to_text(&PBUF(buf1), &uk);
+ bch2_bkey_to_text(&buf2, &uk);
+ } else {
+ pr_buf(&buf2, "(none)");
}
if (k) {
struct bkey uk = bkey_unpack_key(l->b, k);
- bch2_bkey_to_text(&PBUF(buf2), &uk);
+ bch2_bkey_to_text(&buf3, &uk);
+ } else {
+ pr_buf(&buf3, "(none)");
}
- panic("iterator should be %s key at level %u:\n"
- "iter pos %s %llu:%llu\n"
+ panic("path should be %s key at level %u:\n"
+ "path pos %s\n"
"prev key %s\n"
"cur key %s\n",
- msg, level,
- iter->flags & BTREE_ITER_IS_EXTENTS ? ">" : "=>",
- iter->pos.inode, iter->pos.offset,
- buf1, buf2);
+ msg, level, buf1.buf, buf2.buf, buf3.buf);
}
-static void bch2_btree_iter_verify(struct btree_iter *iter)
+static void bch2_btree_path_verify(struct btree_trans *trans,
+ struct btree_path *path)
{
+ struct bch_fs *c = trans->c;
unsigned i;
- bch2_btree_trans_verify_locks(iter->trans);
+ EBUG_ON(path->btree_id >= BTREE_ID_NR);
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
- bch2_btree_iter_verify_level(iter, i);
+ for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
+ if (!path->l[i].b) {
+ BUG_ON(!path->cached &&
+ c->btree_roots[path->btree_id].b->c.level > i);
+ break;
+ }
+
+ bch2_btree_path_verify_level(trans, path, i);
+ }
+
+ bch2_btree_path_verify_locks(path);
+}
+
+void bch2_trans_verify_paths(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ bch2_btree_path_verify(trans, path);
+}
+
+static void bch2_btree_iter_verify(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+
+ BUG_ON(iter->btree_id >= BTREE_ID_NR);
+
+ BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
+
+ BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+
+ BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+ (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ !btree_type_has_snapshots(iter->btree_id));
+
+ if (iter->update_path)
+ bch2_btree_path_verify(trans, iter->update_path);
+ bch2_btree_path_verify(trans, iter->path);
+}
+
+static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
+{
+ BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ !iter->pos.snapshot);
+
+ BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ iter->pos.snapshot != iter->snapshot);
+
+ BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
+ bkey_cmp(iter->pos, iter->k.p) > 0);
}
-void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
+static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
- struct btree_iter *iter;
+ struct btree_trans *trans = iter->trans;
+ struct btree_iter copy;
+ struct bkey_s_c prev;
+ int ret = 0;
if (!bch2_debug_check_iterators)
- return;
+ return 0;
+
+ if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
+ return 0;
+
+ if (bkey_err(k) || !k.k)
+ return 0;
+
+ BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
+ iter->snapshot,
+ k.k->p.snapshot));
+
+ bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
+ BTREE_ITER_NOPRESERVE|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ prev = bch2_btree_iter_prev(©);
+ if (!prev.k)
+ goto out;
+
+ ret = bkey_err(prev);
+ if (ret)
+ goto out;
+
+ if (!bkey_cmp(prev.k->p, k.k->p) &&
+ bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
+ prev.k->p.snapshot) > 0) {
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+ bch2_bkey_to_text(&buf1, k.k);
+ bch2_bkey_to_text(&buf2, prev.k);
+
+ panic("iter snap %u\n"
+ "k %s\n"
+ "prev %s\n",
+ iter->snapshot,
+ buf1.buf, buf2.buf);
+ }
+out:
+ bch2_trans_iter_exit(trans, ©);
+ return ret;
+}
+
+void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
+ struct bpos pos, bool key_cache)
+{
+ struct btree_path *path;
+ unsigned idx;
+ struct printbuf buf = PRINTBUF;
+
+ trans_for_each_path_inorder(trans, path, idx) {
+ int cmp = cmp_int(path->btree_id, id) ?:
+ cmp_int(path->cached, key_cache);
+
+ if (cmp > 0)
+ break;
+ if (cmp < 0)
+ continue;
- trans_for_each_iter_with_node(trans, b, iter)
- bch2_btree_iter_verify_level(iter, b->c.level);
+ if (!(path->nodes_locked & 1) ||
+ !path->should_be_locked)
+ continue;
+
+ if (!key_cache) {
+ if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
+ bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+ return;
+ } else {
+ if (!bkey_cmp(pos, path->pos))
+ return;
+ }
+ }
+
+ bch2_dump_trans_paths_updates(trans);
+ bch2_bpos_to_text(&buf, pos);
+
+ panic("not locked: %s %s%s\n",
+ bch2_btree_ids[id], buf.buf,
+ key_cache ? " cached" : "");
}
#else
-static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
+static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
+ struct btree_path *path, unsigned l) {}
+static inline void bch2_btree_path_verify(struct btree_trans *trans,
+ struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
+static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
#endif
+/* Btree path: fixups after btree updates */
+
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
struct btree *b,
struct bset_tree *t,
bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}
-static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
struct btree *b,
struct bkey_packed *where)
{
- struct btree_iter_level *l = &iter->l[b->c.level];
- struct bpos pos = btree_iter_search_key(iter);
+ struct btree_path_level *l = &path->l[b->c.level];
if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
return;
- if (bkey_iter_pos_cmp(l->b, where, &pos) < 0)
+ if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
bch2_btree_node_iter_advance(&l->iter, l->b);
-
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
struct btree *b,
struct bkey_packed *where)
{
- struct btree_iter *linked;
+ struct btree_path *path;
- trans_for_each_iter_with_node(iter->trans, b, linked) {
- __bch2_btree_iter_fix_key_modified(linked, b, where);
- bch2_btree_iter_verify_level(linked, b->c.level);
+ trans_for_each_path_with_node(trans, b, path) {
+ __bch2_btree_path_fix_key_modified(path, b, where);
+ bch2_btree_path_verify_level(trans, path, b->c.level);
}
}
-static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
- struct btree *b,
- struct btree_node_iter *node_iter,
- struct bset_tree *t,
- struct bkey_packed *where,
- unsigned clobber_u64s,
- unsigned new_u64s)
+static void __bch2_btree_node_iter_fix(struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bset_tree *t,
+ struct bkey_packed *where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
{
const struct bkey_packed *end = btree_bkey_last(b, t);
struct btree_node_iter_set *set;
bool iter_current_key_modified =
orig_iter_pos >= offset &&
orig_iter_pos <= offset + clobber_u64s;
- struct bpos iter_pos = btree_iter_search_key(iter);
btree_node_iter_for_each(node_iter, set)
if (set->end == old_end)
/* didn't find the bset in the iterator - might have to readd it: */
if (new_u64s &&
- bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
+ bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
bch2_btree_node_iter_push(node_iter, b, where, end);
goto fixup_done;
} else {
return;
if (new_u64s &&
- bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
+ bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
set->k = offset;
} else if (set->k < offset + clobber_u64s) {
set->k = offset + new_u64s;
*/
if (!bch2_btree_node_iter_end(node_iter) &&
iter_current_key_modified &&
- (b->c.level ||
- btree_node_type_is_extents(iter->btree_id))) {
+ b->c.level) {
struct bset_tree *t;
struct bkey_packed *k, *k2, *p;
b, t, k2);
}
}
-
- if (!b->c.level &&
- node_iter == &iter->l[0].iter &&
- iter_current_key_modified)
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-void bch2_btree_node_iter_fix(struct btree_iter *iter,
+void bch2_btree_node_iter_fix(struct btree_trans *trans,
+ struct btree_path *path,
struct btree *b,
struct btree_node_iter *node_iter,
struct bkey_packed *where,
unsigned new_u64s)
{
struct bset_tree *t = bch2_bkey_to_bset(b, where);
- struct btree_iter *linked;
+ struct btree_path *linked;
- if (node_iter != &iter->l[b->c.level].iter) {
- __bch2_btree_node_iter_fix(iter, b, node_iter, t,
+ if (node_iter != &path->l[b->c.level].iter) {
+ __bch2_btree_node_iter_fix(path, b, node_iter, t,
where, clobber_u64s, new_u64s);
if (bch2_debug_check_iterators)
bch2_btree_node_iter_verify(node_iter, b);
}
- trans_for_each_iter_with_node(iter->trans, b, linked) {
+ trans_for_each_path_with_node(trans, b, linked) {
__bch2_btree_node_iter_fix(linked, b,
&linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
- bch2_btree_iter_verify_level(linked, b->c.level);
+ bch2_btree_path_verify_level(trans, linked, b->c.level);
}
}
-static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
- struct btree_iter_level *l,
+/* Btree path level: pointer to a particular btree node and node iter */
+
+static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
+ struct btree_path_level *l,
struct bkey *u,
struct bkey_packed *k)
{
- struct bkey_s_c ret;
-
if (unlikely(!k)) {
/*
* signal to bch2_btree_iter_peek_slot() that we're currently at
return bkey_s_c_null;
}
- ret = bkey_disassemble(l->b, k, u);
-
- if (bch2_debug_check_bkeys)
- bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
-
- return ret;
+ return bkey_disassemble(l->b, k, u);
}
-/* peek_all() doesn't skip deleted keys */
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
- struct btree_iter_level *l,
- struct bkey *u)
+static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
+ struct btree_path_level *l,
+ struct bkey *u)
{
- return __btree_iter_unpack(iter, l, u,
+ return __btree_iter_unpack(c, l, u,
bch2_btree_node_iter_peek_all(&l->iter, l->b));
}
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
- struct btree_iter_level *l)
+static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
+ struct btree_path *path,
+ struct btree_path_level *l,
+ struct bkey *u)
{
- return __btree_iter_unpack(iter, l, &iter->k,
+ struct bkey_s_c k = __btree_iter_unpack(c, l, u,
bch2_btree_node_iter_peek(&l->iter, l->b));
+
+ path->pos = k.k ? k.k->p : l->b->key.k.p;
+ return k;
}
-static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter,
- struct btree_iter_level *l)
+static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
+ struct btree_path *path,
+ struct btree_path_level *l,
+ struct bkey *u)
{
- return __btree_iter_unpack(iter, l, &iter->k,
+ struct bkey_s_c k = __btree_iter_unpack(c, l, u,
bch2_btree_node_iter_prev(&l->iter, l->b));
+
+ path->pos = k.k ? k.k->p : l->b->data->min_key;
+ return k;
}
-static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
- struct btree_iter_level *l,
+static inline bool btree_path_advance_to_pos(struct btree_path *path,
+ struct btree_path_level *l,
int max_advance)
{
- struct bpos pos = btree_iter_search_key(iter);
struct bkey_packed *k;
int nr_advanced = 0;
while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
- bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
+ bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
if (max_advance > 0 && nr_advanced >= max_advance)
return false;
/*
* Verify that iterator for parent node points to child node:
*/
-static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
+static void btree_path_verify_new_node(struct btree_trans *trans,
+ struct btree_path *path, struct btree *b)
{
- struct btree_iter_level *l;
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l;
unsigned plevel;
bool parent_locked;
struct bkey_packed *k;
if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
return;
+ if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ return;
+
plevel = b->c.level + 1;
- if (!btree_iter_node(iter, plevel))
+ if (!btree_path_node(path, plevel))
return;
- parent_locked = btree_node_locked(iter, plevel);
+ parent_locked = btree_node_locked(path, plevel);
- if (!bch2_btree_node_relock(iter, plevel))
+ if (!bch2_btree_node_relock(trans, path, plevel))
return;
- l = &iter->l[plevel];
+ l = &path->l[plevel];
k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
if (!k ||
bkey_deleted(k) ||
bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
- char buf[100];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
+ struct printbuf buf4 = PRINTBUF;
struct bkey uk = bkey_unpack_key(b, k);
- bch2_bkey_to_text(&PBUF(buf), &uk);
- panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
- buf, b->key.k.p.inode, b->key.k.p.offset);
+ bch2_dump_btree_node(c, l->b);
+ bch2_bpos_to_text(&buf1, path->pos);
+ bch2_bkey_to_text(&buf2, &uk);
+ bch2_bpos_to_text(&buf3, b->data->min_key);
+ bch2_bpos_to_text(&buf4, b->data->max_key);
+ panic("parent iter doesn't point to new node:\n"
+ "iter pos %s %s\n"
+ "iter key %s\n"
+ "new node %s-%s\n",
+ bch2_btree_ids[path->btree_id],
+ buf1.buf, buf2.buf, buf3.buf, buf4.buf);
}
if (!parent_locked)
- btree_node_unlock(iter, b->c.level + 1);
+ btree_node_unlock(path, plevel);
}
-static inline void __btree_iter_init(struct btree_iter *iter,
- unsigned level)
+static inline void __btree_path_level_init(struct btree_path *path,
+ unsigned level)
{
- struct bpos pos = btree_iter_search_key(iter);
- struct btree_iter_level *l = &iter->l[level];
+ struct btree_path_level *l = &path->l[level];
- bch2_btree_node_iter_init(&l->iter, l->b, &pos);
+ bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+ /*
+ * Iterators to interior nodes should always be pointed at the first
+ * non-whiteout:
+ */
+ if (level)
+ bch2_btree_node_iter_peek(&l->iter, l->b);
}
-static inline void btree_iter_node_set(struct btree_iter *iter,
- struct btree *b)
+static inline void btree_path_level_init(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
{
- BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+ BUG_ON(path->cached);
- btree_iter_verify_new_node(iter, b);
+ btree_path_verify_new_node(trans, path, b);
- EBUG_ON(!btree_iter_pos_in_node(iter, b));
+ EBUG_ON(!btree_path_pos_in_node(path, b));
EBUG_ON(b->c.lock.state.seq & 1);
- iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
- iter->l[b->c.level].b = b;
- __btree_iter_init(iter, b->c.level);
+ path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].b = b;
+ __btree_path_level_init(path, b->c.level);
}
+/* Btree path: fixups after btree node updates: */
+
/*
* A btree node is being replaced - update the iterator to point to the new
* node:
*/
-void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
{
- enum btree_node_locked_type t;
- struct btree_iter *linked;
+ struct btree_path *path;
- trans_for_each_iter(iter->trans, linked)
- if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
- btree_iter_pos_in_node(linked, b)) {
- /*
- * bch2_btree_iter_node_drop() has already been called -
- * the old node we're replacing has already been
- * unlocked and the pointer invalidated
- */
- BUG_ON(btree_node_locked(linked, b->c.level));
+ trans_for_each_path(trans, path)
+ if (!path->cached &&
+ btree_path_pos_in_node(path, b)) {
+ enum btree_node_locked_type t =
+ btree_lock_want(path, b->c.level);
- t = btree_lock_want(linked, b->c.level);
- if (t != BTREE_NODE_UNLOCKED) {
+ if (path->nodes_locked &&
+ t != BTREE_NODE_UNLOCKED) {
+ btree_node_unlock(path, b->c.level);
six_lock_increment(&b->c.lock, t);
- mark_btree_node_locked(linked, b->c.level, t);
+ mark_btree_node_locked(trans, path, b->c.level, t);
}
- btree_iter_node_set(linked, b);
- }
-}
-
-void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
-{
- struct btree_iter *linked;
- unsigned level = b->c.level;
-
- trans_for_each_iter(iter->trans, linked)
- if (linked->l[level].b == b) {
- __btree_node_unlock(linked, level);
- linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
+ btree_path_level_init(trans, path, b);
}
}
* A btree node has been modified in such a way as to invalidate iterators - fix
* them:
*/
-void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
+void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
- struct btree_iter *linked;
+ struct btree_path *path;
- trans_for_each_iter_with_node(iter->trans, b, linked)
- __btree_iter_init(linked, b->c.level);
+ trans_for_each_path_with_node(trans, b, path)
+ __btree_path_level_init(path, b->c.level);
}
+/* Btree path: traverse, set_pos: */
+
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
struct btree *b = container_of(lock, struct btree, c.lock);
return b == *rootp ? 0 : -1;
}
-static inline int btree_iter_lock_root(struct btree_iter *iter,
+static inline int btree_path_lock_root(struct btree_trans *trans,
+ struct btree_path *path,
unsigned depth_want,
unsigned long trace_ip)
{
- struct bch_fs *c = iter->trans->c;
- struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
+ struct bch_fs *c = trans->c;
+ struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
enum six_lock_type lock_type;
unsigned i;
- EBUG_ON(iter->nodes_locked);
+ EBUG_ON(path->nodes_locked);
while (1) {
b = READ_ONCE(*rootp);
- iter->level = READ_ONCE(b->c.level);
+ path->level = READ_ONCE(b->c.level);
- if (unlikely(iter->level < depth_want)) {
+ if (unlikely(path->level < depth_want)) {
/*
* the root is at a lower depth than the depth we want:
* got to the end of the btree, or we're walking nodes
* greater than some depth and there are no nodes >=
* that depth
*/
- iter->level = depth_want;
- for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
- iter->l[i].b = NULL;
+ path->level = depth_want;
+ for (i = path->level; i < BTREE_MAX_DEPTH; i++)
+ path->l[i].b = NULL;
return 1;
}
- lock_type = __btree_lock_want(iter, iter->level);
- if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
- iter, lock_type,
+ lock_type = __btree_lock_want(path, path->level);
+ if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
+ path->level, lock_type,
lock_root_check_fn, rootp,
- trace_ip)))
- return -EINTR;
+ trace_ip))) {
+ if (trans->restarted)
+ return -EINTR;
+ continue;
+ }
if (likely(b == READ_ONCE(*rootp) &&
- b->c.level == iter->level &&
+ b->c.level == path->level &&
!race_fault())) {
- for (i = 0; i < iter->level; i++)
- iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
- iter->l[iter->level].b = b;
- for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
- iter->l[i].b = NULL;
-
- mark_btree_node_locked(iter, iter->level, lock_type);
- btree_iter_node_set(iter, b);
+ for (i = 0; i < path->level; i++)
+ path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
+ path->l[path->level].b = b;
+ for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
+ path->l[i].b = NULL;
+
+ mark_btree_node_locked(trans, path, path->level, lock_type);
+ btree_path_level_init(trans, path, b);
return 0;
}
}
noinline
-static void btree_iter_prefetch(struct btree_iter *iter)
+static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
- struct bch_fs *c = iter->trans->c;
- struct btree_iter_level *l = &iter->l[iter->level];
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
struct btree_node_iter node_iter = l->iter;
struct bkey_packed *k;
- BKEY_PADDED(k) tmp;
+ struct bkey_buf tmp;
unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
- ? (iter->level > 1 ? 0 : 2)
- : (iter->level > 1 ? 1 : 16);
- bool was_locked = btree_node_locked(iter, iter->level);
+ ? (path->level > 1 ? 0 : 2)
+ : (path->level > 1 ? 1 : 16);
+ bool was_locked = btree_node_locked(path, path->level);
+ int ret = 0;
- while (nr) {
- if (!bch2_btree_node_relock(iter, iter->level))
- return;
+ bch2_bkey_buf_init(&tmp);
+
+ while (nr && !ret) {
+ if (!bch2_btree_node_relock(trans, path, path->level))
+ break;
bch2_btree_node_iter_advance(&node_iter, l->b);
k = bch2_btree_node_iter_peek(&node_iter, l->b);
if (!k)
break;
- bch2_bkey_unpack(l->b, &tmp.k, k);
- bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
+ bch2_bkey_buf_unpack(&tmp, c, l->b, k);
+ ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ path->level - 1);
}
if (!was_locked)
- btree_node_unlock(iter, iter->level);
+ btree_node_unlock(path, path->level);
+
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
}
-static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
- unsigned plevel, struct btree *b)
+static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
+ struct btree_and_journal_iter *jiter)
{
- struct btree_iter_level *l = &iter->l[plevel];
- bool locked = btree_node_locked(iter, plevel);
- struct bkey_packed *k;
- struct bch_btree_ptr_v2 *bp;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct bkey_buf tmp;
+ unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+ ? (path->level > 1 ? 0 : 2)
+ : (path->level > 1 ? 1 : 16);
+ bool was_locked = btree_node_locked(path, path->level);
+ int ret = 0;
- if (!bch2_btree_node_relock(iter, plevel))
- return;
+ bch2_bkey_buf_init(&tmp);
- k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
- BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+ while (nr && !ret) {
+ if (!bch2_btree_node_relock(trans, path, path->level))
+ break;
- bp = (void *) bkeyp_val(&l->b->format, k);
+ bch2_btree_and_journal_iter_advance(jiter);
+ k = bch2_btree_and_journal_iter_peek(jiter);
+ if (!k.k)
+ break;
+
+ bch2_bkey_buf_reassemble(&tmp, c, k);
+ ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ path->level - 1);
+ }
+
+ if (!was_locked)
+ btree_node_unlock(path, path->level);
+
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
+static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned plevel, struct btree *b)
+{
+ struct btree_path_level *l = &path->l[plevel];
+ bool locked = btree_node_locked(path, plevel);
+ struct bkey_packed *k;
+ struct bch_btree_ptr_v2 *bp;
+
+ if (!bch2_btree_node_relock(trans, path, plevel))
+ return;
+
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+ bp = (void *) bkeyp_val(&l->b->format, k);
bp->mem_ptr = (unsigned long)b;
if (!locked)
- btree_node_unlock(iter, plevel);
+ btree_node_unlock(path, plevel);
+}
+
+static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
+ struct bkey_buf *out)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
+ struct btree_and_journal_iter jiter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
+
+ k = bch2_btree_and_journal_iter_peek(&jiter);
+
+ bch2_bkey_buf_reassemble(out, c, k);
+
+ if (flags & BTREE_ITER_PREFETCH)
+ ret = btree_path_prefetch_j(trans, path, &jiter);
+
+ bch2_btree_and_journal_iter_exit(&jiter);
+ return ret;
}
-static __always_inline int btree_iter_down(struct btree_iter *iter,
+static __always_inline int btree_path_down(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
unsigned long trace_ip)
{
- struct bch_fs *c = iter->trans->c;
- struct btree_iter_level *l = &iter->l[iter->level];
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
struct btree *b;
- unsigned level = iter->level - 1;
- enum six_lock_type lock_type = __btree_lock_want(iter, level);
- BKEY_PADDED(k) tmp;
+ unsigned level = path->level - 1;
+ enum six_lock_type lock_type = __btree_lock_want(path, level);
+ bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
+ struct bkey_buf tmp;
+ int ret;
- EBUG_ON(!btree_node_locked(iter, iter->level));
+ EBUG_ON(!btree_node_locked(path, path->level));
- bch2_bkey_unpack(l->b, &tmp.k,
- bch2_btree_node_iter_peek(&l->iter, l->b));
+ bch2_bkey_buf_init(&tmp);
- b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, trace_ip);
- if (unlikely(IS_ERR(b)))
- return PTR_ERR(b);
+ if (unlikely(!replay_done)) {
+ ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
+ if (ret)
+ goto err;
+ } else {
+ bch2_bkey_buf_unpack(&tmp, c, l->b,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
- mark_btree_node_locked(iter, level, lock_type);
- btree_iter_node_set(iter, b);
+ if (flags & BTREE_ITER_PREFETCH) {
+ ret = btree_path_prefetch(trans, path);
+ if (ret)
+ goto err;
+ }
+ }
- if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
- unlikely(b != btree_node_mem_ptr(&tmp.k)))
- btree_node_mem_ptr_set(iter, level + 1, b);
+ b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (unlikely(ret))
+ goto err;
- if (iter->flags & BTREE_ITER_PREFETCH)
- btree_iter_prefetch(iter);
+ mark_btree_node_locked(trans, path, level, lock_type);
+ btree_path_level_init(trans, path, b);
- iter->level = level;
+ if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
+ unlikely(b != btree_node_mem_ptr(tmp.k)))
+ btree_node_mem_ptr_set(trans, path, level + 1, b);
- return 0;
-}
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(path, level + 1);
+ path->level = level;
-static void btree_iter_up(struct btree_iter *iter)
-{
- btree_node_unlock(iter, iter->level++);
+ bch2_btree_path_verify_locks(path);
+err:
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
}
-static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
+static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
+ unsigned, unsigned long);
-static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
+static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
- struct btree_iter *iter;
- u8 sorted[BTREE_ITER_MAX];
- unsigned i, nr_sorted = 0;
+ struct btree_path *path;
+ unsigned long trace_ip = _RET_IP_;
+ int i, ret = 0;
if (trans->in_traverse_all)
return -EINTR;
trans->in_traverse_all = true;
retry_all:
- nr_sorted = 0;
+ trans->restarted = false;
+ trans->traverse_all_idx = U8_MAX;
+
+ trans_for_each_path(trans, path)
+ path->should_be_locked = false;
- trans_for_each_iter(trans, iter)
- sorted[nr_sorted++] = iter->idx;
+ btree_trans_verify_sorted(trans);
-#define btree_iter_cmp_by_idx(_l, _r) \
- btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])
+ for (i = trans->nr_sorted - 2; i >= 0; --i) {
+ struct btree_path *path1 = trans->paths + trans->sorted[i];
+ struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
+
+ if (path1->btree_id == path2->btree_id &&
+ path1->locks_want < path2->locks_want)
+ __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
+ else if (!path1->locks_want && path2->locks_want)
+ __bch2_btree_path_upgrade(trans, path1, 1);
+ }
- bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
-#undef btree_iter_cmp_by_idx
bch2_trans_unlock(trans);
cond_resched();
- if (unlikely(ret == -ENOMEM)) {
+ if (unlikely(trans->memory_allocation_failure)) {
struct closure cl;
closure_init_stack(&cl);
} while (ret);
}
- if (unlikely(ret == -EIO)) {
- trans->error = true;
- goto out;
- }
-
- BUG_ON(ret && ret != -EINTR);
-
/* Now, redo traversals in correct order: */
- for (i = 0; i < nr_sorted; i++) {
- unsigned idx = sorted[i];
+ trans->traverse_all_idx = 0;
+ while (trans->traverse_all_idx < trans->nr_sorted) {
+ path = trans->paths + trans->sorted[trans->traverse_all_idx];
/*
- * sucessfully traversing one iterator can cause another to be
- * unlinked, in btree_key_cache_fill()
+ * Traversing a path can cause another path to be added at about
+ * the same position:
*/
- if (!(trans->iters_linked & (1ULL << idx)))
- continue;
-
- ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
- if (ret)
- goto retry_all;
+ if (path->uptodate) {
+ ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
+ if (ret == -EINTR || ret == -ENOMEM)
+ goto retry_all;
+ if (ret)
+ goto err;
+ BUG_ON(path->uptodate);
+ } else {
+ trans->traverse_all_idx++;
+ }
}
- if (hweight64(trans->iters_live) > 1)
- ret = -EINTR;
- else
- trans_for_each_iter(trans, iter)
- if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
- ret = -EINTR;
- break;
- }
-out:
+ /*
+ * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
+ * and relock(), relock() won't relock since path->should_be_locked
+ * isn't set yet, which is all fine
+ */
+ trans_for_each_path(trans, path)
+ BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
+err:
bch2_btree_cache_cannibalize_unlock(c);
trans->in_traverse_all = false;
- return ret;
-}
-int bch2_btree_iter_traverse_all(struct btree_trans *trans)
-{
- return __btree_iter_traverse_all(trans, 0);
+ trace_trans_traverse_all(trans->fn, trace_ip);
+ return ret;
}
-static inline bool btree_iter_good_node(struct btree_iter *iter,
+static inline bool btree_path_good_node(struct btree_trans *trans,
+ struct btree_path *path,
unsigned l, int check_pos)
{
- if (!is_btree_node(iter, l) ||
- !bch2_btree_node_relock(iter, l))
+ if (!is_btree_node(path, l) ||
+ !bch2_btree_node_relock(trans, path, l))
return false;
- if (check_pos <= 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
+ if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
return false;
- if (check_pos >= 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
+ if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
return false;
return true;
}
-static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
+static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
+ struct btree_path *path,
int check_pos)
{
- unsigned l = iter->level;
+ unsigned i, l = path->level;
- while (btree_iter_node(iter, l) &&
- !btree_iter_good_node(iter, l, check_pos)) {
- btree_node_unlock(iter, l);
- iter->l[l].b = BTREE_ITER_NO_NODE_UP;
+ while (btree_path_node(path, l) &&
+ !btree_path_good_node(trans, path, l, check_pos)) {
+ btree_node_unlock(path, l);
+ path->l[l].b = BTREE_ITER_NO_NODE_UP;
l++;
}
+ /* If we need intent locks, take them too: */
+ for (i = l + 1;
+ i < path->locks_want && btree_path_node(path, i);
+ i++)
+ if (!bch2_btree_node_relock(trans, path, i))
+ while (l <= i) {
+ btree_node_unlock(path, l);
+ path->l[l].b = BTREE_ITER_NO_NODE_UP;
+ l++;
+ }
+
return l;
}
* On error, caller (peek_node()/peek_key()) must return NULL; the error is
* stashed in the iterator and returned from bch2_trans_exit().
*/
-static int btree_iter_traverse_one(struct btree_iter *iter,
+static int btree_path_traverse_one(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
unsigned long trace_ip)
{
- unsigned depth_want = iter->level;
-
- /*
- * if we need interior nodes locked, call btree_iter_relock() to make
- * sure we walk back up enough that we lock them:
- */
- if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
- iter->locks_want > 1)
- bch2_btree_iter_relock(iter, false);
-
- if (btree_iter_type(iter) == BTREE_ITER_CACHED)
- return bch2_btree_iter_traverse_cached(iter);
+ unsigned depth_want = path->level;
+ int ret = 0;
- if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
- return 0;
-
- if (unlikely(iter->level >= BTREE_MAX_DEPTH))
- return 0;
-
- /*
- * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
- * here unnecessary
- */
- iter->level = btree_iter_up_until_good_node(iter, 0);
+ if (unlikely(trans->restarted)) {
+ ret = -EINTR;
+ goto out;
+ }
/*
- * If we've got a btree node locked (i.e. we aren't about to relock the
- * root) - advance its node iterator if necessary:
- *
- * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
+ * Ensure we obey path->should_be_locked: if it's set, we can't unlock
+ * and re-traverse the path without a transaction restart:
*/
- if (is_btree_node(iter, iter->level)) {
- BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));
+ if (path->should_be_locked) {
+ ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
+ goto out;
+ }
- btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
+ if (path->cached) {
+ ret = bch2_btree_path_traverse_cached(trans, path, flags);
+ goto out;
}
+ if (unlikely(path->level >= BTREE_MAX_DEPTH))
+ goto out;
+
+ path->level = btree_path_up_until_good_node(trans, path, 0);
+
/*
- * Note: iter->nodes[iter->level] may be temporarily NULL here - that
+	 * Note: path->l[path->level].b may be temporarily NULL here - that
* would indicate to other code that we got to the end of the btree,
* here it indicates that relocking the root failed - it's critical that
- * btree_iter_lock_root() comes next and that it can't fail
+ * btree_path_lock_root() comes next and that it can't fail
*/
- while (iter->level > depth_want) {
- int ret = btree_iter_node(iter, iter->level)
- ? btree_iter_down(iter, trace_ip)
- : btree_iter_lock_root(iter, depth_want, trace_ip);
+ while (path->level > depth_want) {
+ ret = btree_path_node(path, path->level)
+ ? btree_path_down(trans, path, flags, trace_ip)
+ : btree_path_lock_root(trans, path, depth_want, trace_ip);
if (unlikely(ret)) {
- if (ret == 1)
- return 0;
+ if (ret == 1) {
+ /*
+ * No nodes at this level - got to the end of
+ * the btree:
+ */
+ ret = 0;
+ goto out;
+ }
- iter->level = depth_want;
+ __bch2_btree_path_unlock(path);
+ path->level = depth_want;
- if (ret == -EIO) {
- iter->flags |= BTREE_ITER_ERROR;
- iter->l[iter->level].b =
+ if (ret == -EIO)
+ path->l[path->level].b =
BTREE_ITER_NO_NODE_ERROR;
- } else {
- iter->l[iter->level].b =
+ else
+ path->l[path->level].b =
BTREE_ITER_NO_NODE_DOWN;
- }
- return ret;
+ goto out;
}
}
- iter->uptodate = BTREE_ITER_NEED_PEEK;
+ path->uptodate = BTREE_ITER_UPTODATE;
+out:
+ BUG_ON((ret == -EINTR) != !!trans->restarted);
+ bch2_btree_path_verify(trans, path);
+ return ret;
+}
+
+int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
+ struct btree_path *path, unsigned flags)
+{
+ if (path->uptodate < BTREE_ITER_NEED_RELOCK)
+ return 0;
- bch2_btree_iter_verify(iter);
- return 0;
+ return bch2_trans_cond_resched(trans) ?:
+ btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
+static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
+ struct btree_path *src)
{
- struct btree_trans *trans = iter->trans;
- int ret;
+ unsigned i;
- ret = bch2_trans_cond_resched(trans) ?:
- btree_iter_traverse_one(iter, _RET_IP_);
- if (unlikely(ret))
- ret = __btree_iter_traverse_all(trans, ret);
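+	/* Copy everything from ->pos on; fields before ->pos (idx, refcounts) belong to dst: */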
+ memcpy(&dst->pos, &src->pos,
+ sizeof(struct btree_path) - offsetof(struct btree_path, pos));
- return ret;
+ for (i = 0; i < BTREE_MAX_DEPTH; i++)
+ if (btree_node_locked(dst, i))
+ six_lock_increment(&dst->l[i].b->c.lock,
+ __btree_lock_want(dst, i));
+
+ bch2_btree_path_check_sort(trans, dst, 0);
}
-static inline void bch2_btree_iter_checks(struct btree_iter *iter)
+static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
+ bool intent)
{
- enum btree_iter_type type = btree_iter_type(iter);
+ struct btree_path *new = btree_path_alloc(trans, src);
- EBUG_ON(iter->btree_id >= BTREE_ID_NR);
+ btree_path_copy(trans, new, src);
+ __btree_path_get(new, intent);
+ return new;
+}
- BUG_ON((type == BTREE_ITER_KEYS ||
- type == BTREE_ITER_CACHED) &&
- (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
- bkey_cmp(iter->pos, iter->k.p) > 0));
+inline struct btree_path * __must_check
+bch2_btree_path_make_mut(struct btree_trans *trans,
+ struct btree_path *path, bool intent,
+ unsigned long ip)
+{
+ if (path->ref > 1 || path->preserve) {
+ __btree_path_put(path, intent);
+ path = btree_path_clone(trans, path, intent);
+ path->preserve = false;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ path->ip_allocated = ip;
+#endif
+ btree_trans_verify_sorted(trans);
+ }
- bch2_btree_iter_verify_locks(iter);
- bch2_btree_iter_verify_level(iter, iter->level);
+ path->should_be_locked = false;
+ return path;
}
-/* Iterate across nodes (leaf and interior nodes) */
-
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
+struct btree_path * __must_check
+bch2_btree_path_set_pos(struct btree_trans *trans,
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip)
{
- struct btree *b;
- int ret;
+ int cmp = bpos_cmp(new_pos, path->pos);
+ unsigned l = path->level;
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
- bch2_btree_iter_checks(iter);
+ EBUG_ON(trans->restarted);
+ EBUG_ON(!path->ref);
- if (iter->uptodate == BTREE_ITER_UPTODATE)
- return iter->l[iter->level].b;
+ if (!cmp)
+ return path;
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return NULL;
+ path = bch2_btree_path_make_mut(trans, path, intent, ip);
- b = btree_iter_node(iter, iter->level);
- if (!b)
- return NULL;
+ path->pos = new_pos;
- BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
+ bch2_btree_path_check_sort(trans, path, cmp);
- iter->pos = b->key.k.p;
- iter->uptodate = BTREE_ITER_UPTODATE;
+ if (unlikely(path->cached)) {
+ btree_node_unlock(path, 0);
+ path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ goto out;
+ }
- bch2_btree_iter_verify(iter);
+ l = btree_path_up_until_good_node(trans, path, cmp);
- return b;
+ if (btree_path_node(path, l)) {
+ BUG_ON(!btree_node_locked(path, l));
+ /*
+ * We might have to skip over many keys, or just a few: try
+ * advancing the node iterator, and if we have to skip over too
+ * many keys just reinit it (or if we're rewinding, since that
+ * is expensive).
+ */
+ if (cmp < 0 ||
+ !btree_path_advance_to_pos(path, &path->l[l], 8))
+ __btree_path_level_init(path, l);
+ }
+
+ if (l != path->level) {
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ __bch2_btree_path_unlock(path);
+ }
+out:
+ bch2_btree_path_verify(trans, path);
+ return path;
}
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
+/* Btree path: main interface: */
+
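+/*
+ * Returns another path pointing at the same position as @path, if one exists:
+ * duplicates are always adjacent in the sorted list, so only the neighbours
+ * need to be checked.
+ */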
+static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
- struct btree *b;
- int ret;
+ struct btree_path *next;
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
- bch2_btree_iter_checks(iter);
+ next = prev_btree_path(trans, path);
+ if (next && !btree_path_cmp(next, path))
+ return next;
- /* already got to end? */
- if (!btree_iter_node(iter, iter->level))
- return NULL;
+ next = next_btree_path(trans, path);
+ if (next && !btree_path_cmp(next, path))
+ return next;
- bch2_trans_cond_resched(iter->trans);
+ return NULL;
+}
- btree_iter_up(iter);
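+/*
+ * Returns an adjacent path (in sorted order) pointing at the same btree node
+ * as @path, if any:
+ */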
+static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
+{
+ struct btree_path *next;
- if (!bch2_btree_node_relock(iter, iter->level))
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
+ next = prev_btree_path(trans, path);
+ if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
+ return next;
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return NULL;
+ next = next_btree_path(trans, path);
+ if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
+ return next;
- /* got to end? */
- b = btree_iter_node(iter, iter->level);
- if (!b)
- return NULL;
+ return NULL;
+}
- if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
- /*
- * Haven't gotten to the end of the parent node: go back down to
- * the next child node
- */
+static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
+{
+ __bch2_btree_path_unlock(path);
+ btree_path_list_remove(trans, path);
+ trans->paths_allocated &= ~(1ULL << path->idx);
+}
- /*
- * We don't really want to be unlocking here except we can't
- * directly tell btree_iter_traverse() "traverse to this level"
- * except by setting iter->level, so we have to unlock so we
- * don't screw up our lock invariants:
- */
- if (btree_node_read_locked(iter, iter->level))
- btree_node_unlock(iter, iter->level);
+void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
+{
+ struct btree_path *dup;
- iter->pos = bkey_successor(iter->pos);
- iter->level = iter->min_depth;
+ EBUG_ON(trans->paths + path->idx != path);
+ EBUG_ON(!path->ref);
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return NULL;
+ if (!__btree_path_put(path, intent))
+ return;
- b = iter->l[iter->level].b;
+ /*
+ * Perhaps instead we should check for duplicate paths in traverse_all:
+ */
+ if (path->preserve &&
+ (dup = have_path_at_pos(trans, path))) {
+ dup->preserve = true;
+ path->preserve = false;
+ goto free;
}
- iter->pos = b->key.k.p;
- iter->uptodate = BTREE_ITER_UPTODATE;
+ if (!path->preserve &&
+ (dup = have_node_at_pos(trans, path)))
+ goto free;
+ return;
+free:
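+	/* Only free a should_be_locked path if the duplicate holds its lock: */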
+ if (path->should_be_locked &&
+ !btree_node_locked(dup, path->level))
+ return;
- bch2_btree_iter_verify(iter);
+ dup->should_be_locked |= path->should_be_locked;
+ __bch2_path_free(trans, path);
+}
- return b;
+void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+
+ pr_buf(buf, "transaction updates for %s journal seq %llu",
+ trans->fn, trans->journal_res.seq);
+ pr_newline(buf);
+ pr_indent_push(buf, 2);
+
+ trans_for_each_update(trans, i) {
+ struct bkey_s_c old = { &i->old_k, i->old_v };
+
+ pr_buf(buf, "update: btree %s %pS",
+ bch2_btree_ids[i->btree_id],
+ (void *) i->ip_allocated);
+ pr_newline(buf);
+
+ pr_buf(buf, " old ");
+ bch2_bkey_val_to_text(buf, trans->c, old);
+ pr_newline(buf);
+
+ pr_buf(buf, " new ");
+ bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
+ pr_newline(buf);
+ }
+
+ pr_indent_pop(buf, 2);
}
-/* Iterate across keys (in leaf nodes only) */
+noinline __cold
+void bch2_dump_trans_updates(struct btree_trans *trans)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_trans_updates_to_text(&buf, trans);
+ bch_err(trans->c, "%s", buf.buf);
+ printbuf_exit(&buf);
+}
-void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
+noinline __cold
+void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
- struct btree_iter_level *l = &iter->l[0];
+ struct btree_path *path;
+ struct printbuf buf = PRINTBUF;
+ unsigned idx;
- EBUG_ON(iter->level != 0);
- EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
- EBUG_ON(!btree_node_locked(iter, 0));
- EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);
+ trans_for_each_path_inorder(trans, path, idx) {
+ printbuf_reset(&buf);
- bkey_init(&iter->k);
- iter->k.p = iter->pos = new_pos;
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+ bch2_bpos_to_text(&buf, path->pos);
- btree_iter_advance_to_pos(iter, l, -1);
+ printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
+ path->idx, path->ref, path->intent_ref,
+ path->should_be_locked ? " S" : "",
+ path->preserve ? " P" : "",
+ bch2_btree_ids[path->btree_id],
+ path->level,
+ buf.buf,
+ path->nodes_locked,
+#ifdef CONFIG_BCACHEFS_DEBUG
+ (void *) path->ip_allocated
+#else
+ NULL
+#endif
+ );
+ }
- /*
- * XXX:
- * keeping a node locked that's outside (even just outside) iter->pos
- * breaks __bch2_btree_node_lock(). This seems to only affect
- * bch2_btree_node_get_sibling so for now it's fixed there, but we
- * should try to get rid of this corner case.
- *
- * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
- */
+ printbuf_exit(&buf);
- if (bch2_btree_node_iter_end(&l->iter) &&
- btree_iter_pos_after_node(iter, l->b))
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ bch2_dump_trans_updates(trans);
}
-static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
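+/*
+ * Allocates a path from the trans->paths_allocated bitmap and links it into
+ * the sorted list immediately after @pos:
+ */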
+static struct btree_path *btree_path_alloc(struct btree_trans *trans,
+ struct btree_path *pos)
{
- unsigned l = iter->level;
+ struct btree_path *path;
+ unsigned idx;
- if (!cmp)
- goto out;
+ if (unlikely(trans->paths_allocated ==
+ ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
+ bch2_dump_trans_paths_updates(trans);
+ panic("trans path oveflow\n");
+ }
- if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
- btree_node_unlock(iter, 0);
- iter->l[0].b = BTREE_ITER_NO_NODE_UP;
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- return;
+ idx = __ffs64(~trans->paths_allocated);
+ trans->paths_allocated |= 1ULL << idx;
+
+ path = &trans->paths[idx];
+
+ path->idx = idx;
+ path->ref = 0;
+ path->intent_ref = 0;
+ path->nodes_locked = 0;
+ path->nodes_intent_locked = 0;
+
+ btree_path_list_add(trans, pos, path);
+ return path;
+}
+
+struct btree_path *bch2_path_get(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos,
+ unsigned locks_want, unsigned level,
+ unsigned flags, unsigned long ip)
+{
+ struct btree_path *path, *path_pos = NULL;
+ bool cached = flags & BTREE_ITER_CACHED;
+ bool intent = flags & BTREE_ITER_INTENT;
+ int i;
+
+ BUG_ON(trans->restarted);
+ btree_trans_verify_sorted(trans);
+ bch2_trans_verify_locks(trans);
+
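+	/* Find where this path would sort, and a candidate existing path to reuse: */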
+ trans_for_each_path_inorder(trans, path, i) {
+ if (__btree_path_cmp(path,
+ btree_id,
+ cached,
+ pos,
+ level) > 0)
+ break;
+
+ path_pos = path;
}
- l = btree_iter_up_until_good_node(iter, cmp);
+ if (path_pos &&
+ path_pos->cached == cached &&
+ path_pos->btree_id == btree_id &&
+ path_pos->level == level) {
+ __btree_path_get(path_pos, intent);
+ path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
+ } else {
+ path = btree_path_alloc(trans, path_pos);
+ path_pos = NULL;
+
+ __btree_path_get(path, intent);
+ path->pos = pos;
+ path->btree_id = btree_id;
+ path->cached = cached;
+ path->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ path->should_be_locked = false;
+ path->level = level;
+ path->locks_want = locks_want;
+ path->nodes_locked = 0;
+ path->nodes_intent_locked = 0;
+ for (i = 0; i < ARRAY_SIZE(path->l); i++)
+ path->l[i].b = BTREE_ITER_NO_NODE_INIT;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ path->ip_allocated = ip;
+#endif
+ btree_trans_verify_sorted(trans);
+ }
- if (btree_iter_node(iter, l)) {
- /*
- * We might have to skip over many keys, or just a few: try
- * advancing the node iterator, and if we have to skip over too
- * many keys just reinit it (or if we're rewinding, since that
- * is expensive).
- */
- if (cmp < 0 ||
- !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
- __btree_iter_init(iter, l);
+ if (!(flags & BTREE_ITER_NOPRESERVE))
+ path->preserve = true;
- /* Don't leave it locked if we're not supposed to: */
- if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
- btree_node_unlock(iter, l);
+ if (path->intent_ref)
+ locks_want = max(locks_want, level + 1);
+
+ /*
+	 * If the path already has locks_want greater than requested, we don't
+	 * downgrade it here: on a transaction restart (e.g. because a btree
+	 * node split needed to upgrade locks) we may be putting and getting
+	 * this iterator again. Downgrading only happens via
+	 * bch2_trans_downgrade(), after a successful transaction commit.
+ */
+
+ locks_want = min(locks_want, BTREE_MAX_DEPTH);
+ if (locks_want > path->locks_want) {
+ path->locks_want = locks_want;
+ btree_path_get_locks(trans, path, true);
}
-out:
- if (l != iter->level)
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- else
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
+ return path;
}
-void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos,
- bool strictly_greater)
+inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
- struct bpos old = btree_iter_search_key(iter);
- int cmp;
- iter->flags &= ~BTREE_ITER_IS_EXTENTS;
- iter->flags |= strictly_greater ? BTREE_ITER_IS_EXTENTS : 0;
+ struct bkey_s_c k;
+
+ if (!path->cached) {
+ struct btree_path_level *l = path_l(path);
+ struct bkey_packed *_k;
- bkey_init(&iter->k);
- iter->k.p = iter->pos = new_pos;
+ EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
+
+ _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
+
+ EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+
+ if (!k.k || bpos_cmp(path->pos, k.k->p))
+ goto hole;
+ } else {
+ struct bkey_cached *ck = (void *) path->l[0].b;
+
+ EBUG_ON(ck &&
+ (path->btree_id != ck->key.btree_id ||
+ bkey_cmp(path->pos, ck->key.pos)));
+
+ /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
+ if (unlikely(!ck || !ck->valid))
+ return bkey_s_c_null;
+
+ EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
+
+ *u = ck->k->k;
+ k = bkey_i_to_s_c(ck->k);
+ }
+
+ return k;
+hole:
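+	/* No key at path->pos: synthesize a deleted key there: */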
+ bkey_init(u);
+ u->p = path->pos;
+ return (struct bkey_s_c) { u, NULL };
+}
- cmp = bkey_cmp(btree_iter_search_key(iter), old);
+/* Btree iterators: */
- btree_iter_pos_changed(iter, cmp);
+int __must_check
+__bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+ return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}
-void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+int __must_check
+bch2_btree_iter_traverse(struct btree_iter *iter)
{
- int cmp = bkey_cmp(new_pos, iter->pos);
+ int ret;
- bkey_init(&iter->k);
- iter->k.p = iter->pos = new_pos;
+ iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
+ btree_iter_search_key(iter),
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
- btree_iter_pos_changed(iter, cmp);
+ ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ if (ret)
+ return ret;
+
+ iter->path->should_be_locked = true;
+ return 0;
}
-static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
+/* Iterate across nodes (leaf and interior nodes) */
+
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
- struct btree_iter_level *l = &iter->l[0];
- bool ret;
+ struct btree_trans *trans = iter->trans;
+ struct btree *b = NULL;
+ int ret;
+
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (ret)
+ goto err;
+
+ b = btree_path_node(iter->path, iter->path->level);
+ if (!b)
+ goto out;
+
+ BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
bkey_init(&iter->k);
- iter->k.p = iter->pos = l->b->key.k.p;
+ iter->k.p = iter->pos = b->key.k.p;
- ret = bkey_cmp(iter->pos, POS_MAX) != 0;
- if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
- iter->k.p = iter->pos = bkey_successor(iter->pos);
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ iter->path->should_be_locked = true;
+ BUG_ON(iter->path->uptodate);
+out:
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
- btree_iter_pos_changed(iter, 1);
- return ret;
+ return b;
+err:
+ b = ERR_PTR(ret);
+ goto out;
}
-static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
- struct btree_iter_level *l = &iter->l[0];
- bool ret;
+ struct btree_trans *trans = iter->trans;
+ struct btree_path *path = iter->path;
+ struct btree *b = NULL;
+ unsigned l;
+ int ret;
- bkey_init(&iter->k);
- iter->k.p = iter->pos = l->b->data->min_key;
- iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ BUG_ON(trans->restarted);
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
- ret = bkey_cmp(iter->pos, POS_MIN) != 0;
- if (ret) {
- iter->k.p = iter->pos = bkey_predecessor(iter->pos);
+ /* already at end? */
+ if (!btree_path_node(path, path->level))
+ return NULL;
+
+ /* got to end? */
+ if (!btree_path_node(path, path->level + 1)) {
+ btree_node_unlock(path, path->level);
+ path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
+ path->level++;
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ return NULL;
+ }
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- iter->k.p = iter->pos = bkey_predecessor(iter->pos);
+ if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
+ __bch2_btree_path_unlock(path);
+ path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+ path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
+ path->btree_id, &path->pos);
+ btree_trans_restart(trans);
+ ret = -EINTR;
+ goto err;
}
- btree_iter_pos_changed(iter, -1);
- return ret;
-}
+ b = btree_path_node(path, path->level + 1);
-/**
- * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
- * it currently points to
- */
-static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
-{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c ret = { .k = &iter->k };
+ if (!bpos_cmp(iter->pos, b->key.k.p)) {
+ btree_node_unlock(path, path->level);
+ path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
+ path->level++;
+ } else {
+ /*
+ * Haven't gotten to the end of the parent node: go back down to
+ * the next child node
+ */
+ path = iter->path =
+ bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
- if (!bkey_deleted(&iter->k)) {
- struct bkey_packed *_k =
- __bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ path->level = iter->min_depth;
- ret.v = bkeyp_val(&l->b->format, _k);
+ for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
+ if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
+ btree_node_unlock(path, l);
- if (bch2_debug_check_iterators) {
- struct bkey k = bkey_unpack_key(l->b, _k);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ bch2_btree_iter_verify(iter);
- BUG_ON(memcmp(&k, &iter->k, sizeof(k)));
- }
+ ret = bch2_btree_path_traverse(trans, path, iter->flags);
+ if (ret)
+ goto err;
- if (bch2_debug_check_bkeys)
- bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
+ b = path->l[path->level].b;
}
- return ret;
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = b->key.k.p;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ iter->path->should_be_locked = true;
+ BUG_ON(iter->path->uptodate);
+out:
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+
+ return b;
+err:
+ b = ERR_PTR(ret);
+ goto out;
}
-/**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
- */
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+/* Iterate across keys (in leaf nodes only) */
+
+inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k;
- int ret;
+ struct bpos pos = iter->k.p;
+ bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_cmp(pos, SPOS_MAX)
+ : bkey_cmp(pos, SPOS_MAX)) != 0;
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_checks(iter);
+ if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ pos = bkey_successor(iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
+ return ret;
+}
- if (iter->uptodate == BTREE_ITER_UPTODATE &&
- !bkey_deleted(&iter->k))
- return btree_iter_peek_uptodate(iter);
+inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
+{
+ struct bpos pos = bkey_start_pos(&iter->k);
+ bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_cmp(pos, POS_MIN)
+ : bkey_cmp(pos, POS_MIN)) != 0;
- while (1) {
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ pos = bkey_predecessor(iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
+ return ret;
+}
- k = __btree_iter_peek(iter, l);
- if (likely(k.k))
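+/*
+ * Returns the earliest pending update in this transaction for @btree_id at a
+ * position >= @pos, if any:
+ */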
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bpos pos)
+{
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ if ((cmp_int(btree_id, i->btree_id) ?:
+ bpos_cmp(pos, i->k->k.p)) <= 0) {
+ if (btree_id == i->btree_id)
+ return i->k;
break;
+ }
- if (!btree_iter_set_pos_to_next_leaf(iter))
- return bkey_s_c_null;
- }
+ return NULL;
+}
- /*
- * iter->pos should always be equal to the key we just
- * returned - except extents can straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
- bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- iter->pos = bkey_start_pos(k.k);
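+/*
+ * Overlay not-yet-replayed journal keys: returns the journal key instead of
+ * @k if it sorts at or before it:
+ */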
+static noinline
+struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bkey_i *next_journal =
+ bch2_journal_keys_peek(trans->c, iter->btree_id, 0,
+ iter->path->pos);
- iter->uptodate = BTREE_ITER_UPTODATE;
+ if (next_journal &&
+ bpos_cmp(next_journal->k.p,
+ k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
+ iter->k = next_journal->k;
+ k = bkey_i_to_s_c(next_journal);
+ }
- bch2_btree_iter_verify_level(iter, 0);
return k;
}
-/**
- * bch2_btree_iter_next: returns first key greater than iterator's current
- * position
+/*
+ * Checks btree key cache for key at iter->pos and returns it if present, or
+ * bkey_s_c_null:
*/
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+static noinline
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
- if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
+ struct bkey u;
+ int ret;
+
+ if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter,
- (iter->flags & BTREE_ITER_IS_EXTENTS)
- ? iter->k.p
- : bkey_successor(iter->k.p));
+ if (!iter->key_cache_path)
+ iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
+ iter->flags & BTREE_ITER_INTENT, 0,
+ iter->flags|BTREE_ITER_CACHED,
+ _THIS_IP_);
- return bch2_btree_iter_peek(iter);
+ iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ iter->key_cache_path->should_be_locked = true;
+
+ return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
}
-static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
- struct bpos pos = btree_iter_search_key(iter);
struct btree_trans *trans = iter->trans;
- struct btree_insert_entry *i;
+ struct bkey_i *next_update;
+ struct bkey_s_c k, k2;
+ int ret;
- trans_for_each_update2(trans, i)
- if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
- bkey_cmp(pos, i->k->k.p)) <= 0)
- break;
+ EBUG_ON(iter->path->cached || iter->path->level);
+ bch2_btree_iter_verify(iter);
- return i < trans->updates2 + trans->nr_updates2 &&
- iter->btree_id == i->iter->btree_id
- ? bkey_i_to_s_c(i->k)
- : bkey_s_c_null;
-}
+ while (1) {
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
-static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
-{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k = __btree_iter_peek(iter, l);
- struct bkey_s_c u = __btree_trans_updates_peek(iter);
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ /* ensure that iter->k is consistent with iter->pos: */
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ goto out;
+ }
+
+ iter->path->should_be_locked = true;
+
+ k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
- if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
- return k;
- if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
- iter->k = *u.k;
- return u;
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ k.k &&
+ (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ ret = bkey_err(k2);
+ if (ret) {
+ k = k2;
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ goto out;
+ }
+
+ k = k2;
+ iter->k = *k.k;
+ }
+
+ if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
+ k = btree_trans_peek_journal(trans, iter, k);
+
+ next_update = iter->flags & BTREE_ITER_WITH_UPDATES
+ ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
+ : NULL;
+ if (next_update &&
+ bpos_cmp(next_update->k.p,
+ k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ }
+
+ if (k.k && bkey_deleted(k.k)) {
+ /*
+ * If we've got a whiteout, and it's after the search
+ * key, advance the search key to the whiteout instead
+ * of just after the whiteout - it might be a btree
+ * whiteout, with a real key at the same position, since
+			 * in the btree, deleted keys sort before non-deleted keys.
+ */
+ search_key = bpos_cmp(search_key, k.k->p)
+ ? k.k->p
+ : bpos_successor(k.k->p);
+ continue;
+ }
+
+ if (likely(k.k)) {
+ break;
+ } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
+ /* Advance to next leaf node: */
+ search_key = bpos_successor(iter->path->l[0].b->key.k.p);
+ } else {
+ /* End of btree: */
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ k = bkey_s_c_null;
+ goto out;
+ }
}
- return bkey_s_c_null;
+out:
+ bch2_btree_iter_verify(iter);
+
+ return k;
}
-struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+/**
+ * bch2_btree_iter_peek_upto: returns the first key greater than or equal to
+ * the iterator's current position, but no greater than @end
+ */
+struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
{
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
+ struct bpos iter_pos;
int ret;
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_checks(iter);
+ if (iter->update_path) {
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
+
+ bch2_btree_iter_verify_entry_exit(iter);
while (1) {
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ k = __bch2_btree_iter_peek(iter, search_key);
+ if (!k.k || bkey_err(k))
+ goto out;
+
+ /*
+		 * iter->pos should be monotonically increasing, and always be
+ * equal to the key we just returned - except extents can
+ * straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter_pos = k.k->p;
+ else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter_pos = bkey_start_pos(k.k);
+ else
+ iter_pos = iter->pos;
+
+ if (bkey_cmp(iter_pos, end) > 0) {
+ bch2_btree_iter_set_pos(iter, end);
+ k = bkey_s_c_null;
+ goto out;
+ }
- k = __bch2_btree_iter_peek_with_updates(iter);
+ if (iter->update_path &&
+ bkey_cmp(iter->update_path->pos, k.k->p)) {
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
- if (k.k && bkey_deleted(k.k)) {
- bch2_btree_iter_set_pos(iter,
- (iter->flags & BTREE_ITER_IS_EXTENTS)
- ? iter->k.p
- : bkey_successor(iter->k.p));
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ (iter->flags & BTREE_ITER_INTENT) &&
+ !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !iter->update_path) {
+ struct bpos pos = k.k->p;
+
+ if (pos.snapshot < iter->snapshot) {
+ search_key = bpos_successor(k.k->p);
+ continue;
+ }
+
+ pos.snapshot = iter->snapshot;
+
+ /*
+			 * Advance update_path to the key we're returning, the
+			 * same as iter->path on exit, but with the iterator's
+			 * snapshot:
+ */
+ __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = iter->path;
+
+ iter->update_path = bch2_btree_path_set_pos(trans,
+ iter->update_path, pos,
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
+ }
+
+ /*
+ * We can never have a key in a leaf node at POS_MAX, so
+ * we don't have to check these successor() calls:
+ */
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ !bch2_snapshot_is_ancestor(trans->c,
+ iter->snapshot,
+ k.k->p.snapshot)) {
+ search_key = bpos_successor(k.k->p);
continue;
}
- if (likely(k.k))
- break;
+ if (bkey_whiteout(k.k) &&
+ !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ search_key = bkey_successor(iter, k.k->p);
+ continue;
+ }
- if (!btree_iter_set_pos_to_next_leaf(iter))
- return bkey_s_c_null;
+ break;
}
- /*
- * iter->pos should always be equal to the key we just
- * returned - except extents can straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
- bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- iter->pos = bkey_start_pos(k.k);
+ iter->pos = iter_pos;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ BUG_ON(!iter->path->nodes_locked);
+out:
+ if (iter->update_path) {
+ if (iter->update_path->uptodate &&
+ !bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)) {
+ k = bkey_s_c_err(-EINTR);
+ } else {
+ BUG_ON(!(iter->update_path->nodes_locked & 1));
+ iter->update_path->should_be_locked = true;
+ }
+ }
+ iter->path->should_be_locked = true;
+
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ iter->pos.snapshot = iter->snapshot;
+
+ ret = bch2_btree_iter_verify_ret(iter, k);
+ if (unlikely(ret)) {
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ }
+
+ bch2_btree_iter_verify_entry_exit(iter);
- iter->uptodate = BTREE_ITER_UPTODATE;
return k;
}
-struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
+/**
+ * bch2_btree_iter_next: returns first key greater than iterator's current
+ * position
+ */
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
- if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ if (!bch2_btree_iter_advance(iter))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter,
- (iter->flags & BTREE_ITER_IS_EXTENTS)
- ? iter->k.p
- : bkey_successor(iter->k.p));
-
- return bch2_btree_iter_peek_with_updates(iter);
+ return bch2_btree_iter_peek(iter);
}
/**
*/
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
- struct bpos pos = iter->pos;
- struct btree_iter_level *l = &iter->l[0];
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key = iter->pos;
+ struct btree_path *saved_path = NULL;
struct bkey_s_c k;
+ struct bkey saved_k;
+ const struct bch_val *saved_v;
int ret;
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_checks(iter);
+ EBUG_ON(iter->path->cached || iter->path->level);
+ EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
+
+ if (iter->flags & BTREE_ITER_WITH_JOURNAL)
+ return bkey_s_c_err(-EIO);
+
+ bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify_entry_exit(iter);
- if (iter->uptodate == BTREE_ITER_UPTODATE &&
- !bkey_deleted(&iter->k))
- return btree_iter_peek_uptodate(iter);
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ search_key.snapshot = U32_MAX;
while (1) {
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
- k = __btree_iter_peek(iter, l);
- if (!k.k || bkey_cmp(bkey_start_pos(k.k), pos) > 0)
- k = __btree_iter_prev(iter, l);
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ /* ensure that iter->k is consistent with iter->pos: */
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ goto out;
+ }
- if (likely(k.k))
- break;
+ k = btree_path_level_peek(trans->c, iter->path,
+ &iter->path->l[0], &iter->k);
+ if (!k.k ||
+ ((iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
+ : bpos_cmp(k.k->p, search_key) > 0))
+ k = btree_path_level_prev(trans->c, iter->path,
+ &iter->path->l[0], &iter->k);
+
+ bch2_btree_path_check_sort(trans, iter->path, 0);
+
+ if (likely(k.k)) {
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
+ if (k.k->p.snapshot == iter->snapshot)
+ goto got_key;
+
+ /*
+ * If we have a saved candidate, and we're no
+ * longer at the same _key_ (not pos), return
+ * that candidate
+ */
+ if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+ bch2_path_put(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->path = saved_path;
+ saved_path = NULL;
+ iter->k = saved_k;
+ k.v = saved_v;
+ goto got_key;
+ }
- if (!btree_iter_set_pos_to_prev_leaf(iter))
- return bkey_s_c_null;
+ if (bch2_snapshot_is_ancestor(iter->trans->c,
+ iter->snapshot,
+ k.k->p.snapshot)) {
+ if (saved_path)
+ bch2_path_put(trans, saved_path,
+ iter->flags & BTREE_ITER_INTENT);
+ saved_path = btree_path_clone(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ saved_k = *k.k;
+ saved_v = k.v;
+ }
+
+ search_key = bpos_predecessor(k.k->p);
+ continue;
+ }
+got_key:
+ if (bkey_whiteout(k.k) &&
+ !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ search_key = bkey_predecessor(iter, k.k->p);
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ search_key.snapshot = U32_MAX;
+ continue;
+ }
+
+ break;
+ } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ /* Advance to previous leaf node: */
+ search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
+ } else {
+ /* Start of btree: */
+ bch2_btree_iter_set_pos(iter, POS_MIN);
+ k = bkey_s_c_null;
+ goto out;
+ }
}
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), pos) > 0);
- iter->pos = bkey_start_pos(k.k);
- iter->uptodate = BTREE_ITER_UPTODATE;
+ EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+
+ /* Extents can straddle iter->pos: */
+ if (bkey_cmp(k.k->p, iter->pos) < 0)
+ iter->pos = k.k->p;
+
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ iter->pos.snapshot = iter->snapshot;
+out:
+ if (saved_path)
+ bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
+ iter->path->should_be_locked = true;
+
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+
return k;
}
*/
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
- struct bpos pos = bkey_start_pos(&iter->k);
-
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_checks(iter);
-
- if (unlikely(!bkey_cmp(pos, POS_MIN)))
+ if (!bch2_btree_iter_rewind(iter))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bkey_predecessor(pos));
-
return bch2_btree_iter_peek_prev(iter);
}
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
- struct btree_iter_level *l = &iter->l[0];
- struct btree_node_iter node_iter;
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key;
struct bkey_s_c k;
- struct bkey n;
int ret;
- /* keys & holes can't span inode numbers: */
- if (iter->pos.offset == KEY_OFFSET_MAX) {
+ EBUG_ON(iter->path->level);
+ bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ /* extents can't span inode numbers: */
+ if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bkey_successor(iter->pos));
-
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
}
- /*
- * iterator is now at the correct position for inserting at iter->pos,
- * but we need to keep iterating until we find the first non whiteout so
- * we know how big a hole we have, if any:
- */
-
- node_iter = l->iter;
- k = __btree_iter_unpack(iter, l, &iter->k,
- bch2_btree_node_iter_peek(&node_iter, l->b));
-
- if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
- /*
- * We're not setting iter->uptodate because the node iterator
- * doesn't necessarily point at the key we're returning:
- */
+ search_key = btree_iter_search_key(iter);
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
- EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0);
- bch2_btree_iter_verify_level(iter, 0);
- return k;
- }
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- /* hole */
+ if ((iter->flags & BTREE_ITER_CACHED) ||
+ !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
+ struct bkey_i *next_update;
+
+ if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ (next_update = btree_trans_peek_updates(trans,
+ iter->btree_id, search_key)) &&
+ !bpos_cmp(next_update->k.p, iter->pos)) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ goto out;
+ }
- if (!k.k)
- k.k = &l->b->key.k;
+ if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
+ (next_update = bch2_journal_keys_peek(trans->c, iter->btree_id,
+ 0, iter->pos)) &&
+ !bpos_cmp(next_update->k.p, iter->pos)) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ goto out;
+ }
- bkey_init(&n);
- n.p = iter->pos;
- bch2_key_resize(&n,
- min_t(u64, KEY_SIZE_MAX,
- (k.k->p.inode == n.p.inode
- ? bkey_start_offset(k.k)
- : KEY_OFFSET_MAX) -
- n.p.offset));
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ if (!bkey_err(k))
+ iter->k = *k.k;
+ goto out;
+ }
- EBUG_ON(!n.size);
+ k = bch2_btree_path_peek_slot(iter->path, &iter->k);
+ } else {
+ struct bpos next;
- iter->k = n;
- iter->uptodate = BTREE_ITER_UPTODATE;
+ if (iter->flags & BTREE_ITER_INTENT) {
+ struct btree_iter iter2;
+ struct bpos end = iter->pos;
- bch2_btree_iter_verify_level(iter, 0);
- return (struct bkey_s_c) { &iter->k, NULL };
-}
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ end.offset = U64_MAX;
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
-{
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k;
- int ret;
+ bch2_trans_copy_iter(&iter2, iter);
+ k = bch2_btree_iter_peek_upto(&iter2, end);
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
- bch2_btree_iter_checks(iter);
+ if (k.k && !bkey_err(k)) {
+ iter->k = iter2.k;
+ k.k = &iter->k;
+ }
+ bch2_trans_iter_exit(trans, &iter2);
+ } else {
+ struct bpos pos = iter->pos;
- if (iter->uptodate == BTREE_ITER_UPTODATE)
- return btree_iter_peek_uptodate(iter);
+ k = bch2_btree_iter_peek(iter);
+ iter->pos = pos;
+ }
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
+ if (unlikely(bkey_err(k)))
+ return k;
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- return __bch2_btree_iter_peek_slot_extents(iter);
+ next = k.k ? bkey_start_pos(k.k) : POS_MAX;
- k = __btree_iter_peek_all(iter, l, &iter->k);
+ if (bkey_cmp(iter->pos, next) < 0) {
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos;
- EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
+ if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ bch2_key_resize(&iter->k,
+ min_t(u64, KEY_SIZE_MAX,
+ (next.inode == iter->pos.inode
+ ? next.offset
+ : KEY_OFFSET_MAX) -
+ iter->pos.offset));
+ EBUG_ON(!iter->k.size);
+ }
- if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
- /* hole */
- bkey_init(&iter->k);
- iter->k.p = iter->pos;
- k = (struct bkey_s_c) { &iter->k, NULL };
+ k = (struct bkey_s_c) { &iter->k, NULL };
+ }
}
+out:
+ iter->path->should_be_locked = true;
+
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+ ret = bch2_btree_iter_verify_ret(iter, k);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
- iter->uptodate = BTREE_ITER_UPTODATE;
- bch2_btree_iter_verify_level(iter, 0);
return k;
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
- if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
+ if (!bch2_btree_iter_advance(iter))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter,
- (iter->flags & BTREE_ITER_IS_EXTENTS)
- ? iter->k.p
- : bkey_successor(iter->k.p));
-
return bch2_btree_iter_peek_slot(iter);
}
-struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
-{
- struct bkey_cached *ck;
- int ret;
-
- EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
- bch2_btree_iter_checks(iter);
-
- ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret))
- return bkey_s_c_err(ret);
-
- ck = (void *) iter->l[0].b;
-
- EBUG_ON(iter->btree_id != ck->key.btree_id ||
- bkey_cmp(iter->pos, ck->key.pos));
- BUG_ON(!ck->valid);
-
- return bkey_i_to_s_c(ck->k);
-}
-
-static inline void bch2_btree_iter_init(struct btree_trans *trans,
- struct btree_iter *iter, enum btree_id btree_id,
- struct bpos pos, unsigned flags)
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
- struct bch_fs *c = trans->c;
- unsigned i;
-
- if (btree_node_type_is_extents(btree_id) &&
- !(flags & BTREE_ITER_NODES))
- flags |= BTREE_ITER_IS_EXTENTS;
-
- iter->trans = trans;
- iter->pos = pos;
- bkey_init(&iter->k);
- iter->k.p = pos;
- iter->flags = flags;
- iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
- iter->btree_id = btree_id;
- iter->level = 0;
- iter->min_depth = 0;
- iter->locks_want = flags & BTREE_ITER_INTENT ? 1 : 0;
- iter->nodes_locked = 0;
- iter->nodes_intent_locked = 0;
- for (i = 0; i < ARRAY_SIZE(iter->l); i++)
- iter->l[i].b = BTREE_ITER_NO_NODE_INIT;
+ if (!bch2_btree_iter_rewind(iter))
+ return bkey_s_c_null;
- prefetch(c->btree_roots[btree_id].b);
+ return bch2_btree_iter_peek_slot(iter);
}
/* new transactional stuff: */
-static inline void __bch2_trans_iter_free(struct btree_trans *trans,
- unsigned idx)
+static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
+ struct btree_path *path)
{
- __bch2_btree_iter_unlock(&trans->iters[idx]);
- trans->iters_linked &= ~(1ULL << idx);
- trans->iters_live &= ~(1ULL << idx);
- trans->iters_touched &= ~(1ULL << idx);
+ EBUG_ON(path->sorted_idx >= trans->nr_sorted);
+ EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+ EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
}
-int bch2_trans_iter_put(struct btree_trans *trans,
- struct btree_iter *iter)
+static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
{
- int ret;
-
- if (IS_ERR_OR_NULL(iter))
- return 0;
-
- BUG_ON(trans->iters + iter->idx != iter);
-
- ret = btree_iter_err(iter);
-
- if (!(trans->iters_touched & (1ULL << iter->idx)) &&
- !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
- __bch2_trans_iter_free(trans, iter->idx);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ unsigned i;
- trans->iters_live &= ~(1ULL << iter->idx);
- return ret;
+ for (i = 0; i < trans->nr_sorted; i++)
+ btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
+#endif
}
-int bch2_trans_iter_free(struct btree_trans *trans,
- struct btree_iter *iter)
+static void btree_trans_verify_sorted(struct btree_trans *trans)
{
- if (IS_ERR_OR_NULL(iter))
- return 0;
-
- trans->iters_touched &= ~(1ULL << iter->idx);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct btree_path *path, *prev = NULL;
+ unsigned i;
- return bch2_trans_iter_put(trans, iter);
+ trans_for_each_path_inorder(trans, path, i) {
+ if (prev && btree_path_cmp(prev, path) > 0) {
+ bch2_dump_trans_paths_updates(trans);
+ panic("trans paths out of order!\n");
+ }
+ prev = path;
+ }
+#endif
}
-#if 0
-static int bch2_trans_realloc_iters(struct btree_trans *trans,
- unsigned new_size)
+static inline void btree_path_swap(struct btree_trans *trans,
+ struct btree_path *l, struct btree_path *r)
{
- void *p, *new_iters, *new_updates, *new_updates2;
- size_t iters_bytes;
- size_t updates_bytes;
-
- new_size = roundup_pow_of_two(new_size);
-
- BUG_ON(new_size > BTREE_ITER_MAX);
-
- if (new_size <= trans->size)
- return 0;
-
- BUG_ON(trans->used_mempool);
-
- bch2_trans_unlock(trans);
-
- iters_bytes = sizeof(struct btree_iter) * new_size;
- updates_bytes = sizeof(struct btree_insert_entry) * new_size;
-
- p = kmalloc(iters_bytes +
- updates_bytes +
- updates_bytes, GFP_NOFS);
- if (p)
- goto success;
-
- p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
- new_size = BTREE_ITER_MAX;
-
- trans->used_mempool = true;
-success:
- new_iters = p; p += iters_bytes;
- new_updates = p; p += updates_bytes;
- new_updates2 = p; p += updates_bytes;
-
- memcpy(new_iters, trans->iters,
- sizeof(struct btree_iter) * trans->nr_iters);
- memcpy(new_updates, trans->updates,
- sizeof(struct btree_insert_entry) * trans->nr_updates);
- memcpy(new_updates2, trans->updates2,
- sizeof(struct btree_insert_entry) * trans->nr_updates2);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- memset(trans->iters, POISON_FREE,
- sizeof(struct btree_iter) * trans->nr_iters +
- sizeof(struct btree_insert_entry) * trans->nr_iters);
-
- kfree(trans->iters);
-
- trans->iters = new_iters;
- trans->updates = new_updates;
- trans->updates2 = new_updates2;
- trans->size = new_size;
-
- if (trans->iters_live) {
- trace_trans_restart_iters_realloced(trans->ip, trans->size);
- return -EINTR;
- }
+ swap(l->sorted_idx, r->sorted_idx);
+ swap(trans->sorted[l->sorted_idx],
+ trans->sorted[r->sorted_idx]);
- return 0;
+ btree_path_verify_sorted_ref(trans, l);
+ btree_path_verify_sorted_ref(trans, r);
}
-#endif
-static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
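+/*
+ * Keep trans->sorted ordered after a path's sort key may have changed: bubble
+ * the path backwards or forwards past its neighbours until it's back in
+ * sorted position.
+ */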
+inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
+ int cmp)
{
- unsigned idx = __ffs64(~trans->iters_linked);
+ struct btree_path *n;
- if (idx < trans->nr_iters)
- goto got_slot;
+ if (cmp <= 0) {
+ n = prev_btree_path(trans, path);
+ if (n && btree_path_cmp(n, path) > 0) {
+ do {
+ btree_path_swap(trans, n, path);
+ n = prev_btree_path(trans, path);
+ } while (n && btree_path_cmp(n, path) > 0);
- if (trans->nr_iters == trans->size) {
- struct btree_iter *iter;
-
- BUG_ON(trans->size < BTREE_ITER_MAX);
-
- trans_for_each_iter(trans, iter) {
- pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
- bch2_btree_ids[iter->btree_id],
- iter->pos.inode,
- iter->pos.offset,
- (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
- (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
- iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
- (void *) iter->ip_allocated);
+ goto out;
}
-
- panic("trans iter oveflow\n");
-#if 0
- ret = bch2_trans_realloc_iters(trans, trans->size * 2);
- if (ret)
- return ERR_PTR(ret);
-#endif
}
- idx = trans->nr_iters++;
- BUG_ON(trans->nr_iters > trans->size);
-
- trans->iters[idx].idx = idx;
-got_slot:
- BUG_ON(trans->iters_linked & (1ULL << idx));
- trans->iters_linked |= 1ULL << idx;
- trans->iters[idx].flags = 0;
- return &trans->iters[idx];
+ if (cmp >= 0) {
+ n = next_btree_path(trans, path);
+ if (n && btree_path_cmp(path, n) > 0) {
+ do {
+ btree_path_swap(trans, path, n);
+ n = next_btree_path(trans, path);
+ } while (n && btree_path_cmp(path, n) > 0);
+ }
+ }
+out:
+ btree_trans_verify_sorted(trans);
}
-static inline void btree_iter_copy(struct btree_iter *dst,
- struct btree_iter *src)
+static inline void btree_path_list_remove(struct btree_trans *trans,
+ struct btree_path *path)
{
- unsigned i, idx = dst->idx;
+ unsigned i;
- *dst = *src;
- dst->idx = idx;
- dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ EBUG_ON(path->sorted_idx >= trans->nr_sorted);
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
- if (btree_node_locked(dst, i))
- six_lock_increment(&dst->l[i].b->c.lock,
- __btree_lock_want(dst, i));
+ array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
- dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
- dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
-}
+ for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ trans->paths[trans->sorted[i]].sorted_idx = i;
-static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
-{
- if (bkey_cmp(l, r) > 0)
- swap(l, r);
+ path->sorted_idx = U8_MAX;
- return POS(r.inode - l.inode, r.offset - l.offset);
+ btree_trans_verify_sorted_refs(trans);
}
-static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
+static inline void btree_path_list_add(struct btree_trans *trans,
+ struct btree_path *pos,
+ struct btree_path *path)
{
- struct btree_iter *iter, *best = NULL;
+ unsigned i;
- BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
+ btree_trans_verify_sorted_refs(trans);
- trans_for_each_iter(trans, iter) {
- if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
- continue;
+ path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
- if (iter->btree_id != btree_id)
- continue;
+ if (trans->in_traverse_all &&
+ trans->traverse_all_idx != U8_MAX &&
+ trans->traverse_all_idx >= path->sorted_idx)
+ trans->traverse_all_idx++;
- if (best &&
- bkey_cmp(bpos_diff(best->pos, pos),
- bpos_diff(iter->pos, pos)) < 0)
- continue;
+ array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
- best = iter;
- }
+ for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ trans->paths[trans->sorted[i]].sorted_idx = i;
- if (!best) {
- iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
+ btree_trans_verify_sorted_refs(trans);
+}
- bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
- } else if ((trans->iters_live & (1ULL << best->idx)) ||
- (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
- iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
+void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
+{
+ if (iter->path)
+ bch2_path_put(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ if (iter->update_path)
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ if (iter->key_cache_path)
+ bch2_path_put(trans, iter->key_cache_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->path = NULL;
+ iter->update_path = NULL;
+ iter->key_cache_path = NULL;
+}
- btree_iter_copy(iter, best);
- } else {
- iter = best;
- }
+static void __bch2_trans_iter_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned locks_want,
+ unsigned depth,
+ unsigned flags,
+ unsigned long ip)
+{
+ EBUG_ON(trans->restarted);
- iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
- iter->flags &= ~BTREE_ITER_USER_FLAGS;
- iter->flags |= flags & BTREE_ITER_USER_FLAGS;
+ if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+ btree_node_type_is_extents(btree_id))
+ flags |= BTREE_ITER_IS_EXTENTS;
- if (iter->flags & BTREE_ITER_INTENT)
- bch2_btree_iter_upgrade(iter, 1);
- else
- bch2_btree_iter_downgrade(iter);
+ if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+ !btree_type_has_snapshots(btree_id))
+ flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
- BUG_ON(iter->btree_id != btree_id);
- BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
- BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
- BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT);
- BUG_ON(trans->iters_live & (1ULL << iter->idx));
+ if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ btree_type_has_snapshots(btree_id))
+ flags |= BTREE_ITER_FILTER_SNAPSHOTS;
- trans->iters_live |= 1ULL << iter->idx;
- trans->iters_touched |= 1ULL << iter->idx;
+ if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
+ flags |= BTREE_ITER_WITH_JOURNAL;
- return iter;
-}
+ if (!btree_id_cached(trans->c, btree_id)) {
+ flags &= ~BTREE_ITER_CACHED;
+ flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+ } else if (!(flags & BTREE_ITER_CACHED))
+ flags |= BTREE_ITER_WITH_KEY_CACHE;
-struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bpos pos, unsigned flags)
-{
- struct btree_iter *iter =
- __btree_trans_get_iter(trans, btree_id, pos, flags);
+ iter->trans = trans;
+ iter->path = NULL;
+ iter->update_path = NULL;
+ iter->key_cache_path = NULL;
+ iter->btree_id = btree_id;
+ iter->min_depth = depth;
+ iter->flags = flags;
+ iter->snapshot = pos.snapshot;
+ iter->pos = pos;
+ iter->k.type = KEY_TYPE_deleted;
+ iter->k.p = pos;
+ iter->k.size = 0;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ iter->ip_allocated = ip;
+#endif
- if (!IS_ERR(iter))
- __bch2_btree_iter_set_pos(iter, pos,
- btree_node_type_is_extents(btree_id));
- return iter;
+ iter->path = bch2_path_get(trans, btree_id, iter->pos,
+ locks_want, depth, flags, ip);
}
-struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bpos pos,
- unsigned locks_want,
- unsigned depth,
- unsigned flags)
+void bch2_trans_iter_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags)
{
- struct btree_iter *iter =
- __btree_trans_get_iter(trans, btree_id, pos,
- flags|BTREE_ITER_NODES);
- unsigned i;
-
- BUG_ON(IS_ERR(iter));
- BUG_ON(bkey_cmp(iter->pos, pos));
-
- iter->locks_want = locks_want;
- iter->level = depth;
- iter->min_depth = depth;
-
- for (i = 0; i < ARRAY_SIZE(iter->l); i++)
- iter->l[i].b = NULL;
- iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
-
- return iter;
+ __bch2_trans_iter_init(trans, iter, btree_id, pos,
+ 0, 0, flags, _RET_IP_);
}
-struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
- struct btree_iter *src)
+void bch2_trans_node_iter_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ enum btree_id btree_id,
+ struct bpos pos,
+ unsigned locks_want,
+ unsigned depth,
+ unsigned flags)
{
- struct btree_iter *iter;
-
- iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
-
- btree_iter_copy(iter, src);
-
- trans->iters_live |= 1ULL << iter->idx;
- /*
- * We don't need to preserve this iter since it's cheap to copy it
- * again - this will cause trans_iter_put() to free it right away:
- */
- trans->iters_touched &= ~(1ULL << iter->idx);
+ __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
+ BTREE_ITER_NOT_EXTENTS|
+ __BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_ALL_SNAPSHOTS|
+ flags, _RET_IP_);
+ BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
+ BUG_ON(iter->path->level != depth);
+ BUG_ON(iter->min_depth != depth);
+}
- return iter;
+void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
+{
+ *dst = *src;
+ if (src->path)
+ __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
+ if (src->update_path)
+ __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
+ dst->key_cache_path = NULL;
}
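
A hedged sketch of what the copy semantics imply for callers: the copy takes its own references on src's btree_path(s), so it follows the same lifetime rules as any other iterator and must be exited separately. remember_pos_example() is an invented name, and bch2_trans_iter_exit() is assumed from elsewhere in this series.

static void remember_pos_example(struct btree_trans *trans,
				 struct btree_iter *iter)
{
	struct btree_iter copy;

	/* The copy pins iter's current path(s) via their reference counts: */
	bch2_trans_copy_iter(&copy, iter);

	/* ... advance or re-traverse iter without losing the old position ... */

	/* The copy owns its own path refs, so it needs its own exit: */
	bch2_trans_iter_exit(trans, &copy);
}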
-static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
+void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
- if (size > trans->mem_bytes) {
+ size_t new_top = trans->mem_top + size;
+ void *p;
+
+ if (new_top > trans->mem_bytes) {
size_t old_bytes = trans->mem_bytes;
- size_t new_bytes = roundup_pow_of_two(size);
- void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ size_t new_bytes = roundup_pow_of_two(new_top);
+ void *new_mem;
+
+ WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+ if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+ new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_bytes = BTREE_TRANS_MEM_MAX;
+ kfree(trans->mem);
+ }
if (!new_mem)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
if (old_bytes) {
- trace_trans_restart_mem_realloced(trans->ip, new_bytes);
- return -EINTR;
+ trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
+ btree_trans_restart(trans);
+ return ERR_PTR(-EINTR);
}
}
- return 0;
-}
-
-void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
-{
- void *p;
- int ret;
-
- ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
- if (ret)
- return ERR_PTR(ret);
-
p = trans->mem + trans->mem_top;
trans->mem_top += size;
+ memset(p, 0, size);
return p;
}
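
A minimal sketch of the intended calling convention (build_update_example() is a hypothetical helper, not from this patch): allocations live until the transaction is reset, and a mid-transaction reallocation surfaces as ERR_PTR(-EINTR), which the caller propagates so the whole transaction restarts.

static int build_update_example(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i *update;

	update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	if (IS_ERR(update))
		return PTR_ERR(update); /* possibly -EINTR: retry the transaction */

	bkey_reassemble(update, k);
	/* ... modify update, then queue it with bch2_trans_update() ... */
	return 0;
}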
-inline void bch2_trans_unlink_iters(struct btree_trans *trans)
-{
- u64 iters = trans->iters_linked &
- ~trans->iters_touched &
- ~trans->iters_live;
-
- while (iters) {
- unsigned idx = __ffs64(iters);
-
- iters &= ~(1ULL << idx);
- __bch2_trans_iter_free(trans, idx);
- }
-}
-
-void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
+/**
+ * bch2_trans_begin() - reset a transaction after an interrupted attempt
+ * @trans: transaction to reset
+ *
+ * While iterating over nodes or updating nodes, an attempt to lock a btree
+ * node may return EINTR when the trylock fails. When this occurs,
+ * bch2_trans_begin() should be called and the transaction retried.
+ */
+void bch2_trans_begin(struct btree_trans *trans)
{
- struct btree_iter *iter;
-
- trans_for_each_iter(trans, iter)
- iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
- BTREE_ITER_SET_POS_AFTER_COMMIT);
-
- bch2_trans_unlink_iters(trans);
+ struct btree_insert_entry *i;
+ struct btree_path *path;
- trans->iters_touched &= trans->iters_live;
+ trans_for_each_update(trans, i)
+ __btree_path_put(i->path, true);
- trans->need_reset = 0;
+ memset(&trans->journal_res, 0, sizeof(trans->journal_res));
+ trans->extra_journal_res = 0;
trans->nr_updates = 0;
- trans->nr_updates2 = 0;
trans->mem_top = 0;
- trans->extra_journal_entries = NULL;
- trans->extra_journal_entry_u64s = 0;
+ trans->hooks = NULL;
+ trans->extra_journal_entries.nr = 0;
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
memset(&trans->fs_usage_deltas->memset_start, 0,
       (void *) &trans->fs_usage_deltas->memset_end -
       (void *) &trans->fs_usage_deltas->memset_start);
}
- if (!(flags & TRANS_RESET_NOTRAVERSE))
- bch2_btree_iter_traverse_all(trans);
+ trans_for_each_path(trans, path) {
+ path->should_be_locked = false;
+
+ /*
+ * If the transaction wasn't restarted, we're presuming to be
+ * doing something new: don't keep iterators except the ones that
+ * are in use - except for the subvolumes btree:
+ */
+ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
+ path->preserve = false;
+
+ /*
+ * XXX: we probably shouldn't be doing this if the transaction
+ * was restarted, but currently we still overflow transaction
+ * iterators if we do that
+ */
+ if (!path->ref && !path->preserve)
+ __bch2_path_free(trans, path);
+ else
+ path->preserve = false;
+ }
+
+ bch2_trans_cond_resched(trans);
+
+ if (trans->restarted)
+ bch2_btree_path_traverse_all(trans);
+
+ trans->restarted = false;
}
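
For reference, the retry protocol described in the comment above, as a hedged sketch: do_one_attempt() is an invented placeholder, and __bch2_trans_init() is called directly with __func__ here, where real callers would normally go through a bch2_trans_init() wrapper.

static int retry_loop_example(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	__bch2_trans_init(&trans, c, 0, 0, __func__);

	do {
		/* Reset paths, transaction memory and journal state: */
		bch2_trans_begin(&trans);

		ret = do_one_attempt(&trans); /* hypothetical: may fail with -EINTR */
	} while (ret == -EINTR);

	bch2_trans_exit(&trans);
	return ret;
}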
-static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
+static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
- unsigned new_size = BTREE_ITER_MAX;
- size_t iters_bytes = sizeof(struct btree_iter) * new_size;
- size_t updates_bytes = sizeof(struct btree_insert_entry) * new_size;
- void *p;
+ size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
+ size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
+ void *p = NULL;
BUG_ON(trans->used_mempool);
- p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL) ?:
- mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
+#ifdef __KERNEL__
+ p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
+#endif
+ if (!p)
+ p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
- trans->iters = p; p += iters_bytes;
+ trans->paths = p; p += paths_bytes;
trans->updates = p; p += updates_bytes;
- trans->updates2 = p; p += updates_bytes;
- trans->size = new_size;
}
-void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
- unsigned expected_nr_iters,
- size_t expected_mem_bytes)
+void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
+ unsigned expected_nr_iters,
+ size_t expected_mem_bytes,
+ const char *fn)
+ __acquires(&c->btree_trans_barrier)
{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
memset(trans, 0, sizeof(*trans));
trans->c = c;
- trans->ip = _RET_IP_;
+ trans->fn = fn;
- /*
- * reallocating iterators currently completely breaks
- * bch2_trans_iter_put(), we always allocate the max:
- */
- bch2_trans_alloc_iters(trans, c);
+ bch2_trans_alloc_paths(trans, c);
- if (expected_mem_bytes)
- bch2_trans_preload_mem(trans, expected_mem_bytes);
+ if (expected_mem_bytes) {
+ trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
+ trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
+
+ if (unlikely(!trans->mem)) {
+ trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
+ trans->mem_bytes = BTREE_TRANS_MEM_MAX;
+ }
+ }
+
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
-#ifdef CONFIG_BCACHEFS_DEBUG
trans->pid = current->pid;
mutex_lock(&c->btree_trans_lock);
list_add(&trans->list, &c->btree_trans_list);
mutex_unlock(&c->btree_trans_lock);
+}
+
+static void check_btree_paths_leaked(struct btree_trans *trans)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->ref)
+ goto leaked;
+ return;
+leaked:
+ bch_err(c, "btree paths leaked from %s!", trans->fn);
+ trans_for_each_path(trans, path)
+ if (path->ref)
+ printk(KERN_ERR " btree %s %pS\n",
+ bch2_btree_ids[path->btree_id],
+ (void *) path->ip_allocated);
+ /* Be noisy about this: */
+ bch2_fatal_error(c);
#endif
}
-int bch2_trans_exit(struct btree_trans *trans)
+void bch2_trans_exit(struct btree_trans *trans)
+ __releases(&c->btree_trans_barrier)
{
+ struct btree_insert_entry *i;
struct bch_fs *c = trans->c;
bch2_trans_unlock(trans);
-#ifdef CONFIG_BCACHEFS_DEBUG
- mutex_lock(&trans->c->btree_trans_lock);
+ trans_for_each_update(trans, i)
+ __btree_path_put(i->path, true);
+ trans->nr_updates = 0;
+
+ check_btree_paths_leaked(trans);
+
+ mutex_lock(&c->btree_trans_lock);
list_del(&trans->list);
- mutex_unlock(&trans->c->btree_trans_lock);
-#endif
+ mutex_unlock(&c->btree_trans_lock);
- bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
- kfree(trans->fs_usage_deltas);
- kfree(trans->mem);
+ bch2_journal_preres_put(&c->journal, &trans->journal_preres);
- trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
- if (trans->iters)
- mempool_free(trans->iters, &trans->c->btree_iters_pool);
+ kfree(trans->extra_journal_entries.data);
- trans->mem = (void *) 0x1;
- trans->iters = (void *) 0x1;
+ if (trans->fs_usage_deltas) {
+ if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
+ REPLICAS_DELTA_LIST_MAX)
+ mempool_free(trans->fs_usage_deltas,
+ &c->replicas_delta_pool);
+ else
+ kfree(trans->fs_usage_deltas);
+ }
+
+ if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ mempool_free(trans->mem, &c->btree_trans_mem_pool);
+ else
+ kfree(trans->mem);
+
+#ifdef __KERNEL__
+ /*
+ * Userspace doesn't have a real percpu implementation:
+ */
+ trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
+#endif
+
+ if (trans->paths)
+ mempool_free(trans->paths, &c->btree_paths_pool);
- return trans->error ? -EIO : 0;
+ trans->mem = (void *) 0x1;
+ trans->paths = (void *) 0x1;
}
static void __maybe_unused
-bch2_btree_iter_node_to_text(struct printbuf *out,
+bch2_btree_path_node_to_text(struct printbuf *out,
struct btree_bkey_cached_common *_b,
- enum btree_iter_type type)
+ bool cached)
+{
+ pr_buf(out, " l=%u %s:",
+ _b->level, bch2_btree_ids[_b->btree_id]);
+ bch2_bpos_to_text(out, btree_node_pos(_b, cached));
+}
+
+static bool trans_has_locks(struct btree_trans *trans)
{
- pr_buf(out, " %px l=%u %s:",
- _b, _b->level, bch2_btree_ids[_b->btree_id]);
- bch2_bpos_to_text(out, btree_node_pos(_b, type));
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->nodes_locked)
+ return true;
+ return false;
}
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
struct btree_trans *trans;
- struct btree_iter *iter;
+ struct btree_path *path;
struct btree *b;
+ static char lock_types[] = { 'r', 'i', 'w' };
unsigned l;
mutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
- pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip);
+ if (!trans_has_locks(trans))
+ continue;
+
+ pr_buf(out, "%i %s\n", trans->pid, trans->fn);
- trans_for_each_iter(trans, iter) {
- if (!iter->nodes_locked)
+ trans_for_each_path(trans, path) {
+ if (!path->nodes_locked)
continue;
- pr_buf(out, " iter %u %s:",
- iter->idx,
- bch2_btree_ids[iter->btree_id]);
- bch2_bpos_to_text(out, iter->pos);
+ pr_buf(out, " path %u %c l=%u %s:",
+ path->idx,
+ path->cached ? 'c' : 'b',
+ path->level,
+ bch2_btree_ids[path->btree_id]);
+ bch2_bpos_to_text(out, path->pos);
pr_buf(out, "\n");
for (l = 0; l < BTREE_MAX_DEPTH; l++) {
- if (btree_node_locked(iter, l)) {
+ if (btree_node_locked(path, l)) {
pr_buf(out, " %s l=%u ",
- btree_node_intent_locked(iter, l) ? "i" : "r", l);
- bch2_btree_iter_node_to_text(out,
- (void *) iter->l[l].b,
- btree_iter_type(iter));
+ btree_node_intent_locked(path, l) ? "i" : "r", l);
+ bch2_btree_path_node_to_text(out,
+ (void *) path->l[l].b,
+ path->cached);
pr_buf(out, "\n");
}
}
b = READ_ONCE(trans->locking);
if (b) {
- pr_buf(out, " locking iter %u l=%u %s:",
- trans->locking_iter_idx,
+ path = &trans->paths[trans->locking_path_idx];
+ pr_buf(out, " locking path %u %c l=%u %c %s:",
+ trans->locking_path_idx,
+ path->cached ? 'c' : 'b',
trans->locking_level,
+ lock_types[trans->locking_lock_type],
bch2_btree_ids[trans->locking_btree_id]);
bch2_bpos_to_text(out, trans->locking_pos);
-
pr_buf(out, " node ");
- bch2_btree_iter_node_to_text(out,
- (void *) b,
- btree_iter_type(&trans->iters[trans->locking_iter_idx]));
+ bch2_btree_path_node_to_text(out,
+ (void *) b, path->cached);
pr_buf(out, "\n");
}
}
mutex_unlock(&c->btree_trans_lock);
-#endif
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
- mempool_exit(&c->btree_iters_pool);
+ if (c->btree_trans_barrier_initialized)
+ cleanup_srcu_struct(&c->btree_trans_barrier);
+ mempool_exit(&c->btree_trans_mem_pool);
+ mempool_exit(&c->btree_paths_pool);
}
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
unsigned nr = BTREE_ITER_MAX;
+ int ret;
INIT_LIST_HEAD(&c->btree_trans_list);
mutex_init(&c->btree_trans_lock);
- return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
- sizeof(struct btree_iter) * nr +
- sizeof(struct btree_insert_entry) * nr +
- sizeof(struct btree_insert_entry) * nr);
+ ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
+ sizeof(struct btree_path) * nr +
+ sizeof(struct btree_insert_entry) * nr) ?:
+ mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
+ BTREE_TRANS_MEM_MAX) ?:
+ init_srcu_struct(&c->btree_trans_barrier);
+ if (!ret)
+ c->btree_trans_barrier_initialized = true;
+ return ret;
}