-#ifndef _BCACHE_BTREE_ITER_H
-#define _BCACHE_BTREE_ITER_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_ITER_H
+#define _BCACHEFS_BTREE_ITER_H
+#include "bset.h"
#include "btree_types.h"
-struct btree_iter {
- /* Current btree depth */
- u8 level;
-
- /*
- * Used in bch2_btree_iter_traverse(), to indicate whether we're
- * searching for @pos or the first key strictly greater than @pos
- */
- u8 is_extents;
-
- /* Bitmasks for read/intent locks held per level */
- u8 nodes_locked;
- u8 nodes_intent_locked;
-
- /* Btree level below which we start taking intent locks */
- u8 locks_want;
+static inline void btree_iter_set_dirty(struct btree_iter *iter,
+ enum btree_iter_uptodate u)
+{
+ iter->uptodate = max_t(unsigned, iter->uptodate, u);
+}
- enum btree_id btree_id:8;
+static inline struct btree *btree_iter_node(struct btree_iter *iter,
+ unsigned level)
+{
+ return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
+}
+static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
+ const struct btree *b, unsigned level)
+{
/*
- * indicates we need to call bch2_btree_iter_traverse() to revalidate
- * iterator:
+ * We don't compare the low bits of the lock sequence numbers because
+ * @iter might have taken a write lock on @b, and we don't want to skip
+ * the linked iterator if the sequence numbers were equal before taking
+ * that write lock. The lock sequence number is incremented by taking
+ * and releasing write locks and is even when unlocked:
*/
- u8 at_end_of_leaf;
+ return iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1;
+}
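+
+/*
+ * Worked example (illustrative numbers, using the even/odd convention
+ * above): an unlocked node has seq 4; taking and releasing a write lock
+ * leaves it at 6, so an iterator that recorded 4 no longer matches
+ * (4 >> 1 != 6 >> 1). But while @iter itself holds the write lock the
+ * node is at 5, and 4 >> 1 == 5 >> 1, so the match correctly succeeds.
+ */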
- s8 error;
+static inline struct btree *btree_node_parent(struct btree_iter *iter,
+ struct btree *b)
+{
+ return btree_iter_node(iter, b->level + 1);
+}
- struct bch_fs *c;
+static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
+{
+ return hweight64(trans->iters_linked) > 1;
+}
- /* Current position of the iterator */
- struct bpos pos;
+static inline int btree_iter_err(const struct btree_iter *iter)
+{
+ return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
+}
- u32 lock_seq[BTREE_MAX_DEPTH];
+/* Iterate over iters within a transaction: */
- /*
- * NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
- *
- * This is because iter->nodes[iter->level] == NULL is how
- * btree_iter_next_node() knows that it's finished with a depth first
- * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
- * and if you really don't want that node used again (e.g. btree_split()
- * freed it) decrementing lock_seq will cause bch2_btree_node_relock() to
- * always fail (but since freeing a btree node takes a write lock on the
- * node, which increments the node's lock seq, that's not actually
- * necessary in that example).
- *
- * One extra slot for a sentinel NULL:
- */
- struct btree *nodes[BTREE_MAX_DEPTH + 1];
- struct btree_node_iter node_iters[BTREE_MAX_DEPTH];
+static inline struct btree_iter *
+__trans_next_iter(struct btree_trans *trans, unsigned idx)
+{
+ EBUG_ON(idx < trans->nr_iters && trans->iters[idx].idx != idx);
- /*
- * Current unpacked key - so that bch2_btree_iter_next()/
- * bch2_btree_iter_next_with_holes() can correctly advance pos.
- */
- struct bkey k;
+ for (; idx < trans->nr_iters; idx++)
+ if (trans->iters_linked & (1ULL << idx))
+ return &trans->iters[idx];
- /*
- * Circular linked list of linked iterators: linked iterators share
- * locks (e.g. two linked iterators may have the same node intent
- * locked, or read and write locked, at the same time), and insertions
- * through one iterator won't invalidate the other linked iterators.
- */
+ return NULL;
+}
- /* Must come last: */
- struct btree_iter *next;
-};
+#define trans_for_each_iter(_trans, _iter) \
+ for (_iter = __trans_next_iter((_trans), 0); \
+ (_iter); \
+ _iter = __trans_next_iter((_trans), (_iter)->idx + 1))
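+
+/*
+ * Example (a sketch): counting how many iterators are currently linked
+ * into a transaction:
+ *
+ *	struct btree_iter *iter;
+ *	unsigned nr = 0;
+ *
+ *	trans_for_each_iter(trans, iter)
+ *		nr++;
+ */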
-static inline bool btree_iter_linked(const struct btree_iter *iter)
+static inline bool __iter_has_node(const struct btree_iter *iter,
+ const struct btree *b)
{
- return iter->next != iter;
+ return iter->l[b->level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->level);
}
-/**
- * for_each_linked_btree_iter - iterate over all iterators linked with @_iter
- */
-#define for_each_linked_btree_iter(_iter, _linked) \
- for ((_linked) = (_iter)->next; \
- (_linked) != (_iter); \
- (_linked) = (_linked)->next)
-
static inline struct btree_iter *
-__next_linked_btree_node(struct btree_iter *iter, struct btree *b,
- struct btree_iter *linked)
+__trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
+ unsigned idx)
{
- do {
- linked = linked->next;
-
- if (linked == iter)
- return NULL;
-
- /*
- * We don't compare the low bits of the lock sequence numbers
- * because @iter might have taken a write lock on @b, and we
- * don't want to skip the linked iterator if the sequence
- * numbers were equal before taking that write lock. The lock
- * sequence number is incremented by taking and releasing write
- * locks and is even when unlocked:
- */
- } while (linked->nodes[b->level] != b ||
- linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);
-
- return linked;
+ struct btree_iter *iter = __trans_next_iter(trans, idx);
+
+ while (iter && !__iter_has_node(iter, b))
+ iter = __trans_next_iter(trans, iter->idx + 1);
+
+ return iter;
}
-/**
- * for_each_linked_btree_node - iterate over all iterators linked with @_iter
- * that also point to @_b
- *
- * @_b is assumed to be locked by @_iter
- *
- * Filters out iterators that don't have a valid btree_node iterator for @_b -
- * i.e. iterators for which bch2_btree_node_relock() would not succeed.
- */
-#define for_each_linked_btree_node(_iter, _b, _linked) \
- for ((_linked) = (_iter); \
- ((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
+#define trans_for_each_iter_with_node(_trans, _b, _iter) \
+ for (_iter = __trans_next_iter_with_node((_trans), (_b), 0); \
+ (_iter); \
+ _iter = __trans_next_iter_with_node((_trans), (_b), \
+ (_iter)->idx + 1))
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
+void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b) {}
+ struct btree *b) {}
+static inline void bch2_btree_trans_verify_locks(struct btree_trans *trans) {}
#endif
void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
- struct btree_node_iter *, struct bset_tree *,
- struct bkey_packed *, unsigned, unsigned);
+ struct btree_node_iter *, struct bkey_packed *,
+ unsigned, unsigned);
+
+bool bch2_trans_relock(struct btree_trans *);
+void bch2_trans_unlock(struct btree_trans *);
-int bch2_btree_iter_unlock(struct btree_iter *);
-bool __bch2_btree_iter_set_locks_want(struct btree_iter *, unsigned);
+bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
+bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);
-static inline bool bch2_btree_iter_set_locks_want(struct btree_iter *iter,
- unsigned new_locks_want)
+static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
+ unsigned new_locks_want)
{
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
- if (iter->locks_want == new_locks_want &&
- iter->nodes_intent_locked == (1 << new_locks_want) - 1)
- return true;
+ return iter->locks_want < new_locks_want
+ ? (!iter->trans->nounlock
+ ? __bch2_btree_iter_upgrade(iter, new_locks_want)
+ : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
+ : iter->uptodate <= BTREE_ITER_NEED_PEEK;
+}
- return __bch2_btree_iter_set_locks_want(iter, new_locks_want);
+void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);
+
+static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
+{
+	/*
+	 * The minimum locks_want is 1 if this iterator takes intent locks
+	 * on leaf nodes (BTREE_ITER_INTENT), 0 otherwise:
+	 */
+	if (iter->locks_want > ((iter->flags & BTREE_ITER_INTENT) ? 1 : 0))
+ __bch2_btree_iter_downgrade(iter, 0);
}
-bool bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch2_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);
+int bch2_btree_iter_traverse_all(struct btree_trans *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
+
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
-void bch2_btree_iter_advance_pos(struct btree_iter *);
-void bch2_btree_iter_rewind(struct btree_iter *, struct bpos);
-
-void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
- enum btree_id, struct bpos, unsigned , unsigned);
-static inline void bch2_btree_iter_init(struct btree_iter *iter,
- struct bch_fs *c,
- enum btree_id btree_id,
- struct bpos pos)
+static inline struct bpos btree_type_successor(enum btree_id id,
+ struct bpos pos)
{
- __bch2_btree_iter_init(iter, c, btree_id, pos, 0, 0);
-}
+ if (id == BTREE_ID_INODES) {
+ pos.inode++;
+ pos.offset = 0;
+ } else if (!btree_node_type_is_extents(id)) {
+ pos = bkey_successor(pos);
+ }
-static inline void bch2_btree_iter_init_intent(struct btree_iter *iter,
- struct bch_fs *c,
- enum btree_id btree_id,
- struct bpos pos)
-{
- __bch2_btree_iter_init(iter, c, btree_id, pos, 1, 0);
+ return pos;
}
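+
+/*
+ * Examples (illustrative): in BTREE_ID_INODES there is one position per
+ * inode, so the successor of POS(1, 5) is POS(2, 0). In other
+ * non-extents btrees, bkey_successor() gives POS(1, 6). For extents
+ * btrees the position is returned unchanged: a peek at @pos already
+ * finds the next extent past @pos.
+ */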
-void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
-void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);
-
-static inline struct bpos btree_type_successor(enum btree_id id,
+static inline struct bpos btree_type_predecessor(enum btree_id id,
struct bpos pos)
{
if (id == BTREE_ID_INODES) {
- pos.inode++;
+ --pos.inode;
pos.offset = 0;
- } else if (id != BTREE_ID_EXTENTS) {
- pos = bkey_successor(pos);
+ } else {
+ pos = bkey_predecessor(pos);
}
	return pos;
}

static inline int btree_iter_cmp(const struct btree_iter *l,
				 const struct btree_iter *r)
{
	return __btree_iter_cmp(l->btree_id, l->pos, r);
}
-#define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, \
- _b, _locks_want) \
- for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want, _depth), \
- (_iter)->is_extents = false, \
+/*
+ * Unlocks before scheduling, then relocks on wakeup; returns -EINTR if
+ * the relock fails. Note: does not revalidate the iterators.
+ */
+static inline int bch2_trans_cond_resched(struct btree_trans *trans)
+{
+ if (need_resched() || race_fault()) {
+ bch2_trans_unlock(trans);
+ schedule();
+ return bch2_trans_relock(trans) ? 0 : -EINTR;
+ } else {
+ return 0;
+ }
+}
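+
+/*
+ * Example (a sketch; more_work_to_do() is a hypothetical stand-in for the
+ * caller's own loop condition). On -EINTR the transaction's locks were
+ * lost and the caller must restart:
+ *
+ *	while (more_work_to_do()) {
+ *		int ret = bch2_trans_cond_resched(trans);
+ *		if (ret)
+ *			return ret;
+ *		...
+ *	}
+ */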
+
+#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ _locks_want, _depth, _flags, _b) \
+	for (_iter = bch2_trans_get_node_iter((_trans), (_btree_id),	\
+ _start, _locks_want, _depth, _flags), \
_b = bch2_btree_iter_peek_node(_iter); \
(_b); \
(_b) = bch2_btree_iter_next_node(_iter, _depth))
-#define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b) \
- __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0)
+#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ _flags, _b) \
+ __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ 0, 0, _flags, _b)
-#define __for_each_btree_key(_iter, _c, _btree_id, _start, \
- _k, _locks_want) \
- for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want, 0); \
- !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek(_iter)).k); \
- bch2_btree_iter_advance_pos(_iter))
+static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
+ unsigned flags)
+{
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_peek_slot(iter)
+ : bch2_btree_iter_peek(iter);
+}
+
+static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
+ unsigned flags)
+{
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_next_slot(iter)
+ : bch2_btree_iter_next(iter);
+}
-#define for_each_btree_key(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)
+#define for_each_btree_key(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for ((_ret) = PTR_ERR_OR_ZERO((_iter) = \
+ bch2_trans_get_iter((_trans), (_btree_id), \
+ (_start), (_flags))) ?: \
+ PTR_ERR_OR_ZERO(((_k) = \
+ __bch2_btree_iter_peek(_iter, _flags)).k); \
+	     !(_ret) && (_k).k;						\
+ (_ret) = PTR_ERR_OR_ZERO(((_k) = \
+ __bch2_btree_iter_next(_iter, _flags)).k))
+
+#define for_each_btree_key_continue(_iter, _flags, _k) \
+ for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ !IS_ERR_OR_NULL((_k).k); \
+ (_k) = __bch2_btree_iter_next(_iter, _flags))
+
+static inline int bkey_err(struct bkey_s_c k)
+{
+ return PTR_ERR_OR_ZERO(k.k);
+}
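+
+/*
+ * Putting the pieces together (a sketch; the btree id, position and
+ * error handling are illustrative, not prescriptive):
+ *
+ *	struct btree_trans trans;
+ *	struct btree_iter *iter;
+ *	struct bkey_s_c k;
+ *	int ret;
+ *
+ *	bch2_trans_init(&trans, c, 0, 0);
+ *
+ *	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
+ *			   0, k, ret) {
+ *		... process k ...
+ *	}
+ *
+ *	ret = bch2_trans_exit(&trans) ?: ret;
+ */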
-#define for_each_btree_key_intent(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 1)
+/* new multiple iterator interface: */
-#define __for_each_btree_key_with_holes(_iter, _c, _btree_id, \
- _start, _k, _locks_want) \
- for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want, 0); \
- !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek_with_holes(_iter)).k);\
- bch2_btree_iter_advance_pos(_iter))
+int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
+int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
+int bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *);
-#define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)
+void bch2_trans_unlink_iters(struct btree_trans *, u64);
-#define for_each_btree_key_with_holes_intent(_iter, _c, _btree_id, \
- _start, _k) \
- __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 1)
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
+ struct bpos, unsigned, u64);
+struct btree_iter *bch2_trans_copy_iter(struct btree_trans *,
+ struct btree_iter *);
-static inline int btree_iter_err(struct bkey_s_c k)
+static __always_inline u64 __btree_iter_id(void)
{
- return IS_ERR(k.k) ? PTR_ERR(k.k) : 0;
+	u64 ret = 0;
+
+	/*
+	 * Pack the caller's return address and the current instruction
+	 * pointer into a single u64 identifying this call site:
+	 */
+	ret |= _RET_IP_ & U32_MAX;
+	ret <<= 32;
+	ret |= _THIS_IP_ & U32_MAX;
+	return ret;
}
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
+static __always_inline struct btree_iter *
+bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
+ struct bpos pos, unsigned flags)
{
- struct btree_iter *linked;
+ return __bch2_trans_get_iter(trans, btree_id, pos, flags,
+ __btree_iter_id());
+}
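+
+/*
+ * Direct use (a sketch): outside the for_each_btree_key() wrappers, an
+ * iterator obtained here is returned to the transaction with
+ * bch2_trans_iter_put() when done:
+ *
+ *	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, pos, 0);
+ *	if (IS_ERR(iter))
+ *		return PTR_ERR(iter);
+ *
+ *	k = bch2_btree_iter_peek_slot(iter);
+ *	ret = bkey_err(k);
+ *	...
+ *	bch2_trans_iter_put(&trans, iter);
+ */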
- if (need_resched()) {
- for_each_linked_btree_iter(iter, linked)
- bch2_btree_iter_unlock(linked);
- bch2_btree_iter_unlock(iter);
- schedule();
- } else if (race_fault()) {
- for_each_linked_btree_iter(iter, linked)
- bch2_btree_iter_unlock(linked);
- bch2_btree_iter_unlock(iter);
- }
+struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
+ enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned);
+
+void bch2_trans_begin(struct btree_trans *);
+
+static inline void bch2_trans_begin_updates(struct btree_trans *trans)
+{
+ trans->nr_updates = 0;
}
-#endif /* _BCACHE_BTREE_ITER_H */
+void *bch2_trans_kmalloc(struct btree_trans *, size_t);
+void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
+int bch2_trans_exit(struct btree_trans *);
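+
+/*
+ * Transactions that can race are typically structured as a retry loop
+ * (a sketch; do_stuff() stands in for the caller's own logic):
+ *
+ *	bch2_trans_init(&trans, c, 0, 0);
+ *	do {
+ *		bch2_trans_begin(&trans);
+ *		ret = do_stuff(&trans);
+ *	} while (ret == -EINTR);
+ *	ret = bch2_trans_exit(&trans) ?: ret;
+ */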
+
+#endif /* _BCACHEFS_BTREE_ITER_H */