* that write lock. The lock sequence number is incremented by taking
* and releasing write locks and is even when unlocked:
*/
- return iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1;
+ return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
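/*
 * Illustrative worked example, not part of this patch: the low bit of the
 * sequence number means "currently write locked" (hence "even when
 * unlocked"), so both sides shift it out before comparing.  With
 * arbitrary numbers:
 *
 *	seq == 4	unlocked; iterator records lock_seq = 4
 *	seq == 5	write locked; 5 >> 1 == 4 >> 1, still matches
 *	seq == 6	write lock released; 6 >> 1 != 4 >> 1, node may have changed
 */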
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- return btree_iter_node(iter, b->level + 1);
+ return btree_iter_node(iter, b->c.level + 1);
}
static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
- return iter->l[b->level].b == b &&
- btree_node_lock_seq_matches(iter, b, b->level);
+ return iter->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->c.level);
}
static inline struct btree_iter *
(_iter)->idx + 1))
#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
+void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
-static inline void bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b) {}
+static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
+ struct btree *b) {}
static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
#endif
struct btree_node_iter *, struct bkey_packed *,
unsigned, unsigned);
+bool bch2_btree_iter_relock(struct btree_iter *, bool);
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
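/*
 * Illustrative sketch, not part of this patch: the usual pattern is to
 * drop all node locks around anything that may sleep, then retake them;
 * if relocking fails the transaction has to be restarted.  The function
 * below is hypothetical.
 */
static int example_blocking_step(struct btree_trans *trans, size_t size, void **p)
{
	/* Don't hold btree node locks across a sleeping allocation: */
	bch2_trans_unlock(trans);

	*p = kmalloc(size, GFP_KERNEL);
	if (!*p)
		return -ENOMEM;

	/* Retake locks; on failure the caller restarts the transaction: */
	if (!bch2_trans_relock(trans))
		return -EINTR;

	return 0;
}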
__bch2_btree_iter_downgrade(iter, 0);
}
+void bch2_trans_downgrade(struct btree_trans *);
+
void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
int bch2_btree_iter_traverse_all(struct btree_trans *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
+struct btree *bch2_btree_iter_next_node(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *);
+
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *);
+
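/*
 * Illustrative sketch, not part of this patch: a BTREE_ITER_CACHED
 * iterator operates on the in-memory btree key cache rather than on the
 * btree nodes directly, and bch2_btree_iter_peek_cached() is its peek
 * variant.  example_peek_inode() is hypothetical.
 */
static struct bkey_s_c example_peek_inode(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter *iter =
		bch2_trans_get_iter(trans, BTREE_ID_INODES, pos,
				    BTREE_ITER_CACHED);

	if (IS_ERR(iter))
		return bkey_s_c_err(PTR_ERR(iter));

	return bch2_btree_iter_peek_cached(iter);
}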
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void __bch2_btree_iter_set_pos(struct btree_iter *, struct bpos, bool);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
-static inline struct bpos btree_type_successor(enum btree_id id,
- struct bpos pos)
-{
- if (id == BTREE_ID_INODES) {
- pos.inode++;
- pos.offset = 0;
- } else if (!btree_node_type_is_extents(id)) {
- pos = bkey_successor(pos);
- }
-
- return pos;
-}
-
-static inline struct bpos btree_type_predecessor(enum btree_id id,
- struct bpos pos)
-{
- if (id == BTREE_ID_INODES) {
- --pos.inode;
- pos.offset = 0;
- } else {
- pos = bkey_predecessor(pos);
- }
-
- return pos;
-}
-
-static inline int __btree_iter_cmp(enum btree_id id,
- struct bpos pos,
- const struct btree_iter *r)
-{
- if (id != r->btree_id)
- return id < r->btree_id ? -1 : 1;
- return bkey_cmp(pos, r->pos);
-}
-
-static inline int btree_iter_cmp(const struct btree_iter *l,
- const struct btree_iter *r)
+/* Sort order for locking btree iterators: */
+static inline int btree_iter_lock_cmp(const struct btree_iter *l,
+ const struct btree_iter *r)
{
- return __btree_iter_cmp(l->btree_id, l->pos, r);
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
+ bkey_cmp(l->pos, r->pos);
}
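/*
 * Illustrative sketch, not part of this patch: taking node locks in one
 * global order (btree id, then cached vs. non-cached, then position) is
 * what lets a transaction safely hold locks on several iterators at
 * once.  sort_iters_for_locking() and nr are hypothetical; swap() is the
 * kernel macro.
 */
static inline void sort_iters_for_locking(struct btree_iter **iters, unsigned nr)
{
	unsigned i, j;

	/* insertion sort by lock order */
	for (i = 1; i < nr; i++)
		for (j = i;
		     j && btree_iter_lock_cmp(iters[j], iters[j - 1]) < 0;
		     j--)
			swap(iters[j], iters[j - 1]);
}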
/*
_start, _locks_want, _depth, _flags), \
_b = bch2_btree_iter_peek_node(_iter); \
(_b); \
- (_b) = bch2_btree_iter_next_node(_iter, _depth))
+ (_b) = bch2_btree_iter_next_node(_iter))
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
_flags, _b) \
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_SLOTS
- ? bch2_btree_iter_peek_slot(iter)
- : bch2_btree_iter_peek(iter);
+ if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_CACHED)
+ return bch2_btree_iter_peek_cached(iter);
+ else
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_peek_slot(iter)
+ : bch2_btree_iter_peek(iter);
}
static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
void bch2_trans_unlink_iters(struct btree_trans *);
-struct btree_iter *bch2_trans_get_iter(struct btree_trans *, enum btree_id,
- struct bpos, unsigned);
-struct btree_iter *bch2_trans_copy_iter(struct btree_trans *,
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
+ struct bpos, unsigned);
+
+static inline struct btree_iter *
+bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
+ struct bpos pos, unsigned flags)
+{
+ struct btree_iter *iter =
+ __bch2_trans_get_iter(trans, btree_id, pos, flags);
+
+ if (!IS_ERR(iter))
+ iter->ip_allocated = _THIS_IP_;
+ return iter;
+}
+
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
struct btree_iter *);
+static inline struct btree_iter *
+bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
+{
+ struct btree_iter *iter =
+ __bch2_trans_copy_iter(trans, src);
+
+ if (!IS_ERR(iter))
+ iter->ip_allocated = _THIS_IP_;
+ return iter;
+}
+
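/*
 * Illustrative note, not part of this patch: because the wrappers above
 * are static inline, _THIS_IP_ expands at the call site, so
 * iter->ip_allocated records which caller grabbed the iterator;
 * presumably this is what debugging output such as
 * bch2_btree_trans_to_text() is meant to show.  example_caller() is
 * hypothetical.
 */
static void example_caller(struct btree_trans *trans)
{
	struct btree_iter *iter =
		bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, POS_MIN, 0);

	if (!IS_ERR(iter)) {
		/* iter->ip_allocated now points into example_caller() */
		bch2_trans_iter_put(trans, iter);
	}
}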
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
+void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);
+
void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);
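/*
 * Illustrative sketch, not part of this patch: a minimal walk over one
 * btree using the interfaces declared above.  example_walk() and the
 * choice of BTREE_ID_EXTENTS/POS_MIN are only for illustration.
 */
static int example_walk(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
	if (IS_ERR(iter)) {
		ret = PTR_ERR(iter);
		goto out;
	}

	for (k = bch2_btree_iter_peek(iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next(iter)) {
		/* inspect k here */
	}
out:
	/* bch2_trans_exit() frees the iterators and may itself return an error: */
	return bch2_trans_exit(&trans) ?: ret;
}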