* that write lock. The lock sequence number is incremented by taking
* and releasing write locks and is even when unlocked:
*/
- return iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1;
+ return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
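
The `>> 1` in the check above drops the low bit of the lock sequence number: the seq is bumped once when a write lock is taken (making it odd) and once when it is released (making it even again), so comparing only the upper bits treats "recorded before the write lock" and "write lock currently held" as the same generation; only a completed take-and-release invalidates the match. A minimal standalone sketch of the idea, using illustrative names rather than the bcachefs types:

#include <stdbool.h>
#include <stdint.h>

static uint32_t seq;	/* even when unlocked, odd while write locked */

static void write_lock(void)   { seq++; }	/* even -> odd  */
static void write_unlock(void) { seq++; }	/* odd  -> even */

/*
 * Generation check: ignoring the low bit means a write lock that is
 * currently held (odd seq) still matches the even value recorded just
 * before taking it; only a full take-and-release changes the generation.
 */
static bool seq_matches(uint32_t recorded)
{
	return recorded >> 1 == seq >> 1;
}

int main(void)
{
	uint32_t recorded = seq;			/* sampled while unlocked (even) */

	write_lock();
	bool still_same = seq_matches(recorded);	/* true: same generation */
	write_unlock();
	bool invalidated = !seq_matches(recorded);	/* true: generation moved on */

	return still_same && invalidated ? 0 : 1;
}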
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- return btree_iter_node(iter, b->level + 1);
+ return btree_iter_node(iter, b->c.level + 1);
}
static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
/* Iterate over iters within a transaction: */
-#define trans_for_each_iter_all(_trans, _iter) \
- for (_iter = (_trans)->iters; \
- _iter < (_trans)->iters + (_trans)->nr_iters; \
- _iter++)
-
static inline struct btree_iter *
__trans_next_iter(struct btree_trans *trans, unsigned idx)
{
- EBUG_ON(idx < trans->nr_iters && trans->iters[idx].idx != idx);
+ u64 l;
+
+ if (idx == BTREE_ITER_MAX)
+ return NULL;
- for (; idx < trans->nr_iters; idx++)
- if (trans->iters_linked & (1ULL << idx))
- return &trans->iters[idx];
+ l = trans->iters_linked >> idx;
+ if (!l)
+ return NULL;
- return NULL;
+ idx += __ffs64(l);
+ EBUG_ON(idx >= BTREE_ITER_MAX);
+ EBUG_ON(trans->iters[idx].idx != idx);
+ return &trans->iters[idx];
}
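
The rewrite above replaces the per-slot loop over nr_iters with a single scan of the iters_linked bitmask: shifting the mask down by idx and taking the lowest set bit (__ffs64()) yields the next linked iterator at or after idx without touching unlinked slots. A standalone sketch of the same bit-scan, using the GCC builtin in place of the kernel's __ffs64():

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 64

/* Find the first used slot at or after 'idx', or -1 if there is none. */
static int next_used_slot(uint64_t used, unsigned idx)
{
	uint64_t l;

	if (idx >= MAX_SLOTS)
		return -1;

	l = used >> idx;
	if (!l)
		return -1;

	/* count of trailing zeros == index of least significant set bit */
	return idx + __builtin_ctzll(l);
}

int main(void)
{
	uint64_t used = (1ULL << 3) | (1ULL << 17);

	printf("%d\n", next_used_slot(used, 0));	/* 3  */
	printf("%d\n", next_used_slot(used, 4));	/* 17 */
	printf("%d\n", next_used_slot(used, 18));	/* -1 */
	return 0;
}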
#define trans_for_each_iter(_trans, _iter) \
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
- return iter->l[b->level].b == b &&
- btree_node_lock_seq_matches(iter, b, b->level);
+ return iter->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->c.level);
}
static inline struct btree_iter *
(_iter)->idx + 1))
#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
+void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
-static inline void bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b) {}
+static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
+ struct btree *b) {}
static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
#endif
void bch2_trans_unlock(struct btree_trans *);
bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
-bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);
static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
unsigned new_locks_want)
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
return iter->locks_want < new_locks_want
- ? (!iter->trans->nounlock
- ? __bch2_btree_iter_upgrade(iter, new_locks_want)
- : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
+ ? __bch2_btree_iter_upgrade(iter, new_locks_want)
: iter->uptodate <= BTREE_ITER_NEED_PEEK;
}
static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
- if (iter->locks_want > (iter->flags & BTREE_ITER_INTENT) ? 1 : 0)
- __bch2_btree_iter_downgrade(iter, 0);
+ unsigned new_locks_want = (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+
+ if (iter->locks_want > new_locks_want)
+ __bch2_btree_iter_downgrade(iter, new_locks_want);
}
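
Splitting out new_locks_want also sidesteps the precedence problem in the old condition: in C, `a > b ? 1 : 0` parses as `(a > b) ? 1 : 0`, not `a > (b ? 1 : 0)`, so the old test compared locks_want against the raw flag value rather than against 0 or 1. A standalone illustration of that parse (the variable names here are just placeholders):

#include <assert.h>

int main(void)
{
	int want = 3, flags = 4;

	int parsed_as = (want > flags) ? 1 : 0;	/* how C parses "want > flags ? 1 : 0" */
	int intended  = want > (flags ? 1 : 0);	/* what the unparenthesized check meant */

	assert(parsed_as == 0);
	assert(intended == 1);
	return 0;
}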
+void bch2_trans_downgrade(struct btree_trans *);
+
void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
-
-static inline int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
-{
- return iter->uptodate >= BTREE_ITER_NEED_RELOCK
- ? __bch2_btree_iter_traverse(iter)
- : 0;
-}
+int __must_check bch2_btree_iter_traverse(struct btree_iter *);
int bch2_btree_iter_traverse_all(struct btree_trans *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
+struct btree *bch2_btree_iter_next_node(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
-void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
-void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
+bool bch2_btree_iter_advance(struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_iter *);
-static inline struct bpos btree_type_successor(enum btree_id id,
- struct bpos pos)
+static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
- if (id == BTREE_ID_INODES) {
- pos.inode++;
- pos.offset = 0;
- } else if (!btree_node_type_is_extents(id)) {
- pos = bkey_successor(pos);
- }
-
- return pos;
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ new_pos.snapshot = iter->snapshot;
+
+ iter->k.type = KEY_TYPE_deleted;
+ iter->k.p.inode = iter->pos.inode = new_pos.inode;
+ iter->k.p.offset = iter->pos.offset = new_pos.offset;
+ iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot;
+ iter->k.size = 0;
+ iter->should_be_locked = false;
}
-static inline struct bpos btree_type_predecessor(enum btree_id id,
- struct bpos pos)
+static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
- if (id == BTREE_ID_INODES) {
- --pos.inode;
- pos.offset = 0;
- } else {
- pos = bkey_predecessor(pos);
- }
-
- return pos;
+ BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
+ iter->pos = bkey_start_pos(&iter->k);
}
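
bkey_start_pos() works here because extents are indexed by where they end: the key's offset is the end of the extent and its size is the length, so the start offset is the end minus the size. A standalone sketch of that relationship (plain structs, not the bcachefs bkey types):

#include <assert.h>
#include <stdint.h>

struct extent { uint64_t end_offset; uint32_t size; };

/* start = end - length, mirroring what bkey_start_pos() computes from iter->k */
static uint64_t extent_start(struct extent e)
{
	return e.end_offset - e.size;
}

int main(void)
{
	struct extent e = { .end_offset = 4096, .size = 512 };

	assert(extent_start(e) == 3584);
	return 0;
}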
-static inline int __btree_iter_cmp(enum btree_id id,
- struct bpos pos,
- const struct btree_iter *r)
+static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
{
- if (id != r->btree_id)
- return id < r->btree_id ? -1 : 1;
- return bkey_cmp(pos, r->pos);
+ return iter->child_idx == U8_MAX ? NULL
+ : iter->trans->iters + iter->child_idx;
}
-static inline int btree_iter_cmp(const struct btree_iter *l,
- const struct btree_iter *r)
+/* Sort order for locking btree iterators: */
+static inline int btree_iter_lock_cmp(const struct btree_iter *l,
+ const struct btree_iter *r)
{
- return __btree_iter_cmp(l->btree_id, l->pos, r);
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
+ bkey_cmp(l->real_pos, r->real_pos);
}
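
The comparator chains three-way compares with the GNU `?:` operator, so the first nonzero result wins: btree id first, then cached iterators ahead of non-cached ones (the negated cmp_int()), then the iterator's real position; taking locks in this order is what keeps lock ordering consistent across a transaction. A standalone sketch of the chaining pattern, assuming cmp_int() is the usual ((l > r) - (l < r)) three-way compare:

#include <assert.h>

#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))

struct key { int id; int pos; };

static int key_cmp(struct key l, struct key r)
{
	return cmp_int(l.id, r.id) ?:	/* GNU ?: keeps the first nonzero result */
	       cmp_int(l.pos, r.pos);
}

int main(void)
{
	struct key a = { 1, 5 }, b = { 1, 9 }, c = { 2, 0 };

	assert(key_cmp(a, b) < 0);
	assert(key_cmp(a, c) < 0);
	assert(key_cmp(b, a) > 0);
	assert(key_cmp(a, a) == 0);
	return 0;
}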
/*
_start, _locks_want, _depth, _flags), \
_b = bch2_btree_iter_peek_node(_iter); \
(_b); \
- (_b) = bch2_btree_iter_next_node(_iter, _depth))
+ (_b) = bch2_btree_iter_next_node(_iter))
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
_flags, _b) \
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
- for ((_ret) = PTR_ERR_OR_ZERO((_iter) = \
- bch2_trans_get_iter((_trans), (_btree_id), \
- (_start), (_flags))) ?: \
- PTR_ERR_OR_ZERO(((_k) = \
- __bch2_btree_iter_peek(_iter, _flags)).k); \
- !_ret && (_k).k; \
- (_ret) = PTR_ERR_OR_ZERO(((_k) = \
- __bch2_btree_iter_next(_iter, _flags)).k))
+ for ((_iter) = bch2_trans_get_iter((_trans), (_btree_id), \
+ (_start), (_flags)), \
+ (_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ (_k) = __bch2_btree_iter_next(_iter, _flags))
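
With the new definition, bch2_trans_get_iter() can no longer fail with an error pointer, so the error check moves to bkey_err() on each peeked key. A hedged usage sketch, not standalone: it leans on the API declared in this header, and the BTREE_ID_extents id and bch2_trans_iter_put() helper are assumed names here:

static int walk_extents(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret) {
		/* inspect k.k / k.v here */
	}
	bch2_trans_iter_put(trans, iter);

	return ret;
}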
#define for_each_btree_key_continue(_iter, _flags, _k, _ret) \
for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
void bch2_trans_unlink_iters(struct btree_trans *);
-struct btree_iter *bch2_trans_get_iter(struct btree_trans *, enum btree_id,
- struct bpos, unsigned);
-struct btree_iter *bch2_trans_copy_iter(struct btree_trans *,
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
+ struct bpos, unsigned,
+ unsigned, unsigned);
+
+static inline struct btree_iter *
+bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
+ struct bpos pos, unsigned flags)
+{
+ struct btree_iter *iter =
+ __bch2_trans_get_iter(trans, btree_id, pos,
+ (flags & BTREE_ITER_INTENT) != 0, 0,
+ flags);
+ iter->ip_allocated = _THIS_IP_;
+ return iter;
+}
+
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
struct btree_iter *);
+static inline struct btree_iter *
+bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
+{
+ struct btree_iter *iter =
+ __bch2_trans_copy_iter(trans, src);
+
+ iter->ip_allocated = _THIS_IP_;
+ return iter;
+}
+
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
-#define TRANS_RESET_ITERS (1 << 0)
-#define TRANS_RESET_MEM (1 << 1)
+static inline bool btree_iter_live(struct btree_trans *trans, struct btree_iter *iter)
+{
+ return (trans->iters_live & (1ULL << iter->idx)) != 0;
+}
-void bch2_trans_reset(struct btree_trans *, unsigned);
+static inline bool btree_iter_keep(struct btree_trans *trans, struct btree_iter *iter)
+{
+ return btree_iter_live(trans, iter) ||
+ (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+}
-static inline void bch2_trans_begin(struct btree_trans *trans)
+static inline void set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_trans_reset(trans, TRANS_RESET_ITERS|TRANS_RESET_MEM);
+ trans->iters_touched &= ~(1ULL << iter->idx);
}
-static inline void bch2_trans_begin_updates(struct btree_trans *trans)
+#define TRANS_RESET_NOTRAVERSE (1 << 0)
+#define TRANS_RESET_NOUNLOCK (1 << 1)
+
+void bch2_trans_reset(struct btree_trans *, unsigned);
+
+static inline void bch2_trans_begin(struct btree_trans *trans)
{
- return bch2_trans_reset(trans, TRANS_RESET_MEM);
+ return bch2_trans_reset(trans, 0);
}
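
bch2_trans_begin() is what a caller invokes at the top of each attempt of a transaction, resetting iterators and allocated memory before retrying after a restart. A hedged sketch of the surrounding lifecycle, assuming the -EINTR restart convention of this era; do_work() is a placeholder:

static int run_in_transaction(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	do {
		bch2_trans_begin(&trans);	/* reset state for this attempt */
		ret = do_work(&trans);		/* placeholder for the real body */
	} while (ret == -EINTR);		/* transaction restart: retry from the top */

	bch2_trans_exit(&trans);
	return ret;
}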
void *bch2_trans_kmalloc(struct btree_trans *, size_t);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
+void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);
+
void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);