* updating the iterator state
*/
-#include <linux/six.h>
-
#include "btree_iter.h"
+#include "six.h"
void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
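/*
 * Presumably an unlock variant that skips the usual debug assertions, for
 * callers that need to drop btree locks at a point where those assertions
 * don't hold.
 */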
static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}
static inline void mark_btree_node_locked(struct btree_trans *trans,
struct btree_path *path,
unsigned level,
- enum six_lock_type type)
+ enum btree_node_locked_type type)
{
- mark_btree_node_locked_noreset(path, level, type);
+ mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[level].lock_taken_time = local_clock();
#endif
}
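/*
 * For reference, not part of this diff: the lock-state enum these changes
 * switch to. Its first three values mirror enum six_lock_type, with an
 * explicit unlocked state added, which is why casting between the two enums
 * below is safe.
 */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};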
EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
- mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+ mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
trans_for_each_path_with_node(trans, b, linked)
linked->l[b->c.level].lock_seq++;
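	/*
	 * A six lock's sequence number advances when the write lock is
	 * dropped; bumping each linked path's cached lock_seq keeps them in
	 * sync, so those paths don't later mistake our own unlock for a node
	 * modification.
	 */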
trans_for_each_path(trans, path)
if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) {
- six_lock_increment(&b->lock, want);
+ six_lock_increment(&b->lock, (enum six_lock_type) want);
return true;
}
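	/*
	 * When another path in this transaction already holds the node at
	 * least as strongly as @want, six_lock_increment() just takes an
	 * extra reference on the existing hold rather than acquiring the
	 * lock again.
	 */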
int ret = 0;
EBUG_ON(level >= BTREE_MAX_DEPTH);
- EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
+ EBUG_ON(!test_bit(path->idx, trans->paths_allocated));
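	/*
	 * Presumably trans->paths_allocated is now a bitmap rather than a
	 * single u64, hence test_bit() replacing the open-coded shift; this
	 * lifts the 64-path limit implied by the old 1ULL << path->idx form.
	 */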
if (likely(six_trylock_type(&b->lock, type)) ||
- btree_node_lock_increment(trans, b, level, type) ||
+ btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
!(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->level].lock_taken_time = local_clock();
/*
 * six locks are unfair, and read locks block while a thread wants a
 * write lock: thus, we need to tell the cycle detector we have a write
* lock _before_ taking the lock:
*/
- mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
+ mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
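	/*
	 * Even if the trylock below fails, the blocking fallback then runs
	 * with the cycle detector already aware of the write lock this
	 * thread is waiting on.
	 */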
return likely(six_trylock_write(&b->lock))
? 0
/* upgrade */
+
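/*
 * Records where a relock/upgrade attempt failed (the level and the btree
 * node involved), so the transaction-restart tracepoint below can report it.
 */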
+struct get_locks_fail {
+ unsigned l;
+ struct btree *b;
+};
+
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
- struct btree_path *, unsigned);
+ struct btree_path *, unsigned,
+ struct get_locks_fail *);
+
bool __bch2_btree_path_upgrade(struct btree_trans *,
- struct btree_path *, unsigned);
+ struct btree_path *, unsigned,
+ struct get_locks_fail *);
static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
+ struct get_locks_fail f;
unsigned old_locks_want = path->locks_want;
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
if (path->locks_want < new_locks_want
- ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+ ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
: path->uptodate == BTREE_ITER_UPTODATE)
return 0;
trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
- old_locks_want, new_locks_want);
+ old_locks_want, new_locks_want, &f);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
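/*
 * Hypothetical caller sketch, not part of this diff ("level" and the "retry"
 * label are illustrative): a nonzero return means the transaction was
 * restarted, and the caller unwinds to its retry loop.
 */
	ret = bch2_btree_path_upgrade(trans, path, level + 1);
	if (ret)
		goto retry;	/* e.g. transaction_restart_upgrade */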