#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
+#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
-#include "recovery.h"
#include "replicas.h"
-#include "subvolume.h"
+#include "snapshot.h"
#include "trace.h"
#include <linux/random.h>
static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
- if (need_resched() || race_fault())
- return drop_locks_do(trans, (schedule(), 0));
- else
- return 0;
-}
-
static inline int __btree_path_cmp(const struct btree_path *l,
enum btree_id r_btree_id,
bool r_cached,
for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
if (!path->l[i].b) {
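+ /*
+ * bch2_btree_id_root() wraps access to c->btree_roots[]; btree
+ * roots are looked up via the helper, not by indexing the array
+ * directly:
+ */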
BUG_ON(!path->cached &&
- c->btree_roots[path->btree_id].b->c.level > i);
+ bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
break;
}
if (t != BTREE_NODE_UNLOCKED) {
btree_node_unlock(trans, path, b->c.level);
- six_lock_increment(&b->c.lock, t);
- mark_btree_node_locked(trans, path, b->c.level, t);
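+ /*
+ * t is a btree_node_locked_type: aside from BTREE_NODE_UNLOCKED
+ * (excluded by the check above), its values are defined to match
+ * the corresponding six_lock_type, so the cast is safe:
+ */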
+ six_lock_increment(&b->c.lock, (enum six_lock_type) t);
+ mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
}
bch2_btree_path_level_init(trans, path, b);
unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
- struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
+ struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
enum six_lock_type lock_type;
unsigned i;
int ret;
/*
* We used to assert that all paths had been traversed here
* (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
- * path->Should_be_locked is not set yet, we we might have unlocked and
+ * path->should_be_locked is not set yet, we might have unlocked and
* then failed to relock a path - that's fine.
*/
err:
prt_newline(out);
}
-noinline __cold
+static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
bool nosort)
{
__bch2_trans_paths_to_text(out, trans, false);
}
-noinline __cold
+static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
struct printbuf buf = PRINTBUF;
: NULL;
}
-struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos end_pos)
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end_pos)
{
struct bkey_i *k;
iter->key_cache_path = NULL;
}
-static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
-{
- bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags),
- _RET_IP_);
-}
-
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned depth,
unsigned flags)
{
-       flags |= BTREE_ITER_NOT_EXTENTS;
-       flags |= __BTREE_ITER_ALL_SNAPSHOTS;
-       flags |= BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_NOT_EXTENTS;
+	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_ALL_SNAPSHOTS;
bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
__bch2_btree_iter_flags(trans, btree_id, flags),
u32 bch2_trans_begin(struct btree_trans *trans)
{
struct btree_path *path;
+ u64 now;
bch2_trans_reset_updates(trans);
path->preserve = false;
}
+ now = local_clock();
if (!trans->restarted &&
(need_resched() ||
- local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))
+ now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
drop_locks_do(trans, (cond_resched(), 0));
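+ /* re-read the clock so time spent scheduled out isn't charged to the new transaction: */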
+ now = local_clock();
+ }
+ trans->last_begin_time = now;
if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
bch2_trans_reset_srcu_lock(trans);
trans->notrace_relock_fail = false;
}
- trans->last_begin_time = local_clock();
return trans->restart_count;
}
#ifdef __KERNEL__
p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
- if (!p)
+ if (!p) {
p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
- /*
- * paths need to be zeroed, bch2_check_for_deadlock looks at paths in
- * other threads
- */
+ /*
+ * paths need to be zeroed, bch2_check_for_deadlock looks at
+ * paths in other threads
+ */
+ memset(p, 0, paths_bytes);
+ }
trans->paths = p; p += paths_bytes;
trans->updates = p; p += updates_bytes;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
struct btree_trans *pos;
- mutex_lock(&c->btree_trans_lock);
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(pos, &c->btree_trans_list, list) {
/*
* We'd much prefer to be stricter here and completely
}
list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
}
}
bch2_trans_unlock(trans);
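+ /*
+ * Remove this trans from the debug list before closure_sync(), so
+ * other threads walking btree_trans_list can no longer find it
+ * while it's being torn down:
+ */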
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
closure_sync(&trans->ref);
if (s)
check_btree_paths_leaked(trans);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
- mutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- mutex_unlock(&c->btree_trans_lock);
- }
-
srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
}
INIT_LIST_HEAD(&c->btree_trans_list);
- mutex_init(&c->btree_trans_lock);
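+ /*
+ * seqmutex: the sequence number lets list walkers drop this lock
+ * and then detect, on relock, whether the list may have changed:
+ */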
+ seqmutex_init(&c->btree_trans_lock);
ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
sizeof(struct btree_path) * nr +