#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
+#include "trace.h"
-#include <linux/prandom.h>
+#include <linux/random.h>
#include <linux/prefetch.h>
-#include <trace/events/bcachefs.h>
-
-static void btree_trans_verify_sorted(struct btree_trans *);
-inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
-static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *,
- struct btree_path *, int);
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
return iter->ip_allocated;
#else
return 0;
static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
- if (need_resched() || race_fault()) {
- bch2_trans_unlock(trans);
- schedule();
- return bch2_trans_relock(trans);
- } else {
- return 0;
- }
-}
-
static inline int __btree_path_cmp(const struct btree_path *l,
enum btree_id r_btree_id,
bool r_cached,
struct bpos pos = iter->pos;
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
- bkey_cmp(pos, POS_MAX))
+ !bkey_eq(pos, POS_MAX))
pos = bkey_successor(iter, pos);
return pos;
}
static inline bool btree_path_pos_before_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(path->pos, b->data->min_key) < 0;
+ return bpos_lt(path->pos, b->data->min_key);
}
static inline bool btree_path_pos_after_node(struct btree_path *path,
struct btree *b)
{
- return bpos_cmp(b->key.k.p, path->pos) < 0;
+ return bpos_gt(path->pos, b->key.k.p);
}
static inline bool btree_path_pos_in_node(struct btree_path *path,
ck = (void *) path->l[0].b;
BUG_ON(ck->key.btree_id != path->btree_id ||
- bkey_cmp(ck->key.pos, path->pos));
+ !bkey_eq(ck->key.pos, path->pos));
if (!locked)
btree_node_unlock(trans, path, 0);
for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
if (!path->l[i].b) {
BUG_ON(!path->cached &&
- c->btree_roots[path->btree_id].b->c.level > i);
+ bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
break;
}
BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
iter->pos.snapshot != iter->snapshot);
- BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
- bkey_cmp(iter->pos, iter->k.p) > 0);
+ BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+ bkey_gt(iter->pos, iter->k.p));
}
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
if (ret)
goto out;
- if (!bkey_cmp(prev.k->p, k.k->p) &&
+ if (bkey_eq(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
prev.k->p.snapshot) > 0) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
unsigned idx;
struct printbuf buf = PRINTBUF;
+ btree_trans_sort_paths(trans);
+
trans_for_each_path_inorder(trans, path, idx) {
int cmp = cmp_int(path->btree_id, id) ?:
cmp_int(path->cached, key_cache);
continue;
if (!key_cache) {
- if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
- bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
+ if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+ bkey_le(pos, path->l[0].b->key.k.p))
return;
} else {
- if (!bkey_cmp(pos, path->pos))
+ if (bkey_eq(pos, path->pos))
return;
}
}
unsigned clobber_u64s,
unsigned new_u64s)
{
- struct bset_tree *t = bch2_bkey_to_bset(b, where);
+ struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
struct btree_path *linked;
if (node_iter != &path->l[b->c.level].iter) {
bch2_btree_node_iter_peek(&l->iter, l->b));
path->pos = k.k ? k.k->p : l->b->key.k.p;
+ trans->paths_sorted = false;
bch2_btree_path_verify_level(trans, path, l - path->l);
return k;
}
bch2_btree_node_iter_prev(&l->iter, l->b));
path->pos = k.k ? k.k->p : l->b->data->min_key;
+ trans->paths_sorted = false;
bch2_btree_path_verify_level(trans, path, l - path->l);
return k;
}
BUG_ON(path->cached);
EBUG_ON(!btree_path_pos_in_node(path, b));
- EBUG_ON(b->c.lock.state.seq & 1);
- path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
path->l[b->c.level].b = b;
__btree_path_level_init(path, b->c.level);
}
/* Btree path: fixups after btree node updates: */
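+/*
+ * A descriptive note on the new helper below (added by the editor, not part
+ * of the original patch): a btree node was just modified or replaced, so
+ * re-read the old key/value cached in any pending transaction updates that
+ * point into this node, so the commit path sees the node's current contents.
+ */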
+static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ if (!i->cached &&
+ i->level == b->c.level &&
+ i->btree_id == b->c.btree_id &&
+		    bpos_ge(i->k->k.p, b->data->min_key) &&
+		    bpos_le(i->k->k.p, b->data->max_key)) {
+ i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
+
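+			/*
+			 * Before journal replay has finished, the journal keys
+			 * may hold a newer version of this key than the btree:
+			 */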
+ if (unlikely(trans->journal_replay_not_finished)) {
+ struct bkey_i *j_k =
+ bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
+ i->k->k.p);
+
+ if (j_k) {
+ i->old_k = j_k->k;
+ i->old_v = &j_k->v;
+ }
+ }
+ }
+}
+
/*
* A btree node is being replaced - update the iterator to point to the new
* node:
if (t != BTREE_NODE_UNLOCKED) {
btree_node_unlock(trans, path, b->c.level);
- six_lock_increment(&b->c.lock, t);
- mark_btree_node_locked(trans, path, b->c.level, t);
+ six_lock_increment(&b->c.lock, (enum six_lock_type) t);
+ mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
}
bch2_btree_path_level_init(trans, path, b);
}
+
+ bch2_trans_revalidate_updates_in_node(trans, b);
}
/*
trans_for_each_path_with_node(trans, b, path)
__btree_path_level_init(path, b->c.level);
+
+ bch2_trans_revalidate_updates_in_node(trans, b);
}
/* Btree path: traverse, set_pos: */
unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
- struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
+ struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
enum six_lock_type lock_type;
unsigned i;
int ret;
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
path->level - 1);
}
break;
bch2_bkey_buf_reassemble(&tmp, c, k);
- ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
path->level - 1);
}
struct btree *b;
unsigned level = path->level - 1;
enum six_lock_type lock_type = __btree_lock_want(path, level);
- bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
struct bkey_buf tmp;
int ret;
bch2_bkey_buf_init(&tmp);
- if (unlikely(!replay_done)) {
+ if (unlikely(trans->journal_replay_not_finished)) {
ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
if (ret)
goto err;
if (unlikely(ret))
goto err;
- if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
+ if (likely(!trans->journal_replay_not_finished &&
+ tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
unlikely(b != btree_node_mem_ptr(tmp.k)))
btree_node_mem_ptr_set(trans, path, level + 1, b);
return ret;
}
-static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
- unsigned, unsigned long);
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct btree_path *path;
unsigned long trace_ip = _RET_IP_;
- int ret = 0;
+ int i, ret = 0;
if (trans->in_traverse_all)
return -BCH_ERR_transaction_restart_in_traverse_all;
trans->in_traverse_all = true;
retry_all:
trans->restarted = 0;
- trans->traverse_all_idx = U8_MAX;
+ trans->last_restarted_ip = 0;
trans_for_each_path(trans, path)
path->should_be_locked = false;
- btree_trans_verify_sorted(trans);
+ btree_trans_sort_paths(trans);
bch2_trans_unlock(trans);
cond_resched();
}
/* Now, redo traversals in correct order: */
- trans->traverse_all_idx = 0;
- while (trans->traverse_all_idx < trans->nr_sorted) {
- path = trans->paths + trans->sorted[trans->traverse_all_idx];
+ i = 0;
+ while (i < trans->nr_sorted) {
+ path = trans->paths + trans->sorted[i];
/*
* Traversing a path can cause another path to be added at about
* the same position:
*/
if (path->uptodate) {
- ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
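+			/*
+			 * Hold a ref so the path can't be freed out from under
+			 * us while we traverse it:
+			 */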
+ __btree_path_get(path, false);
+ ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
+ __btree_path_put(path, false);
+
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
- ret == -ENOMEM)
+ bch2_err_matches(ret, ENOMEM))
goto retry_all;
if (ret)
goto err;
- BUG_ON(path->uptodate);
} else {
- trans->traverse_all_idx++;
+ i++;
}
}
/*
- * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
- * and relock(), relock() won't relock since path->should_be_locked
- * isn't set yet, which is all fine
+ * We used to assert that all paths had been traversed here
+ * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
+	 * path->should_be_locked is not set yet, we might have unlocked and
+ * then failed to relock a path - that's fine.
*/
- trans_for_each_path(trans, path)
- BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
err:
bch2_btree_cache_cannibalize_unlock(c);
* On error, caller (peek_node()/peek_key()) must return NULL; the error is
* stashed in the iterator and returned from bch2_trans_exit().
*/
-static int btree_path_traverse_one(struct btree_trans *trans,
- struct btree_path *path,
- unsigned flags,
- unsigned long trace_ip)
+int bch2_btree_path_traverse_one(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
+ unsigned long trace_ip)
{
unsigned depth_want = path->level;
int ret = -((int) trans->restarted);
path->uptodate = BTREE_ITER_UPTODATE;
out:
- BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
+ panic("ret %s (%i) trans->restarted %s (%i)\n",
+ bch2_err_str(ret), ret,
+ bch2_err_str(trans->restarted), trans->restarted);
bch2_btree_path_verify(trans, path);
return ret;
}
-int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
- struct btree_path *path, unsigned flags)
-{
- if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
- u64 mask = ~(~0ULL << restart_probability_bits);
-
- if ((prandom_u32() & mask) == mask) {
- trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
- }
- }
-
- if (path->uptodate < BTREE_ITER_NEED_RELOCK)
- return 0;
-
- return bch2_trans_cond_resched(trans) ?:
- btree_path_traverse_one(trans, path, flags, _RET_IP_);
-}
-
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
struct btree_path *src)
{
__btree_path_put(path, intent);
path = btree_path_clone(trans, path, intent);
path->preserve = false;
-#ifdef CONFIG_BCACHEFS_DEBUG
- path->ip_allocated = ip;
-#endif
- btree_trans_verify_sorted(trans);
return path;
}
struct btree_path *path, struct bpos new_pos,
bool intent, unsigned long ip, int cmp)
{
- unsigned l = path->level;
+ unsigned level = path->level;
- EBUG_ON(trans->restarted);
+ bch2_trans_verify_not_in_restart(trans);
EBUG_ON(!path->ref);
path = bch2_btree_path_make_mut(trans, path, intent, ip);
- path->pos = new_pos;
-
- bch2_btree_path_check_sort_fast(trans, path, cmp);
+ path->pos = new_pos;
+ trans->paths_sorted = false;
if (unlikely(path->cached)) {
btree_node_unlock(trans, path, 0);
goto out;
}
- l = btree_path_up_until_good_node(trans, path, cmp);
+ level = btree_path_up_until_good_node(trans, path, cmp);
- if (btree_path_node(path, l)) {
- BUG_ON(!btree_node_locked(path, l));
+ if (btree_path_node(path, level)) {
+ struct btree_path_level *l = &path->l[level];
+
+ BUG_ON(!btree_node_locked(path, level));
/*
* We might have to skip over many keys, or just a few: try
* advancing the node iterator, and if we have to skip over too
* is expensive).
*/
if (cmp < 0 ||
- !btree_path_advance_to_pos(path, &path->l[l], 8))
- __btree_path_level_init(path, l);
+ !btree_path_advance_to_pos(path, l, 8))
+ bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
+
+ /*
+	 * Iterators to interior nodes should always be pointed at the first
+	 * non-whiteout:
+ */
+ if (unlikely(level))
+ bch2_btree_node_iter_peek(&l->iter, l->b);
}
- if (unlikely(l != path->level)) {
+ if (unlikely(level != path->level)) {
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
__bch2_btree_path_unlock(trans, path);
}
__bch2_path_free(trans, path);
}
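+/*
+ * Out-of-line panic helpers for bch2_trans_verify_not_restarted() and
+ * bch2_trans_verify_not_in_restart():
+ */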
+void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
+{
+ panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
+ trans->restart_count, restart_count,
+ (void *) trans->last_begin_ip);
+}
+
+void bch2_trans_in_restart_error(struct btree_trans *trans)
+{
+ panic("in transaction restart: %s, last restarted by %pS\n",
+ bch2_err_str(trans->restarted),
+ (void *) trans->last_restarted_ip);
+}
+
+noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
struct btree_insert_entry *i;
+ struct btree_write_buffered_key *wb;
prt_printf(buf, "transaction updates for %s journal seq %llu",
trans->fn, trans->journal_res.seq);
prt_newline(buf);
}
+ trans_for_each_wb_update(trans, wb) {
+		prt_printf(buf, "update: btree=%s wb=1",
+			   bch2_btree_ids[wb->btree]);
+ prt_newline(buf);
+
+ prt_printf(buf, " new ");
+ bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k));
+ prt_newline(buf);
+ }
+
printbuf_indent_sub(buf, 2);
}
printbuf_exit(&buf);
}
+noinline __cold
void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
{
prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
bch2_bpos_to_text(out, path->pos);
prt_printf(out, " locks %u", path->nodes_locked);
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
prt_newline(out);
}
-void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
+static noinline __cold
+void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
+ bool nosort)
{
struct btree_path *path;
unsigned idx;
+ if (!nosort)
+ btree_trans_sort_paths(trans);
+
trans_for_each_path_inorder(trans, path, idx)
bch2_btree_path_to_text(out, path);
}
noinline __cold
-void bch2_dump_trans_paths_updates(struct btree_trans *trans)
+void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
+{
+ __bch2_trans_paths_to_text(out, trans, false);
+}
+
+static noinline __cold
+void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
struct printbuf buf = PRINTBUF;
- bch2_trans_paths_to_text(&buf, trans);
+ __bch2_trans_paths_to_text(&buf, trans, nosort);
bch2_trans_updates_to_text(&buf, trans);
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
-noinline
+noinline __cold
+void bch2_dump_trans_paths_updates(struct btree_trans *trans)
+{
+ __bch2_dump_trans_paths_updates(trans, false);
+}
+
+noinline __cold
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
struct btree_transaction_stats *s = btree_trans_stats(trans);
struct printbuf buf = PRINTBUF;
+ if (!s)
+ return;
+
bch2_trans_paths_to_text(&buf, trans);
if (!buf.allocation_failure) {
}
printbuf_exit(&buf);
+
+ trans->nr_max_paths = hweight64(trans->paths_allocated);
}
static noinline void btree_path_overflow(struct btree_trans *trans)
btree_path_overflow(trans);
idx = __ffs64(~trans->paths_allocated);
- trans->paths_allocated |= 1ULL << idx;
+ /*
+ * Do this before marking the new path as allocated, since it won't be
+ * initialized yet:
+ */
if (unlikely(idx > trans->nr_max_paths))
bch2_trans_update_max_paths(trans);
- path = &trans->paths[idx];
+ trans->paths_allocated |= 1ULL << idx;
+ path = &trans->paths[idx];
path->idx = idx;
path->ref = 0;
path->intent_ref = 0;
path->nodes_locked = 0;
btree_path_list_add(trans, pos, path);
+ trans->paths_sorted = false;
return path;
}
bool intent = flags & BTREE_ITER_INTENT;
int i;
- BUG_ON(trans->restarted);
- btree_trans_verify_sorted(trans);
+ bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_locks(trans);
+ btree_trans_sort_paths(trans);
+
trans_for_each_path_inorder(trans, path, i) {
if (__btree_path_cmp(path,
btree_id,
path->nodes_locked = 0;
for (i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
-#ifdef CONFIG_BCACHEFS_DEBUG
+#ifdef TRACK_PATH_ALLOCATED
path->ip_allocated = ip;
#endif
- btree_trans_verify_sorted(trans);
+ trans->paths_sorted = false;
}
if (!(flags & BTREE_ITER_NOPRESERVE))
_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
- EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
+ EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
- if (!k.k || bpos_cmp(path->pos, k.k->p))
+ if (!k.k || !bpos_eq(path->pos, k.k->p))
goto hole;
} else {
struct bkey_cached *ck = (void *) path->l[0].b;
EBUG_ON(ck &&
(path->btree_id != ck->key.btree_id ||
- bkey_cmp(path->pos, ck->key.pos)));
- EBUG_ON(!ck || !ck->valid);
+ !bkey_eq(path->pos, ck->key.pos)));
+ if (!ck || !ck->valid)
+ return bkey_s_c_null;
*u = ck->k->k;
k = bkey_i_to_s_c(ck->k);
if (!b)
goto out;
- BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+ BUG_ON(bpos_lt(b->key.k.p, iter->pos));
bkey_init(&iter->k);
iter->k.p = iter->pos = b->key.k.p;
goto out;
}
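+/*
+ * Like bch2_btree_iter_peek_node(), but transparently handles transaction
+ * restarts by calling bch2_trans_begin() and retrying:
+ */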
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
+{
+ struct btree *b;
+
+ while (b = bch2_btree_iter_peek_node(iter),
+ bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
+ bch2_trans_begin(iter->trans);
+
+ return b;
+}
+
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
- BUG_ON(trans->restarted);
+ bch2_trans_verify_not_in_restart(trans);
EBUG_ON(iter->path->cached);
bch2_btree_iter_verify(iter);
b = btree_path_node(path, path->level + 1);
- if (!bpos_cmp(iter->pos, b->key.k.p)) {
+ if (bpos_eq(iter->pos, b->key.k.p)) {
__btree_path_set_level_up(trans, path, path->level++);
} else {
/*
{
if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
struct bpos pos = iter->k.p;
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, SPOS_MAX)
- : bkey_cmp(pos, SPOS_MAX)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, SPOS_MAX)
+ : bkey_eq(pos, SPOS_MAX));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_successor(iter, pos);
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
- ? bpos_cmp(pos, POS_MIN)
- : bkey_cmp(pos, POS_MIN)) != 0;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, POS_MIN)
+ : bkey_eq(pos, POS_MIN));
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
pos = bkey_predecessor(iter, pos);
return ret;
}
-static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bpos pos)
+static noinline
+struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
{
struct btree_insert_entry *i;
struct bkey_i *ret = NULL;
- trans_for_each_update(trans, i) {
- if (i->btree_id < btree_id)
+ trans_for_each_update(iter->trans, i) {
+ if (i->btree_id < iter->btree_id)
continue;
- if (i->btree_id > btree_id)
+ if (i->btree_id > iter->btree_id)
break;
- if (bpos_cmp(i->k->k.p, pos) < 0)
+ if (bpos_lt(i->k->k.p, iter->path->pos))
continue;
if (i->key_cache_already_flushed)
continue;
- if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+ if (!ret || bpos_lt(i->k->k.p, ret->k.p))
ret = i->k;
}
return ret;
}
-struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos start_pos,
- struct bpos end_pos)
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
+{
+ return iter->flags & BTREE_ITER_WITH_UPDATES
+ ? __bch2_btree_trans_peek_updates(iter)
+ : NULL;
+}
+
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end_pos)
{
struct bkey_i *k;
- if (bpos_cmp(start_pos, iter->journal_pos) < 0)
+ if (bpos_lt(iter->path->pos, iter->journal_pos))
iter->journal_idx = 0;
k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
iter->path->level,
- start_pos, end_pos,
+ iter->path->pos,
+ end_pos,
&iter->journal_idx);
iter->journal_pos = k ? k->k.p : end_pos;
return k;
}
-struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos pos)
+static noinline
+struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- return bch2_btree_journal_peek(trans, iter, pos, pos);
+ struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);
+
+ if (k) {
+ iter->k = k->k;
+ return bkey_i_to_s_c(k);
+ } else {
+ return bkey_s_c_null;
+ }
}
static noinline
struct bkey_s_c k)
{
struct bkey_i *next_journal =
- bch2_btree_journal_peek(trans, iter, iter->path->pos,
+ bch2_btree_journal_peek(trans, iter,
k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
if (next_journal) {
* bkey_s_c_null:
*/
static noinline
-struct bkey_s_c __btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey u;
+ struct bkey_s_c k;
int ret;
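+	/*
+	 * The key cache fill path peeks the underlying btree with this flag
+	 * set - don't look back into the key cache for the position currently
+	 * being filled:
+	 */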
+ if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
+ bpos_eq(iter->pos, pos))
+ return bkey_s_c_null;
+
if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
return bkey_s_c_null;
if (!iter->key_cache_path)
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
iter->flags & BTREE_ITER_INTENT, 0,
- iter->flags|BTREE_ITER_CACHED,
+ iter->flags|BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL,
_THIS_IP_);
iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
- ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
+ iter->flags|BTREE_ITER_CACHED) ?:
+ bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
if (unlikely(ret))
return bkey_s_c_err(ret);
btree_path_set_should_be_locked(iter->key_cache_path);
- return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
-}
-
-static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
-{
- struct bkey_s_c ret = __btree_trans_peek_key_cache(iter, pos);
- int err = bkey_err(ret) ?: bch2_btree_path_relock(iter->trans, iter->path, _THIS_IP_);
-
- return err ? bkey_s_c_err(err) : ret;
+ k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
+ if (k.k && !bkey_err(k)) {
+ iter->k = u;
+ k.k = &iter->k;
+ }
+ return k;
}
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
k = btree_trans_peek_journal(trans, iter, k);
- next_update = iter->flags & BTREE_ITER_WITH_UPDATES
- ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
- : NULL;
+ next_update = btree_trans_peek_updates(iter);
+
if (next_update &&
- bpos_cmp(next_update->k.p,
- k.k ? k.k->p : l->b->key.k.p) <= 0) {
+ bpos_le(next_update->k.p,
+ k.k ? k.k->p : l->b->key.k.p)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
}
* whiteout, with a real key at the same position, since
* in the btree deleted keys sort before non deleted.
*/
- search_key = bpos_cmp(search_key, k.k->p)
+ search_key = !bpos_eq(search_key, k.k->p)
? k.k->p
: bpos_successor(k.k->p);
continue;
if (likely(k.k)) {
break;
- } else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
+ } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
/* Advance to next leaf node: */
search_key = bpos_successor(l->b->key.k.p);
} else {
int ret;
EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
+ EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path,
while (1) {
k = __bch2_btree_iter_peek(iter, search_key);
- if (!k.k || bkey_err(k))
+ if (unlikely(!k.k))
+ goto end;
+ if (unlikely(bkey_err(k)))
goto out_no_locked;
/*
*/
if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
iter_pos = k.k->p;
- else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- iter_pos = bkey_start_pos(k.k);
else
- iter_pos = iter->pos;
+ iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
- if (bkey_cmp(iter_pos, end) > 0) {
- bch2_btree_iter_set_pos(iter, end);
- k = bkey_s_c_null;
- goto out_no_locked;
- }
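+		/*
+		 * end is inclusive for regular keys, exclusive (by start
+		 * position) for extents:
+		 */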
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(iter_pos, end)
+ : bkey_ge(iter_pos, end)))
+ goto end;
if (iter->update_path &&
- bkey_cmp(iter->update_path->pos, k.k->p)) {
+ !bkey_eq(iter->update_path->pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
iter->update_path, pos,
iter->flags & BTREE_ITER_INTENT,
_THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
+ if (unlikely(ret)) {
+ k = bkey_s_c_err(ret);
+ goto out_no_locked;
+ }
}
/*
btree_path_set_should_be_locked(iter->path);
out_no_locked:
if (iter->update_path) {
- if (iter->update_path->uptodate &&
- (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
+ ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+ if (unlikely(ret))
k = bkey_s_c_err(ret);
else
btree_path_set_should_be_locked(iter->update_path);
bch2_btree_iter_verify_entry_exit(iter);
return k;
+end:
+ bch2_btree_iter_set_pos(iter, end);
+ k = bkey_s_c_null;
+ goto out_no_locked;
}
/**
/* Check if we should go up to the parent node: */
if (!k.k ||
(iter->advanced &&
- !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
+ bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
iter->pos = path_l(iter->path)->b->key.k.p;
btree_path_set_level_up(trans, iter->path);
iter->advanced = false;
if (iter->path->level != iter->min_depth &&
(iter->advanced ||
!k.k ||
- bpos_cmp(iter->pos, k.k->p))) {
+ !bpos_eq(iter->pos, k.k->p))) {
btree_path_set_level_down(trans, iter->path, iter->min_depth);
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
if (iter->path->level == iter->min_depth &&
iter->advanced &&
k.k &&
- !bpos_cmp(iter->pos, k.k->p)) {
+ bpos_eq(iter->pos, k.k->p)) {
iter->pos = bpos_successor(iter->pos);
iter->advanced = false;
continue;
if (iter->advanced &&
iter->path->level == iter->min_depth &&
- bpos_cmp(k.k->p, iter->pos))
+ !bpos_eq(k.k->p, iter->pos))
iter->advanced = false;
BUG_ON(iter->advanced);
&iter->path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
- ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
- : bpos_cmp(k.k->p, search_key) > 0))
+ ? bpos_ge(bkey_start_pos(k.k), search_key)
+ : bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, iter->path,
&iter->path->l[0], &iter->k);
- bch2_btree_path_check_sort(trans, iter->path, 0);
-
if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
if (k.k->p.snapshot == iter->snapshot)
* longer at the same _key_ (not pos), return
* that candidate
*/
- if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
+ if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->path = saved_path;
}
break;
- } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
} else {
}
}
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
+ EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
/* Extents can straddle iter->pos: */
- if (bkey_cmp(k.k->p, iter->pos) < 0)
+ if (bkey_lt(k.k->p, iter->pos))
iter->pos = k.k->p;
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
!(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
struct bkey_i *next_update;
- if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
- (next_update = btree_trans_peek_updates(trans,
- iter->btree_id, search_key)) &&
- !bpos_cmp(next_update->k.p, iter->pos)) {
+ if ((next_update = btree_trans_peek_updates(iter)) &&
+ bpos_eq(next_update->k.p, iter->pos)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
goto out;
}
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
- (next_update = bch2_btree_journal_peek_slot(trans,
- iter, iter->pos))) {
- iter->k = next_update->k;
- k = bkey_i_to_s_c(next_update);
+ (k = btree_trans_peek_slot_journal(trans, iter)).k)
goto out;
- }
if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
- (k = __btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
/* We're not returning a key from iter->path: */
goto out_no_locked;
} else {
struct bpos next;
+ struct bpos end = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ end.offset = U64_MAX;
EBUG_ON(iter->path->level);
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter iter2;
- struct bpos end = iter->pos;
-
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- end.offset = U64_MAX;
bch2_trans_copy_iter(&iter2, iter);
k = bch2_btree_iter_peek_upto(&iter2, end);
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek(iter);
+ k = bch2_btree_iter_peek_upto(iter, end);
if (unlikely(bkey_err(k)))
bch2_btree_iter_set_pos(iter, pos);
else
next = k.k ? bkey_start_pos(k.k) : POS_MAX;
- if (bkey_cmp(iter->pos, next) < 0) {
+ if (bkey_lt(iter->pos, next)) {
bkey_init(&iter->k);
iter->k.p = iter->pos;
return bch2_btree_iter_peek_slot(iter);
}
-/* new transactional stuff: */
-
-static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
- struct btree_path *path)
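+/*
+ * Like bch2_btree_iter_peek_type(), but handles transaction restarts (and
+ * too many iterators) by calling bch2_trans_begin() and retrying:
+ */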
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
{
- EBUG_ON(path->sorted_idx >= trans->nr_sorted);
- EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
- EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
+ struct bkey_s_c k;
+
+ while (btree_trans_too_many_iters(iter->trans) ||
+ (k = bch2_btree_iter_peek_type(iter, iter->flags),
+ bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+ bch2_trans_begin(iter->trans);
+
+ return k;
}
-static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
-{
+/* new transactional stuff: */
+
#ifdef CONFIG_BCACHEFS_DEBUG
+static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
+{
+ struct btree_path *path;
unsigned i;
- for (i = 0; i < trans->nr_sorted; i++)
- btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
-#endif
+ BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated));
+
+ trans_for_each_path(trans, path) {
+ BUG_ON(path->sorted_idx >= trans->nr_sorted);
+ BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+ }
+
+ for (i = 0; i < trans->nr_sorted; i++) {
+ unsigned idx = trans->sorted[i];
+
+ EBUG_ON(!(trans->paths_allocated & (1ULL << idx)));
+ BUG_ON(trans->paths[idx].sorted_idx != i);
+ }
}
static void btree_trans_verify_sorted(struct btree_trans *trans)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
struct btree_path *path, *prev = NULL;
unsigned i;
trans_for_each_path_inorder(trans, path, i) {
if (prev && btree_path_cmp(prev, path) > 0) {
- bch2_dump_trans_paths_updates(trans);
+ __bch2_dump_trans_paths_updates(trans, true);
panic("trans paths out of order!\n");
}
prev = path;
}
-#endif
-}
-
-static inline void btree_path_swap(struct btree_trans *trans,
- struct btree_path *l, struct btree_path *r)
-{
- swap(l->sorted_idx, r->sorted_idx);
- swap(trans->sorted[l->sorted_idx],
- trans->sorted[r->sorted_idx]);
-
- btree_path_verify_sorted_ref(trans, l);
- btree_path_verify_sorted_ref(trans, r);
-}
-
-static inline struct btree_path *sib_btree_path(struct btree_trans *trans,
- struct btree_path *path, int sib)
-{
- unsigned idx = (unsigned) path->sorted_idx + sib;
-
- EBUG_ON(sib != -1 && sib != 1);
-
- return idx < trans->nr_sorted
- ? trans->paths + trans->sorted[idx]
- : NULL;
}
+#else
+static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
+static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
+#endif
-static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *trans,
- struct btree_path *path,
- int cmp)
+void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
{
- struct btree_path *n;
- int cmp2;
-
- EBUG_ON(!cmp);
-
- while ((n = sib_btree_path(trans, path, cmp)) &&
- (cmp2 = btree_path_cmp(n, path)) &&
- cmp2 != cmp)
- btree_path_swap(trans, n, path);
-
- btree_trans_verify_sorted(trans);
-}
+ int i, l = 0, r = trans->nr_sorted, inc = 1;
+ bool swapped;
-inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
- int cmp)
-{
- struct btree_path *n;
+ btree_trans_verify_sorted_refs(trans);
- if (cmp <= 0) {
- n = prev_btree_path(trans, path);
- if (n && btree_path_cmp(n, path) > 0) {
- do {
- btree_path_swap(trans, n, path);
- n = prev_btree_path(trans, path);
- } while (n && btree_path_cmp(n, path) > 0);
+ if (trans->paths_sorted)
+ goto out;
- goto out;
+ /*
+ * Cocktail shaker sort: this is efficient because iterators will be
+ * mostly sorted.
+ */
+ do {
+ swapped = false;
+
+ for (i = inc > 0 ? l : r - 2;
+ i + 1 < r && i >= l;
+ i += inc) {
+ if (btree_path_cmp(trans->paths + trans->sorted[i],
+ trans->paths + trans->sorted[i + 1]) > 0) {
+ swap(trans->sorted[i], trans->sorted[i + 1]);
+ trans->paths[trans->sorted[i]].sorted_idx = i;
+ trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
+ swapped = true;
+ }
}
- }
- if (cmp >= 0) {
- n = next_btree_path(trans, path);
- if (n && btree_path_cmp(path, n) > 0) {
- do {
- btree_path_swap(trans, path, n);
- n = next_btree_path(trans, path);
- } while (n && btree_path_cmp(path, n) > 0);
- }
- }
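+		/*
+		 * After each pass, the largest (or smallest) remaining element
+		 * is in its final position, so shrink the range we scan:
+		 */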
+ if (inc > 0)
+ --r;
+ else
+ l++;
+ inc = -inc;
+ } while (swapped);
+
+ trans->paths_sorted = true;
out:
btree_trans_verify_sorted(trans);
}
unsigned i;
EBUG_ON(path->sorted_idx >= trans->nr_sorted);
-
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
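+	/*
+	 * trans->sorted is an array of u8 path indices: with efficient
+	 * unaligned access, shift it with word-sized copies instead of the
+	 * generic array_remove_item():
+	 */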
+ trans->nr_sorted--;
+ memmove_u64s_down_small(trans->sorted + path->sorted_idx,
+ trans->sorted + path->sorted_idx + 1,
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+#else
array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
-
+#endif
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
trans->paths[trans->sorted[i]].sorted_idx = i;
path->sorted_idx = U8_MAX;
-
- btree_trans_verify_sorted_refs(trans);
}
static inline void btree_path_list_add(struct btree_trans *trans,
{
unsigned i;
- btree_trans_verify_sorted_refs(trans);
-
- path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
-
- if (trans->in_traverse_all &&
- trans->traverse_all_idx != U8_MAX &&
- trans->traverse_all_idx >= path->sorted_idx)
- trans->traverse_all_idx++;
+ path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
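+	/* As in btree_path_list_remove(): shift the u8 index array with word-sized copies */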
+ memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
+ trans->sorted + path->sorted_idx,
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+ trans->nr_sorted++;
+ trans->sorted[path->sorted_idx] = path->idx;
+#else
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
+#endif
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
trans->paths[trans->sorted[i]].sorted_idx = i;
void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
- if (iter->path)
- bch2_path_put(trans, iter->path,
- iter->flags & BTREE_ITER_INTENT);
if (iter->update_path)
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
+ if (iter->path)
+ bch2_path_put(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
if (iter->key_cache_path)
bch2_path_put(trans, iter->key_cache_path,
iter->flags & BTREE_ITER_INTENT);
iter->key_cache_path = NULL;
}
-static inline void __bch2_trans_iter_init(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned locks_want,
- unsigned depth,
- unsigned flags,
- unsigned long ip)
-{
- if (unlikely(trans->restarted))
- panic("bch2_trans_iter_init(): in transaction restart, %s by %pS\n",
- bch2_err_str(trans->restarted),
- (void *) trans->last_restarted_ip);
-
- if (flags & BTREE_ITER_ALL_LEVELS)
- flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
-
- if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
- btree_node_type_is_extents(btree_id))
- flags |= BTREE_ITER_IS_EXTENTS;
-
- if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
- !btree_type_has_snapshots(btree_id))
- flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
-
- if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
- btree_type_has_snapshots(btree_id))
- flags |= BTREE_ITER_FILTER_SNAPSHOTS;
-
- if (trans->journal_replay_not_finished)
- flags |= BTREE_ITER_WITH_JOURNAL;
-
- iter->trans = trans;
- iter->path = NULL;
- iter->update_path = NULL;
- iter->key_cache_path = NULL;
- iter->btree_id = btree_id;
- iter->min_depth = depth;
- iter->flags = flags;
- iter->snapshot = pos.snapshot;
- iter->pos = pos;
- iter->k.type = KEY_TYPE_deleted;
- iter->k.p = pos;
- iter->k.size = 0;
- iter->journal_idx = 0;
- iter->journal_pos = POS_MIN;
-#ifdef CONFIG_BCACHEFS_DEBUG
- iter->ip_allocated = ip;
-#endif
-
- iter->path = bch2_path_get(trans, btree_id, iter->pos,
- locks_want, depth, flags, ip);
-}
-
-void bch2_trans_iter_init(struct btree_trans *trans,
+void bch2_trans_iter_init_outlined(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned flags)
{
- if (!btree_id_cached(trans->c, btree_id)) {
- flags &= ~BTREE_ITER_CACHED;
- flags &= ~BTREE_ITER_WITH_KEY_CACHE;
- } else if (!(flags & BTREE_ITER_CACHED))
- flags |= BTREE_ITER_WITH_KEY_CACHE;
-
- __bch2_trans_iter_init(trans, iter, btree_id, pos,
- 0, 0, flags, _RET_IP_);
+ bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
}
void bch2_trans_node_iter_init(struct btree_trans *trans,
unsigned depth,
unsigned flags)
{
- __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
- BTREE_ITER_NOT_EXTENTS|
- __BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_ALL_SNAPSHOTS|
- flags, _RET_IP_);
+ flags |= BTREE_ITER_NOT_EXTENTS;
+ flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+ flags |= BTREE_ITER_ALL_SNAPSHOTS;
+
+ bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
+ __bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
+
+ iter->min_depth = depth;
+
BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
BUG_ON(iter->path->level != depth);
BUG_ON(iter->min_depth != depth);
unsigned new_top = trans->mem_top + size;
size_t old_bytes = trans->mem_bytes;
size_t new_bytes = roundup_pow_of_two(new_top);
+ int ret;
void *new_mem;
void *p;
WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
- new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
- if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
- new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
- new_bytes = BTREE_TRANS_MEM_MAX;
- kfree(trans->mem);
- }
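+	/*
+	 * Try a non-blocking allocation first so we can keep our locks; if it
+	 * fails, unlock (a GFP_KERNEL allocation may block on reclaim), retry
+	 * the allocation, then relock:
+	 */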
+ new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
+ if (unlikely(!new_mem)) {
+ bch2_trans_unlock(trans);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
+ if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+ new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_bytes = BTREE_TRANS_MEM_MAX;
+ kfree(trans->mem);
+ }
- if (!new_mem)
- return ERR_PTR(-ENOMEM);
+ if (!new_mem)
+ return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
+
+ trans->mem = new_mem;
+ trans->mem_bytes = new_bytes;
+
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ return ERR_PTR(ret);
+ }
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
return p;
}
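+/*
+ * Holding the SRCU read lock for too long delays freeing of btree nodes and
+ * key cache entries: periodically drop and re-take it, invalidating any
+ * unlocked cached-path pointers that were relying on it:
+ */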
+static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->cached && !btree_node_locked(path, 0))
+ path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+}
+
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
* @trans: transaction to reset
u32 bch2_trans_begin(struct btree_trans *trans)
{
struct btree_path *path;
+ u64 now;
bch2_trans_reset_updates(trans);
trans->restart_count++;
trans->mem_top = 0;
- if (trans->fs_usage_deltas) {
- trans->fs_usage_deltas->used = 0;
- memset((void *) trans->fs_usage_deltas +
- offsetof(struct replicas_delta_list, memset_start), 0,
- (void *) &trans->fs_usage_deltas->memset_end -
- (void *) &trans->fs_usage_deltas->memset_start);
- }
-
trans_for_each_path(trans, path) {
path->should_be_locked = false;
path->preserve = false;
}
+ now = local_clock();
if (!trans->restarted &&
(need_resched() ||
- local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
- bch2_trans_unlock(trans);
- cond_resched();
- bch2_trans_relock(trans);
+ now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
+ drop_locks_do(trans, (cond_resched(), 0));
+ now = local_clock();
}
+ trans->last_begin_time = now;
+
+ if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
+ bch2_trans_reset_srcu_lock(trans);
- trans->last_restarted_ip = _RET_IP_;
- if (trans->restarted)
+ trans->last_begin_ip = _RET_IP_;
+ if (trans->restarted) {
bch2_btree_path_traverse_all(trans);
+ trans->notrace_relock_fail = false;
+ }
- trans->last_begin_time = local_clock();
return trans->restart_count;
}
-void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
-{
- if (trans_was_restarted(trans, restart_count))
- panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
- trans->restart_count, restart_count,
- (void *) trans->last_restarted_ip);
-}
-
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
#endif
if (!p)
p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
+ /*
+	 * Paths need to be zeroed: bch2_check_for_deadlock() looks at paths
+	 * belonging to other threads
+ */
trans->paths = p; p += paths_bytes;
trans->updates = p; p += updates_bytes;
__acquires(&c->btree_trans_barrier)
{
struct btree_transaction_stats *s;
- struct btree_trans *pos;
- BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+ bch2_assert_btree_nodes_not_locked();
memset(trans, 0, sizeof(*trans));
trans->c = c;
trans->mem_bytes = expected_mem_bytes;
}
}
- if (s)
+
+ if (s) {
trans->nr_max_paths = s->nr_max_paths;
+ trans->wb_updates_size = s->wb_updates_size;
+ }
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ struct btree_trans *pos;
- mutex_lock(&c->btree_trans_lock);
- list_for_each_entry(pos, &c->btree_trans_list, list) {
- if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
- list_add_tail(&trans->list, &pos->list);
- goto list_add_done;
+ seqmutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(pos, &c->btree_trans_list, list) {
+ /*
+ * We'd much prefer to be stricter here and completely
+ * disallow multiple btree_trans in the same thread -
+ * but the data move path calls bch2_write when we
+ * already have a btree_trans initialized.
+ */
+ BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
+ bch2_trans_locked(pos));
+
+ if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
+ list_add_tail(&trans->list, &pos->list);
+ goto list_add_done;
+ }
}
- }
- list_add_tail(&trans->list, &c->btree_trans_list);
+ list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
}
static void check_btree_paths_leaked(struct btree_trans *trans)
bch2_trans_unlock(trans);
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
closure_sync(&trans->ref);
if (s)
check_btree_paths_leaked(trans);
- mutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- mutex_unlock(&c->btree_trans_lock);
-
srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
struct btree_path *path;
struct btree_bkey_cached_common *b;
static char lock_types[] = { 'r', 'i', 'w' };
- unsigned l;
+ unsigned l, idx;
if (!out->nr_tabstops) {
printbuf_tabstop_push(out, 16);
prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
- trans_for_each_path(trans, path) {
+ trans_for_each_path_safe(trans, path, idx) {
if (!path->nodes_locked)
continue;
b = READ_ONCE(trans->locking);
if (b) {
- prt_str(out, " want");
+ prt_printf(out, " blocked for %lluus on",
+ div_u64(local_clock() - trans->locking_wait.start_time,
+ 1000));
prt_newline(out);
prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
bch2_btree_bkey_cached_common_to_text(out, b);
for (s = c->btree_transaction_stats;
s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
- s++)
+ s++) {
kfree(s->max_paths_text);
+ bch2_time_stats_exit(&s->lock_hold_times);
+ }
if (c->btree_trans_barrier_initialized)
cleanup_srcu_struct(&c->btree_trans_barrier);
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
- unsigned i, nr = BTREE_ITER_MAX;
+ struct btree_transaction_stats *s;
+ unsigned nr = BTREE_ITER_MAX;
int ret;
- for (i = 0; i < ARRAY_SIZE(c->btree_transaction_stats); i++)
- mutex_init(&c->btree_transaction_stats[i].lock);
+ for (s = c->btree_transaction_stats;
+ s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
+ s++) {
+ bch2_time_stats_init(&s->lock_hold_times);
+ mutex_init(&s->lock);
+ }
INIT_LIST_HEAD(&c->btree_trans_list);
- mutex_init(&c->btree_trans_lock);
+ seqmutex_init(&c->btree_trans_lock);
ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
sizeof(struct btree_path) * nr +