#include <trace/events/bcachefs.h>
static void btree_trans_verify_sorted(struct btree_trans *);
-static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
+inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
struct btree_path *);
static inline int __btree_path_cmp(const struct btree_path *l,
enum btree_id r_btree_id,
bool r_cached,
struct bpos r_pos,
unsigned r_level)
{
+ /*
+ * Must match lock ordering as defined by __bch2_btree_node_lock:
+ */
return cmp_int(l->btree_id, r_btree_id) ?:
cmp_int((int) l->cached, (int) r_cached) ?:
bpos_cmp(l->pos, r_pos) ?:
-cmp_int(l->level, r_level);
}
else
this_cpu_sub(*b->c.lock.readers, readers);
- btree_node_lock_type(trans->c, b, SIX_LOCK_write);
+ six_lock_write(&b->c.lock, NULL, NULL);
if (!b->c.lock.readers)
atomic64_add(__SIX_VAL(read_lock, readers),
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
- mark_btree_node_locked(path, level, want);
+ mark_btree_node_locked(trans, path, level, want);
return true;
}
fail:
return false;
success:
- mark_btree_node_intent_locked(path, level);
+ mark_btree_node_intent_locked(trans, path, level);
return true;
}
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
- struct btree_path *linked, *deadlock_path = NULL;
- u64 start_time = local_clock();
- unsigned reason = 9;
- bool ret;
+ struct btree_path *linked;
+ unsigned reason;
/* Check if it's safe to block: */
trans_for_each_path(trans, linked) {
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- deadlock_path = linked;
reason = 1;
+ goto deadlock;
}
if (linked->btree_id != path->btree_id) {
- if (linked->btree_id > path->btree_id) {
- deadlock_path = linked;
- reason = 3;
- }
- continue;
+ if (linked->btree_id < path->btree_id)
+ continue;
+
+ reason = 3;
+ goto deadlock;
}
/*
- * Within the same btree, cached paths come before non
- * cached paths:
+ * Within the same btree, non-cached paths come before cached
+ * paths:
*/
if (linked->cached != path->cached) {
- if (path->cached) {
- deadlock_path = linked;
- reason = 4;
- }
- continue;
+ if (!linked->cached)
+ continue;
+
+ reason = 4;
+ goto deadlock;
}
/*
* Interior nodes must be locked before their descendants: if
* another path has possible descendants locked of the node
* we're about to lock, it must have the ancestors locked too:
*/
if (level > __fls(linked->nodes_locked)) {
- deadlock_path = linked;
reason = 5;
+ goto deadlock;
}
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
linked->cached)) <= 0) {
- deadlock_path = linked;
- reason = 7;
BUG_ON(trans->in_traverse_all);
+ reason = 7;
+ goto deadlock;
}
}
- if (unlikely(deadlock_path)) {
- trace_trans_restart_would_deadlock(trans->fn, ip,
- trans->in_traverse_all, reason,
- deadlock_path->btree_id,
- deadlock_path->cached,
- &deadlock_path->pos,
- path->btree_id,
- path->cached,
- &pos);
- btree_trans_restart(trans);
- return false;
- }
-
- if (six_trylock_type(&b->c.lock, type))
- return true;
-
- trans->locking_path_idx = path->idx;
- trans->locking_pos = pos;
- trans->locking_btree_id = path->btree_id;
- trans->locking_level = level;
- trans->locking = b;
-
- ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
-
- trans->locking = NULL;
-
- if (ret)
- bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
- start_time);
- return ret;
+ return btree_node_lock_type(trans, path, b, pos, level,
+ type, should_sleep_fn, p);
+deadlock:
+ trace_trans_restart_would_deadlock(trans->fn, ip,
+ trans->in_traverse_all, reason,
+ linked->btree_id,
+ linked->cached,
+ &linked->pos,
+ path->btree_id,
+ path->cached,
+ &pos);
+ btree_trans_restart(trans);
+ return false;
}
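+/*
+ * A minimal sketch (illustrative only, not compiled) of the single lock
+ * ordering that the deadlock checks above enforce, written in the same
+ * shape as __btree_path_cmp() at the top of this file:
+ */
+#if 0
+static inline int lock_order_cmp(const struct btree_path *l,
+ const struct btree_path *r)
+{
+ return cmp_int(l->btree_id, r->btree_id) ?: /* reason 3: ascending btree ID */
+ cmp_int((int) l->cached, (int) r->cached) ?: /* reason 4: non-cached first */
+ bpos_cmp(l->pos, r->pos); /* reason 7: key order */
+}
+#endif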
/* Btree iterator locking: */
* before interior nodes - now that's handled by
* bch2_btree_path_traverse_all().
*/
- trans_for_each_path(trans, linked)
- if (linked != path &&
- linked->cached == path->cached &&
- linked->btree_id == path->btree_id &&
- linked->locks_want < new_locks_want) {
- linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true);
- }
+ if (!path->cached && !trans->in_traverse_all)
+ trans_for_each_path(trans, linked)
+ if (linked != path &&
+ linked->cached == path->cached &&
+ linked->btree_id == path->btree_id &&
+ linked->locks_want < new_locks_want) {
+ linked->locks_want = new_locks_want;
+ btree_path_get_locks(trans, linked, true);
+ }
return false;
}
trans_for_each_path(trans, path)
__bch2_btree_path_unlock(path);
- BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+ /*
+ * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
+ * btree nodes; it implements its own walking:
+ */
+ BUG_ON(!trans->is_initial_gc &&
+ lock_class_is_held(&bch2_btree_node_lock_key));
}
/* Btree iterator: */
struct btree_node_iter tmp;
bool locked;
struct bkey_packed *p, *k;
- char buf1[100], buf2[100], buf3[100];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
const char *msg;
if (!bch2_debug_check_iterators)
btree_node_unlock(path, level);
return;
err:
- strcpy(buf2, "(none)");
- strcpy(buf3, "(none)");
-
- bch2_bpos_to_text(&PBUF(buf1), path->pos);
+ bch2_bpos_to_text(&buf1, path->pos);
if (p) {
struct bkey uk = bkey_unpack_key(l->b, p);
- bch2_bkey_to_text(&PBUF(buf2), &uk);
+ bch2_bkey_to_text(&buf2, &uk);
+ } else {
+ pr_buf(&buf2, "(none)");
}
if (k) {
struct bkey uk = bkey_unpack_key(l->b, k);
- bch2_bkey_to_text(&PBUF(buf3), &uk);
+ bch2_bkey_to_text(&buf3, &uk);
+ } else {
+ pr_buf(&buf3, "(none)");
}
panic("path should be %s key at level %u:\n"
"path pos %s\n"
"prev key %s\n"
"cur key %s\n",
- msg, level, buf1, buf2, buf3);
+ msg, level, buf1.buf, buf2.buf, buf3.buf);
}
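+/*
+ * The char-array-to-printbuf conversions throughout this patch all follow
+ * one idiom; a minimal sketch (illustrative only), assuming just the
+ * helpers already used here - PRINTBUF, pr_buf(), the *_to_text()
+ * functions, and printbuf_exit():
+ */
+#if 0
+ struct printbuf buf = PRINTBUF; /* starts empty, allocates lazily */
+
+ bch2_bpos_to_text(&buf, pos); /* *_to_text() helpers append */
+ pr_buf(&buf, " at level %u", level);
+ printk(KERN_ERR "%s\n", buf.buf); /* buf.buf: NUL-terminated contents */
+ printbuf_exit(&buf); /* frees the heap allocation */
+#endif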
static void bch2_btree_path_verify(struct btree_trans *trans,
if (!bkey_cmp(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
prev.k->p.snapshot) > 0) {
- char buf1[100], buf2[200];
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
- bch2_bkey_to_text(&PBUF(buf1), k.k);
- bch2_bkey_to_text(&PBUF(buf2), prev.k);
+ bch2_bkey_to_text(&buf1, k.k);
+ bch2_bkey_to_text(&buf2, prev.k);
panic("iter snap %u\n"
"k %s\n"
"prev %s\n",
iter->snapshot,
- buf1, buf2);
+ buf1.buf, buf2.buf);
}
out:
bch2_trans_iter_exit(trans, &copy);
{
struct btree_path *path;
unsigned idx;
- char buf[100];
+ struct printbuf buf = PRINTBUF;
trans_for_each_path_inorder(trans, path, idx) {
int cmp = cmp_int(path->btree_id, id) ?:
}
bch2_dump_trans_paths_updates(trans);
+ bch2_bpos_to_text(&buf, pos);
+
panic("not locked: %s %s%s\n",
- bch2_btree_ids[id],
- (bch2_bpos_to_text(&PBUF(buf), pos), buf),
+ bch2_btree_ids[id], buf.buf,
key_cache ? " cached" : "");
}
struct bkey *u,
struct bkey_packed *k)
{
- struct bkey_s_c ret;
-
if (unlikely(!k)) {
/*
* signal to bch2_btree_iter_peek_slot() that we're currently at
return bkey_s_c_null;
}
- ret = bkey_disassemble(l->b, k, u);
-
- /*
- * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
- * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
- * being overwritten but doesn't change k->size. But this is ok, because
- * those keys are never written out, we just have to avoid a spurious
- * assertion here:
- */
- if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
- bch2_bkey_debugcheck(c, l->b, ret);
-
- return ret;
+ return bkey_disassemble(l->b, k, u);
}
static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
if (!k ||
bkey_deleted(k) ||
bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
- char buf1[100];
- char buf2[100];
- char buf3[100];
- char buf4[100];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
+ struct printbuf buf4 = PRINTBUF;
struct bkey uk = bkey_unpack_key(b, k);
bch2_dump_btree_node(c, l->b);
- bch2_bpos_to_text(&PBUF(buf1), path->pos);
- bch2_bkey_to_text(&PBUF(buf2), &uk);
- bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
- bch2_bpos_to_text(&PBUF(buf3), b->data->max_key);
+ bch2_bpos_to_text(&buf1, path->pos);
+ bch2_bkey_to_text(&buf2, &uk);
+ bch2_bpos_to_text(&buf3, b->data->min_key);
+ bch2_bpos_to_text(&buf4, b->data->max_key);
panic("parent iter doesn't point to new node:\n"
"iter pos %s %s\n"
"iter key %s\n"
"new node %s-%s\n",
- bch2_btree_ids[path->btree_id], buf1,
- buf2, buf3, buf4);
+ bch2_btree_ids[path->btree_id],
+ buf1.buf, buf2.buf, buf3.buf, buf4.buf);
}
if (!parent_locked)
t != BTREE_NODE_UNLOCKED) {
btree_node_unlock(path, b->c.level);
six_lock_increment(&b->c.lock, t);
- mark_btree_node_locked(path, b->c.level, t);
+ mark_btree_node_locked(trans, path, b->c.level, t);
}
btree_path_level_init(trans, path, b);
for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
path->l[i].b = NULL;
- mark_btree_node_locked(path, path->level, lock_type);
+ mark_btree_node_locked(trans, path, path->level, lock_type);
btree_path_level_init(trans, path, b);
return 0;
}
if (unlikely(ret))
goto err;
- mark_btree_node_locked(path, level, lock_type);
+ mark_btree_node_locked(trans, path, level, lock_type);
btree_path_level_init(trans, path, b);
if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
unsigned, unsigned long);
-static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
- unsigned long trace_ip)
+static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct btree_path *path;
- int i;
+ unsigned long trace_ip = _RET_IP_;
+ int i, ret = 0;
if (trans->in_traverse_all)
return -EINTR;
trans->in_traverse_all = true;
retry_all:
trans->restarted = false;
+ trans->traverse_all_idx = U8_MAX;
trans_for_each_path(trans, path)
path->should_be_locked = false;
bch2_trans_unlock(trans);
cond_resched();
- if (unlikely(ret == -ENOMEM)) {
+ if (unlikely(trans->memory_allocation_failure)) {
struct closure cl;
closure_init_stack(&cl);
} while (ret);
}
- if (unlikely(ret == -EIO))
- goto out;
-
- BUG_ON(ret && ret != -EINTR);
-
/* Now, redo traversals in correct order: */
- i = 0;
- while (i < trans->nr_sorted) {
- path = trans->paths + trans->sorted[i];
-
- EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
-
- ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
- if (ret)
- goto retry_all;
+ trans->traverse_all_idx = 0;
+ while (trans->traverse_all_idx < trans->nr_sorted) {
+ path = trans->paths + trans->sorted[trans->traverse_all_idx];
- EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
-
- if (path->nodes_locked ||
- !btree_path_node(path, path->level))
- i++;
+ /*
+ * Traversing a path can cause another path to be added at about
+ * the same position:
+ */
+ if (path->uptodate) {
+ ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
+ if (ret == -EINTR || ret == -ENOMEM)
+ goto retry_all;
+ if (ret)
+ goto err;
+ BUG_ON(path->uptodate);
+ } else {
+ trans->traverse_all_idx++;
+ }
}
/*
*/
trans_for_each_path(trans, path)
BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
-out:
+err:
bch2_btree_cache_cannibalize_unlock(c);
trans->in_traverse_all = false;
return ret;
}
-static int bch2_btree_path_traverse_all(struct btree_trans *trans)
-{
- return __btree_path_traverse_all(trans, 0, _RET_IP_);
-}
-
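+/*
+ * Error contract of bch2_btree_path_traverse_all(), as read from the code
+ * above: -EINTR and -ENOMEM restart the whole traversal via retry_all;
+ * any other error unwinds through err: and is returned, with
+ * trans->in_traverse_all cleared on the way out.
+ */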
static inline bool btree_path_good_node(struct btree_trans *trans,
struct btree_path *path,
unsigned l, int check_pos)
return ret;
}
-static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
-
int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
struct btree_path *path, unsigned flags)
{
six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
- btree_path_check_sort(trans, dst, 0);
+ bch2_btree_path_check_sort(trans, dst, 0);
}
static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
btree_trans_verify_sorted(trans);
}
+ path->should_be_locked = false;
return path;
}
path = bch2_btree_path_make_mut(trans, path, intent, ip);
- path->pos = new_pos;
- path->should_be_locked = false;
+ path->pos = new_pos;
- btree_path_check_sort(trans, path, cmp);
+ bch2_btree_path_check_sort(trans, path, cmp);
if (unlikely(path->cached)) {
btree_node_unlock(path, 0);
l = btree_path_up_until_good_node(trans, path, cmp);
if (btree_path_node(path, l)) {
+ BUG_ON(!btree_node_locked(path, l));
/*
* We might have to skip over many keys, or just a few: try
* advancing the node iterator, and if we have to skip over too
__bch2_path_free(trans, path);
}
+void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+
+ pr_buf(buf, "transaction updates for %s journal seq %llu",
+ trans->fn, trans->journal_res.seq);
+ pr_newline(buf);
+ pr_indent_push(buf, 2);
+
+ trans_for_each_update(trans, i) {
+ struct bkey_s_c old = { &i->old_k, i->old_v };
+
+ pr_buf(buf, "update: btree %s %pS",
+ bch2_btree_ids[i->btree_id],
+ (void *) i->ip_allocated);
+ pr_newline(buf);
+
+ pr_buf(buf, " old ");
+ bch2_bkey_val_to_text(buf, trans->c, old);
+ pr_newline(buf);
+
+ pr_buf(buf, " new ");
+ bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
+ pr_newline(buf);
+ }
+
+ pr_indent_pop(buf, 2);
+}
+
+noinline __cold
+void bch2_dump_trans_updates(struct btree_trans *trans)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_trans_updates_to_text(&buf, trans);
+ bch_err(trans->c, "%s", buf.buf);
+ printbuf_exit(&buf);
+}
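+/*
+ * Usage sketch (hypothetical call site): these are debugging aids, e.g.
+ * for dumping pending updates just before an assertion fires:
+ *
+ * if (some_broken_invariant)
+ * bch2_dump_trans_updates(trans);
+ */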
+
noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
struct btree_path *path;
- struct btree_insert_entry *i;
+ struct printbuf buf = PRINTBUF;
unsigned idx;
- char buf1[300], buf2[300];
- btree_trans_verify_sorted(trans);
+ trans_for_each_path_inorder(trans, path, idx) {
+ printbuf_reset(&buf);
- trans_for_each_path_inorder(trans, path, idx)
- printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
+ bch2_bpos_to_text(&buf, path->pos);
+
+ printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
path->idx, path->ref, path->intent_ref,
path->should_be_locked ? " S" : "",
path->preserve ? " P" : "",
bch2_btree_ids[path->btree_id],
- (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
+ path->level,
+ buf.buf,
path->nodes_locked,
#ifdef CONFIG_BCACHEFS_DEBUG
(void *) path->ip_allocated
#else
NULL
#endif
);
+ }
- trans_for_each_update(trans, i) {
- struct bkey u;
- struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
+ printbuf_exit(&buf);
- printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
- bch2_btree_ids[i->btree_id],
- (void *) i->ip_allocated,
- (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
- (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
- }
+ bch2_dump_trans_updates(trans);
}
static struct btree_path *btree_path_alloc(struct btree_trans *trans,
int i;
BUG_ON(trans->restarted);
+ btree_trans_verify_sorted(trans);
+ bch2_trans_verify_locks(trans);
trans_for_each_path_inorder(trans, path, i) {
if (__btree_path_cmp(path,
EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
+ *u = ck->k->k;
k = bkey_i_to_s_c(ck->k);
}
btree_node_unlock(path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++;
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
return NULL;
}
__bch2_btree_path_unlock(path);
path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
path->btree_id, &path->pos);
btree_trans_restart(trans);
return NULL;
}
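+/*
+ * Both btree_path_set_dirty() calls added above appear to serve the same
+ * purpose: once the path's locks or level have changed, its uptodate
+ * state can't be trusted, so force the next bch2_btree_path_traverse()
+ * to re-walk it.
+ */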
-static noinline
-struct bkey_i *__btree_trans_peek_journal(struct btree_trans *trans,
- struct btree_path *path)
-{
- struct journal_keys *keys = &trans->c->journal_keys;
- size_t idx = bch2_journal_key_search(keys, path->btree_id,
- path->level, path->pos);
-
- while (idx < keys->nr && keys->d[idx].overwritten)
- idx++;
-
- return (idx < keys->nr &&
- keys->d[idx].btree_id == path->btree_id &&
- keys->d[idx].level == path->level)
- ? keys->d[idx].k
- : NULL;
-}
-
static noinline
struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bkey_i *next_journal =
- __btree_trans_peek_journal(trans, iter->path);
+ bch2_journal_keys_peek(trans->c, iter->btree_id, 0,
+ iter->path->pos);
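+ /*
+ * bch2_journal_keys_peek() presumably folds in what the removed
+ * __btree_trans_peek_journal() did by hand: search the journal keys
+ * for this btree/level/pos and skip entries marked overwritten.
+ */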
if (next_journal &&
bpos_cmp(next_journal->k.p,
ret = bkey_err(k2);
if (ret) {
k = k2;
+ bch2_btree_iter_set_pos(iter, iter->pos);
goto out;
}
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
+ * bch2_btree_iter_peek_upto: returns first key greater than or equal to
+ * iterator's current position, but not past end
*/
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
{
struct btree_trans *trans = iter->trans;
struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
+ struct bpos iter_pos;
int ret;
if (iter->update_path) {
if (!k.k || bkey_err(k))
goto out;
+ /*
+ * iter->pos should be monotonically increasing, and always be
+ * equal to the key we just returned - except extents can
+ * straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter_pos = k.k->p;
+ else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter_pos = bkey_start_pos(k.k);
+ else
+ iter_pos = iter->pos;
+
+ if (bkey_cmp(iter_pos, end) > 0) {
+ bch2_btree_iter_set_pos(iter, end);
+ k = bkey_s_c_null;
+ goto out;
+ }
+
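+ /*
+ * Note the bound is checked against iter_pos rather than k.k->p: for
+ * extents, a key straddling end can still be returned, as long as the
+ * position we leave the iterator at is <= end.
+ */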
if (iter->update_path &&
bkey_cmp(iter->update_path->pos, k.k->p)) {
bch2_path_put(trans, iter->update_path,
iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
iter->flags & BTREE_ITER_INTENT,
- btree_iter_ip_allocated(iter));
-
- BUG_ON(!(iter->update_path->nodes_locked & 1));
- iter->update_path->should_be_locked = true;
+ _THIS_IP_);
}
/*
break;
}
- /*
- * iter->pos should be mononotically increasing, and always be equal to
- * the key we just returned - except extents can straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
- iter->pos = k.k->p;
- else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- iter->pos = bkey_start_pos(k.k);
+ iter->pos = iter_pos;
iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
iter->flags & BTREE_ITER_INTENT,
BUG_ON(!iter->path->nodes_locked);
out:
if (iter->update_path) {
- BUG_ON(!(iter->update_path->nodes_locked & 1));
- iter->update_path->should_be_locked = true;
+ if (iter->update_path->uptodate &&
+ !bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)) {
+ k = bkey_s_c_err(-EINTR);
+ } else {
+ BUG_ON(!(iter->update_path->nodes_locked & 1));
+ iter->update_path->should_be_locked = true;
+ }
}
iter->path->should_be_locked = true;
k = btree_path_level_prev(trans->c, iter->path,
&iter->path->l[0], &iter->k);
- btree_path_check_sort(trans, iter->path, 0);
+ bch2_btree_path_check_sort(trans, iter->path, 0);
if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
}
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
- (next_update = __btree_trans_peek_journal(trans, iter->path)) &&
+ (next_update = bch2_journal_keys_peek(trans->c, iter->btree_id,
+ 0, iter->pos)) &&
!bpos_cmp(next_update->k.p, iter->pos)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter iter2;
+ struct bpos end = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ end.offset = U64_MAX;
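+ /*
+ * Extents are indexed by their end position, so an extent
+ * overlapping iter->pos can sort anywhere up to offset U64_MAX
+ * within this inode - hence widening end before the peek below.
+ */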
bch2_trans_copy_iter(&iter2, iter);
- k = bch2_btree_iter_peek(&iter2);
+ k = bch2_btree_iter_peek_upto(&iter2, end);
if (k.k && !bkey_err(k)) {
iter->k = iter2.k;
unsigned i;
trans_for_each_path_inorder(trans, path, i) {
- BUG_ON(prev && btree_path_cmp(prev, path) > 0);
+ if (prev && btree_path_cmp(prev, path) > 0) {
+ bch2_dump_trans_paths_updates(trans);
+ panic("trans paths out of order!\n");
+ }
prev = path;
}
#endif
btree_path_verify_sorted_ref(trans, r);
}
-static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
- int cmp)
+inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
+ int cmp)
{
struct btree_path *n;
path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
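+ /*
+ * array_insert_item() below shifts every later entry of trans->sorted
+ * up by one; if the insertion lands at or before the traverse_all
+ * cursor, bump the cursor so bch2_btree_path_traverse_all() keeps
+ * pointing at the path it was about to traverse.
+ */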
+ if (trans->in_traverse_all &&
+ trans->traverse_all_idx != U8_MAX &&
+ trans->traverse_all_idx >= path->sorted_idx)
+ trans->traverse_all_idx++;
+
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
trans->mem_top = 0;
trans->hooks = NULL;
- trans->extra_journal_entries = NULL;
- trans->extra_journal_entry_u64s = 0;
+ trans->extra_journal_entries.nr = 0;
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
trans_for_each_path(trans, path) {
path->should_be_locked = false;
+ /*
+ * If the transaction wasn't restarted, we're presuming to be
+ * doing something new: don't keep iterators except the ones that
+ * are in use - except for the subvolumes btree:
+ */
+ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
+ path->preserve = false;
+
/*
* XXX: we probably shouldn't be doing this if the transaction
* was restarted, but currently we still overflow transaction
*/
if (!path->ref && !path->preserve)
__bch2_path_free(trans, path);
- else if (!path->ref)
+ else
path->preserve = false;
}
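+/*
+ * The lifecycle this implements, as read from the loop above: paths
+ * still referenced survive with preserve cleared; unreferenced,
+ * unpreserved paths are freed now; an unreferenced but preserved path
+ * gets one more transaction to be re-used before it's freed next time.
+ */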
const char *fn)
__acquires(&c->btree_trans_barrier)
{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
memset(trans, 0, sizeof(*trans));
trans->c = c;
trans->fn = fn;
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+ kfree(trans->extra_journal_entries.data);
+
if (trans->fs_usage_deltas) {
if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
REPLICAS_DELTA_LIST_MAX)
struct btree_trans *trans;
struct btree_path *path;
struct btree *b;
+ static char lock_types[] = { 'r', 'i', 'w' };
unsigned l;
mutex_lock(&c->btree_trans_lock);
b = READ_ONCE(trans->locking);
if (b) {
path = &trans->paths[trans->locking_path_idx];
- pr_buf(out, " locking path %u %c l=%u %s:",
+ pr_buf(out, " locking path %u %c l=%u %c %s:",
trans->locking_path_idx,
path->cached ? 'c' : 'b',
trans->locking_level,
+ lock_types[trans->locking_lock_type],
bch2_btree_ids[trans->locking_btree_id]);
bch2_bpos_to_text(out, trans->locking_pos);