-021e62a098d9fa7e558ae935180e2fb16bb50a3a
+00104032654027a8f4406a82d28911b243f19d94
CFLAGS+=-std=gnu89 -O2 -g -MMD -Wall \
-Wno-pointer-sign \
-Wno-zero-length-bounds \
+ -Wno-stringop-overflow \
-fno-strict-aliasing \
-fno-delete-null-pointer-checks \
-I. -Iinclude -Iraid \
{
struct btree_iter *iter;
- trans_for_each_iter_all(trans, iter)
+ trans_for_each_iter(trans, iter)
bch2_btree_iter_verify_locks(iter);
}
#else
return bch2_trans_iter_put(trans, iter);
}
-#if 0
-static int bch2_trans_realloc_iters(struct btree_trans *trans,
- unsigned new_size)
+noinline __cold
+static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
{
- void *p, *new_iters, *new_updates, *new_updates2;
- size_t iters_bytes;
- size_t updates_bytes;
-
- new_size = roundup_pow_of_two(new_size);
-
- BUG_ON(new_size > BTREE_ITER_MAX);
-
- if (new_size <= trans->size)
- return 0;
-
- BUG_ON(trans->used_mempool);
-
- bch2_trans_unlock(trans);
- iters_bytes = sizeof(struct btree_iter) * new_size;
- updates_bytes = sizeof(struct btree_insert_entry) * new_size;
-
- p = kmalloc(iters_bytes +
- updates_bytes +
- updates_bytes, GFP_NOFS);
- if (p)
- goto success;
-
- p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
- new_size = BTREE_ITER_MAX;
-
- trans->used_mempool = true;
-success:
- new_iters = p; p += iters_bytes;
- new_updates = p; p += updates_bytes;
- new_updates2 = p; p += updates_bytes;
-
- memcpy(new_iters, trans->iters,
- sizeof(struct btree_iter) * trans->nr_iters);
- memcpy(new_updates, trans->updates,
- sizeof(struct btree_insert_entry) * trans->nr_updates);
- memcpy(new_updates2, trans->updates2,
- sizeof(struct btree_insert_entry) * trans->nr_updates2);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- memset(trans->iters, POISON_FREE,
- sizeof(struct btree_iter) * trans->nr_iters +
- sizeof(struct btree_insert_entry) * trans->nr_iters);
-
- kfree(trans->iters);
-
- trans->iters = new_iters;
- trans->updates = new_updates;
- trans->updates2 = new_updates2;
- trans->size = new_size;
-
- if (trans->iters_live) {
- trace_trans_restart_iters_realloced(trans->ip, trans->size);
- return -EINTR;
- }
+ struct btree_iter *iter;
- return 0;
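+	/* Dump all allocated iterators to aid debugging before panicking: */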
+ trans_for_each_iter(trans, iter)
+ pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
+ bch2_btree_ids[iter->btree_id],
+ iter->pos.inode,
+ iter->pos.offset,
+ (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
+ (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
+ iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
+ (void *) iter->ip_allocated);
+	panic("trans iter overflow\n");
}
-#endif
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
- unsigned idx = __ffs64(~trans->iters_linked);
-
- if (idx < trans->nr_iters)
- goto got_slot;
-
- if (trans->nr_iters == trans->size) {
- struct btree_iter *iter;
-
- BUG_ON(trans->size < BTREE_ITER_MAX);
-
- trans_for_each_iter(trans, iter) {
- pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
- bch2_btree_ids[iter->btree_id],
- iter->pos.inode,
- iter->pos.offset,
- (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
- (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
- iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
- (void *) iter->ip_allocated);
- }
+ unsigned idx;
- panic("trans iter oveflow\n");
-#if 0
- ret = bch2_trans_realloc_iters(trans, trans->size * 2);
- if (ret)
- return ERR_PTR(ret);
-#endif
- }
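+	/*
+	 * All BTREE_ITER_MAX low bits set; the double shift keeps this well
+	 * defined when BTREE_ITER_MAX == 64:
+	 */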
+ if (unlikely(trans->iters_linked ==
+ ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
+ btree_trans_iter_alloc_fail(trans);
- idx = trans->nr_iters++;
- BUG_ON(trans->nr_iters > trans->size);
+ idx = __ffs64(~trans->iters_linked);
- trans->iters[idx].idx = idx;
-got_slot:
- BUG_ON(trans->iters_linked & (1ULL << idx));
- trans->iters_linked |= 1ULL << idx;
- trans->iters[idx].flags = 0;
+ trans->iters_linked |= 1ULL << idx;
+ trans->iters[idx].idx = idx;
+ trans->iters[idx].flags = 0;
return &trans->iters[idx];
}
{
struct btree_iter *iter, *best = NULL;
- BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
-
trans_for_each_iter(trans, iter) {
if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
continue;
if (!best) {
iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
-
bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
} else if ((trans->iters_live & (1ULL << best->idx)) ||
(best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
-
btree_iter_copy(iter, best);
} else {
iter = best;
struct btree_iter *iter =
__btree_trans_get_iter(trans, btree_id, pos, flags);
- if (!IS_ERR(iter))
- __bch2_btree_iter_set_pos(iter, pos,
- btree_node_type_is_extents(btree_id));
+ __bch2_btree_iter_set_pos(iter, pos,
+ btree_node_type_is_extents(btree_id));
return iter;
}
flags|BTREE_ITER_NODES);
unsigned i;
- BUG_ON(IS_ERR(iter));
BUG_ON(bkey_cmp(iter->pos, pos));
iter->locks_want = locks_want;
struct btree_iter *iter;
iter = btree_trans_iter_alloc(trans);
- if (IS_ERR(iter))
- return iter;
-
btree_iter_copy(iter, src);
trans->iters_live |= 1ULL << iter->idx;
trans->iters_touched &= trans->iters_live;
- trans->need_reset = 0;
trans->nr_updates = 0;
trans->nr_updates2 = 0;
trans->mem_top = 0;
static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
{
- unsigned new_size = BTREE_ITER_MAX;
- size_t iters_bytes = sizeof(struct btree_iter) * new_size;
- size_t updates_bytes = sizeof(struct btree_insert_entry) * new_size;
+ size_t iters_bytes = sizeof(struct btree_iter) * BTREE_ITER_MAX;
+ size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
void *p = NULL;
BUG_ON(trans->used_mempool);
trans->iters = p; p += iters_bytes;
trans->updates = p; p += updates_bytes;
trans->updates2 = p; p += updates_bytes;
- trans->size = new_size;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
/* Iterate over iters within a transaction: */
-#define trans_for_each_iter_all(_trans, _iter) \
- for (_iter = (_trans)->iters; \
- _iter < (_trans)->iters + (_trans)->nr_iters; \
- _iter++)
-
static inline struct btree_iter *
__trans_next_iter(struct btree_trans *trans, unsigned idx)
{
- EBUG_ON(idx < trans->nr_iters && trans->iters[idx].idx != idx);
-
- for (; idx < trans->nr_iters; idx++)
- if (trans->iters_linked & (1ULL << idx))
- return &trans->iters[idx];
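+	/* iters_linked is a bitmap of in-use slots; find the next set bit: */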
+ u64 l = trans->iters_linked >> idx;
+ if (!l)
+ return NULL;
- return NULL;
+ idx += __ffs64(l);
+ EBUG_ON(trans->iters[idx].idx != idx);
+ return &trans->iters[idx];
}
#define trans_for_each_iter(_trans, _iter) \
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
- for ((_ret) = PTR_ERR_OR_ZERO((_iter) = \
- bch2_trans_get_iter((_trans), (_btree_id), \
- (_start), (_flags))) ?: \
- PTR_ERR_OR_ZERO(((_k) = \
+ for ((_iter) = bch2_trans_get_iter((_trans), (_btree_id), \
+ (_start), (_flags)), \
+ (_ret) = PTR_ERR_OR_ZERO(((_k) = \
__bch2_btree_iter_peek(_iter, _flags)).k); \
!_ret && (_k).k; \
(_ret) = PTR_ERR_OR_ZERO(((_k) = \
{
struct btree_iter *iter =
__bch2_trans_get_iter(trans, btree_id, pos, flags);
-
- if (!IS_ERR(iter))
- iter->ip_allocated = _THIS_IP_;
+ iter->ip_allocated = _THIS_IP_;
return iter;
}
struct btree_iter *iter =
__bch2_trans_copy_iter(trans, src);
- if (!IS_ERR(iter))
- iter->ip_allocated = _THIS_IP_;
+ iter->ip_allocated = _THIS_IP_;
return iter;
-
}
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
iter = bch2_trans_get_iter(trans, ck->key.btree_id,
ck->key.pos, BTREE_ITER_SLOTS);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
-
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_iter *c_iter = NULL, *b_iter = NULL;
- struct bkey_cached *ck;
+ struct bkey_cached *ck = NULL;
int ret;
b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
BTREE_ITER_SLOTS|
BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(b_iter);
- if (ret)
- goto out;
-
c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_CACHED_NOCREATE|
BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(c_iter);
- if (ret)
- goto out;
retry:
ret = bch2_btree_iter_traverse(c_iter);
if (ret)
if (ret == -EINTR)
goto retry;
- BUG_ON(ret && !bch2_journal_error(j));
-
- if (ret)
+ if (ret) {
+ bch2_fs_fatal_err_on(!bch2_journal_error(j), c,
+ "error flushing key cache: %i", ret);
goto out;
+ }
bch2_journal_pin_drop(j, &ck->journal);
bch2_journal_preres_put(j, &ck->res);
unsigned long ip;
int srcu_idx;
- u64 iters_linked;
- u64 iters_live;
- u64 iters_touched;
-
- u8 nr_iters;
u8 nr_updates;
u8 nr_updates2;
- u8 size;
unsigned used_mempool:1;
unsigned error:1;
unsigned nounlock:1;
- unsigned need_reset:1;
unsigned in_traverse_all:1;
+ u64 iters_linked;
+ u64 iters_live;
+ u64 iters_touched;
+
unsigned mem_top;
unsigned mem_bytes;
void *mem;
unsigned i;
int ret;
+ /*
+ * If we're already in an error state, it might be because a btree node
+ * was never written, and we might be trying to free that same btree
+ * node here, but it won't have been marked as allocated and we'll see
+ * spurious disk usage inconsistencies in the transactional part below
+ * if we don't skip it:
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ goto err;
+
+ BUG_ON(!journal_pin_active(&as->journal));
+
/*
* We did an update to a parent node where the pointers we added pointed
* to child nodes that weren't written yet: now, the child nodes have
BTREE_INSERT_JOURNAL_RESERVED,
btree_update_nodes_written_trans(&trans, as));
bch2_trans_exit(&trans);
- BUG_ON(ret && !bch2_journal_error(&c->journal));
+ bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
+ "error %i in btree_update_nodes_written()", ret);
+err:
if (b) {
/*
* @b is the node we did the final insert into:
child->b = NULL;
child->mode = BTREE_INTERIOR_UPDATING_AS;
- /*
- * When we write a new btree root, we have to drop our journal pin
- * _before_ the new nodes are technically reachable; see
- * btree_update_nodes_written().
- *
- * This goes for journal pins that are recursively blocked on us - so,
- * just transfer the journal pin to the new interior update so
- * btree_update_nodes_written() can drop it.
- */
bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);
- bch2_journal_pin_drop(&c->journal, &child->journal);
}
static void btree_update_updated_root(struct btree_update *as, struct btree *b)
if (ret)
goto err;
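+	/* Pin the journal at the current sequence number for this update: */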
+ bch2_journal_pin_add(&c->journal,
+ atomic64_read(&c->journal.seq),
+ &as->journal, NULL);
+
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->list, &c->btree_interior_update_list);
mutex_unlock(&c->btree_interior_update_lock);
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
- EBUG_ON(trans->nr_updates2 >= trans->nr_iters);
+ EBUG_ON(trans->nr_updates2 >= BTREE_ITER_MAX);
iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
return 0;
iter = bch2_trans_copy_iter(trans, orig_iter);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
iter->flags |= BTREE_ITER_INTENT;
__bch2_btree_iter_set_pos(iter, insert->k.p, false);
int ret = 0;
iter = bch2_trans_get_iter(trans, btree_id, start, BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(iter);
- if (ret)
- return ret;
-
k = bch2_btree_iter_peek_with_updates(iter);
while (k.k && !(ret = bkey_err(k))) {
if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
update_iter = bch2_trans_copy_iter(trans, iter);
- if ((ret = PTR_ERR_OR_ZERO(update_iter)))
- goto err;
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if ((ret = PTR_ERR_OR_ZERO(update)))
if (bkey_cmp(k.k->p, end) > 0) {
update_iter = bch2_trans_copy_iter(trans, iter);
- if ((ret = PTR_ERR_OR_ZERO(update_iter)))
- goto err;
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if ((ret = PTR_ERR_OR_ZERO(update)))
bch2_trans_iter_put(trans, update_iter);
} else {
update_iter = bch2_trans_copy_iter(trans, iter);
- if ((ret = PTR_ERR_OR_ZERO(update_iter)))
- goto err;
update = bch2_trans_kmalloc(trans, sizeof(struct bkey));
if ((ret = PTR_ERR_OR_ZERO(update)))
unsigned u64s;
int ret = 0;
- BUG_ON(trans->need_reset);
-
if (!trans->nr_updates)
goto out_noupdates;
*/
if (trans->iters_live & (1ULL << i->iter->idx)) {
i->iter = bch2_trans_copy_iter(trans, i->iter);
- if (IS_ERR(i->iter)) {
- trans->need_reset = true;
- return PTR_ERR(i->iter);
- }
i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
bch2_trans_iter_put(trans, i->iter);
bch2_btree_iter_set_pos(i->iter, n.k->k.p);
}
- EBUG_ON(trans->nr_updates >= trans->nr_iters);
+ EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
array_insert_item(trans->updates, trans->nr_updates,
i - trans->updates, n);
iter = bch2_trans_get_iter(trans, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
ret = bch2_btree_iter_traverse(iter) ?:
bch2_trans_update(trans, iter, k, 0);
int ret = 0;
iter = bch2_trans_get_iter(trans, id, start, BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(iter);
- if (ret)
- return ret;
retry:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
*iter = bch2_trans_get_iter(trans, btree_id, pos,
flags|BTREE_ITER_INTENT);
- if (IS_ERR(*iter))
- return PTR_ERR(*iter);
-
*k = __bch2_btree_iter_peek(*iter, flags);
ret = bkey_err(*k);
if (ret)
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
-
ret = bch2_btree_iter_traverse(iter);
if (ret) {
bch2_trans_iter_put(trans, iter);
return avail_factor(__bch2_fs_usage_read_short(c).free);
}
+void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
+{
+ percpu_down_read(&c->mark_lock);
+ this_cpu_sub(c->usage[0]->online_reserved,
+ res->sectors);
+ percpu_up_read(&c->mark_lock);
+
+ res->sectors = 0;
+}
+
#define SECTORS_CACHE 1024
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
/* disk reservations: */
+void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
+
static inline void bch2_disk_reservation_put(struct bch_fs *c,
struct disk_reservation *res)
{
- this_cpu_sub(c->usage[0]->online_reserved, res->sectors);
- res->sectors = 0;
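+	/* Fast path: don't take mark_lock when nothing was reserved: */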
+ if (res->sectors)
+ __bch2_disk_reservation_put(c, res);
}
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
- BUG_ON(IS_ERR_OR_NULL(src));
-
dst = bch2_trans_copy_iter(&trans, src);
- BUG_ON(IS_ERR_OR_NULL(dst));
while (1) {
struct disk_reservation disk_res =
return 0;
iter = bch2_trans_copy_iter(trans, h->chain);
- BUG_ON(IS_ERR(iter));
for_each_btree_key_continue(iter, 0, k2, ret) {
if (bkey_cmp(k2.k->p, k.k->p) >= 0)
hash_stop_chain(trans, h);
if (!hole) {
- if (!h->chain) {
+ if (!h->chain)
h->chain = bch2_trans_copy_iter(trans, k_iter);
- BUG_ON(IS_ERR(h->chain));
- }
h->chain_end = k.k->p.offset;
}
bch2_cut_front(cut_at, u);
u_iter = bch2_trans_copy_iter(trans, iter);
- ret = PTR_ERR_OR_ZERO(u_iter);
- if (ret)
- return ret;
/*
* We don't want to go through the
BTREE_ITER_INTENT);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
- if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+ /*
+ * due to retry errors we might see the same extent twice:
+ */
+ if (bkey_cmp(prev.k->k.p, k.k->p) &&
+ bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
char buf1[200];
char buf2[200];
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(0, inum),
BTREE_ITER_CACHED|flags);
- if (IS_ERR(iter))
- return iter;
-
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);
if (ret)
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
POS(0, inode_nr), BTREE_ITER_CACHED);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
-
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);
if (ret)
*delta = 0;
iter = bch2_trans_copy_iter(trans, extent_iter);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
if (!may_allocate &&
iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, rbio->pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if ((ret = PTR_ERR_OR_ZERO(iter)))
- goto out;
-
k = bch2_btree_iter_peek_slot(iter);
if ((ret = bkey_err(k)))
goto out;
iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
POS(0, reflink_offset),
BTREE_ITER_SLOTS);
- ret = PTR_ERR_OR_ZERO(iter);
- if (ret)
- return ret;
-
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
struct journal_buf *buf;
int ret = 0;
- if (seq <= j->err_seq)
- return -EIO;
-
if (seq <= j->seq_ondisk)
return 1;
spin_lock(&j->lock);
/* Recheck under lock: */
- if (seq <= j->err_seq) {
+ if (j->err_seq && seq >= j->err_seq) {
ret = -EIO;
goto out;
}
spin_unlock(&j->lock);
}
-static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+ struct journal_entry_pin_list *pin_list;
+
+ spin_lock(&j->lock);
+ pin_list = journal_seq_pin(j, seq);
__journal_pin_drop(j, pin);
pin->flush = flush_fn;
list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
-}
-
-void __bch2_journal_pin_add(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
-{
- spin_lock(&j->lock);
- bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
- spin_unlock(&j->lock);
-
- /*
- * If the journal is currently full, we might want to call flush_fn
- * immediately:
- */
- journal_wake(j);
-}
-
-void bch2_journal_pin_update(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
-{
- if (journal_pin_active(pin) && pin->seq < seq)
- return;
-
- spin_lock(&j->lock);
-
- if (pin->seq != seq) {
- bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
- } else {
- struct journal_entry_pin_list *pin_list =
- journal_seq_pin(j, seq);
-
- /*
- * If the pin is already pinning the right sequence number, it
- * still might've already been flushed:
- */
- list_move(&pin->list, &pin_list->list);
- }
-
spin_unlock(&j->lock);
/*
journal_wake(j);
}
-void bch2_journal_pin_copy(struct journal *j,
- struct journal_entry_pin *dst,
- struct journal_entry_pin *src,
- journal_pin_flush_fn flush_fn)
-{
- spin_lock(&j->lock);
-
- if (journal_pin_active(src) &&
- (!journal_pin_active(dst) || src->seq < dst->seq))
- bch2_journal_pin_add_locked(j, src->seq, dst, flush_fn);
-
- spin_unlock(&j->lock);
-}
-
/**
* bch2_journal_pin_flush: ensure journal pin callback is no longer running
*/
void bch2_journal_pin_put(struct journal *, u64);
void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
-void __bch2_journal_pin_add(struct journal *, u64, struct journal_entry_pin *,
- journal_pin_flush_fn);
+void bch2_journal_pin_set(struct journal *, u64, struct journal_entry_pin *,
+ journal_pin_flush_fn);
static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
if (unlikely(!journal_pin_active(pin) || pin->seq > seq))
- __bch2_journal_pin_add(j, seq, pin, flush_fn);
+ bch2_journal_pin_set(j, seq, pin, flush_fn);
}
-void bch2_journal_pin_update(struct journal *, u64,
- struct journal_entry_pin *,
- journal_pin_flush_fn);
+static inline void bch2_journal_pin_copy(struct journal *j,
+ struct journal_entry_pin *dst,
+ struct journal_entry_pin *src,
+ journal_pin_flush_fn flush_fn)
+{
+ if (journal_pin_active(src))
+ bch2_journal_pin_add(j, src->seq, dst, flush_fn);
+}
-void bch2_journal_pin_copy(struct journal *,
- struct journal_entry_pin *,
- struct journal_entry_pin *,
- journal_pin_flush_fn);
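+/*
+ * Unlike bch2_journal_pin_add(), this may move an existing pin forwards to a
+ * newer sequence number:
+ */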
+static inline void bch2_journal_pin_update(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ if (unlikely(!journal_pin_active(pin) || pin->seq < seq))
+ bch2_journal_pin_set(j, seq, pin, flush_fn);
+}
void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
copygc_heap *h = &c->copygc_heap;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
+ struct extent_ptr_decoded p = { 0 };
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
{
memset(iter, 0, sizeof(*iter));
- iter->btree = bch2_trans_get_iter(trans, id, pos, 0);
+ iter->btree = bch2_trans_get_iter(trans, id, pos, BTREE_ITER_PREFETCH);
bch2_journal_iter_init(&iter->journal, journal_keys, id, 0, pos);
}
bch2_cut_back(atomic_end, split);
split_iter = bch2_trans_copy_iter(&trans, iter);
- ret = PTR_ERR_OR_ZERO(split_iter);
- if (ret)
- goto err;
/*
* It's important that we don't go through the
iter = bch2_trans_get_node_iter(trans, id, k->k.p,
BTREE_MAX_DEPTH, level,
BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
/*
* iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(iter) ?:
- bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
bch2_trans_iter_put(trans, iter);
return ret;
}
int ret;
iter = bch2_trans_copy_iter(trans, start);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
bch2_btree_iter_next_slot(iter);
}
if (!slot &&
- !(flags & BCH_HASH_SET_MUST_REPLACE)) {
+ !(flags & BCH_HASH_SET_MUST_REPLACE))
slot = bch2_trans_copy_iter(trans, iter);
- if (IS_ERR(slot))
- return PTR_ERR(slot);
- }
if (k.k->type != KEY_TYPE_whiteout)
goto not_found;
if (threads_str &&
!(ret = kstrtouint(threads_str, 10, &threads)) &&
!(ret = bch2_strtoull_h(nr_str, &nr)))
- bch2_btree_perf_test(c, test, nr, threads);
- else
- size = ret;
+ ret = bch2_btree_perf_test(c, test, nr, threads);
kfree(tmp);
+
+ if (ret)
+ size = ret;
}
#endif
return size;
/* unit tests */
-static void test_delete(struct bch_fs *c, u64 nr)
+static int test_delete(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "lookup error in test_delete: %i", ret);
+ goto err;
+ }
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_trans_update(&trans, iter, &k.k_i, 0));
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "update error in test_delete: %i", ret);
+ goto err;
+ }
pr_info("deleting once");
ret = bch2_btree_delete_at(&trans, iter, 0);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "delete error (first) in test_delete: %i", ret);
+ goto err;
+ }
pr_info("deleting twice");
ret = bch2_btree_delete_at(&trans, iter, 0);
- BUG_ON(ret);
-
+ if (ret) {
+ bch_err(c, "delete error (second) in test_delete: %i", ret);
+ goto err;
+ }
+err:
bch2_trans_exit(&trans);
+ return ret;
}
-static void test_delete_written(struct bch_fs *c, u64 nr)
+static int test_delete_written(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "lookup error in test_delete_written: %i", ret);
+ goto err;
+ }
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_trans_update(&trans, iter, &k.k_i, 0));
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "update error in test_delete_written: %i", ret);
+ goto err;
+ }
bch2_journal_flush_all_pins(&c->journal);
ret = bch2_btree_delete_at(&trans, iter, 0);
- BUG_ON(ret);
-
+ if (ret) {
+ bch_err(c, "delete error in test_delete_written: %i", ret);
+ goto err;
+ }
+err:
bch2_trans_exit(&trans);
+ return ret;
}
-static void test_iterate(struct bch_fs *c, u64 nr)
+static int test_iterate(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
u64 i;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
NULL, NULL, 0);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "insert error in test_iterate: %i", ret);
+ goto err;
+ }
}
pr_info("iterating forwards");
BUG_ON(k.k->p.offset != --i);
BUG_ON(i);
-
+err:
bch2_trans_exit(&trans);
+ return ret;
}
-static void test_iterate_extents(struct bch_fs *c, u64 nr)
+static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
u64 i;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, 0);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "insert error in test_iterate_extents: %i", ret);
+ goto err;
+ }
}
pr_info("iterating forwards");
}
BUG_ON(i);
-
+err:
bch2_trans_exit(&trans);
+ return ret;
}
-static void test_iterate_slots(struct bch_fs *c, u64 nr)
+static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
u64 i;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
NULL, NULL, 0);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "insert error in test_iterate_slots: %i", ret);
+ goto err;
+ }
}
pr_info("iterating forwards");
if (i == nr * 2)
break;
}
-
+err:
bch2_trans_exit(&trans);
+ return ret;
}
-static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
+static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
u64 i;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, 0);
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
+ goto err;
+ }
}
pr_info("iterating forwards");
if (i == nr)
break;
}
-
+err:
bch2_trans_exit(&trans);
+	return ret;
}
/*
* XXX: we really want to make sure we've got a btree with depth > 0 for these
* tests
*/
-static void test_peek_end(struct bch_fs *c, u64 nr)
+static int test_peek_end(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
BUG_ON(k.k);
bch2_trans_exit(&trans);
+ return 0;
}
-static void test_peek_end_extents(struct bch_fs *c, u64 nr)
+static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
BUG_ON(k.k);
bch2_trans_exit(&trans);
+ return 0;
}
/* extent unit tests */
u64 test_version;
-static void insert_test_extent(struct bch_fs *c,
- u64 start, u64 end)
+static int insert_test_extent(struct bch_fs *c,
+ u64 start, u64 end)
{
struct bkey_i_cookie k;
int ret;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
NULL, NULL, 0);
- BUG_ON(ret);
+ if (ret)
+ bch_err(c, "insert error in insert_test_extent: %i", ret);
+ return ret;
}
-static void __test_extent_overwrite(struct bch_fs *c,
+static int __test_extent_overwrite(struct bch_fs *c,
u64 e1_start, u64 e1_end,
u64 e2_start, u64 e2_end)
{
- insert_test_extent(c, e1_start, e1_end);
- insert_test_extent(c, e2_start, e2_end);
+ int ret;
+
+ ret = insert_test_extent(c, e1_start, e1_end) ?:
+ insert_test_extent(c, e2_start, e2_end);
delete_test_keys(c);
+ return ret;
}
-static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
- __test_extent_overwrite(c, 0, 64, 0, 32);
- __test_extent_overwrite(c, 8, 64, 0, 32);
+ return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
+ __test_extent_overwrite(c, 8, 64, 0, 32);
}
-static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
- __test_extent_overwrite(c, 0, 64, 32, 64);
- __test_extent_overwrite(c, 0, 64, 32, 72);
+ return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
+ __test_extent_overwrite(c, 0, 64, 32, 72);
}
-static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
- __test_extent_overwrite(c, 0, 64, 32, 40);
+ return __test_extent_overwrite(c, 0, 64, 32, 40);
}
-static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
- __test_extent_overwrite(c, 32, 64, 0, 64);
- __test_extent_overwrite(c, 32, 64, 0, 128);
- __test_extent_overwrite(c, 32, 64, 32, 64);
- __test_extent_overwrite(c, 32, 64, 32, 128);
+ return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
+ __test_extent_overwrite(c, 32, 64, 0, 128) ?:
+ __test_extent_overwrite(c, 32, 64, 32, 64) ?:
+ __test_extent_overwrite(c, 32, 64, 32, 128);
}
/* perf tests */
return v;
}
-static void rand_insert(struct bch_fs *c, u64 nr)
+static int rand_insert(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct bkey_i_cookie k;
- int ret;
+ int ret = 0;
u64 i;
bch2_trans_init(&trans, c, 0, 0);
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
-
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "error in rand_insert: %i", ret);
+ break;
+ }
}
bch2_trans_exit(&trans);
+ return ret;
}
-static void rand_lookup(struct bch_fs *c, u64 nr)
+static int rand_lookup(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
+ int ret = 0;
u64 i;
bch2_trans_init(&trans, c, 0, 0);
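+	/* Allocate the iterator once, and just reposition it for each lookup: */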
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
for (i = 0; i < nr; i++) {
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
- POS(0, test_rand()), 0);
+ bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
k = bch2_btree_iter_peek(iter);
- bch2_trans_iter_free(&trans, iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch_err(c, "error in rand_lookup: %i", ret);
+ break;
+ }
}
+ bch2_trans_iter_free(&trans, iter);
bch2_trans_exit(&trans);
+ return ret;
}
-static void rand_mixed(struct bch_fs *c, u64 nr)
+static int rand_mixed(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
- int ret;
+ int ret = 0;
u64 i;
bch2_trans_init(&trans, c, 0, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
for (i = 0; i < nr; i++) {
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
- POS(0, test_rand()), 0);
+ bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
k = bch2_btree_iter_peek(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch_err(c, "lookup error in rand_mixed: %i", ret);
+ break;
+ }
if (!(i & 3) && k.k) {
struct bkey_i_cookie k;
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_trans_update(&trans, iter, &k.k_i, 0));
-
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "update error in rand_mixed: %i", ret);
+ break;
+ }
}
-
- bch2_trans_iter_free(&trans, iter);
}
+ bch2_trans_iter_free(&trans, iter);
bch2_trans_exit(&trans);
+ return ret;
}
static int __do_delete(struct btree_trans *trans, struct bpos pos)
return ret;
}
-static void rand_delete(struct bch_fs *c, u64 nr)
+static int rand_delete(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
- int ret;
+ int ret = 0;
u64 i;
bch2_trans_init(&trans, c, 0, 0);
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
__do_delete(&trans, pos));
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "error in rand_delete: %i", ret);
+ break;
+ }
}
bch2_trans_exit(&trans);
+ return ret;
}
-static void seq_insert(struct bch_fs *c, u64 nr)
+static int seq_insert(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
struct bkey_i_cookie insert;
- int ret;
+ int ret = 0;
u64 i = 0;
bkey_cookie_init(&insert.k_i);
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_trans_update(&trans, iter, &insert.k_i, 0));
-
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "error in seq_insert: %i", ret);
+ break;
+ }
if (++i == nr)
break;
}
bch2_trans_exit(&trans);
+ return ret;
}
-static void seq_lookup(struct bch_fs *c, u64 nr)
+static int seq_lookup(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
+ return ret;
}
-static void seq_overwrite(struct bch_fs *c, u64 nr)
+static int seq_overwrite(struct bch_fs *c, u64 nr)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
- int ret;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_trans_update(&trans, iter, &u.k_i, 0));
-
- BUG_ON(ret);
+ if (ret) {
+ bch_err(c, "error in seq_overwrite: %i", ret);
+ break;
+ }
}
bch2_trans_exit(&trans);
+ return ret;
}
-static void seq_delete(struct bch_fs *c, u64 nr)
+static int seq_delete(struct bch_fs *c, u64 nr)
{
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
POS(0, 0), POS(0, U64_MAX),
NULL);
- BUG_ON(ret);
+ if (ret)
+ bch_err(c, "error in seq_delete: %i", ret);
+ return ret;
}
-typedef void (*perf_test_fn)(struct bch_fs *, u64);
+typedef int (*perf_test_fn)(struct bch_fs *, u64);
struct test_job {
struct bch_fs *c;
u64 start;
u64 finish;
+ int ret;
};
static int btree_perf_test_thread(void *data)
{
struct test_job *j = data;
+ int ret;
if (atomic_dec_and_test(&j->ready)) {
wake_up(&j->ready_wait);
wait_event(j->ready_wait, !atomic_read(&j->ready));
}
- j->fn(j->c, j->nr / j->nr_threads);
+ ret = j->fn(j->c, j->nr / j->nr_threads);
+ if (ret)
+ j->ret = ret;
if (atomic_dec_and_test(&j->done)) {
j->finish = sched_clock();
return 0;
}
-void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
- u64 nr, unsigned nr_threads)
+int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
+ u64 nr, unsigned nr_threads)
{
struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
char name_buf[20], nr_buf[20], per_sec_buf[20];
if (!j.fn) {
pr_err("unknown test %s", testname);
- return;
+ return -EINVAL;
}
//pr_info("running test %s:", testname);
time / NSEC_PER_SEC,
time * nr_threads / nr,
per_sec_buf);
+ return j.ret;
}
#endif /* CONFIG_BCACHEFS_TESTS */
#ifdef CONFIG_BCACHEFS_TESTS
-void bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
+int bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
#else