git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to 0010403265 bcachefs: Fix spurious alloc errors on forced...
author Kent Overstreet <kent.overstreet@gmail.com>
Thu, 3 Dec 2020 18:07:16 +0000 (13:07 -0500)
committer Kent Overstreet <kent.overstreet@gmail.com>
Thu, 3 Dec 2020 18:13:53 +0000 (13:13 -0500)
23 files changed:
.bcachefs_revision
Makefile
libbcachefs/btree_iter.c
libbcachefs/btree_iter.h
libbcachefs/btree_key_cache.c
libbcachefs/btree_types.h
libbcachefs/btree_update_interior.c
libbcachefs/btree_update_leaf.c
libbcachefs/buckets.c
libbcachefs/buckets.h
libbcachefs/fs-io.c
libbcachefs/fsck.c
libbcachefs/inode.c
libbcachefs/io.c
libbcachefs/journal.c
libbcachefs/journal_reclaim.c
libbcachefs/journal_reclaim.h
libbcachefs/movinggc.c
libbcachefs/recovery.c
libbcachefs/str_hash.h
libbcachefs/sysfs.c
libbcachefs/tests.c
libbcachefs/tests.h

index 6ba1c9af6484e66345dc5b54826080a4ca8be948..8f42b13966f1e2b34b81f1e4a3cde3c723ba437f 100644 (file)
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-021e62a098d9fa7e558ae935180e2fb16bb50a3a
+00104032654027a8f4406a82d28911b243f19d94
index cc00ac6eb931110fcebc6992dadf1d3553199815..6999b93ac373f7d4b631829a3cc3ebfd4ed989d0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ PYTEST=pytest-3
 CFLAGS+=-std=gnu89 -O2 -g -MMD -Wall                           \
        -Wno-pointer-sign                                       \
        -Wno-zero-length-bounds                                 \
+       -Wno-stringop-overflow                                  \
        -fno-strict-aliasing                                    \
        -fno-delete-null-pointer-checks                         \
        -I. -Iinclude -Iraid                                    \
index 96cc5394295e1d65b20c0fb449cedc8ad9d6b91d..7a95fcc0b244f750eefacbf4df032687725d998d 100644 (file)
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -346,7 +346,7 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
 {
        struct btree_iter *iter;
 
-       trans_for_each_iter_all(trans, iter)
+       trans_for_each_iter(trans, iter)
                bch2_btree_iter_verify_locks(iter);
 }
 #else
@@ -2002,110 +2002,37 @@ int bch2_trans_iter_free(struct btree_trans *trans,
        return bch2_trans_iter_put(trans, iter);
 }
 
-#if 0
-static int bch2_trans_realloc_iters(struct btree_trans *trans,
-                                   unsigned new_size)
+noinline __cold
+static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
 {
-       void *p, *new_iters, *new_updates, *new_updates2;
-       size_t iters_bytes;
-       size_t updates_bytes;
-
-       new_size = roundup_pow_of_two(new_size);
-
-       BUG_ON(new_size > BTREE_ITER_MAX);
-
-       if (new_size <= trans->size)
-               return 0;
-
-       BUG_ON(trans->used_mempool);
-
-       bch2_trans_unlock(trans);
 
-       iters_bytes     = sizeof(struct btree_iter) * new_size;
-       updates_bytes   = sizeof(struct btree_insert_entry) * new_size;
-
-       p = kmalloc(iters_bytes +
-                   updates_bytes +
-                   updates_bytes, GFP_NOFS);
-       if (p)
-               goto success;
-
-       p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
-       new_size = BTREE_ITER_MAX;
-
-       trans->used_mempool = true;
-success:
-       new_iters       = p; p += iters_bytes;
-       new_updates     = p; p += updates_bytes;
-       new_updates2    = p; p += updates_bytes;
-
-       memcpy(new_iters, trans->iters,
-              sizeof(struct btree_iter) * trans->nr_iters);
-       memcpy(new_updates, trans->updates,
-              sizeof(struct btree_insert_entry) * trans->nr_updates);
-       memcpy(new_updates2, trans->updates2,
-              sizeof(struct btree_insert_entry) * trans->nr_updates2);
-
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
-               memset(trans->iters, POISON_FREE,
-                      sizeof(struct btree_iter) * trans->nr_iters +
-                      sizeof(struct btree_insert_entry) * trans->nr_iters);
-
-       kfree(trans->iters);
-
-       trans->iters            = new_iters;
-       trans->updates          = new_updates;
-       trans->updates2         = new_updates2;
-       trans->size             = new_size;
-
-       if (trans->iters_live) {
-               trace_trans_restart_iters_realloced(trans->ip, trans->size);
-               return -EINTR;
-       }
+       struct btree_iter *iter;
 
-       return 0;
+       trans_for_each_iter(trans, iter)
+               pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
+                      bch2_btree_ids[iter->btree_id],
+                      iter->pos.inode,
+                      iter->pos.offset,
+                      (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
+                      (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
+                      iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
+                      (void *) iter->ip_allocated);
+       panic("trans iter overflow\n");
 }
-#endif
 
 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
 {
-       unsigned idx = __ffs64(~trans->iters_linked);
-
-       if (idx < trans->nr_iters)
-               goto got_slot;
-
-       if (trans->nr_iters == trans->size) {
-               struct btree_iter *iter;
-
-               BUG_ON(trans->size < BTREE_ITER_MAX);
-
-               trans_for_each_iter(trans, iter) {
-                       pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
-                              bch2_btree_ids[iter->btree_id],
-                              iter->pos.inode,
-                              iter->pos.offset,
-                              (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
-                              (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
-                              iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
-                              (void *) iter->ip_allocated);
-               }
+       unsigned idx;
 
-               panic("trans iter oveflow\n");
-#if 0
-               ret = bch2_trans_realloc_iters(trans, trans->size * 2);
-               if (ret)
-                       return ERR_PTR(ret);
-#endif
-       }
+       if (unlikely(trans->iters_linked ==
+                    ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
+               btree_trans_iter_alloc_fail(trans);
 
-       idx = trans->nr_iters++;
-       BUG_ON(trans->nr_iters > trans->size);
+       idx = __ffs64(~trans->iters_linked);
 
-       trans->iters[idx].idx = idx;
-got_slot:
-       BUG_ON(trans->iters_linked & (1ULL << idx));
-       trans->iters_linked |= 1ULL << idx;
-       trans->iters[idx].flags = 0;
+       trans->iters_linked     |= 1ULL << idx;
+       trans->iters[idx].idx    = idx;
+       trans->iters[idx].flags  = 0;
        return &trans->iters[idx];
 }
 
@@ -2141,8 +2068,6 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
 {
        struct btree_iter *iter, *best = NULL;
 
-       BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
-
        trans_for_each_iter(trans, iter) {
                if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
                        continue;
@@ -2160,16 +2085,10 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
 
        if (!best) {
                iter = btree_trans_iter_alloc(trans);
-               if (IS_ERR(iter))
-                       return iter;
-
                bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
        } else if ((trans->iters_live & (1ULL << best->idx)) ||
                   (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
                iter = btree_trans_iter_alloc(trans);
-               if (IS_ERR(iter))
-                       return iter;
-
                btree_iter_copy(iter, best);
        } else {
                iter = best;
@@ -2203,9 +2122,8 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
        struct btree_iter *iter =
                __btree_trans_get_iter(trans, btree_id, pos, flags);
 
-       if (!IS_ERR(iter))
-               __bch2_btree_iter_set_pos(iter, pos,
-                       btree_node_type_is_extents(btree_id));
+       __bch2_btree_iter_set_pos(iter, pos,
+               btree_node_type_is_extents(btree_id));
        return iter;
 }
 
@@ -2221,7 +2139,6 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
                                       flags|BTREE_ITER_NODES);
        unsigned i;
 
-       BUG_ON(IS_ERR(iter));
        BUG_ON(bkey_cmp(iter->pos, pos));
 
        iter->locks_want = locks_want;
@@ -2241,9 +2158,6 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
        struct btree_iter *iter;
 
        iter = btree_trans_iter_alloc(trans);
-       if (IS_ERR(iter))
-               return iter;
-
        btree_iter_copy(iter, src);
 
        trans->iters_live |= 1ULL << iter->idx;
@@ -2318,7 +2232,6 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
 
        trans->iters_touched &= trans->iters_live;
 
-       trans->need_reset               = 0;
        trans->nr_updates               = 0;
        trans->nr_updates2              = 0;
        trans->mem_top                  = 0;
@@ -2339,9 +2252,8 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
 
 static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
 {
-       unsigned new_size = BTREE_ITER_MAX;
-       size_t iters_bytes      = sizeof(struct btree_iter) * new_size;
-       size_t updates_bytes    = sizeof(struct btree_insert_entry) * new_size;
+       size_t iters_bytes      = sizeof(struct btree_iter) * BTREE_ITER_MAX;
+       size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
        void *p = NULL;
 
        BUG_ON(trans->used_mempool);
@@ -2355,7 +2267,6 @@ static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
        trans->iters            = p; p += iters_bytes;
        trans->updates          = p; p += updates_bytes;
        trans->updates2         = p; p += updates_bytes;
-       trans->size             = new_size;
 }
 
 void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
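
The allocation scheme above replaces the old grow-on-demand iterator array with a fixed table of BTREE_ITER_MAX slots tracked entirely by the iters_linked bitmask: a free slot is the lowest clear bit, and the table is full once every bit is set (the expression ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)) builds the all-ones mask without shifting by 64). A minimal userspace sketch of the same technique, with hypothetical names and GCC builtins standing in for the kernel's __ffs64():

	#include <assert.h>
	#include <stdint.h>

	struct slot_table {
		uint64_t	linked;		/* bit i set => slot i is in use */
		unsigned	nr_slots;	/* 1..64 */
	};

	static int slot_alloc(struct slot_table *t)
	{
		/* all-ones mask covering nr_slots bits, valid even for nr_slots == 64 */
		uint64_t full = ~((~0ULL << 1) << (t->nr_slots - 1));

		if ((t->linked & full) == full)
			return -1;		/* table full; the kernel code panics here */

		/* lowest clear bit of linked == lowest set bit of ~linked */
		unsigned idx = (unsigned) __builtin_ctzll(~t->linked);

		t->linked |= 1ULL << idx;
		return (int) idx;
	}

	static void slot_free(struct slot_table *t, unsigned idx)
	{
		assert(t->linked & (1ULL << idx));
		t->linked &= ~(1ULL << idx);
	}
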
index f7a73619c85b2ff00680e10c95a952bcfebb7c7c..ee8c4346aadbfd638e93b42bf40d8cdd07ec97c5 100644 (file)
--- a/libbcachefs/btree_iter.h
+++ b/libbcachefs/btree_iter.h
@@ -48,21 +48,16 @@ static inline int btree_iter_err(const struct btree_iter *iter)
 
 /* Iterate over iters within a transaction: */
 
-#define trans_for_each_iter_all(_trans, _iter)                         \
-       for (_iter = (_trans)->iters;                                   \
-            _iter < (_trans)->iters + (_trans)->nr_iters;              \
-            _iter++)
-
 static inline struct btree_iter *
 __trans_next_iter(struct btree_trans *trans, unsigned idx)
 {
-       EBUG_ON(idx < trans->nr_iters && trans->iters[idx].idx != idx);
-
-       for (; idx < trans->nr_iters; idx++)
-               if (trans->iters_linked & (1ULL << idx))
-                       return &trans->iters[idx];
+       u64 l = trans->iters_linked >> idx;
+       if (!l)
+               return NULL;
 
-       return NULL;
+       idx += __ffs64(l);
+       EBUG_ON(trans->iters[idx].idx != idx);
+       return &trans->iters[idx];
 }
 
 #define trans_for_each_iter(_trans, _iter)                             \
@@ -240,10 +235,9 @@ static inline int bkey_err(struct bkey_s_c k)
 
 #define for_each_btree_key(_trans, _iter, _btree_id,                   \
                           _start, _flags, _k, _ret)                    \
-       for ((_ret) = PTR_ERR_OR_ZERO((_iter) =                         \
-                       bch2_trans_get_iter((_trans), (_btree_id),      \
-                                           (_start), (_flags))) ?:     \
-                     PTR_ERR_OR_ZERO(((_k) =                           \
+       for ((_iter) = bch2_trans_get_iter((_trans), (_btree_id),       \
+                                          (_start), (_flags)),         \
+            (_ret) = PTR_ERR_OR_ZERO(((_k) =                           \
                        __bch2_btree_iter_peek(_iter, _flags)).k);      \
             !_ret && (_k).k;                                           \
             (_ret) = PTR_ERR_OR_ZERO(((_k) =                           \
@@ -270,9 +264,7 @@ bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
 {
        struct btree_iter *iter =
                __bch2_trans_get_iter(trans, btree_id, pos, flags);
-
-       if (!IS_ERR(iter))
-               iter->ip_allocated = _THIS_IP_;
+       iter->ip_allocated = _THIS_IP_;
        return iter;
 }
 
@@ -284,10 +276,8 @@ bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
        struct btree_iter *iter =
                __bch2_trans_copy_iter(trans, src);
 
-       if (!IS_ERR(iter))
-               iter->ip_allocated = _THIS_IP_;
+       iter->ip_allocated = _THIS_IP_;
        return iter;
-
 }
 
 struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
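
With nr_iters gone, __trans_next_iter() above walks the iters_linked bitmask directly instead of scanning an array: shift the mask down past the slots already visited and jump to the next set bit with __ffs64(). A standalone sketch of that lookup (the helper name, the 64-bit bound and the idx guard are assumptions added here to keep the shift well-defined):

	#include <stdint.h>

	/* Index of the next set bit at or after idx, or 64 if there is none. */
	static unsigned next_set_bit(uint64_t mask, unsigned idx)
	{
		if (idx >= 64)
			return 64;

		mask >>= idx;
		return mask ? idx + (unsigned) __builtin_ctzll(mask) : 64;
	}

	/*
	 * Usage, mirroring trans_for_each_iter():
	 *
	 *	for (i = next_set_bit(linked, 0); i < 64; i = next_set_bit(linked, i + 1))
	 *		visit(&iters[i]);
	 */
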
index a21dc485c677556a5072fc8ef9599bb5b8dbfe18..244c5dbcd3e9098db5b6a7c0374efe8b19c56d59 100644 (file)
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -169,9 +169,6 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 
        iter = bch2_trans_get_iter(trans, ck->key.btree_id,
                                   ck->key.pos, BTREE_ITER_SLOTS);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
-
        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
        if (ret) {
@@ -319,24 +316,17 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_iter *c_iter = NULL, *b_iter = NULL;
-       struct bkey_cached *ck;
+       struct bkey_cached *ck = NULL;
        int ret;
 
        b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
                                     BTREE_ITER_SLOTS|
                                     BTREE_ITER_INTENT);
-       ret = PTR_ERR_OR_ZERO(b_iter);
-       if (ret)
-               goto out;
-
        c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
                                     BTREE_ITER_CACHED|
                                     BTREE_ITER_CACHED_NOFILL|
                                     BTREE_ITER_CACHED_NOCREATE|
                                     BTREE_ITER_INTENT);
-       ret = PTR_ERR_OR_ZERO(c_iter);
-       if (ret)
-               goto out;
 retry:
        ret = bch2_btree_iter_traverse(c_iter);
        if (ret)
@@ -367,10 +357,11 @@ err:
        if (ret == -EINTR)
                goto retry;
 
-       BUG_ON(ret && !bch2_journal_error(j));
-
-       if (ret)
+       if (ret) {
+               bch2_fs_fatal_err_on(!bch2_journal_error(j), c,
+                       "error flushing key cache: %i", ret);
                goto out;
+       }
 
        bch2_journal_pin_drop(j, &ck->journal);
        bch2_journal_preres_put(j, &ck->res);
index cf59f12247413aca9c3a4d788d77d2110bf420c0..15af60e9282051261585c53ec2eb82323ac2c260 100644 (file)
--- a/libbcachefs/btree_types.h
+++ b/libbcachefs/btree_types.h
@@ -357,20 +357,17 @@ struct btree_trans {
        unsigned long           ip;
        int                     srcu_idx;
 
-       u64                     iters_linked;
-       u64                     iters_live;
-       u64                     iters_touched;
-
-       u8                      nr_iters;
        u8                      nr_updates;
        u8                      nr_updates2;
-       u8                      size;
        unsigned                used_mempool:1;
        unsigned                error:1;
        unsigned                nounlock:1;
-       unsigned                need_reset:1;
        unsigned                in_traverse_all:1;
 
+       u64                     iters_linked;
+       u64                     iters_live;
+       u64                     iters_touched;
+
        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;
index 5143896e1b29e1f07e71d106789ad69587ba887b..edc11c22308c1dfa4de71cce501604d4da08fcea 100644 (file)
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -544,6 +544,19 @@ static void btree_update_nodes_written(struct btree_update *as)
        unsigned i;
        int ret;
 
+       /*
+        * If we're already in an error state, it might be because a btree node
+        * was never written, and we might be trying to free that same btree
+        * node here, but it won't have been marked as allocated and we'll see
+        * spurious disk usage inconsistencies in the transactional part below
+        * if we don't skip it:
+        */
+       ret = bch2_journal_error(&c->journal);
+       if (ret)
+               goto err;
+
+       BUG_ON(!journal_pin_active(&as->journal));
+
        /*
         * We did an update to a parent node where the pointers we added pointed
         * to child nodes that weren't written yet: now, the child nodes have
@@ -567,8 +580,10 @@ static void btree_update_nodes_written(struct btree_update *as)
                              BTREE_INSERT_JOURNAL_RESERVED,
                              btree_update_nodes_written_trans(&trans, as));
        bch2_trans_exit(&trans);
-       BUG_ON(ret && !bch2_journal_error(&c->journal));
 
+       bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
+                            "error %i in btree_update_nodes_written()", ret);
+err:
        if (b) {
                /*
                 * @b is the node we did the final insert into:
@@ -699,17 +714,7 @@ static void btree_update_reparent(struct btree_update *as,
        child->b = NULL;
        child->mode = BTREE_INTERIOR_UPDATING_AS;
 
-       /*
-        * When we write a new btree root, we have to drop our journal pin
-        * _before_ the new nodes are technically reachable; see
-        * btree_update_nodes_written().
-        *
-        * This goes for journal pins that are recursively blocked on us - so,
-        * just transfer the journal pin to the new interior update so
-        * btree_update_nodes_written() can drop it.
-        */
        bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);
-       bch2_journal_pin_drop(&c->journal, &child->journal);
 }
 
 static void btree_update_updated_root(struct btree_update *as, struct btree *b)
@@ -956,6 +961,10 @@ bch2_btree_update_start(struct btree_trans *trans, enum btree_id id,
        if (ret)
                goto err;
 
+       bch2_journal_pin_add(&c->journal,
+                            atomic64_read(&c->journal.seq),
+                            &as->journal, NULL);
+
        mutex_lock(&c->btree_interior_update_lock);
        list_add_tail(&as->list, &c->btree_interior_update_list);
        mutex_unlock(&c->btree_interior_update_lock);
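
Two things change in the interior-update path above: bch2_btree_update_start() now pins the current journal sequence for the lifetime of the update, and btree_update_nodes_written() bails out before its transactional accounting when the journal is already in an error state, since a node that was never written may not have been marked allocated and would otherwise show up as a spurious disk-usage inconsistency (the "spurious alloc errors" of the commit subject). A rough sketch of that ordering, using simplified stand-in types rather than the actual bcachefs API:

	#include <stdint.h>

	/* hypothetical, simplified stand-ins, not the bcachefs structures: */
	struct journal { int error; uint64_t seq; };
	struct update  { uint64_t pin_seq; };		/* 0 == no pin held */

	static void update_start(struct journal *j, struct update *as)
	{
		/* pin the sequence the update was created in until its nodes are written */
		as->pin_seq = j->seq;
	}

	static void update_nodes_written(struct journal *j, struct update *as)
	{
		if (j->error)
			goto out;	/* nodes may never have hit disk: skip the accounting,
					 * which would otherwise report bogus inconsistencies */

		/* ... transactional part: insert the new pointers, update disk usage ... */
	out:
		as->pin_seq = 0;	/* drop the pin so journal reclaim can move on */
	}
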
index bbc6d51242751ebcf0615c9f522544a19157b4a0..496855233c4c82a962c5ee6b91a8e0419d51e9ae 100644 (file)
--- a/libbcachefs/btree_update_leaf.c
+++ b/libbcachefs/btree_update_leaf.c
@@ -707,7 +707,7 @@ static void bch2_trans_update2(struct btree_trans *trans,
 
        BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
 
-       EBUG_ON(trans->nr_updates2 >= trans->nr_iters);
+       EBUG_ON(trans->nr_updates2 >= BTREE_ITER_MAX);
 
        iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
 
@@ -740,8 +740,6 @@ static int extent_update_to_keys(struct btree_trans *trans,
                return 0;
 
        iter = bch2_trans_copy_iter(trans, orig_iter);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
 
        iter->flags |= BTREE_ITER_INTENT;
        __bch2_btree_iter_set_pos(iter, insert->k.p, false);
@@ -760,10 +758,6 @@ static int extent_handle_overwrites(struct btree_trans *trans,
        int ret = 0;
 
        iter = bch2_trans_get_iter(trans, btree_id, start, BTREE_ITER_INTENT);
-       ret = PTR_ERR_OR_ZERO(iter);
-       if (ret)
-               return ret;
-
        k = bch2_btree_iter_peek_with_updates(iter);
 
        while (k.k && !(ret = bkey_err(k))) {
@@ -772,8 +766,6 @@ static int extent_handle_overwrites(struct btree_trans *trans,
 
                if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
                        update_iter = bch2_trans_copy_iter(trans, iter);
-                       if ((ret = PTR_ERR_OR_ZERO(update_iter)))
-                               goto err;
 
                        update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
                        if ((ret = PTR_ERR_OR_ZERO(update)))
@@ -789,8 +781,6 @@ static int extent_handle_overwrites(struct btree_trans *trans,
 
                if (bkey_cmp(k.k->p, end) > 0) {
                        update_iter = bch2_trans_copy_iter(trans, iter);
-                       if ((ret = PTR_ERR_OR_ZERO(update_iter)))
-                               goto err;
 
                        update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
                        if ((ret = PTR_ERR_OR_ZERO(update)))
@@ -804,8 +794,6 @@ static int extent_handle_overwrites(struct btree_trans *trans,
                        bch2_trans_iter_put(trans, update_iter);
                } else {
                        update_iter = bch2_trans_copy_iter(trans, iter);
-                       if ((ret = PTR_ERR_OR_ZERO(update_iter)))
-                               goto err;
 
                        update = bch2_trans_kmalloc(trans, sizeof(struct bkey));
                        if ((ret = PTR_ERR_OR_ZERO(update)))
@@ -837,8 +825,6 @@ int __bch2_trans_commit(struct btree_trans *trans)
        unsigned u64s;
        int ret = 0;
 
-       BUG_ON(trans->need_reset);
-
        if (!trans->nr_updates)
                goto out_noupdates;
 
@@ -1031,10 +1017,6 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
                 */
                if (trans->iters_live & (1ULL << i->iter->idx)) {
                        i->iter = bch2_trans_copy_iter(trans, i->iter);
-                       if (IS_ERR(i->iter)) {
-                               trans->need_reset = true;
-                               return PTR_ERR(i->iter);
-                       }
 
                        i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
                        bch2_trans_iter_put(trans, i->iter);
@@ -1044,7 +1026,7 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
                bch2_btree_iter_set_pos(i->iter, n.k->k.p);
        }
 
-       EBUG_ON(trans->nr_updates >= trans->nr_iters);
+       EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
 
        array_insert_item(trans->updates, trans->nr_updates,
                          i - trans->updates, n);
@@ -1059,8 +1041,6 @@ int __bch2_btree_insert(struct btree_trans *trans,
 
        iter = bch2_trans_get_iter(trans, id, bkey_start_pos(&k->k),
                                   BTREE_ITER_INTENT);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
 
        ret   = bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(trans, iter, k, 0);
@@ -1107,9 +1087,6 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
        int ret = 0;
 
        iter = bch2_trans_get_iter(trans, id, start, BTREE_ITER_INTENT);
-       ret = PTR_ERR_OR_ZERO(iter);
-       if (ret)
-               return ret;
 retry:
        while ((k = bch2_btree_iter_peek(iter)).k &&
               !(ret = bkey_err(k)) &&
index f7bdb14372f81bdbeb549fe6942bb6d0df7bea19..1b1200c551346ddb73996de4d3653029917a1776 100644 (file)
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -1576,9 +1576,6 @@ static int trans_get_key(struct btree_trans *trans,
 
        *iter = bch2_trans_get_iter(trans, btree_id, pos,
                                    flags|BTREE_ITER_INTENT);
-       if (IS_ERR(*iter))
-               return PTR_ERR(*iter);
-
        *k = __bch2_btree_iter_peek(*iter, flags);
        ret = bkey_err(*k);
        if (ret)
@@ -1606,9 +1603,6 @@ static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree
                                           BTREE_ITER_CACHED|
                                           BTREE_ITER_CACHED_NOFILL|
                                           BTREE_ITER_INTENT);
-               if (IS_ERR(iter))
-                       return PTR_ERR(iter);
-
                ret = bch2_btree_iter_traverse(iter);
                if (ret) {
                        bch2_trans_iter_put(trans, iter);
@@ -2044,6 +2038,16 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
        return avail_factor(__bch2_fs_usage_read_short(c).free);
 }
 
+void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
+{
+       percpu_down_read(&c->mark_lock);
+       this_cpu_sub(c->usage[0]->online_reserved,
+                    res->sectors);
+       percpu_up_read(&c->mark_lock);
+
+       res->sectors = 0;
+}
+
 #define SECTORS_CACHE  1024
 
 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
index 856dc5a8c8a3f19a5f197456c8b1bcf0a9a6b52a..a3873becbb70111b173b6c42e369e2bc5012027f 100644 (file)
--- a/libbcachefs/buckets.h
+++ b/libbcachefs/buckets.h
@@ -272,11 +272,13 @@ void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
 
 /* disk reservations: */
 
+void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
+
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
 {
-       this_cpu_sub(c->usage[0]->online_reserved, res->sectors);
-       res->sectors = 0;
+       if (res->sectors)
+               __bch2_disk_reservation_put(c, res);
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL            (1 << 0)
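
The reservation change above splits the put path into an out-of-line __bch2_disk_reservation_put(), which takes mark_lock and adjusts the online-reserved counter, and an inline wrapper that skips it entirely when nothing is reserved, so the common empty-reservation case stays lock-free. The same fast-path-wrapper pattern in isolation, with placeholder names rather than the bcachefs ones:

	#include <stdint.h>

	struct fs { int dummy; /* ... usage counters, mark_lock ... */ };
	struct reservation { uint64_t sectors; };

	/* slow path: in the real code this takes c->mark_lock and subtracts
	 * res->sectors from the online-reserved counter */
	static void __reservation_put(struct fs *c, struct reservation *res)
	{
		(void) c;
		res->sectors = 0;
	}

	static inline void reservation_put(struct fs *c, struct reservation *res)
	{
		if (res->sectors)	/* common case: nothing reserved, skip the lock */
			__reservation_put(c, res);
	}
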
index 7d193ce4780ef78aab3fdebcf1ecfaa6967d67dd..8e6f7300e9b61421931230b73250a4aab8dd40d1 100644 (file)
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -2474,10 +2474,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
        src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
                        POS(inode->v.i_ino, src_start >> 9),
                        BTREE_ITER_INTENT);
-       BUG_ON(IS_ERR_OR_NULL(src));
-
        dst = bch2_trans_copy_iter(&trans, src);
-       BUG_ON(IS_ERR_OR_NULL(dst));
 
        while (1) {
                struct disk_reservation disk_res =
index 09ce6c29b88cf91e85a57d6c9abce4b7bf56fa5d..39f872de0c186d5ec388867d2bc848c8640af080 100644 (file)
--- a/libbcachefs/fsck.c
+++ b/libbcachefs/fsck.c
@@ -230,7 +230,6 @@ static int hash_check_duplicates(struct btree_trans *trans,
                return 0;
 
        iter = bch2_trans_copy_iter(trans, h->chain);
-       BUG_ON(IS_ERR(iter));
 
        for_each_btree_key_continue(iter, 0, k2, ret) {
                if (bkey_cmp(k2.k->p, k.k->p) >= 0)
@@ -265,10 +264,8 @@ static void hash_set_chain_start(struct btree_trans *trans,
                hash_stop_chain(trans, h);
 
        if (!hole) {
-               if (!h->chain) {
+               if (!h->chain)
                        h->chain = bch2_trans_copy_iter(trans, k_iter);
-                       BUG_ON(IS_ERR(h->chain));
-               }
 
                h->chain_end = k.k->p.offset;
        }
@@ -440,9 +437,6 @@ static int bch2_fix_overlapping_extent(struct btree_trans *trans,
        bch2_cut_front(cut_at, u);
 
        u_iter = bch2_trans_copy_iter(trans, iter);
-       ret = PTR_ERR_OR_ZERO(u_iter);
-       if (ret)
-               return ret;
 
        /*
         * We don't want to go through the
@@ -485,7 +479,11 @@ static int check_extents(struct bch_fs *c)
                                   BTREE_ITER_INTENT);
 retry:
        for_each_btree_key_continue(iter, 0, k, ret) {
-               if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+               /*
+                * due to retry errors we might see the same extent twice:
+                */
+               if (bkey_cmp(prev.k->k.p, k.k->p) &&
+                   bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
                        char buf1[200];
                        char buf2[200];
 
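
check_extents() above flags an inconsistency when the previous extent's end position runs past the start of the current one; after a transaction restart (-EINTR) the loop can revisit the extent it just processed, so the extra bkey_cmp() against k.k->p skips an extent whose end position matches the previous one instead of reporting it as overlapping. Restated as a small predicate (the struct and helper names here are illustrative, not the bcachefs definitions):

	#include <stdbool.h>
	#include <stdint.h>

	struct bpos { uint64_t inode, offset; };

	static int bpos_cmp(struct bpos a, struct bpos b)
	{
		if (a.inode != b.inode)
			return a.inode < b.inode ? -1 : 1;
		if (a.offset != b.offset)
			return a.offset < b.offset ? -1 : 1;
		return 0;
	}

	/* true when prev genuinely overlaps cur, ignoring the "same extent seen
	 * again after a retry" case where the end positions are identical */
	static bool prev_overlaps_cur(struct bpos prev_end,
				      struct bpos cur_start, struct bpos cur_end)
	{
		return bpos_cmp(prev_end, cur_end) != 0 &&
		       bpos_cmp(prev_end, cur_start) > 0;
	}
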
index 82099e5a48d8f0f98009cd327a67b1f802f4f289..bf1c7319669c93342cc36ec1e8d7219addbbc4f9 100644 (file)
--- a/libbcachefs/inode.c
+++ b/libbcachefs/inode.c
@@ -302,9 +302,6 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
 
        iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(0, inum),
                                   BTREE_ITER_CACHED|flags);
-       if (IS_ERR(iter))
-               return iter;
-
        k = bch2_btree_iter_peek_cached(iter);
        ret = bkey_err(k);
        if (ret)
@@ -640,9 +637,6 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr,
 
        iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
                        POS(0, inode_nr), BTREE_ITER_CACHED);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
-
        k = bch2_btree_iter_peek_cached(iter);
        ret = bkey_err(k);
        if (ret)
index 21087d1193dcf9143be1fa7f947b16d1073d9bc0..46856c669f52ac1edf5f626be80fb2cc6a46ec63 100644 (file)
--- a/libbcachefs/io.c
+++ b/libbcachefs/io.c
@@ -198,8 +198,6 @@ static int sum_sector_overwrites(struct btree_trans *trans,
        *delta = 0;
 
        iter = bch2_trans_copy_iter(trans, extent_iter);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
 
        for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
                if (!may_allocate &&
@@ -1790,9 +1788,6 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
 
        iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, rbio->pos,
                                   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-       if ((ret = PTR_ERR_OR_ZERO(iter)))
-               goto out;
-
        k = bch2_btree_iter_peek_slot(iter);
        if ((ret = bkey_err(k)))
                goto out;
@@ -2000,10 +1995,6 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
        iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
                                   POS(0, reflink_offset),
                                   BTREE_ITER_SLOTS);
-       ret = PTR_ERR_OR_ZERO(iter);
-       if (ret)
-               return ret;
-
        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
        if (ret)
index 5874a9ff2204fc0244fa108825ad1a19d3e8d788..dd8db8c0c980544438355253ff5ed874ab31ff35 100644 (file)
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -553,16 +553,13 @@ int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
        struct journal_buf *buf;
        int ret = 0;
 
-       if (seq <= j->err_seq)
-               return -EIO;
-
        if (seq <= j->seq_ondisk)
                return 1;
 
        spin_lock(&j->lock);
 
        /* Recheck under lock: */
-       if (seq <= j->err_seq) {
+       if (j->err_seq && seq >= j->err_seq) {
                ret = -EIO;
                goto out;
        }
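
The flush-path fix above changes the error check from seq <= j->err_seq to j->err_seq && seq >= j->err_seq: err_seq records the first journal sequence number whose write failed (0 while no error has occurred), so earlier sequences may well be on disk and only flushes of err_seq or anything after it have to fail with -EIO. As a small predicate, assuming that reading of err_seq and with a name chosen here for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	/* err_seq: first sequence number that failed to write, 0 if none yet */
	static bool journal_flush_must_fail(uint64_t seq, uint64_t err_seq)
	{
		return err_seq != 0 && seq >= err_seq;
	}
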
index 66f5dcce8889771bb113fa8c93863406a1528576..beaa39f7bf5ee5d75340c736e952d373010ffbc7 100644 (file)
--- a/libbcachefs/journal_reclaim.c
+++ b/libbcachefs/journal_reclaim.c
@@ -320,11 +320,14 @@ void bch2_journal_pin_drop(struct journal *j,
        spin_unlock(&j->lock);
 }
 
-static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
-                           struct journal_entry_pin *pin,
-                           journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+                         struct journal_entry_pin *pin,
+                         journal_pin_flush_fn flush_fn)
 {
-       struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+       struct journal_entry_pin_list *pin_list;
+
+       spin_lock(&j->lock);
+       pin_list = journal_seq_pin(j, seq);
 
        __journal_pin_drop(j, pin);
 
@@ -335,45 +338,6 @@ static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
        pin->flush      = flush_fn;
 
        list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
-}
-
-void __bch2_journal_pin_add(struct journal *j, u64 seq,
-                           struct journal_entry_pin *pin,
-                           journal_pin_flush_fn flush_fn)
-{
-       spin_lock(&j->lock);
-       bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
-       spin_unlock(&j->lock);
-
-       /*
-        * If the journal is currently full,  we might want to call flush_fn
-        * immediately:
-        */
-       journal_wake(j);
-}
-
-void bch2_journal_pin_update(struct journal *j, u64 seq,
-                            struct journal_entry_pin *pin,
-                            journal_pin_flush_fn flush_fn)
-{
-       if (journal_pin_active(pin) && pin->seq < seq)
-               return;
-
-       spin_lock(&j->lock);
-
-       if (pin->seq != seq) {
-               bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
-       } else {
-               struct journal_entry_pin_list *pin_list =
-                       journal_seq_pin(j, seq);
-
-               /*
-                * If the pin is already pinning the right sequence number, it
-                * still might've already been flushed:
-                */
-               list_move(&pin->list, &pin_list->list);
-       }
-
        spin_unlock(&j->lock);
 
        /*
@@ -383,20 +347,6 @@ void bch2_journal_pin_update(struct journal *j, u64 seq,
        journal_wake(j);
 }
 
-void bch2_journal_pin_copy(struct journal *j,
-                          struct journal_entry_pin *dst,
-                          struct journal_entry_pin *src,
-                          journal_pin_flush_fn flush_fn)
-{
-       spin_lock(&j->lock);
-
-       if (journal_pin_active(src) &&
-           (!journal_pin_active(dst) || src->seq < dst->seq))
-               bch2_journal_pin_add_locked(j, src->seq, dst, flush_fn);
-
-       spin_unlock(&j->lock);
-}
-
 /**
  * bch2_journal_pin_flush: ensure journal pin callback is no longer running
  */
index bae2c9210db8612ffb0cd47e731d5143c2b7a1e0..e25355042e6e4c7a6d821fbbecd026eac753276b 100644 (file)
--- a/libbcachefs/journal_reclaim.h
+++ b/libbcachefs/journal_reclaim.h
@@ -42,25 +42,33 @@ journal_seq_pin(struct journal *j, u64 seq)
 void bch2_journal_pin_put(struct journal *, u64);
 void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
 
-void __bch2_journal_pin_add(struct journal *, u64, struct journal_entry_pin *,
-                           journal_pin_flush_fn);
+void bch2_journal_pin_set(struct journal *, u64, struct journal_entry_pin *,
+                         journal_pin_flush_fn);
 
 static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
                                        struct journal_entry_pin *pin,
                                        journal_pin_flush_fn flush_fn)
 {
        if (unlikely(!journal_pin_active(pin) || pin->seq > seq))
-               __bch2_journal_pin_add(j, seq, pin, flush_fn);
+               bch2_journal_pin_set(j, seq, pin, flush_fn);
 }
 
-void bch2_journal_pin_update(struct journal *, u64,
-                            struct journal_entry_pin *,
-                            journal_pin_flush_fn);
+static inline void bch2_journal_pin_copy(struct journal *j,
+                                        struct journal_entry_pin *dst,
+                                        struct journal_entry_pin *src,
+                                        journal_pin_flush_fn flush_fn)
+{
+       if (journal_pin_active(src))
+               bch2_journal_pin_add(j, src->seq, dst, flush_fn);
+}
 
-void bch2_journal_pin_copy(struct journal *,
-                          struct journal_entry_pin *,
-                          struct journal_entry_pin *,
-                          journal_pin_flush_fn);
+static inline void bch2_journal_pin_update(struct journal *j, u64 seq,
+                                          struct journal_entry_pin *pin,
+                                          journal_pin_flush_fn flush_fn)
+{
+       if (unlikely(!journal_pin_active(pin) || pin->seq < seq))
+               bch2_journal_pin_set(j, seq, pin, flush_fn);
+}
 
 void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
 
index 4834f41f48edd4d5ba54a43e573155bcc4bed379..2c5daed58acafe9061f331c3d27e512c1216b456 100644 (file)
--- a/libbcachefs/movinggc.c
+++ b/libbcachefs/movinggc.c
@@ -61,7 +61,7 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
        copygc_heap *h = &c->copygc_heap;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
-       struct extent_ptr_decoded p;
+       struct extent_ptr_decoded p = { 0 };
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
index 0b3521c9cc19ef3ba285805a05de94bcc8bfd717..d24cef2bf1aa3252b611e1fee6e84a41dc2b4964 100644 (file)
--- a/libbcachefs/recovery.c
+++ b/libbcachefs/recovery.c
@@ -187,7 +187,7 @@ void bch2_btree_and_journal_iter_init(struct btree_and_journal_iter *iter,
 {
        memset(iter, 0, sizeof(*iter));
 
-       iter->btree = bch2_trans_get_iter(trans, id, pos, 0);
+       iter->btree = bch2_trans_get_iter(trans, id, pos, BTREE_ITER_PREFETCH);
        bch2_journal_iter_init(&iter->journal, journal_keys, id, 0, pos);
 }
 
@@ -443,9 +443,6 @@ retry:
                bch2_cut_back(atomic_end, split);
 
                split_iter = bch2_trans_copy_iter(&trans, iter);
-               ret = PTR_ERR_OR_ZERO(split_iter);
-               if (ret)
-                       goto err;
 
                /*
                 * It's important that we don't go through the
@@ -502,8 +499,6 @@ static int __bch2_journal_replay_key(struct btree_trans *trans,
        iter = bch2_trans_get_node_iter(trans, id, k->k.p,
                                        BTREE_MAX_DEPTH, level,
                                        BTREE_ITER_INTENT);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
 
        /*
         * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
@@ -538,8 +533,7 @@ static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
                                   BTREE_ITER_CACHED|
                                   BTREE_ITER_CACHED_NOFILL|
                                   BTREE_ITER_INTENT);
-       ret =   PTR_ERR_OR_ZERO(iter) ?:
-               bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+       ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
 }
index dea9b7252b88b9a24e59f91f21cd53e44e1a6e2d..1ecf72c9487cdedb6c2fb94e891dde8dd12f9753 100644 (file)
--- a/libbcachefs/str_hash.h
+++ b/libbcachefs/str_hash.h
@@ -205,8 +205,6 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
        int ret;
 
        iter = bch2_trans_copy_iter(trans, start);
-       if (IS_ERR(iter))
-               return PTR_ERR(iter);
 
        bch2_btree_iter_next_slot(iter);
 
@@ -253,11 +251,8 @@ int bch2_hash_set(struct btree_trans *trans,
                }
 
                if (!slot &&
-                   !(flags & BCH_HASH_SET_MUST_REPLACE)) {
+                   !(flags & BCH_HASH_SET_MUST_REPLACE))
                        slot = bch2_trans_copy_iter(trans, iter);
-                       if (IS_ERR(slot))
-                               return PTR_ERR(slot);
-               }
 
                if (k.k->type != KEY_TYPE_whiteout)
                        goto not_found;
index 900eda88a5dc00e581b8d058b3998d0207266821..cc13fc2581150874c2fa54a34b7ad63d831264d1 100644 (file)
--- a/libbcachefs/sysfs.c
+++ b/libbcachefs/sysfs.c
@@ -503,10 +503,11 @@ STORE(bch2_fs)
                if (threads_str &&
                    !(ret = kstrtouint(threads_str, 10, &threads)) &&
                    !(ret = bch2_strtoull_h(nr_str, &nr)))
-                       bch2_btree_perf_test(c, test, nr, threads);
-               else
-                       size = ret;
+                       ret = bch2_btree_perf_test(c, test, nr, threads);
                kfree(tmp);
+
+               if (ret)
+                       size = ret;
        }
 #endif
        return size;
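
The store hook above now forwards bch2_btree_perf_test()'s return value: in a sysfs STORE handler, returning the number of bytes consumed signals success, while returning a negative errno makes the userspace write() fail with that error, so test failures become visible instead of being silently dropped. A minimal standalone illustration of the convention (handler and worker names here are hypothetical):

	#include <stddef.h>
	#include <sys/types.h>

	/* hypothetical worker: 0 on success, negative errno on failure */
	static int run_requested_test(const char *buf, size_t size)
	{
		(void) buf; (void) size;
		return 0;
	}

	static ssize_t example_store(const char *buf, size_t size)
	{
		int ret = run_requested_test(buf, size);

		/* bytes consumed on success, negative errno propagated on failure */
		return ret ?: (ssize_t) size;
	}
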
index 4dcace650416750b59ca4ae9c16db5544e196eca..5f40b048dd0d9bd6764499261f85c435d9f7e845 100644 (file)
--- a/libbcachefs/tests.c
+++ b/libbcachefs/tests.c
@@ -26,7 +26,7 @@ static void delete_test_keys(struct bch_fs *c)
 
 /* unit tests */
 
-static void test_delete(struct bch_fs *c, u64 nr)
+static int test_delete(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
@@ -41,24 +41,37 @@ static void test_delete(struct bch_fs *c, u64 nr)
                                   BTREE_ITER_INTENT);
 
        ret = bch2_btree_iter_traverse(iter);
-       BUG_ON(ret);
+       if (ret) {
+               bch_err(c, "lookup error in test_delete: %i", ret);
+               goto err;
+       }
 
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_trans_update(&trans, iter, &k.k_i, 0));
-       BUG_ON(ret);
+       if (ret) {
+               bch_err(c, "update error in test_delete: %i", ret);
+               goto err;
+       }
 
        pr_info("deleting once");
        ret = bch2_btree_delete_at(&trans, iter, 0);
-       BUG_ON(ret);
+       if (ret) {
+               bch_err(c, "delete error (first) in test_delete: %i", ret);
+               goto err;
+       }
 
        pr_info("deleting twice");
        ret = bch2_btree_delete_at(&trans, iter, 0);
-       BUG_ON(ret);
-
+       if (ret) {
+               bch_err(c, "delete error (second) in test_delete: %i", ret);
+               goto err;
+       }
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void test_delete_written(struct bch_fs *c, u64 nr)
+static int test_delete_written(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
@@ -73,27 +86,37 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
                                   BTREE_ITER_INTENT);
 
        ret = bch2_btree_iter_traverse(iter);
-       BUG_ON(ret);
+       if (ret) {
+               bch_err(c, "lookup error in test_delete_written: %i", ret);
+               goto err;
+       }
 
        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                bch2_trans_update(&trans, iter, &k.k_i, 0));
-       BUG_ON(ret);
+       if (ret) {
+               bch_err(c, "update error in test_delete_written: %i", ret);
+               goto err;
+       }
 
        bch2_journal_flush_all_pins(&c->journal);
 
        ret = bch2_btree_delete_at(&trans, iter, 0);
-       BUG_ON(ret);
-
+       if (ret) {
+               bch_err(c, "delete error in test_delete_written: %i", ret);
+               goto err;
+       }
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void test_iterate(struct bch_fs *c, u64 nr)
+static int test_iterate(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -109,7 +132,10 @@ static void test_iterate(struct bch_fs *c, u64 nr)
 
                ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
                                        NULL, NULL, 0);
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "insert error in test_iterate: %i", ret);
+                       goto err;
+               }
        }
 
        pr_info("iterating forwards");
@@ -132,17 +158,18 @@ static void test_iterate(struct bch_fs *c, u64 nr)
                BUG_ON(k.k->p.offset != --i);
 
        BUG_ON(i);
-
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void test_iterate_extents(struct bch_fs *c, u64 nr)
+static int test_iterate_extents(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -159,7 +186,10 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
 
                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "insert error in test_iterate_extents: %i", ret);
+                       goto err;
+               }
        }
 
        pr_info("iterating forwards");
@@ -182,17 +212,18 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
        }
 
        BUG_ON(i);
-
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void test_iterate_slots(struct bch_fs *c, u64 nr)
+static int test_iterate_slots(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -208,7 +239,10 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 
                ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
                                        NULL, NULL, 0);
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "insert error in test_iterate_slots: %i", ret);
+                       goto err;
+               }
        }
 
        pr_info("iterating forwards");
@@ -240,17 +274,18 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
                if (i == nr * 2)
                        break;
        }
-
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
+static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        u64 i;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -267,7 +302,10 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
                ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                        NULL, NULL, 0);
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
+                       goto err;
+               }
        }
 
        pr_info("iterating forwards");
@@ -299,15 +337,16 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
                if (i == nr)
                        break;
        }
-
+err:
        bch2_trans_exit(&trans);
+       return ret;
 }
 
 /*
  * XXX: we really want to make sure we've got a btree with depth > 0 for these
  * tests
  */
-static void test_peek_end(struct bch_fs *c, u64 nr)
+static int test_peek_end(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
@@ -324,9 +363,10 @@ static void test_peek_end(struct bch_fs *c, u64 nr)
        BUG_ON(k.k);
 
        bch2_trans_exit(&trans);
+       return 0;
 }
 
-static void test_peek_end_extents(struct bch_fs *c, u64 nr)
+static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
@@ -343,14 +383,15 @@ static void test_peek_end_extents(struct bch_fs *c, u64 nr)
        BUG_ON(k.k);
 
        bch2_trans_exit(&trans);
+       return 0;
 }
 
 /* extent unit tests */
 
 u64 test_version;
 
-static void insert_test_extent(struct bch_fs *c,
-                              u64 start, u64 end)
+static int insert_test_extent(struct bch_fs *c,
+                             u64 start, u64 end)
 {
        struct bkey_i_cookie k;
        int ret;
@@ -364,42 +405,47 @@ static void insert_test_extent(struct bch_fs *c,
 
        ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
                                NULL, NULL, 0);
-       BUG_ON(ret);
+       if (ret)
+               bch_err(c, "insert error in insert_test_extent: %i", ret);
+       return ret;
 }
 
-static void __test_extent_overwrite(struct bch_fs *c,
+static int __test_extent_overwrite(struct bch_fs *c,
                                    u64 e1_start, u64 e1_end,
                                    u64 e2_start, u64 e2_end)
 {
-       insert_test_extent(c, e1_start, e1_end);
-       insert_test_extent(c, e2_start, e2_end);
+       int ret;
+
+       ret   = insert_test_extent(c, e1_start, e1_end) ?:
+               insert_test_extent(c, e2_start, e2_end);
 
        delete_test_keys(c);
+       return ret;
 }
 
-static void test_extent_overwrite_front(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
 {
-       __test_extent_overwrite(c, 0, 64, 0, 32);
-       __test_extent_overwrite(c, 8, 64, 0, 32);
+       return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
+               __test_extent_overwrite(c, 8, 64, 0, 32);
 }
 
-static void test_extent_overwrite_back(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
 {
-       __test_extent_overwrite(c, 0, 64, 32, 64);
-       __test_extent_overwrite(c, 0, 64, 32, 72);
+       return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
+               __test_extent_overwrite(c, 0, 64, 32, 72);
 }
 
-static void test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
 {
-       __test_extent_overwrite(c, 0, 64, 32, 40);
+       return __test_extent_overwrite(c, 0, 64, 32, 40);
 }
 
-static void test_extent_overwrite_all(struct bch_fs *c, u64 nr)
+static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
 {
-       __test_extent_overwrite(c, 32, 64,  0,  64);
-       __test_extent_overwrite(c, 32, 64,  0, 128);
-       __test_extent_overwrite(c, 32, 64, 32,  64);
-       __test_extent_overwrite(c, 32, 64, 32, 128);
+       return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
+               __test_extent_overwrite(c, 32, 64,  0, 128) ?:
+               __test_extent_overwrite(c, 32, 64, 32,  64) ?:
+               __test_extent_overwrite(c, 32, 64, 32, 128);
 }
 
 /* perf tests */
@@ -415,11 +461,11 @@ static u64 test_rand(void)
        return v;
 }
 
-static void rand_insert(struct bch_fs *c, u64 nr)
+static int rand_insert(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct bkey_i_cookie k;
-       int ret;
+       int ret = 0;
        u64 i;
 
        bch2_trans_init(&trans, c, 0, 0);
@@ -430,48 +476,63 @@ static void rand_insert(struct bch_fs *c, u64 nr)
 
                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
-
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "error in rand_insert: %i", ret);
+                       break;
+               }
        }
 
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void rand_lookup(struct bch_fs *c, u64 nr)
+static int rand_lookup(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
+       int ret = 0;
        u64 i;
 
        bch2_trans_init(&trans, c, 0, 0);
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
 
        for (i = 0; i < nr; i++) {
-               iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
-                                          POS(0, test_rand()), 0);
+               bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
 
                k = bch2_btree_iter_peek(iter);
-               bch2_trans_iter_free(&trans, iter);
+               ret = bkey_err(k);
+               if (ret) {
+                       bch_err(c, "error in rand_lookup: %i", ret);
+                       break;
+               }
        }
 
+       bch2_trans_iter_free(&trans, iter);
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void rand_mixed(struct bch_fs *c, u64 nr)
+static int rand_mixed(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
-       int ret;
+       int ret = 0;
        u64 i;
 
        bch2_trans_init(&trans, c, 0, 0);
+       iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
 
        for (i = 0; i < nr; i++) {
-               iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
-                                          POS(0, test_rand()), 0);
+               bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
 
                k = bch2_btree_iter_peek(iter);
+               ret = bkey_err(k);
+               if (ret) {
+                       bch_err(c, "lookup error in rand_mixed: %i", ret);
+                       break;
+               }
 
                if (!(i & 3) && k.k) {
                        struct bkey_i_cookie k;
@@ -481,14 +542,16 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 
                        ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                                bch2_trans_update(&trans, iter, &k.k_i, 0));
-
-                       BUG_ON(ret);
+                       if (ret) {
+                               bch_err(c, "update error in rand_mixed: %i", ret);
+                               break;
+                       }
                }
-
-               bch2_trans_iter_free(&trans, iter);
        }
 
+       bch2_trans_iter_free(&trans, iter);
        bch2_trans_exit(&trans);
+       return ret;
 }
 
 static int __do_delete(struct btree_trans *trans, struct bpos pos)
@@ -518,10 +581,10 @@ err:
        return ret;
 }
 
-static void rand_delete(struct bch_fs *c, u64 nr)
+static int rand_delete(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
-       int ret;
+       int ret = 0;
        u64 i;
 
        bch2_trans_init(&trans, c, 0, 0);
@@ -531,19 +594,23 @@ static void rand_delete(struct bch_fs *c, u64 nr)
 
                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "error in rand_delete: %i", ret);
+                       break;
+               }
        }
 
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void seq_insert(struct bch_fs *c, u64 nr)
+static int seq_insert(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
-       int ret;
+       int ret = 0;
        u64 i = 0;
 
        bkey_cookie_init(&insert.k_i);
@@ -556,35 +623,39 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 
                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_trans_update(&trans, iter, &insert.k_i, 0));
-
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "error in seq_insert: %i", ret);
+                       break;
+               }
 
                if (++i == nr)
                        break;
        }
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void seq_lookup(struct bch_fs *c, u64 nr)
+static int seq_lookup(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
        for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
                ;
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void seq_overwrite(struct bch_fs *c, u64 nr)
+static int seq_overwrite(struct bch_fs *c, u64 nr)
 {
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
-       int ret;
+       int ret = 0;
 
        bch2_trans_init(&trans, c, 0, 0);
 
@@ -596,23 +667,28 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
 
                ret = __bch2_trans_do(&trans, NULL, NULL, 0,
                        bch2_trans_update(&trans, iter, &u.k_i, 0));
-
-               BUG_ON(ret);
+               if (ret) {
+                       bch_err(c, "error in seq_overwrite: %i", ret);
+                       break;
+               }
        }
        bch2_trans_exit(&trans);
+       return ret;
 }
 
-static void seq_delete(struct bch_fs *c, u64 nr)
+static int seq_delete(struct bch_fs *c, u64 nr)
 {
        int ret;
 
        ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
                                      POS(0, 0), POS(0, U64_MAX),
                                      NULL);
-       BUG_ON(ret);
+       if (ret)
+               bch_err(c, "error in seq_delete: %i", ret);
+       return ret;
 }
 
-typedef void (*perf_test_fn)(struct bch_fs *, u64);
+typedef int (*perf_test_fn)(struct bch_fs *, u64);
 
 struct test_job {
        struct bch_fs                   *c;
@@ -628,11 +704,13 @@ struct test_job {
 
        u64                             start;
        u64                             finish;
+       int                             ret;
 };
 
 static int btree_perf_test_thread(void *data)
 {
        struct test_job *j = data;
+       int ret;
 
        if (atomic_dec_and_test(&j->ready)) {
                wake_up(&j->ready_wait);
@@ -641,7 +719,9 @@ static int btree_perf_test_thread(void *data)
                wait_event(j->ready_wait, !atomic_read(&j->ready));
        }
 
-       j->fn(j->c, j->nr / j->nr_threads);
+       ret = j->fn(j->c, j->nr / j->nr_threads);
+       if (ret)
+               j->ret = ret;
 
        if (atomic_dec_and_test(&j->done)) {
                j->finish = sched_clock();
@@ -651,8 +731,8 @@ static int btree_perf_test_thread(void *data)
        return 0;
 }
 
-void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
-                         u64 nr, unsigned nr_threads)
+int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
+                        u64 nr, unsigned nr_threads)
 {
        struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
        char name_buf[20], nr_buf[20], per_sec_buf[20];
@@ -695,7 +775,7 @@ void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
 
        if (!j.fn) {
                pr_err("unknown test %s", testname);
-               return;
+               return -EINVAL;
        }
 
        //pr_info("running test %s:", testname);
@@ -720,6 +800,7 @@ void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
                time / NSEC_PER_SEC,
                time * nr_threads / nr,
                per_sec_buf);
+       return j.ret;
 }
 
 #endif /* CONFIG_BCACHEFS_TESTS */
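
The tests.c conversion above turns every test from void into int and chains steps with the GNU C "a ?: b" operator, which evaluates to a when a is non-zero and to b otherwise, so a sequence of calls stops at the first non-zero error code; BUG_ON()s on expected errors become bch_err() plus an early goto to the cleanup path. The chaining idiom on its own, with placeholder step functions:

	/* GNU C extension: x ?: y == x ? x : y, with x evaluated only once */
	static int step_one(void)   { return 0; }
	static int step_two(void)   { return 0; }
	static int step_three(void) { return 0; }

	static int run_all(void)
	{
		/* first non-zero error code, or 0 if every step succeeded;
		 * later steps are not run once one of them fails */
		return  step_one() ?:
			step_two() ?:
			step_three();
	}
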
index 551d0764225ecf47eb79658b2fd2bf5eaa5c8d6d..c73b18aea7e01d01aece515369a8e38fd37c1870 100644 (file)
--- a/libbcachefs/tests.h
+++ b/libbcachefs/tests.h
@@ -6,7 +6,7 @@ struct bch_fs;
 
 #ifdef CONFIG_BCACHEFS_TESTS
 
-void bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
+int bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
 
 #else