git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to ed6b7f81a7 six locks: Disable percpu read lock mode in...
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 11 Jun 2023 02:13:01 +0000 (22:13 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 11 Jun 2023 02:15:00 +0000 (22:15 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
18 files changed:
.bcachefs_revision
include/linux/bitmap.h
libbcachefs/alloc_foreground.c
libbcachefs/backpointers.c
libbcachefs/btree_iter.c
libbcachefs/btree_iter.h
libbcachefs/btree_locking.c
libbcachefs/btree_update_leaf.c
libbcachefs/disk_groups.c
libbcachefs/disk_groups.h
libbcachefs/ec.c
libbcachefs/fs.c
libbcachefs/io.c
libbcachefs/rebalance.c
libbcachefs/subvolume.h
libbcachefs/sysfs.c
libbcachefs/tests.c
linux/six.c

index 1df24ce61dbf8200da92535ccdcdcf9a31a148e8..d1b87e30d0151b827f87523f2f85fd98d138e5ab 100644 (file)
@@ -1 +1 @@
-7c0fe6f104a68065c15b069176247bf5d237b2b3
+ed6b7f81a7b51ac05d02635907f92aff4a3f8445
index 80e8ecda27da7405f4785188067fbddf23ca0f1c..db2dfdb2ef051a634794dfce9a2bf61bdd7eb6f4 100644 (file)
@@ -135,4 +135,12 @@ static inline unsigned long find_next_zero_bit(const unsigned long *addr, unsign
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 
+static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+       if (small_const_nbits(nbits))
+               return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+       return find_first_bit(src, nbits) == nbits;
+}
+
 #endif /* _PERF_BITOPS_H */
index ec77601ebd0cb704683514ac42b0339ae0fc1eb2..95829bbfe033558818b6cbc03dd393103928789f 100644 (file)
@@ -371,7 +371,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
        if (!ob)
                iter.path->preserve = false;
 err:
-       set_btree_iter_dontneed(&iter);
+       if (iter.trans && iter.path)
+               set_btree_iter_dontneed(&iter);
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ob;
@@ -934,9 +935,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
        unsigned i;
        int ret;
 
-       rcu_read_lock();
        devs = target_rw_devs(c, wp->data_type, target);
-       rcu_read_unlock();
 
        /* Don't allocate from devices we already have pointers to: */
        for (i = 0; i < devs_have->nr; i++)
index e9ae623cf4a8145fd8dc148565f21235976866ec..11201064d9a4522a373ad7b4e26e8fc0cd5a759a 100644 (file)
@@ -805,7 +805,7 @@ static int check_one_backpointer(struct btree_trans *trans,
 
        if (fsck_err_on(!k.k, c,
                        "backpointer for missing extent\n  %s",
-                       (bch2_backpointer_k_to_text(&buf, c, bp.s_c), buf.buf)))
+                       (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf)))
                return bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
 out:
 fsck_err:
index 485e93c12625a73521ec23ddb795bcb07b06a0c9..0e32247fe1966cfe7061908e93ba0e8062367d1d 100644 (file)
@@ -2918,6 +2918,10 @@ static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
 #endif
        if (!p)
                p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
+       /*
+        * paths need to be zeroed, bch2_check_for_deadlock looks at paths in
+        * other threads
+        */
 
        trans->paths            = p; p += paths_bytes;
        trans->updates          = p; p += updates_bytes;
index f81a1158f1089fb9f28b77ff361a8a7a39779b16..c6129a4346a3b07038f6eb23ce3df70c5b9f7763 100644 (file)
@@ -110,11 +110,14 @@ __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
  * This version is intended to be safe for use on a btree_trans that is owned by
  * another thread, for bch2_btree_trans_to_text();
  */
-#define trans_for_each_path_safe(_trans, _path, _idx)                  \
-       for (_idx = 0;                                                  \
+#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)     \
+       for (_idx = _start;                                             \
             (_path = __trans_next_path_safe((_trans), &_idx));         \
             _idx++)
 
+#define trans_for_each_path_safe(_trans, _path, _idx)                  \
+       trans_for_each_path_safe_from(_trans, _path, _idx, 0)
+
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
        unsigned idx = path ? path->sorted_idx + 1 : 0;
index a17256fc2aad5764d52f08b4516265aacb6d2397..5b290324b967eac1a93231f7d3ede9d6295dab3f 100644 (file)
@@ -254,6 +254,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
+       unsigned path_idx;
        int ret;
 
        if (trans->lock_must_abort) {
@@ -272,12 +273,12 @@ next:
 
        top = &g.g[g.nr - 1];
 
-       trans_for_each_path_from(top->trans, path, top->path_idx) {
+       trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
                if (!path->nodes_locked)
                        continue;
 
-               if (top->path_idx != path->idx) {
-                       top->path_idx           = path->idx;
+               if (path_idx != top->path_idx) {
+                       top->path_idx           = path_idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }
index 779338e145457132b3f6a628941ee18394656af1..bf7b1199cee5b17bc5d1061b1f08a85b999184b1 100644 (file)
@@ -963,11 +963,16 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
                                        JOURNAL_RES_GET_CHECK));
                break;
        case -BCH_ERR_btree_insert_need_journal_reclaim:
+               bch2_trans_unlock(trans);
+
                trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 
-               ret = drop_locks_do(trans,
-                       (wait_event_freezable(c->journal.reclaim_wait,
-                                    (ret = journal_reclaim_wait_done(c))), ret));
+               wait_event_freezable(c->journal.reclaim_wait,
+                                    (ret = journal_reclaim_wait_done(c)));
+               if (ret < 0)
+                       break;
+
+               ret = bch2_trans_relock(trans);
                break;
        case -BCH_ERR_btree_insert_need_flush_buffer: {
                struct btree_write_buffer *wb = &c->btree_write_buffer;
@@ -1306,29 +1311,52 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
        return ret;
 }
 
-static int pos_overwritten_in_snapshot(struct btree_trans *trans, enum btree_id btree,
-                                      struct bpos pos, u32 snapshot)
+static int get_snapshot_overwrites(struct btree_trans *trans,
+                                  enum btree_id btree,
+                                  struct bpos pos,
+                                  snapshot_id_list *overwrites)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
-       int ret;
+       snapshot_id_list overwrites2;
+       u32 *i;
+       int ret = 0;
+
+       darray_init(overwrites);
+       darray_init(&overwrites2);
 
-       for_each_btree_key_norestart(trans, iter,
-                       btree, SPOS(pos.inode, pos.offset, snapshot),
-                       BTREE_ITER_ALL_SNAPSHOTS|
-                       BTREE_ITER_NOPRESERVE, k, ret) {
+       for_each_btree_key_norestart(trans, iter, btree,
+                                    SPOS(pos.inode, pos.offset, 0),
+                                    BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
                if (bpos_ge(k.k->p, pos))
                        break;
 
                if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
-                       ret = 1;
-                       break;
+                       ret = snapshot_list_add(c, &overwrites2, k.k->p.snapshot);
+                       if (ret)
+                               break;
                }
        }
        bch2_trans_iter_exit(trans, &iter);
 
+       if (ret)
+               goto err;
+
+       darray_for_each(overwrites2, i)
+               if (!snapshot_list_has_ancestor(c, &overwrites2, *i)) {
+                       ret = snapshot_list_add(c, overwrites, *i);
+                       if (ret)
+                               goto err;
+               }
+
+       *overwrites = overwrites2;
+out:
+       darray_exit(&overwrites2);
        return ret;
+err:
+       darray_exit(overwrites);
+       goto out;
 }
 
 int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
@@ -1337,61 +1365,76 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
                                   struct bpos new_pos)
 {
        struct bch_fs *c = trans->c;
-       struct btree_iter old_iter, new_iter;
-       struct bkey_s_c k;
-       snapshot_id_list s;
-       struct bkey_i *update;
+       snapshot_id_list old_overwrites, new_overwrites, updates;
+       bool began_transaction = false;
+       u32 *i;
        int ret;
 
        if (!bch2_snapshot_has_children(c, old_pos.snapshot))
                return 0;
 
-       darray_init(&s);
+       darray_init(&old_overwrites);
+       darray_init(&new_overwrites);
+       darray_init(&updates);
 
-       bch2_trans_iter_init(trans, &old_iter, btree, old_pos,
-                            BTREE_ITER_NOT_EXTENTS|
-                            BTREE_ITER_ALL_SNAPSHOTS);
-       while ((k = bch2_btree_iter_prev(&old_iter)).k &&
-              !(ret = bkey_err(k)) &&
-              bkey_eq(old_pos, k.k->p)) {
+       ret =   get_snapshot_overwrites(trans, btree, old_pos, &old_overwrites) ?:
+               get_snapshot_overwrites(trans, btree, new_pos, &new_overwrites);
+       if (ret)
+               goto err;
 
-               if (!bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot) ||
-                   snapshot_list_has_ancestor(c, &s, k.k->p.snapshot))
-                       continue;
+       darray_for_each(old_overwrites, i)
+               if (!snapshot_list_has_ancestor(c, &new_overwrites, *i)) {
+                       ret = darray_push(&updates, *i);
+                       if (ret)
+                               goto err;
+               }
 
-               ret = pos_overwritten_in_snapshot(trans, btree,
-                                       new_pos, k.k->p.snapshot);
-               if (ret < 0)
+       if (updates.nr > 4) {
+               bch2_trans_begin(trans);
+               began_transaction = true;
+       }
+
+       darray_for_each(updates, i) {
+               struct btree_iter iter;
+               struct bkey_i *update;
+
+               bch2_trans_iter_init(trans, &iter, btree,
+                                    SPOS(new_pos.inode, new_pos.offset, *i),
+                                    BTREE_ITER_NOT_EXTENTS|
+                                    BTREE_ITER_INTENT);
+               update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+               ret = PTR_ERR_OR_ZERO(update);
+               if (ret)
                        break;
 
-               if (!ret) {
-                       struct bpos whiteout_pos =
-                               SPOS(new_pos.inode, new_pos.offset, k.k->p.snapshot);;
-
-                       bch2_trans_iter_init(trans, &new_iter, btree, whiteout_pos,
-                                            BTREE_ITER_NOT_EXTENTS|
-                                            BTREE_ITER_INTENT);
-                       update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
-                       ret = PTR_ERR_OR_ZERO(update);
-                       if (ret)
-                               break;
+               bkey_init(&update->k);
+               update->k.p             = iter.pos;
+               update->k.type          = KEY_TYPE_whiteout;
 
-                       bkey_init(&update->k);
-                       update->k.p             = whiteout_pos;
-                       update->k.type          = KEY_TYPE_whiteout;
+               ret   = bch2_btree_iter_traverse(&iter) ?:
+                       bch2_trans_update(trans, &iter, update,
+                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+                       (began_transaction && trans->nr_updates > 4
+                        ? bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL) : 0);
 
-                       ret   = bch2_btree_iter_traverse(&new_iter) ?:
-                               bch2_trans_update(trans, &new_iter, update,
-                                               BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
-                       bch2_trans_iter_exit(trans, &new_iter);
-               }
+               bch2_trans_iter_exit(trans, &iter);
 
-               ret = snapshot_list_add(c, &s, k.k->p.snapshot);
                if (ret)
-                       break;
+                       goto err;
        }
-       bch2_trans_iter_exit(trans, &old_iter);
-       darray_exit(&s);
+
+       if (began_transaction && trans->nr_updates) {
+               ret = bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+               if (ret)
+                       goto err;
+       }
+
+       if (began_transaction)
+               ret = -BCH_ERR_transaction_restart_nested;
+err:
+       darray_exit(&updates);
+       darray_exit(&new_overwrites);
+       darray_exit(&old_overwrites);
 
        return ret;
 }
index 1a8f8b3750da15ebf6e9731cdab43de35e391b79..52b6400779704abad9f9f66ace56cecb9355a8ed 100644 (file)
@@ -87,6 +87,40 @@ err:
        return ret;
 }
 
+void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct bch_disk_groups_cpu *g;
+       struct bch_dev *ca;
+       int i;
+       unsigned iter;
+
+       out->atomic++;
+       rcu_read_lock();
+
+       g = rcu_dereference(c->disk_groups);
+       if (!g)
+               goto out;
+
+       for (i = 0; i < g->nr; i++) {
+               if (i)
+                       prt_printf(out, " ");
+
+               if (g->entries[i].deleted) {
+                       prt_printf(out, "[deleted]");
+                       continue;
+               }
+
+               prt_printf(out, "[parent %d devs", g->entries[i].parent);
+               for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+                       prt_printf(out, " %s", ca->name);
+               prt_printf(out, "]");
+       }
+
+out:
+       rcu_read_unlock();
+       out->atomic--;
+}
+
 static void bch2_sb_disk_groups_to_text(struct printbuf *out,
                                        struct bch_sb *sb,
                                        struct bch_sb_field *f)
@@ -174,26 +208,36 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
 const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
 {
        struct target t = target_decode(target);
+       struct bch_devs_mask *devs;
+
+       rcu_read_lock();
 
        switch (t.type) {
        case TARGET_NULL:
-               return NULL;
+               devs = NULL;
+               break;
        case TARGET_DEV: {
                struct bch_dev *ca = t.dev < c->sb.nr_devices
                        ? rcu_dereference(c->devs[t.dev])
                        : NULL;
-               return ca ? &ca->self : NULL;
+               devs = ca ? &ca->self : NULL;
+               break;
        }
        case TARGET_GROUP: {
                struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
 
-               return g && t.group < g->nr && !g->entries[t.group].deleted
+               devs = g && t.group < g->nr && !g->entries[t.group].deleted
                        ? &g->entries[t.group].devs
                        : NULL;
+               break;
        }
        default:
                BUG();
        }
+
+       rcu_read_unlock();
+
+       return devs;
 }
 
 bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
index e4470c357a66b8ee6ae2d72526a558e23edee249..ec12584ceee75fea8552ef83f4ad3df125b48dae 100644 (file)
@@ -68,6 +68,14 @@ static inline struct bch_devs_mask target_rw_devs(struct bch_fs *c,
        return devs;
 }
 
+static inline bool bch2_target_accepts_data(struct bch_fs *c,
+                                           enum bch_data_type data_type,
+                                           u16 target)
+{
+       struct bch_devs_mask rw_devs = target_rw_devs(c, data_type, target);
+       return !bitmap_empty(rw_devs.d, BCH_SB_MEMBERS_MAX);
+}
+
 bool bch2_dev_in_target(struct bch_fs *, unsigned, unsigned);
 
 int bch2_disk_path_find(struct bch_sb_handle *, const char *);
@@ -88,4 +96,6 @@ int bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
 const char *bch2_sb_validate_disk_groups(struct bch_sb *,
                                         struct bch_sb_field *);
 
+void bch2_disk_groups_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_DISK_GROUPS_H */
index dfc0a61afa511d4f8d27ef9b3c46bd2f6ae14dfe..b7e3889b114b402491f308e9cec12a0e0909bf98 100644 (file)
@@ -1146,6 +1146,7 @@ err:
        mutex_lock(&c->ec_stripe_new_lock);
        list_del(&s->list);
        mutex_unlock(&c->ec_stripe_new_lock);
+       wake_up(&c->ec_stripe_new_wait);
 
        ec_stripe_buf_exit(&s->existing_stripe);
        ec_stripe_buf_exit(&s->new_stripe);
index 8fc980ee617203f3c6d915202d8d54a55d2110cd..05f1bdee7260d21e34f4c7eec7fdfbef44e205b0 100644 (file)
@@ -1481,22 +1481,14 @@ again:
                        continue;
 
                if (!(inode->v.i_state & I_DONTCACHE) &&
-                   !(inode->v.i_state & I_FREEING)) {
+                   !(inode->v.i_state & I_FREEING) &&
+                   igrab(&inode->v)) {
                        this_pass_clean = false;
 
-                       d_mark_dontcache(&inode->v);
-                       d_prune_aliases(&inode->v);
-
-                       /*
-                        * If i_count was zero, we have to take and release a
-                        * ref in order for I_DONTCACHE to be noticed and the
-                        * inode to be dropped;
-                        */
-
-                       if (!atomic_read(&inode->v.i_count) &&
-                           igrab(&inode->v) &&
-                           darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN))
+                       if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
+                               iput(&inode->v);
                                break;
+                       }
                } else if (clean_pass && this_pass_clean) {
                        wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW);
                        DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);
@@ -1511,8 +1503,12 @@ again:
        }
        mutex_unlock(&c->vfs_inodes_lock);
 
-       darray_for_each(grabbed, i)
-               iput(&(*i)->v);
+       darray_for_each(grabbed, i) {
+               inode = *i;
+               d_mark_dontcache(&inode->v);
+               d_prune_aliases(&inode->v);
+               iput(&inode->v);
+       }
        grabbed.nr = 0;
 
        if (!clean_pass || !this_pass_clean) {
index 77fe49f6c4147f5a10bea0c5c159250a2c222672..fa7781ee155b9cfc8ec5e1b3fa7cc45049cc62c6 100644 (file)
@@ -2057,10 +2057,11 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
                                .write_flags    = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
                        },
                        btree_id, k);
+       /*
+        * possible errors: -BCH_ERR_nocow_lock_blocked,
+        * -BCH_ERR_ENOSPC_disk_reservation:
+        */
        if (ret) {
-               WARN_ONCE(ret != -BCH_ERR_nocow_lock_blocked,
-                         "%s: saw unknown error %s\n", __func__, bch2_err_str(ret));
-
                ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
                                        bch_promote_params);
                BUG_ON(ret);
index 66c40999163d64bcc6fdf8491398da02cac1ab0e..989f37a3b46ab93a5ffbd77b2490a997400a093f 100644 (file)
@@ -57,7 +57,8 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
                i = 0;
                bkey_for_each_ptr(ptrs, ptr) {
                        if (!ptr->cached &&
-                           !bch2_dev_in_target(c, ptr->dev, io_opts->background_target))
+                           !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
+                           bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
                                data_opts->rewrite_ptrs |= 1U << i;
                        i++;
                }
index 1a39f713db87c8731778d297f045a946c6835904..f884a20332300e3ad814adf3c0c3a56af77037df 100644 (file)
@@ -121,7 +121,7 @@ static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 i
 {
        int ret;
 
-       BUG_ON(snapshot_list_has_id(s, id));
+       EBUG_ON(snapshot_list_has_id(s, id));
        ret = darray_push(s, id);
        if (ret)
                bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
index 0f86a6c0c9d8420fd8a7493d6422b414f453674f..0be70bf1cd1663a8c04d15aa2a92cb7914a01f68 100644 (file)
@@ -223,6 +223,7 @@ static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
 #endif
 
 read_attribute(internal_uuid);
+read_attribute(disk_groups);
 
 read_attribute(has_data);
 read_attribute(alloc_debug);
@@ -471,6 +472,9 @@ SHOW(bch2_fs)
        if (attr == &sysfs_nocow_lock_table)
                bch2_nocow_locks_to_text(out, &c->nocow_locks);
 
+       if (attr == &sysfs_disk_groups)
+               bch2_disk_groups_to_text(out, c);
+
        return 0;
 }
 
@@ -681,6 +685,8 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_moving_ctxts,
 
        &sysfs_internal_uuid,
+
+       &sysfs_disk_groups,
        NULL
 };
 
index d352821d5614005aee064052fe841a6c87a64189..35df3f940542c2776afaa8299ab476c8a9606ba8 100644 (file)
@@ -593,10 +593,8 @@ static int rand_insert(struct bch_fs *c, u64 nr)
 
                ret = commit_do(&trans, NULL, NULL, 0,
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i, 0));
-               if (ret) {
-                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+               if (ret)
                        break;
-               }
        }
 
        bch2_trans_exit(&trans);
@@ -629,10 +627,8 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
                        __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i, 0));
-               if (ret) {
-                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+               if (ret)
                        break;
-               }
        }
 
        bch2_trans_exit(&trans);
@@ -656,10 +652,8 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
 
                lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
                ret = bkey_err(k);
-               if (ret) {
-                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+               if (ret)
                        break;
-               }
        }
 
        bch2_trans_iter_exit(&trans, &iter);
@@ -709,10 +703,8 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
                rand = test_rand();
                ret = commit_do(&trans, NULL, NULL, 0,
                        rand_mixed_trans(&trans, &iter, &cookie, i, rand));
-               if (ret) {
-                       bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
+               if (ret)
                        break;
-               }
        }
 
        bch2_trans_iter_exit(&trans, &iter);
@@ -728,7 +720,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
 
        bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
                             BTREE_ITER_INTENT);
-       lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+       k = bch2_btree_iter_peek(&iter);
        ret = bkey_err(k);
        if (ret)
                goto err;
@@ -755,10 +747,8 @@ static int rand_delete(struct bch_fs *c, u64 nr)
 
                ret = commit_do(&trans, NULL, NULL, 0,
                        __do_delete(&trans, pos));
-               if (ret) {
-                       bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+               if (ret)
                        break;
-               }
        }
 
        bch2_trans_exit(&trans);
@@ -767,90 +757,59 @@ static int rand_delete(struct bch_fs *c, u64 nr)
 
 static int seq_insert(struct bch_fs *c, u64 nr)
 {
-       struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_i_cookie insert;
-       int ret = 0;
 
        bkey_cookie_init(&insert.k_i);
 
-       bch2_trans_init(&trans, c, 0, 0);
-
-       ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
+       return bch2_trans_run(c,
+               for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX),
                                        BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
-                                       NULL, NULL, 0,
-               ({
+                                       NULL, NULL, 0, ({
                        if (iter.pos.offset >= nr)
                                break;
                        insert.k.p = iter.pos;
                        bch2_trans_update(&trans, &iter, &insert.k_i, 0);
-               }));
-       if (ret)
-               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-       bch2_trans_exit(&trans);
-       return ret;
+               })));
 }
 
 static int seq_lookup(struct bch_fs *c, u64 nr)
 {
-       struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
-       int ret = 0;
 
-       bch2_trans_init(&trans, c, 0, 0);
-
-       ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+       return bch2_trans_run(c,
+               for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
                                  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
                                  0, k,
-               0);
-       if (ret)
-               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-       bch2_trans_exit(&trans);
-       return ret;
+               0));
 }
 
 static int seq_overwrite(struct bch_fs *c, u64 nr)
 {
-       struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
-       int ret = 0;
-
-       bch2_trans_init(&trans, c, 0, 0);
 
-       ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
+       return bch2_trans_run(c,
+               for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
                                        SPOS(0, 0, U32_MAX),
                                        BTREE_ITER_INTENT, k,
-                                       NULL, NULL, 0,
-               ({
+                                       NULL, NULL, 0, ({
                        struct bkey_i_cookie u;
 
                        bkey_reassemble(&u.k_i, k);
                        bch2_trans_update(&trans, &iter, &u.k_i, 0);
-               }));
-       if (ret)
-               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-       bch2_trans_exit(&trans);
-       return ret;
+               })));
 }
 
 static int seq_delete(struct bch_fs *c, u64 nr)
 {
-       int ret;
-
-       ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
+       return bch2_btree_delete_range(c, BTREE_ID_xattrs,
                                      SPOS(0, 0, U32_MAX),
                                      POS(0, U64_MAX),
                                      0, NULL);
-       if (ret)
-               bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-       return ret;
 }
 
 typedef int (*perf_test_fn)(struct bch_fs *, u64);
index d4f43f158a967fe1c0e49121f2f8df65051470ca..5b81c3fc18be1df6302325ddd288550496b82d34 100644 (file)
@@ -878,6 +878,11 @@ void __six_lock_init(struct six_lock *lock, const char *name,
        lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 
+       /*
+        * Don't assume that we have real percpu variables available in
+        * userspace:
+        */
+#ifdef __KERNEL__
        if (flags & SIX_LOCK_INIT_PCPU) {
                /*
                 * We don't return an error here on memory allocation failure
@@ -888,5 +893,6 @@ void __six_lock_init(struct six_lock *lock, const char *name,
                 */
                lock->readers = alloc_percpu(unsigned);
        }
+#endif
 }
 EXPORT_SYMBOL_GPL(__six_lock_init);