Update bcachefs sources to 5d0a6c2b32f1 bcachefs: check_directory_structure() can...
diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 1b5c2a1bd68a26356b921baea4474791b72caa26..632d7175c966e762c8f0eb302b5ee99969ee7ff5 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
 MODULE_DESCRIPTION("bcachefs filesystem");
+MODULE_SOFTDEP("pre: crc32c");
+MODULE_SOFTDEP("pre: crc64");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: chacha20");
+MODULE_SOFTDEP("pre: poly1305");
+MODULE_SOFTDEP("pre: xxhash");
+
+const char * const bch2_fs_flag_strs[] = {
+#define x(n)           #n,
+       BCH_FS_FLAGS()
+#undef x
+       NULL
+};
+
+void __bch2_print(struct bch_fs *c, const char *fmt, ...)
+{
+       struct log_output *output = c->output;
+       va_list args;
+
+       if (c->output_filter && c->output_filter != current)
+               output = NULL;
+
+       va_start(args, fmt);
+       if (likely(!output)) {
+               vprintk(fmt, args);
+       } else {
+               unsigned long flags;
+
+               if (fmt[0] == KERN_SOH[0])
+                       fmt += 2;
+
+               spin_lock_irqsave(&output->lock, flags);
+               prt_vprintf(&output->buf, fmt, args);
+               spin_unlock_irqrestore(&output->lock, flags);
+
+               wake_up(&output->wait);
+       }
+       va_end(args);
+}
 
 #define KTYPE(type)                                                    \
 static const struct attribute_group type ## _group = {                 \
@@ -128,14 +167,12 @@ static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
 struct bch_fs *bch2_dev_to_fs(dev_t dev)
 {
        struct bch_fs *c;
-       struct bch_dev *ca;
-       unsigned i;
 
        mutex_lock(&bch_fs_list_lock);
        rcu_read_lock();
 
        list_for_each_entry(c, &bch_fs_list, list)
-               for_each_member_device_rcu(ca, c, i, NULL)
+               for_each_member_device_rcu(c, ca, NULL)
                        if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
                                closure_get(&c->cl);
                                goto found;
@@ -176,14 +213,13 @@ struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
 
 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i, nr = 0, u64s =
+       unsigned nr = 0, u64s =
                ((sizeof(struct jset_entry_dev_usage) +
                  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
                sizeof(u64);
 
        rcu_read_lock();
-       for_each_member_device_rcu(ca, c, i, NULL)
+       for_each_member_device_rcu(c, ca, NULL)
                nr++;
        rcu_read_unlock();
 
@@ -210,8 +246,7 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 
 static void __bch2_fs_read_only(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i, clean_passes = 0;
+       unsigned clean_passes = 0;
        u64 seq = 0;
 
        bch2_fs_ec_stop(c);
@@ -240,14 +275,14 @@ static void __bch2_fs_read_only(struct bch_fs *c)
                    journal_cur_seq(&c->journal));
 
        if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
-           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
-               set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+           !test_bit(BCH_FS_emergency_ro, &c->flags))
+               set_bit(BCH_FS_clean_shutdown, &c->flags);
        bch2_fs_journal_stop(&c->journal);
 
        /*
         * After stopping journal:
         */
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                bch2_dev_allocator_remove(c, ca);
 }
 
@@ -256,25 +291,27 @@ static void bch2_writes_disabled(struct percpu_ref *writes)
 {
        struct bch_fs *c = container_of(writes, struct bch_fs, writes);
 
-       set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+       set_bit(BCH_FS_write_disable_complete, &c->flags);
        wake_up(&bch2_read_only_wait);
 }
 #endif
 
 void bch2_fs_read_only(struct bch_fs *c)
 {
-       if (!test_bit(BCH_FS_RW, &c->flags)) {
+       if (!test_bit(BCH_FS_rw, &c->flags)) {
                bch2_journal_reclaim_stop(&c->journal);
                return;
        }
 
-       BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+       BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
+
+       bch_verbose(c, "going read-only");
 
        /*
         * Block new foreground-end write operations from starting - any new
         * writes will return -EROFS:
         */
-       set_bit(BCH_FS_GOING_RO, &c->flags);
+       set_bit(BCH_FS_going_ro, &c->flags);
 #ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_kill(&c->writes);
 #else
@@ -294,33 +331,42 @@ void bch2_fs_read_only(struct bch_fs *c)
         * that going RO is complete:
         */
        wait_event(bch2_read_only_wait,
-                  test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
-                  test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
+                  test_bit(BCH_FS_write_disable_complete, &c->flags) ||
+                  test_bit(BCH_FS_emergency_ro, &c->flags));
+
+       bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
+       if (writes_disabled)
+               bch_verbose(c, "finished waiting for writes to stop");
 
        __bch2_fs_read_only(c);
 
        wait_event(bch2_read_only_wait,
-                  test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+                  test_bit(BCH_FS_write_disable_complete, &c->flags));
 
-       clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
-       clear_bit(BCH_FS_GOING_RO, &c->flags);
+       if (!writes_disabled)
+               bch_verbose(c, "finished waiting for writes to stop");
+
+       clear_bit(BCH_FS_write_disable_complete, &c->flags);
+       clear_bit(BCH_FS_going_ro, &c->flags);
+       clear_bit(BCH_FS_rw, &c->flags);
 
        if (!bch2_journal_error(&c->journal) &&
-           !test_bit(BCH_FS_ERROR, &c->flags) &&
-           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
-           test_bit(BCH_FS_STARTED, &c->flags) &&
-           test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
+           !test_bit(BCH_FS_error, &c->flags) &&
+           !test_bit(BCH_FS_emergency_ro, &c->flags) &&
+           test_bit(BCH_FS_started, &c->flags) &&
+           test_bit(BCH_FS_clean_shutdown, &c->flags) &&
            !c->opts.norecovery) {
                BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
                BUG_ON(atomic_read(&c->btree_cache.dirty));
                BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
-               BUG_ON(c->btree_write_buffer.state.nr);
+               BUG_ON(c->btree_write_buffer.inc.keys.nr);
+               BUG_ON(c->btree_write_buffer.flushing.keys.nr);
 
                bch_verbose(c, "marking filesystem clean");
                bch2_fs_mark_clean(c);
+       } else {
+               bch_verbose(c, "done going read-only, filesystem not clean");
        }
-
-       clear_bit(BCH_FS_RW, &c->flags);
 }
 
 static void bch2_fs_read_only_work(struct work_struct *work)
@@ -340,7 +386,7 @@ static void bch2_fs_read_only_async(struct bch_fs *c)
 
 bool bch2_fs_emergency_read_only(struct bch_fs *c)
 {
-       bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+       bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
 
        bch2_journal_halt(&c->journal);
        bch2_fs_read_only_async(c);
@@ -377,16 +423,14 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
 
 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret;
 
-       if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
+       if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
                bch_err(c, "cannot go rw, unfixed btree errors");
                return -BCH_ERR_erofs_unfixed_errors;
        }
 
-       if (test_bit(BCH_FS_RW, &c->flags))
+       if (test_bit(BCH_FS_rw, &c->flags))
                return 0;
 
        if (c->opts.norecovery)
@@ -409,7 +453,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        if (ret)
                goto err;
 
-       clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+       clear_bit(BCH_FS_clean_shutdown, &c->flags);
 
        /*
         * First journal write must be a flush write: after a clean shutdown we
@@ -419,10 +463,22 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
         */
        set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
+       set_bit(BCH_FS_rw, &c->flags);
+       set_bit(BCH_FS_was_rw, &c->flags);
+
+#ifndef BCH_WRITE_REF_DEBUG
+       percpu_ref_reinit(&c->writes);
+#else
+       for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
+               BUG_ON(atomic_long_read(&c->writes[i]));
+               atomic_long_inc(&c->writes[i]);
+       }
+#endif
+
        ret = bch2_gc_thread_start(c);
        if (ret) {
                bch_err(c, "error starting gc thread");
@@ -439,24 +495,16 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
                        goto err;
        }
 
-#ifndef BCH_WRITE_REF_DEBUG
-       percpu_ref_reinit(&c->writes);
-#else
-       for (i = 0; i < BCH_WRITE_REF_NR; i++) {
-               BUG_ON(atomic_long_read(&c->writes[i]));
-               atomic_long_inc(&c->writes[i]);
-       }
-#endif
-       set_bit(BCH_FS_RW, &c->flags);
-       set_bit(BCH_FS_WAS_RW, &c->flags);
-
        bch2_do_discards(c);
        bch2_do_invalidates(c);
        bch2_do_stripe_deletes(c);
        bch2_do_pending_node_rewrites(c);
        return 0;
 err:
-       __bch2_fs_read_only(c);
+       if (test_bit(BCH_FS_rw, &c->flags))
+               bch2_fs_read_only(c);
+       else
+               __bch2_fs_read_only(c);
        return ret;
 }
 
@@ -504,8 +552,8 @@ static void __bch2_fs_free(struct bch_fs *c)
        bch2_io_clock_exit(&c->io_clock[WRITE]);
        bch2_io_clock_exit(&c->io_clock[READ]);
        bch2_fs_compress_exit(c);
-       bch2_journal_keys_free(&c->journal_keys);
-       bch2_journal_entries_free(c);
+       bch2_journal_keys_put_initial(c);
+       BUG_ON(atomic_read(&c->journal_keys.ref));
        bch2_fs_btree_write_buffer_exit(c);
        percpu_free_rwsem(&c->mark_lock);
        free_percpu(c->online_reserved);
@@ -548,12 +596,9 @@ static void bch2_fs_release(struct kobject *kobj)
 
 void __bch2_fs_stop(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        bch_verbose(c, "shutting down");
 
-       set_bit(BCH_FS_STOPPING, &c->flags);
+       set_bit(BCH_FS_stopping, &c->flags);
 
        cancel_work_sync(&c->journal_seq_blacklist_gc_work);
 
@@ -561,7 +606,7 @@ void __bch2_fs_stop(struct bch_fs *c)
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                if (ca->kobj.state_in_sysfs &&
                    ca->disk_sb.bdev)
                        sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
@@ -572,6 +617,9 @@ void __bch2_fs_stop(struct bch_fs *c)
        bch2_fs_debug_exit(c);
        bch2_fs_chardev_exit(c);
 
+       bch2_ro_ref_put(c);
+       wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));
+
        kobject_put(&c->counters_kobj);
        kobject_put(&c->time_stats);
        kobject_put(&c->opts_dir);
@@ -580,7 +628,7 @@ void __bch2_fs_stop(struct bch_fs *c)
        /* btree prefetch might have kicked off reads in the background: */
        bch2_btree_flush_all_reads(c);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                cancel_work_sync(&ca->io_error_work);
 
        cancel_work_sync(&c->read_only_work);
@@ -619,8 +667,6 @@ void bch2_fs_stop(struct bch_fs *c)
 
 static int bch2_fs_online(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
 
        lockdep_assert_held(&bch_fs_list_lock);
@@ -641,7 +687,9 @@ static int bch2_fs_online(struct bch_fs *c)
        ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
            kobject_add(&c->internal, &c->kobj, "internal") ?:
            kobject_add(&c->opts_dir, &c->kobj, "options") ?:
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
            kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
+#endif
            kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
            bch2_opts_create_sysfs_files(&c->opts_dir);
        if (ret) {
@@ -651,7 +699,7 @@ static int bch2_fs_online(struct bch_fs *c)
 
        down_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                ret = bch2_dev_sysfs_online(c, ca);
                if (ret) {
                        bch_err(c, "error creating sysfs objects");
@@ -680,6 +728,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                goto out;
        }
 
+       c->output = (void *)(unsigned long) opts.log_output;
+
        __module_get(THIS_MODULE);
 
        closure_init(&c->cl, NULL);
@@ -700,14 +750,21 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        mutex_init(&c->btree_root_lock);
        INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
 
+       refcount_set(&c->ro_ref, 1);
+       init_waitqueue_head(&c->ro_ref_wait);
+       sema_init(&c->online_fsck_mutex, 1);
+
        init_rwsem(&c->gc_lock);
        mutex_init(&c->gc_gens_lock);
+       atomic_set(&c->journal_keys.ref, 1);
+       c->journal_keys.initial_ref_held = true;
 
        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_init(&c->times[i]);
 
        bch2_fs_copygc_init(c);
        bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
+       bch2_fs_btree_iter_init_early(c);
        bch2_fs_btree_interior_update_init_early(c);
        bch2_fs_allocator_background_init(c);
        bch2_fs_allocator_foreground_init(c);
@@ -750,7 +807,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
        c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
        c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
-       c->journal.blocked_time         = &c->times[BCH_TIME_blocked_journal];
        c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];
 
        bch2_fs_btree_cache_init_early(&c->btree_cache);
@@ -933,16 +989,14 @@ static void print_mount_opts(struct bch_fs *c)
 
 int bch2_fs_start(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        time64_t now = ktime_get_real_seconds();
-       unsigned i;
        int ret;
 
        print_mount_opts(c);
 
        down_write(&c->state_lock);
 
-       BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
+       BUG_ON(test_bit(BCH_FS_started, &c->flags));
 
        mutex_lock(&c->sb_lock);
 
@@ -952,12 +1006,12 @@ int bch2_fs_start(struct bch_fs *c)
                goto err;
        }
 
-       for_each_online_member(ca, c, i)
-               bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
+       for_each_online_member(c, ca)
+               bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
 
        mutex_unlock(&c->sb_lock);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
@@ -977,12 +1031,12 @@ int bch2_fs_start(struct bch_fs *c)
                goto err;
        }
 
-       set_bit(BCH_FS_STARTED, &c->flags);
+       set_bit(BCH_FS_started, &c->flags);
 
        if (c->opts.read_only || c->opts.nochanges) {
                bch2_fs_read_only(c);
        } else {
-               ret = !test_bit(BCH_FS_RW, &c->flags)
+               ret = !test_bit(BCH_FS_rw, &c->flags)
                        ? bch2_fs_read_write(c)
                        : bch2_fs_read_write_late(c);
                if (ret)
@@ -1294,8 +1348,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                            enum bch_member_state new_state, int flags)
 {
        struct bch_devs_mask new_online_devs;
-       struct bch_dev *ca2;
-       int i, nr_rw = 0, required;
+       int nr_rw = 0, required;
 
        lockdep_assert_held(&c->state_lock);
 
@@ -1307,7 +1360,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                        return true;
 
                /* do we have enough devices to write to?  */
-               for_each_member_device(ca2, c, i)
+               for_each_member_device(c, ca2)
                        if (ca2 != ca)
                                nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
 
@@ -1455,9 +1508,7 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
                                        BTREE_TRIGGER_NORUN, NULL);
-       if (ret)
-               bch_err_msg(c, ret, "removing dev alloc info");
-
+       bch_err_msg(c, ret, "removing dev alloc info");
        return ret;
 }
 
@@ -1484,34 +1535,29 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
        __bch2_dev_read_only(c, ca);
 
        ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
-       if (ret) {
-               bch_err_msg(ca, ret, "dropping data");
+       bch_err_msg(ca, ret, "dropping data");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_dev_remove_alloc(c, ca);
-       if (ret) {
-               bch_err_msg(ca, ret, "deleting alloc info");
+       bch_err_msg(ca, ret, "deleting alloc info");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
-       if (ret) {
-               bch_err_msg(ca, ret, "flushing journal");
+       bch_err_msg(ca, ret, "flushing journal");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_journal_flush(&c->journal);
-       if (ret) {
-               bch_err(ca, "journal error");
+       bch_err(ca, "journal error");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_replicas_gc2(c);
-       if (ret) {
-               bch_err_msg(ca, ret, "in replicas_gc2()");
+       bch_err_msg(ca, ret, "in replicas_gc2()");
+       if (ret)
                goto err;
-       }
 
        data = bch2_dev_has_data(c, ca);
        if (data) {
@@ -1583,10 +1629,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
        int ret;
 
        ret = bch2_read_super(path, &opts, &sb);
-       if (ret) {
-               bch_err_msg(c, ret, "reading super");
+       bch_err_msg(c, ret, "reading super");
+       if (ret)
                goto err;
-       }
 
        dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
 
@@ -1599,10 +1644,8 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
        }
 
        ret = bch2_dev_may_add(sb.sb, c);
-       if (ret) {
-               bch_err_fn(c, ret);
+       if (ret)
                goto err;
-       }
 
        ca = __bch2_dev_alloc(c, &dev_mi);
        if (!ca) {
@@ -1617,19 +1660,17 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
                goto err;
 
        ret = bch2_dev_journal_alloc(ca);
-       if (ret) {
-               bch_err_msg(c, ret, "allocating journal");
+       bch_err_msg(c, ret, "allocating journal");
+       if (ret)
                goto err;
-       }
 
        down_write(&c->state_lock);
        mutex_lock(&c->sb_lock);
 
        ret = bch2_sb_from_fs(c, ca);
-       if (ret) {
-               bch_err_msg(c, ret, "setting up new superblock");
+       bch_err_msg(c, ret, "setting up new superblock");
+       if (ret)
                goto err_unlock;
-       }
 
        if (dynamic_fault("bcachefs:add:no_slot"))
                goto no_slot;
@@ -1668,10 +1709,9 @@ have_slot:
 
        if (BCH_MEMBER_GROUP(&dev_mi)) {
                ret = __bch2_dev_group_set(c, ca, label.buf);
-               if (ret) {
-                       bch_err_msg(c, ret, "creating new label");
+               bch_err_msg(c, ret, "creating new label");
+               if (ret)
                        goto err_unlock;
-               }
        }
 
        bch2_write_super(c);
@@ -1680,16 +1720,14 @@ have_slot:
        bch2_dev_usage_journal_reserve(c);
 
        ret = bch2_trans_mark_dev_sb(c, ca);
-       if (ret) {
-               bch_err_msg(ca, ret, "marking new superblock");
+       bch_err_msg(ca, ret, "marking new superblock");
+       if (ret)
                goto err_late;
-       }
 
        ret = bch2_fs_freespace_init(c);
-       if (ret) {
-               bch_err_msg(ca, ret, "initializing free space");
+       bch_err_msg(ca, ret, "initializing free space");
+       if (ret)
                goto err_late;
-       }
 
        ca->new_fs_bucket_idx = 0;
 
@@ -1708,6 +1746,7 @@ err:
        bch2_free_super(&sb);
        printbuf_exit(&label);
        printbuf_exit(&errbuf);
+       bch_err_fn(c, ret);
        return ret;
 err_late:
        up_write(&c->state_lock);
@@ -1735,10 +1774,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
        dev_idx = sb.sb->dev_idx;
 
        ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
-       if (ret) {
-               bch_err_msg(c, ret, "bringing %s online", path);
+       bch_err_msg(c, ret, "bringing %s online", path);
+       if (ret)
                goto err;
-       }
 
        ret = bch2_dev_attach_bdev(c, &sb);
        if (ret)
@@ -1747,10 +1785,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
        ca = bch_dev_locked(c, dev_idx);
 
        ret = bch2_trans_mark_dev_sb(c, ca);
-       if (ret) {
-               bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
+       bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
+       if (ret)
                goto err;
-       }
 
        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);
@@ -1829,10 +1866,9 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
        }
 
        ret = bch2_dev_buckets_resize(c, ca, nbuckets);
-       if (ret) {
-               bch_err_msg(ca, ret, "resizing buckets");
+       bch_err_msg(ca, ret, "resizing buckets");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret)
@@ -1866,18 +1902,14 @@ err:
 /* return with ref on ca->ref: */
 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        rcu_read_lock();
-       for_each_member_device_rcu(ca, c, i, NULL)
-               if (!strcmp(name, ca->name))
-                       goto found;
-       ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
-found:
+       for_each_member_device_rcu(c, ca, NULL)
+               if (!strcmp(name, ca->name)) {
+                       rcu_read_unlock();
+                       return ca;
+               }
        rcu_read_unlock();
-
-       return ca;
+       return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
 }
 
 /* Filesystem open: */
@@ -1885,9 +1917,9 @@ found:
 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                            struct bch_opts opts)
 {
-       struct bch_sb_handle *sb = NULL;
+       DARRAY(struct bch_sb_handle) sbs = { 0 };
        struct bch_fs *c = NULL;
-       unsigned i, best_sb = 0;
+       struct bch_sb_handle *best = NULL;
        struct printbuf errbuf = PRINTBUF;
        int ret = 0;
 
@@ -1899,49 +1931,46 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                goto err;
        }
 
-       sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
-       if (!sb) {
-               ret = -ENOMEM;
+       ret = darray_make_room(&sbs, nr_devices);
+       if (ret)
                goto err;
-       }
 
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_read_super(devices[i], &opts, &sb[i]);
+       for (unsigned i = 0; i < nr_devices; i++) {
+               struct bch_sb_handle sb = { NULL };
+
+               ret = bch2_read_super(devices[i], &opts, &sb);
                if (ret)
                        goto err;
 
+               BUG_ON(darray_push(&sbs, sb));
        }
 
-       for (i = 1; i < nr_devices; i++)
-               if (le64_to_cpu(sb[i].sb->seq) >
-                   le64_to_cpu(sb[best_sb].sb->seq))
-                       best_sb = i;
-
-       i = 0;
-       while (i < nr_devices) {
-               if (i != best_sb &&
-                   !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
-                       pr_info("%pg has been removed, skipping", sb[i].bdev);
-                       bch2_free_super(&sb[i]);
-                       array_remove_item(sb, nr_devices, i);
+       darray_for_each(sbs, sb)
+               if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
+                       best = sb;
+
+       darray_for_each_reverse(sbs, sb) {
+               if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
+                       pr_info("%pg has been removed, skipping", sb->bdev);
+                       bch2_free_super(sb);
+                       darray_remove_item(&sbs, sb);
+                       best -= best > sb;
                        continue;
                }
 
-               ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+               ret = bch2_dev_in_fs(best->sb, sb->sb);
                if (ret)
                        goto err_print;
-               i++;
        }
 
-       c = bch2_fs_alloc(sb[best_sb].sb, opts);
-       if (IS_ERR(c)) {
-               ret = PTR_ERR(c);
+       c = bch2_fs_alloc(best->sb, opts);
+       ret = PTR_ERR_OR_ZERO(c);
+       if (ret)
                goto err;
-       }
 
        down_write(&c->state_lock);
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_dev_attach_bdev(c, &sb[i]);
+       darray_for_each(sbs, sb) {
+               ret = bch2_dev_attach_bdev(c, sb);
                if (ret) {
                        up_write(&c->state_lock);
                        goto err;
@@ -1960,7 +1989,9 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                        goto err;
        }
 out:
-       kfree(sb);
+       darray_for_each(sbs, sb)
+               bch2_free_super(sb);
+       darray_exit(&sbs);
        printbuf_exit(&errbuf);
        module_put(THIS_MODULE);
        return c;
@@ -1970,9 +2001,6 @@ err_print:
 err:
        if (!IS_ERR_OR_NULL(c))
                bch2_fs_stop(c);
-       if (sb)
-               for (i = 0; i < nr_devices; i++)
-                       bch2_free_super(&sb[i]);
        c = ERR_PTR(ret);
        goto out;
 }