diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 1b5c2a1bd68a26356b921baea4474791b72caa26..9dbc35940197f1c55c1bc48746bc23a3983ac203 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
 MODULE_DESCRIPTION("bcachefs filesystem");
+MODULE_SOFTDEP("pre: crc32c");
+MODULE_SOFTDEP("pre: crc64");
+MODULE_SOFTDEP("pre: sha256");
+MODULE_SOFTDEP("pre: chacha20");
+MODULE_SOFTDEP("pre: poly1305");
+MODULE_SOFTDEP("pre: xxhash");
+
+const char * const bch2_fs_flag_strs[] = {
+#define x(n)           #n,
+       BCH_FS_FLAGS()
+#undef x
+       NULL
+};
+
+void __bch2_print(struct bch_fs *c, const char *fmt, ...)
+{
+       struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
+
+       va_list args;
+       va_start(args, fmt);
+       if (likely(!stdio)) {
+               vprintk(fmt, args);
+       } else {
+               unsigned long flags;
+
+               if (fmt[0] == KERN_SOH[0])
+                       fmt += 2;
+
+               spin_lock_irqsave(&stdio->output_lock, flags);
+               prt_vprintf(&stdio->output_buf, fmt, args);
+               spin_unlock_irqrestore(&stdio->output_lock, flags);
+
+               wake_up(&stdio->output_wait);
+       }
+       va_end(args);
+}
 
 #define KTYPE(type)                                                    \
 static const struct attribute_group type ## _group = {                 \
@@ -128,14 +164,12 @@ static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
 struct bch_fs *bch2_dev_to_fs(dev_t dev)
 {
        struct bch_fs *c;
-       struct bch_dev *ca;
-       unsigned i;
 
        mutex_lock(&bch_fs_list_lock);
        rcu_read_lock();
 
        list_for_each_entry(c, &bch_fs_list, list)
-               for_each_member_device_rcu(ca, c, i, NULL)
+               for_each_member_device_rcu(c, ca, NULL)
                        if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
                                closure_get(&c->cl);
                                goto found;
@@ -176,14 +210,13 @@ struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
 
 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i, nr = 0, u64s =
+       unsigned nr = 0, u64s =
                ((sizeof(struct jset_entry_dev_usage) +
                  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
                sizeof(u64);
 
        rcu_read_lock();
-       for_each_member_device_rcu(ca, c, i, NULL)
+       for_each_member_device_rcu(c, ca, NULL)
                nr++;
        rcu_read_unlock();
 
@@ -210,8 +243,7 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 
 static void __bch2_fs_read_only(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i, clean_passes = 0;
+       unsigned clean_passes = 0;
        u64 seq = 0;
 
        bch2_fs_ec_stop(c);
@@ -240,14 +272,14 @@ static void __bch2_fs_read_only(struct bch_fs *c)
                    journal_cur_seq(&c->journal));
 
        if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
-           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
-               set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+           !test_bit(BCH_FS_emergency_ro, &c->flags))
+               set_bit(BCH_FS_clean_shutdown, &c->flags);
        bch2_fs_journal_stop(&c->journal);
 
        /*
         * After stopping journal:
         */
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                bch2_dev_allocator_remove(c, ca);
 }
 
@@ -256,25 +288,27 @@ static void bch2_writes_disabled(struct percpu_ref *writes)
 {
        struct bch_fs *c = container_of(writes, struct bch_fs, writes);
 
-       set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+       set_bit(BCH_FS_write_disable_complete, &c->flags);
        wake_up(&bch2_read_only_wait);
 }
 #endif
 
 void bch2_fs_read_only(struct bch_fs *c)
 {
-       if (!test_bit(BCH_FS_RW, &c->flags)) {
+       if (!test_bit(BCH_FS_rw, &c->flags)) {
                bch2_journal_reclaim_stop(&c->journal);
                return;
        }
 
-       BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+       BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
+
+       bch_verbose(c, "going read-only");
 
        /*
         * Block new foreground-end write operations from starting - any new
         * writes will return -EROFS:
         */
-       set_bit(BCH_FS_GOING_RO, &c->flags);
+       set_bit(BCH_FS_going_ro, &c->flags);
 #ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_kill(&c->writes);
 #else
@@ -294,33 +328,42 @@ void bch2_fs_read_only(struct bch_fs *c)
         * that going RO is complete:
         */
        wait_event(bch2_read_only_wait,
-                  test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
-                  test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
+                  test_bit(BCH_FS_write_disable_complete, &c->flags) ||
+                  test_bit(BCH_FS_emergency_ro, &c->flags));
+
+       bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
+       if (writes_disabled)
+               bch_verbose(c, "finished waiting for writes to stop");
 
        __bch2_fs_read_only(c);
 
        wait_event(bch2_read_only_wait,
-                  test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+                  test_bit(BCH_FS_write_disable_complete, &c->flags));
+
+       if (!writes_disabled)
+               bch_verbose(c, "finished waiting for writes to stop");
 
-       clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
-       clear_bit(BCH_FS_GOING_RO, &c->flags);
+       clear_bit(BCH_FS_write_disable_complete, &c->flags);
+       clear_bit(BCH_FS_going_ro, &c->flags);
+       clear_bit(BCH_FS_rw, &c->flags);
 
        if (!bch2_journal_error(&c->journal) &&
-           !test_bit(BCH_FS_ERROR, &c->flags) &&
-           !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
-           test_bit(BCH_FS_STARTED, &c->flags) &&
-           test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
+           !test_bit(BCH_FS_error, &c->flags) &&
+           !test_bit(BCH_FS_emergency_ro, &c->flags) &&
+           test_bit(BCH_FS_started, &c->flags) &&
+           test_bit(BCH_FS_clean_shutdown, &c->flags) &&
            !c->opts.norecovery) {
                BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
                BUG_ON(atomic_read(&c->btree_cache.dirty));
                BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
-               BUG_ON(c->btree_write_buffer.state.nr);
+               BUG_ON(c->btree_write_buffer.inc.keys.nr);
+               BUG_ON(c->btree_write_buffer.flushing.keys.nr);
 
                bch_verbose(c, "marking filesystem clean");
                bch2_fs_mark_clean(c);
+       } else {
+               bch_verbose(c, "done going read-only, filesystem not clean");
        }
-
-       clear_bit(BCH_FS_RW, &c->flags);
 }
 
 static void bch2_fs_read_only_work(struct work_struct *work)
@@ -340,7 +383,7 @@ static void bch2_fs_read_only_async(struct bch_fs *c)
 
 bool bch2_fs_emergency_read_only(struct bch_fs *c)
 {
-       bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+       bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
 
        bch2_journal_halt(&c->journal);
        bch2_fs_read_only_async(c);
@@ -377,28 +420,16 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
 
 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret;
 
-       if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
+       if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
                bch_err(c, "cannot go rw, unfixed btree errors");
                return -BCH_ERR_erofs_unfixed_errors;
        }
 
-       if (test_bit(BCH_FS_RW, &c->flags))
+       if (test_bit(BCH_FS_rw, &c->flags))
                return 0;
 
-       if (c->opts.norecovery)
-               return -BCH_ERR_erofs_norecovery;
-
-       /*
-        * nochanges is used for fsck -n mode - we have to allow going rw
-        * during recovery for that to work:
-        */
-       if (c->opts.nochanges && (!early || c->opts.read_only))
-               return -BCH_ERR_erofs_nochanges;
-
        bch_info(c, "going read-write");
 
        ret = bch2_sb_members_v2_init(c);
@@ -409,7 +440,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        if (ret)
                goto err;
 
-       clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+       clear_bit(BCH_FS_clean_shutdown, &c->flags);
 
        /*
         * First journal write must be a flush write: after a clean shutdown we
@@ -419,10 +450,22 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
         */
        set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
+       set_bit(BCH_FS_rw, &c->flags);
+       set_bit(BCH_FS_was_rw, &c->flags);
+
+#ifndef BCH_WRITE_REF_DEBUG
+       percpu_ref_reinit(&c->writes);
+#else
+       for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
+               BUG_ON(atomic_long_read(&c->writes[i]));
+               atomic_long_inc(&c->writes[i]);
+       }
+#endif
+
        ret = bch2_gc_thread_start(c);
        if (ret) {
                bch_err(c, "error starting gc thread");
@@ -439,29 +482,27 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
                        goto err;
        }
 
-#ifndef BCH_WRITE_REF_DEBUG
-       percpu_ref_reinit(&c->writes);
-#else
-       for (i = 0; i < BCH_WRITE_REF_NR; i++) {
-               BUG_ON(atomic_long_read(&c->writes[i]));
-               atomic_long_inc(&c->writes[i]);
-       }
-#endif
-       set_bit(BCH_FS_RW, &c->flags);
-       set_bit(BCH_FS_WAS_RW, &c->flags);
-
        bch2_do_discards(c);
        bch2_do_invalidates(c);
        bch2_do_stripe_deletes(c);
        bch2_do_pending_node_rewrites(c);
        return 0;
 err:
-       __bch2_fs_read_only(c);
+       if (test_bit(BCH_FS_rw, &c->flags))
+               bch2_fs_read_only(c);
+       else
+               __bch2_fs_read_only(c);
        return ret;
 }
 
 int bch2_fs_read_write(struct bch_fs *c)
 {
+       if (c->opts.norecovery)
+               return -BCH_ERR_erofs_norecovery;
+
+       if (c->opts.nochanges)
+               return -BCH_ERR_erofs_nochanges;
+
        return __bch2_fs_read_write(c, false);
 }
 
@@ -504,8 +545,8 @@ static void __bch2_fs_free(struct bch_fs *c)
        bch2_io_clock_exit(&c->io_clock[WRITE]);
        bch2_io_clock_exit(&c->io_clock[READ]);
        bch2_fs_compress_exit(c);
-       bch2_journal_keys_free(&c->journal_keys);
-       bch2_journal_entries_free(c);
+       bch2_journal_keys_put_initial(c);
+       BUG_ON(atomic_read(&c->journal_keys.ref));
        bch2_fs_btree_write_buffer_exit(c);
        percpu_free_rwsem(&c->mark_lock);
        free_percpu(c->online_reserved);
@@ -548,12 +589,9 @@ static void bch2_fs_release(struct kobject *kobj)
 
 void __bch2_fs_stop(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        bch_verbose(c, "shutting down");
 
-       set_bit(BCH_FS_STOPPING, &c->flags);
+       set_bit(BCH_FS_stopping, &c->flags);
 
        cancel_work_sync(&c->journal_seq_blacklist_gc_work);
 
@@ -561,7 +599,7 @@ void __bch2_fs_stop(struct bch_fs *c)
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                if (ca->kobj.state_in_sysfs &&
                    ca->disk_sb.bdev)
                        sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
@@ -572,6 +610,9 @@ void __bch2_fs_stop(struct bch_fs *c)
        bch2_fs_debug_exit(c);
        bch2_fs_chardev_exit(c);
 
+       bch2_ro_ref_put(c);
+       wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));
+
        kobject_put(&c->counters_kobj);
        kobject_put(&c->time_stats);
        kobject_put(&c->opts_dir);
@@ -580,7 +621,7 @@ void __bch2_fs_stop(struct bch_fs *c)
        /* btree prefetch might have kicked off reads in the background: */
        bch2_btree_flush_all_reads(c);
 
-       for_each_member_device(ca, c, i)
+       for_each_member_device(c, ca)
                cancel_work_sync(&ca->io_error_work);
 
        cancel_work_sync(&c->read_only_work);
@@ -619,8 +660,6 @@ void bch2_fs_stop(struct bch_fs *c)
 
 static int bch2_fs_online(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
 
        lockdep_assert_held(&bch_fs_list_lock);
@@ -641,7 +680,9 @@ static int bch2_fs_online(struct bch_fs *c)
        ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
            kobject_add(&c->internal, &c->kobj, "internal") ?:
            kobject_add(&c->opts_dir, &c->kobj, "options") ?:
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
            kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
+#endif
            kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
            bch2_opts_create_sysfs_files(&c->opts_dir);
        if (ret) {
@@ -651,7 +692,7 @@ static int bch2_fs_online(struct bch_fs *c)
 
        down_write(&c->state_lock);
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                ret = bch2_dev_sysfs_online(c, ca);
                if (ret) {
                        bch_err(c, "error creating sysfs objects");
@@ -680,6 +721,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                goto out;
        }
 
+       c->stdio = (void *)(unsigned long) opts.stdio;
+
        __module_get(THIS_MODULE);
 
        closure_init(&c->cl, NULL);
@@ -700,14 +743,21 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        mutex_init(&c->btree_root_lock);
        INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
 
+       refcount_set(&c->ro_ref, 1);
+       init_waitqueue_head(&c->ro_ref_wait);
+       sema_init(&c->online_fsck_mutex, 1);
+
        init_rwsem(&c->gc_lock);
        mutex_init(&c->gc_gens_lock);
+       atomic_set(&c->journal_keys.ref, 1);
+       c->journal_keys.initial_ref_held = true;
 
        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_init(&c->times[i]);
 
        bch2_fs_copygc_init(c);
        bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
+       bch2_fs_btree_iter_init_early(c);
        bch2_fs_btree_interior_update_init_early(c);
        bch2_fs_allocator_background_init(c);
        bch2_fs_allocator_foreground_init(c);
@@ -750,7 +800,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
        c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
        c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
-       c->journal.blocked_time         = &c->times[BCH_TIME_blocked_journal];
        c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];
 
        bch2_fs_btree_cache_init_early(&c->btree_cache);
@@ -819,7 +868,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
            !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
-                               WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
+                               WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 512)) ||
            !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
                                WQ_FREEZABLE, 0)) ||
 #ifndef BCH_WRITE_REF_DEBUG
@@ -933,16 +982,14 @@ static void print_mount_opts(struct bch_fs *c)
 
 int bch2_fs_start(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        time64_t now = ktime_get_real_seconds();
-       unsigned i;
        int ret;
 
        print_mount_opts(c);
 
        down_write(&c->state_lock);
 
-       BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
+       BUG_ON(test_bit(BCH_FS_started, &c->flags));
 
        mutex_lock(&c->sb_lock);
 
@@ -952,12 +999,12 @@ int bch2_fs_start(struct bch_fs *c)
                goto err;
        }
 
-       for_each_online_member(ca, c, i)
-               bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
+       for_each_online_member(c, ca)
+               bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
 
        mutex_unlock(&c->sb_lock);
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
@@ -977,12 +1024,12 @@ int bch2_fs_start(struct bch_fs *c)
                goto err;
        }
 
-       set_bit(BCH_FS_STARTED, &c->flags);
+       set_bit(BCH_FS_started, &c->flags);
 
-       if (c->opts.read_only || c->opts.nochanges) {
+       if (c->opts.read_only) {
                bch2_fs_read_only(c);
        } else {
-               ret = !test_bit(BCH_FS_RW, &c->flags)
+               ret = !test_bit(BCH_FS_rw, &c->flags)
                        ? bch2_fs_read_write(c)
                        : bch2_fs_read_write_late(c);
                if (ret)
@@ -990,12 +1037,13 @@ int bch2_fs_start(struct bch_fs *c)
        }
 
        ret = 0;
-out:
+err:
+       if (ret)
+               bch_err_msg(c, ret, "starting filesystem");
+       else
+               bch_verbose(c, "done starting filesystem");
        up_write(&c->state_lock);
        return ret;
-err:
-       bch_err_msg(c, ret, "starting filesystem");
-       goto out;
 }
 
 static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
@@ -1012,20 +1060,83 @@ static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
        return 0;
 }
 
-static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
+static int bch2_dev_in_fs(struct bch_sb_handle *fs,
+                         struct bch_sb_handle *sb)
 {
-       struct bch_sb *newest =
-               le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
+       if (fs == sb)
+               return 0;
 
-       if (!uuid_equal(&fs->uuid, &sb->uuid))
+       if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
                return -BCH_ERR_device_not_a_member_of_filesystem;
 
-       if (!bch2_dev_exists(newest, sb->dev_idx))
+       if (!bch2_dev_exists(fs->sb, sb->sb->dev_idx))
                return -BCH_ERR_device_has_been_removed;
 
-       if (fs->block_size != sb->block_size)
+       if (fs->sb->block_size != sb->sb->block_size)
                return -BCH_ERR_mismatched_block_size;
 
+       if (le16_to_cpu(fs->sb->version) < bcachefs_metadata_version_member_seq ||
+           le16_to_cpu(sb->sb->version) < bcachefs_metadata_version_member_seq)
+               return 0;
+
+       if (fs->sb->seq == sb->sb->seq &&
+           fs->sb->write_time != sb->sb->write_time) {
+               struct printbuf buf = PRINTBUF;
+
+               prt_str(&buf, "Split brain detected between ");
+               prt_bdevname(&buf, sb->bdev);
+               prt_str(&buf, " and ");
+               prt_bdevname(&buf, fs->bdev);
+               prt_char(&buf, ':');
+               prt_newline(&buf);
+               prt_printf(&buf, "seq=%llu but write_time different, got", le64_to_cpu(sb->sb->seq));
+               prt_newline(&buf);
+
+               prt_bdevname(&buf, fs->bdev);
+               prt_char(&buf, ' ');
+               bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time));
+               prt_newline(&buf);
+
+               prt_bdevname(&buf, sb->bdev);
+               prt_char(&buf, ' ');
+               bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time));
+               prt_newline(&buf);
+
+               prt_printf(&buf, "Not using older sb");
+
+               pr_err("%s", buf.buf);
+               printbuf_exit(&buf);
+               return -BCH_ERR_device_splitbrain;
+       }
+
+       struct bch_member m = bch2_sb_member_get(fs->sb, sb->sb->dev_idx);
+       u64 seq_from_fs         = le64_to_cpu(m.seq);
+       u64 seq_from_member     = le64_to_cpu(sb->sb->seq);
+
+       if (seq_from_fs && seq_from_fs < seq_from_member) {
+               struct printbuf buf = PRINTBUF;
+
+               prt_str(&buf, "Split brain detected between ");
+               prt_bdevname(&buf, sb->bdev);
+               prt_str(&buf, " and ");
+               prt_bdevname(&buf, fs->bdev);
+               prt_char(&buf, ':');
+               prt_newline(&buf);
+
+               prt_bdevname(&buf, fs->bdev);
+               prt_str(&buf, " believes seq of ");
+               prt_bdevname(&buf, sb->bdev);
+               prt_printf(&buf, " to be %llu, but ", seq_from_fs);
+               prt_bdevname(&buf, sb->bdev);
+               prt_printf(&buf, " has %llu\n", seq_from_member);
+               prt_str(&buf, "Not using ");
+               prt_bdevname(&buf, sb->bdev);
+
+               pr_err("%s", buf.buf);
+               printbuf_exit(&buf);
+               return -BCH_ERR_device_splitbrain;
+       }
+
        return 0;
 }
 
@@ -1271,9 +1382,14 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
 
        bch2_dev_sysfs_online(c, ca);
 
+       struct printbuf name = PRINTBUF;
+       prt_bdevname(&name, ca->disk_sb.bdev);
+
        if (c->sb.nr_devices == 1)
-               snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
-       snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);
+               strlcpy(c->name, name.buf, sizeof(c->name));
+       strlcpy(ca->name, name.buf, sizeof(ca->name));
+
+       printbuf_exit(&name);
 
        rebalance_wakeup(c);
        return 0;
@@ -1294,8 +1410,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                            enum bch_member_state new_state, int flags)
 {
        struct bch_devs_mask new_online_devs;
-       struct bch_dev *ca2;
-       int i, nr_rw = 0, required;
+       int nr_rw = 0, required;
 
        lockdep_assert_held(&c->state_lock);
 
@@ -1307,7 +1422,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                        return true;
 
                /* do we have enough devices to write to?  */
-               for_each_member_device(ca2, c, i)
+               for_each_member_device(c, ca2)
                        if (ca2 != ca)
                                nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
 
@@ -1455,9 +1570,7 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
                                        BTREE_TRIGGER_NORUN, NULL);
-       if (ret)
-               bch_err_msg(c, ret, "removing dev alloc info");
-
+       bch_err_msg(c, ret, "removing dev alloc info");
        return ret;
 }
 
@@ -1484,34 +1597,29 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
        __bch2_dev_read_only(c, ca);
 
        ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
-       if (ret) {
-               bch_err_msg(ca, ret, "dropping data");
+       bch_err_msg(ca, ret, "dropping data");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_dev_remove_alloc(c, ca);
-       if (ret) {
-               bch_err_msg(ca, ret, "deleting alloc info");
+       bch_err_msg(ca, ret, "deleting alloc info");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
-       if (ret) {
-               bch_err_msg(ca, ret, "flushing journal");
+       bch_err_msg(ca, ret, "flushing journal");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_journal_flush(&c->journal);
-       if (ret) {
-               bch_err(ca, "journal error");
+       bch_err(ca, "journal error");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_replicas_gc2(c);
-       if (ret) {
-               bch_err_msg(ca, ret, "in replicas_gc2()");
+       bch_err_msg(ca, ret, "in replicas_gc2()");
+       if (ret)
                goto err;
-       }
 
        data = bch2_dev_has_data(c, ca);
        if (data) {
@@ -1583,10 +1691,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
        int ret;
 
        ret = bch2_read_super(path, &opts, &sb);
-       if (ret) {
-               bch_err_msg(c, ret, "reading super");
+       bch_err_msg(c, ret, "reading super");
+       if (ret)
                goto err;
-       }
 
        dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
 
@@ -1599,10 +1706,8 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
        }
 
        ret = bch2_dev_may_add(sb.sb, c);
-       if (ret) {
-               bch_err_fn(c, ret);
+       if (ret)
                goto err;
-       }
 
        ca = __bch2_dev_alloc(c, &dev_mi);
        if (!ca) {
@@ -1617,19 +1722,17 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
                goto err;
 
        ret = bch2_dev_journal_alloc(ca);
-       if (ret) {
-               bch_err_msg(c, ret, "allocating journal");
+       bch_err_msg(c, ret, "allocating journal");
+       if (ret)
                goto err;
-       }
 
        down_write(&c->state_lock);
        mutex_lock(&c->sb_lock);
 
        ret = bch2_sb_from_fs(c, ca);
-       if (ret) {
-               bch_err_msg(c, ret, "setting up new superblock");
+       bch_err_msg(c, ret, "setting up new superblock");
+       if (ret)
                goto err_unlock;
-       }
 
        if (dynamic_fault("bcachefs:add:no_slot"))
                goto no_slot;
@@ -1668,10 +1771,9 @@ have_slot:
 
        if (BCH_MEMBER_GROUP(&dev_mi)) {
                ret = __bch2_dev_group_set(c, ca, label.buf);
-               if (ret) {
-                       bch_err_msg(c, ret, "creating new label");
+               bch_err_msg(c, ret, "creating new label");
+               if (ret)
                        goto err_unlock;
-               }
        }
 
        bch2_write_super(c);
@@ -1680,16 +1782,14 @@ have_slot:
        bch2_dev_usage_journal_reserve(c);
 
        ret = bch2_trans_mark_dev_sb(c, ca);
-       if (ret) {
-               bch_err_msg(ca, ret, "marking new superblock");
+       bch_err_msg(ca, ret, "marking new superblock");
+       if (ret)
                goto err_late;
-       }
 
        ret = bch2_fs_freespace_init(c);
-       if (ret) {
-               bch_err_msg(ca, ret, "initializing free space");
+       bch_err_msg(ca, ret, "initializing free space");
+       if (ret)
                goto err_late;
-       }
 
        ca->new_fs_bucket_idx = 0;
 
@@ -1708,6 +1808,7 @@ err:
        bch2_free_super(&sb);
        printbuf_exit(&label);
        printbuf_exit(&errbuf);
+       bch_err_fn(c, ret);
        return ret;
 err_late:
        up_write(&c->state_lock);
@@ -1734,11 +1835,10 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
 
        dev_idx = sb.sb->dev_idx;
 
-       ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
-       if (ret) {
-               bch_err_msg(c, ret, "bringing %s online", path);
+       ret = bch2_dev_in_fs(&c->disk_sb, &sb);
+       bch_err_msg(c, ret, "bringing %s online", path);
+       if (ret)
                goto err;
-       }
 
        ret = bch2_dev_attach_bdev(c, &sb);
        if (ret)
@@ -1747,10 +1847,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
        ca = bch_dev_locked(c, dev_idx);
 
        ret = bch2_trans_mark_dev_sb(c, ca);
-       if (ret) {
-               bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
+       bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
+       if (ret)
                goto err;
-       }
 
        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);
@@ -1829,10 +1928,9 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
        }
 
        ret = bch2_dev_buckets_resize(c, ca, nbuckets);
-       if (ret) {
-               bch_err_msg(ca, ret, "resizing buckets");
+       bch_err_msg(ca, ret, "resizing buckets");
+       if (ret)
                goto err;
-       }
 
        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret)
@@ -1866,28 +1964,30 @@ err:
 /* return with ref on ca->ref: */
 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
 {
-       struct bch_dev *ca;
-       unsigned i;
-
        rcu_read_lock();
-       for_each_member_device_rcu(ca, c, i, NULL)
-               if (!strcmp(name, ca->name))
-                       goto found;
-       ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
-found:
+       for_each_member_device_rcu(c, ca, NULL)
+               if (!strcmp(name, ca->name)) {
+                       rcu_read_unlock();
+                       return ca;
+               }
        rcu_read_unlock();
-
-       return ca;
+       return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
 }
 
 /* Filesystem open: */
 
+static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r)
+{
+       return  cmp_int(le64_to_cpu(l->seq), le64_to_cpu(r->seq)) ?:
+               cmp_int(le64_to_cpu(l->write_time), le64_to_cpu(r->write_time));
+}
+
 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                            struct bch_opts opts)
 {
-       struct bch_sb_handle *sb = NULL;
+       DARRAY(struct bch_sb_handle) sbs = { 0 };
        struct bch_fs *c = NULL;
-       unsigned i, best_sb = 0;
+       struct bch_sb_handle *best = NULL;
        struct printbuf errbuf = PRINTBUF;
        int ret = 0;
 
@@ -1899,49 +1999,53 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                goto err;
        }
 
-       sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
-       if (!sb) {
-               ret = -ENOMEM;
+       ret = darray_make_room(&sbs, nr_devices);
+       if (ret)
                goto err;
-       }
 
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_read_super(devices[i], &opts, &sb[i]);
+       for (unsigned i = 0; i < nr_devices; i++) {
+               struct bch_sb_handle sb = { NULL };
+
+               ret = bch2_read_super(devices[i], &opts, &sb);
                if (ret)
                        goto err;
 
+               BUG_ON(darray_push(&sbs, sb));
+       }
+
+       if (opts.nochanges && !opts.read_only) {
+               ret = -BCH_ERR_erofs_nochanges;
+               goto err_print;
        }
 
-       for (i = 1; i < nr_devices; i++)
-               if (le64_to_cpu(sb[i].sb->seq) >
-                   le64_to_cpu(sb[best_sb].sb->seq))
-                       best_sb = i;
-
-       i = 0;
-       while (i < nr_devices) {
-               if (i != best_sb &&
-                   !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
-                       pr_info("%pg has been removed, skipping", sb[i].bdev);
-                       bch2_free_super(&sb[i]);
-                       array_remove_item(sb, nr_devices, i);
+       darray_for_each(sbs, sb)
+               if (!best || sb_cmp(sb->sb, best->sb) > 0)
+                       best = sb;
+
+       darray_for_each_reverse(sbs, sb) {
+               ret = bch2_dev_in_fs(best, sb);
+
+               if (ret == -BCH_ERR_device_has_been_removed ||
+                   ret == -BCH_ERR_device_splitbrain) {
+                       bch2_free_super(sb);
+                       darray_remove_item(&sbs, sb);
+                       best -= best > sb;
+                       ret = 0;
                        continue;
                }
 
-               ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
                if (ret)
                        goto err_print;
-               i++;
        }
 
-       c = bch2_fs_alloc(sb[best_sb].sb, opts);
-       if (IS_ERR(c)) {
-               ret = PTR_ERR(c);
+       c = bch2_fs_alloc(best->sb, opts);
+       ret = PTR_ERR_OR_ZERO(c);
+       if (ret)
                goto err;
-       }
 
        down_write(&c->state_lock);
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_dev_attach_bdev(c, &sb[i]);
+       darray_for_each(sbs, sb) {
+               ret = bch2_dev_attach_bdev(c, sb);
                if (ret) {
                        up_write(&c->state_lock);
                        goto err;
@@ -1960,7 +2064,9 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                        goto err;
        }
 out:
-       kfree(sb);
+       darray_for_each(sbs, sb)
+               bch2_free_super(sb);
+       darray_exit(&sbs);
        printbuf_exit(&errbuf);
        module_put(THIS_MODULE);
        return c;
@@ -1970,9 +2076,6 @@ err_print:
 err:
        if (!IS_ERR_OR_NULL(c))
                bch2_fs_stop(c);
-       if (sb)
-               for (i = 0; i < nr_devices; i++)
-                       bch2_free_super(&sb[i]);
        c = ERR_PTR(ret);
        goto out;
 }