1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs setup/teardown code, and some metadata io - read a superblock and
4  * figure out what to do with it.
5  *
6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7  * Copyright 2012 Google, Inc.
8  */
9
10 #include "bcachefs.h"
11 #include "alloc_background.h"
12 #include "alloc_foreground.h"
13 #include "bkey_sort.h"
14 #include "btree_cache.h"
15 #include "btree_gc.h"
16 #include "btree_journal_iter.h"
17 #include "btree_key_cache.h"
18 #include "btree_update_interior.h"
19 #include "btree_io.h"
20 #include "btree_write_buffer.h"
21 #include "buckets_waiting_for_journal.h"
22 #include "chardev.h"
23 #include "checksum.h"
24 #include "clock.h"
25 #include "compress.h"
26 #include "counters.h"
27 #include "debug.h"
28 #include "disk_groups.h"
29 #include "ec.h"
30 #include "errcode.h"
31 #include "error.h"
32 #include "fs.h"
33 #include "fs-io.h"
34 #include "fs-io-buffered.h"
35 #include "fs-io-direct.h"
36 #include "fsck.h"
37 #include "inode.h"
38 #include "io_read.h"
39 #include "io_write.h"
40 #include "journal.h"
41 #include "journal_reclaim.h"
42 #include "journal_seq_blacklist.h"
43 #include "move.h"
44 #include "migrate.h"
45 #include "movinggc.h"
46 #include "nocow_locking.h"
47 #include "quota.h"
48 #include "rebalance.h"
49 #include "recovery.h"
50 #include "replicas.h"
51 #include "sb-clean.h"
52 #include "sb-errors.h"
53 #include "sb-members.h"
54 #include "snapshot.h"
55 #include "subvolume.h"
56 #include "super.h"
57 #include "super-io.h"
58 #include "sysfs.h"
59 #include "trace.h"
60
61 #include <linux/backing-dev.h>
62 #include <linux/blkdev.h>
63 #include <linux/debugfs.h>
64 #include <linux/device.h>
65 #include <linux/idr.h>
66 #include <linux/module.h>
67 #include <linux/percpu.h>
68 #include <linux/random.h>
69 #include <linux/sysfs.h>
70 #include <crypto/hash.h>
71
72 MODULE_LICENSE("GPL");
73 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
74 MODULE_DESCRIPTION("bcachefs filesystem");
75 MODULE_SOFTDEP("pre: crc32c");
76 MODULE_SOFTDEP("pre: crc64");
77 MODULE_SOFTDEP("pre: sha256");
78 MODULE_SOFTDEP("pre: chacha20");
79 MODULE_SOFTDEP("pre: poly1305");
80 MODULE_SOFTDEP("pre: xxhash");
81
82 const char * const bch2_fs_flag_strs[] = {
83 #define x(n)            #n,
84         BCH_FS_FLAGS()
85 #undef x
86         NULL
87 };
88
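/*
 * Print a filesystem message: if a log_output buffer has been supplied (via
 * the log_output option), messages are appended to that buffer and waiters
 * are woken; otherwise they go to the kernel log via vprintk(). When an
 * output_filter task is set, only messages from that task are redirected to
 * the buffer.
 */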
89 void __bch2_print(struct bch_fs *c, const char *fmt, ...)
90 {
91         struct log_output *output = c->output;
92         va_list args;
93
94         if (c->output_filter && c->output_filter != current)
95                 output = NULL;
96
97         va_start(args, fmt);
98         if (likely(!output)) {
99                 vprintk(fmt, args);
100         } else {
101                 unsigned long flags;
102
103                 if (fmt[0] == KERN_SOH[0])
104                         fmt += 2;
105
106                 spin_lock_irqsave(&output->lock, flags);
107                 prt_vprintf(&output->buf, fmt, args);
108                 spin_unlock_irqrestore(&output->lock, flags);
109
110                 wake_up(&output->wait);
111         }
112         va_end(args);
113 }
114
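/*
 * KTYPE(type) generates the sysfs boilerplate for a kobject type: an
 * attribute group wrapping type##_files, a NULL-terminated groups array, and
 * a kobj_type tying together the release hook, sysfs_ops and default groups.
 */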
115 #define KTYPE(type)                                                     \
116 static const struct attribute_group type ## _group = {                  \
117         .attrs = type ## _files                                         \
118 };                                                                      \
119                                                                         \
120 static const struct attribute_group *type ## _groups[] = {              \
121         &type ## _group,                                                \
122         NULL                                                            \
123 };                                                                      \
124                                                                         \
125 static const struct kobj_type type ## _ktype = {                        \
126         .release        = type ## _release,                             \
127         .sysfs_ops      = &type ## _sysfs_ops,                          \
128         .default_groups = type ## _groups                               \
129 }
130
131 static void bch2_fs_release(struct kobject *);
132 static void bch2_dev_release(struct kobject *);
133 static void bch2_fs_counters_release(struct kobject *k)
134 {
135 }
136
137 static void bch2_fs_internal_release(struct kobject *k)
138 {
139 }
140
141 static void bch2_fs_opts_dir_release(struct kobject *k)
142 {
143 }
144
145 static void bch2_fs_time_stats_release(struct kobject *k)
146 {
147 }
148
149 KTYPE(bch2_fs);
150 KTYPE(bch2_fs_counters);
151 KTYPE(bch2_fs_internal);
152 KTYPE(bch2_fs_opts_dir);
153 KTYPE(bch2_fs_time_stats);
154 KTYPE(bch2_dev);
155
156 static struct kset *bcachefs_kset;
157 static LIST_HEAD(bch_fs_list);
158 static DEFINE_MUTEX(bch_fs_list_lock);
159
160 DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);
161
162 static void bch2_dev_free(struct bch_dev *);
163 static int bch2_dev_alloc(struct bch_fs *, unsigned);
164 static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
165 static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
166
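/*
 * Look up the filesystem that a block device (by dev_t) belongs to; returns
 * it with a closure ref held, or NULL if the device isn't a member of any
 * open filesystem.
 */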
167 struct bch_fs *bch2_dev_to_fs(dev_t dev)
168 {
169         struct bch_fs *c;
170         struct bch_dev *ca;
171         unsigned i;
172
173         mutex_lock(&bch_fs_list_lock);
174         rcu_read_lock();
175
176         list_for_each_entry(c, &bch_fs_list, list)
177                 for_each_member_device_rcu(ca, c, i, NULL)
178                         if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
179                                 closure_get(&c->cl);
180                                 goto found;
181                         }
182         c = NULL;
183 found:
184         rcu_read_unlock();
185         mutex_unlock(&bch_fs_list_lock);
186
187         return c;
188 }
189
190 static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
191 {
192         struct bch_fs *c;
193
194         lockdep_assert_held(&bch_fs_list_lock);
195
196         list_for_each_entry(c, &bch_fs_list, list)
197                 if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
198                         return c;
199
200         return NULL;
201 }
202
203 struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
204 {
205         struct bch_fs *c;
206
207         mutex_lock(&bch_fs_list_lock);
208         c = __bch2_uuid_to_fs(uuid);
209         if (c)
210                 closure_get(&c->cl);
211         mutex_unlock(&bch_fs_list_lock);
212
213         return c;
214 }
215
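/*
 * Resize the journal entry reservation for per-device usage entries to match
 * the current number of member devices.
 */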
216 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
217 {
218         struct bch_dev *ca;
219         unsigned i, nr = 0, u64s =
220                 ((sizeof(struct jset_entry_dev_usage) +
221                   sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
222                 sizeof(u64);
223
224         rcu_read_lock();
225         for_each_member_device_rcu(ca, c, i, NULL)
226                 nr++;
227         rcu_read_unlock();
228
229         bch2_journal_entry_res_resize(&c->journal,
230                         &c->dev_usage_journal_res, u64s * nr);
231 }
232
233 /* Filesystem RO/RW: */
234
235 /*
236  * For startup/shutdown of RW stuff, the dependencies are:
237  *
238  * - foreground writes depend on copygc and rebalance (to free up space)
239  *
240  * - copygc and rebalance depend on mark and sweep gc (they actually probably
241  *   don't because they either reserve ahead of time or don't block if
242  *   allocations fail, but allocations can require mark and sweep gc to run
243  *   because of generation number wraparound)
244  *
245  * - all of the above depends on the allocator threads
246  *
247  * - allocator depends on the journal (when it rewrites prios and gens)
248  */
249
250 static void __bch2_fs_read_only(struct bch_fs *c)
251 {
252         struct bch_dev *ca;
253         unsigned i, clean_passes = 0;
254         u64 seq = 0;
255
256         bch2_fs_ec_stop(c);
257         bch2_open_buckets_stop(c, NULL, true);
258         bch2_rebalance_stop(c);
259         bch2_copygc_stop(c);
260         bch2_gc_thread_stop(c);
261         bch2_fs_ec_flush(c);
262
263         bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
264                     journal_cur_seq(&c->journal));
265
266         do {
267                 clean_passes++;
268
269                 if (bch2_btree_interior_updates_flush(c) ||
270                     bch2_journal_flush_all_pins(&c->journal) ||
271                     bch2_btree_flush_all_writes(c) ||
272                     seq != atomic64_read(&c->journal.seq)) {
273                         seq = atomic64_read(&c->journal.seq);
274                         clean_passes = 0;
275                 }
276         } while (clean_passes < 2);
277
278         bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
279                     journal_cur_seq(&c->journal));
280
281         if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
282             !test_bit(BCH_FS_emergency_ro, &c->flags))
283                 set_bit(BCH_FS_clean_shutdown, &c->flags);
284         bch2_fs_journal_stop(&c->journal);
285
286         /*
287          * After stopping journal:
288          */
289         for_each_member_device(ca, c, i)
290                 bch2_dev_allocator_remove(c, ca);
291 }
292
293 #ifndef BCH_WRITE_REF_DEBUG
294 static void bch2_writes_disabled(struct percpu_ref *writes)
295 {
296         struct bch_fs *c = container_of(writes, struct bch_fs, writes);
297
298         set_bit(BCH_FS_write_disable_complete, &c->flags);
299         wake_up(&bch2_read_only_wait);
300 }
301 #endif
302
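/*
 * Transition the filesystem to read-only: block new foreground writes, wait
 * for outstanding writes to drain (unless we're in emergency RO), shut down
 * allocators and the journal, and mark the superblock clean if this was a
 * clean, error-free shutdown.
 */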
303 void bch2_fs_read_only(struct bch_fs *c)
304 {
305         if (!test_bit(BCH_FS_rw, &c->flags)) {
306                 bch2_journal_reclaim_stop(&c->journal);
307                 return;
308         }
309
310         BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
311
312         bch_verbose(c, "going read-only");
313
314         /*
315          * Block new foreground-end write operations from starting - any new
316          * writes will return -EROFS:
317          */
318         set_bit(BCH_FS_going_ro, &c->flags);
319 #ifndef BCH_WRITE_REF_DEBUG
320         percpu_ref_kill(&c->writes);
321 #else
322         for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
323                 bch2_write_ref_put(c, i);
324 #endif
325
326         /*
327          * If we're not doing an emergency shutdown, we want to wait on
328          * outstanding writes to complete so they don't see spurious errors due
329          * to shutting down the allocator:
330          *
331          * If we are doing an emergency shutdown outstanding writes may
332          * hang until we shutdown the allocator so we don't want to wait
333          * on outstanding writes before shutting everything down - but
334          * we do need to wait on them before returning and signalling
335          * that going RO is complete:
336          */
337         wait_event(bch2_read_only_wait,
338                    test_bit(BCH_FS_write_disable_complete, &c->flags) ||
339                    test_bit(BCH_FS_emergency_ro, &c->flags));
340
341         bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
342         if (writes_disabled)
343                 bch_verbose(c, "finished waiting for writes to stop");
344
345         __bch2_fs_read_only(c);
346
347         wait_event(bch2_read_only_wait,
348                    test_bit(BCH_FS_write_disable_complete, &c->flags));
349
350         if (!writes_disabled)
351                 bch_verbose(c, "finished waiting for writes to stop");
352
353         clear_bit(BCH_FS_write_disable_complete, &c->flags);
354         clear_bit(BCH_FS_going_ro, &c->flags);
355         clear_bit(BCH_FS_rw, &c->flags);
356
357         if (!bch2_journal_error(&c->journal) &&
358             !test_bit(BCH_FS_error, &c->flags) &&
359             !test_bit(BCH_FS_emergency_ro, &c->flags) &&
360             test_bit(BCH_FS_started, &c->flags) &&
361             test_bit(BCH_FS_clean_shutdown, &c->flags) &&
362             !c->opts.norecovery) {
363                 BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
364                 BUG_ON(atomic_read(&c->btree_cache.dirty));
365                 BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
366                 BUG_ON(c->btree_write_buffer.inc.keys.nr);
367                 BUG_ON(c->btree_write_buffer.flushing.keys.nr);
368
369                 bch_verbose(c, "marking filesystem clean");
370                 bch2_fs_mark_clean(c);
371         } else {
372                 bch_verbose(c, "done going read-only, filesystem not clean");
373         }
374 }
375
376 static void bch2_fs_read_only_work(struct work_struct *work)
377 {
378         struct bch_fs *c =
379                 container_of(work, struct bch_fs, read_only_work);
380
381         down_write(&c->state_lock);
382         bch2_fs_read_only(c);
383         up_write(&c->state_lock);
384 }
385
386 static void bch2_fs_read_only_async(struct bch_fs *c)
387 {
388         queue_work(system_long_wq, &c->read_only_work);
389 }
390
391 bool bch2_fs_emergency_read_only(struct bch_fs *c)
392 {
393         bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
394
395         bch2_journal_halt(&c->journal);
396         bch2_fs_read_only_async(c);
397
398         wake_up(&bch2_read_only_wait);
399         return ret;
400 }
401
402 static int bch2_fs_read_write_late(struct bch_fs *c)
403 {
404         int ret;
405
406         /*
407          * Data move operations can't run until after check_snapshots has
408          * completed, and bch2_snapshot_is_ancestor() is available.
409          *
410          * Ideally we'd start copygc/rebalance earlier instead of waiting for
411          * all of recovery/fsck to complete:
412          */
413         ret = bch2_copygc_start(c);
414         if (ret) {
415                 bch_err(c, "error starting copygc thread");
416                 return ret;
417         }
418
419         ret = bch2_rebalance_start(c);
420         if (ret) {
421                 bch_err(c, "error starting rebalance thread");
422                 return ret;
423         }
424
425         return 0;
426 }
427
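/*
 * Transition to read-write: mark the superblock dirty, bring allocators
 * online, re-arm the writes reference, and start gc and journal reclaim
 * (plus copygc/rebalance via bch2_fs_read_write_late() when not in early
 * recovery).
 */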
428 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
429 {
430         struct bch_dev *ca;
431         unsigned i;
432         int ret;
433
434         if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
435                 bch_err(c, "cannot go rw, unfixed btree errors");
436                 return -BCH_ERR_erofs_unfixed_errors;
437         }
438
439         if (test_bit(BCH_FS_rw, &c->flags))
440                 return 0;
441
442         if (c->opts.norecovery)
443                 return -BCH_ERR_erofs_norecovery;
444
445         /*
446          * nochanges is used for fsck -n mode - we have to allow going rw
447          * during recovery for that to work:
448          */
449         if (c->opts.nochanges && (!early || c->opts.read_only))
450                 return -BCH_ERR_erofs_nochanges;
451
452         bch_info(c, "going read-write");
453
454         ret = bch2_sb_members_v2_init(c);
455         if (ret)
456                 goto err;
457
458         ret = bch2_fs_mark_dirty(c);
459         if (ret)
460                 goto err;
461
462         clear_bit(BCH_FS_clean_shutdown, &c->flags);
463
464         /*
465          * First journal write must be a flush write: after a clean shutdown we
466          * don't read the journal, so the first journal write may end up
467          * overwriting whatever was there previously, and there must always be
468          * at least one non-flush write in the journal or recovery will fail:
469          */
470         set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
471
472         for_each_rw_member(ca, c, i)
473                 bch2_dev_allocator_add(c, ca);
474         bch2_recalc_capacity(c);
475
476         set_bit(BCH_FS_rw, &c->flags);
477         set_bit(BCH_FS_was_rw, &c->flags);
478
479 #ifndef BCH_WRITE_REF_DEBUG
480         percpu_ref_reinit(&c->writes);
481 #else
482         for (i = 0; i < BCH_WRITE_REF_NR; i++) {
483                 BUG_ON(atomic_long_read(&c->writes[i]));
484                 atomic_long_inc(&c->writes[i]);
485         }
486 #endif
487
488         ret = bch2_gc_thread_start(c);
489         if (ret) {
490                 bch_err(c, "error starting gc thread");
491                 return ret;
492         }
493
494         ret = bch2_journal_reclaim_start(&c->journal);
495         if (ret)
496                 goto err;
497
498         if (!early) {
499                 ret = bch2_fs_read_write_late(c);
500                 if (ret)
501                         goto err;
502         }
503
504         bch2_do_discards(c);
505         bch2_do_invalidates(c);
506         bch2_do_stripe_deletes(c);
507         bch2_do_pending_node_rewrites(c);
508         return 0;
509 err:
510         if (test_bit(BCH_FS_rw, &c->flags))
511                 bch2_fs_read_only(c);
512         else
513                 __bch2_fs_read_only(c);
514         return ret;
515 }
516
517 int bch2_fs_read_write(struct bch_fs *c)
518 {
519         return __bch2_fs_read_write(c, false);
520 }
521
522 int bch2_fs_read_write_early(struct bch_fs *c)
523 {
524         lockdep_assert_held(&c->state_lock);
525
526         return __bch2_fs_read_write(c, true);
527 }
528
529 /* Filesystem startup/shutdown: */
530
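/*
 * Tear down and free a struct bch_fs: exits each subsystem in roughly the
 * reverse order of initialization, destroys workqueues, frees the superblock
 * and drops the module reference.
 */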
531 static void __bch2_fs_free(struct bch_fs *c)
532 {
533         unsigned i;
534
535         for (i = 0; i < BCH_TIME_STAT_NR; i++)
536                 bch2_time_stats_exit(&c->times[i]);
537
538         bch2_free_pending_node_rewrites(c);
539         bch2_fs_sb_errors_exit(c);
540         bch2_fs_counters_exit(c);
541         bch2_fs_snapshots_exit(c);
542         bch2_fs_quota_exit(c);
543         bch2_fs_fs_io_direct_exit(c);
544         bch2_fs_fs_io_buffered_exit(c);
545         bch2_fs_fsio_exit(c);
546         bch2_fs_ec_exit(c);
547         bch2_fs_encryption_exit(c);
548         bch2_fs_nocow_locking_exit(c);
549         bch2_fs_io_write_exit(c);
550         bch2_fs_io_read_exit(c);
551         bch2_fs_buckets_waiting_for_journal_exit(c);
552         bch2_fs_btree_interior_update_exit(c);
553         bch2_fs_btree_iter_exit(c);
554         bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
555         bch2_fs_btree_cache_exit(c);
556         bch2_fs_replicas_exit(c);
557         bch2_fs_journal_exit(&c->journal);
558         bch2_io_clock_exit(&c->io_clock[WRITE]);
559         bch2_io_clock_exit(&c->io_clock[READ]);
560         bch2_fs_compress_exit(c);
561         bch2_journal_keys_put_initial(c);
562         BUG_ON(atomic_read(&c->journal_keys.ref));
563         bch2_fs_btree_write_buffer_exit(c);
564         percpu_free_rwsem(&c->mark_lock);
565         free_percpu(c->online_reserved);
566
567         darray_exit(&c->btree_roots_extra);
568         free_percpu(c->pcpu);
569         mempool_exit(&c->large_bkey_pool);
570         mempool_exit(&c->btree_bounce_pool);
571         bioset_exit(&c->btree_bio);
572         mempool_exit(&c->fill_iter);
573 #ifndef BCH_WRITE_REF_DEBUG
574         percpu_ref_exit(&c->writes);
575 #endif
576         kfree(rcu_dereference_protected(c->disk_groups, 1));
577         kfree(c->journal_seq_blacklist_table);
578         kfree(c->unused_inode_hints);
579
580         if (c->write_ref_wq)
581                 destroy_workqueue(c->write_ref_wq);
582         if (c->io_complete_wq)
583                 destroy_workqueue(c->io_complete_wq);
584         if (c->copygc_wq)
585                 destroy_workqueue(c->copygc_wq);
586         if (c->btree_io_complete_wq)
587                 destroy_workqueue(c->btree_io_complete_wq);
588         if (c->btree_update_wq)
589                 destroy_workqueue(c->btree_update_wq);
590
591         bch2_free_super(&c->disk_sb);
592         kvpfree(c, sizeof(*c));
593         module_put(THIS_MODULE);
594 }
595
596 static void bch2_fs_release(struct kobject *kobj)
597 {
598         struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
599
600         __bch2_fs_free(c);
601 }
602
603 void __bch2_fs_stop(struct bch_fs *c)
604 {
605         struct bch_dev *ca;
606         unsigned i;
607
608         bch_verbose(c, "shutting down");
609
610         set_bit(BCH_FS_stopping, &c->flags);
611
612         cancel_work_sync(&c->journal_seq_blacklist_gc_work);
613
614         down_write(&c->state_lock);
615         bch2_fs_read_only(c);
616         up_write(&c->state_lock);
617
618         for_each_member_device(ca, c, i)
619                 if (ca->kobj.state_in_sysfs &&
620                     ca->disk_sb.bdev)
621                         sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
622
623         if (c->kobj.state_in_sysfs)
624                 kobject_del(&c->kobj);
625
626         bch2_fs_debug_exit(c);
627         bch2_fs_chardev_exit(c);
628
629         bch2_ro_ref_put(c);
630         wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));
631
632         kobject_put(&c->counters_kobj);
633         kobject_put(&c->time_stats);
634         kobject_put(&c->opts_dir);
635         kobject_put(&c->internal);
636
637         /* btree prefetch might have kicked off reads in the background: */
638         bch2_btree_flush_all_reads(c);
639
640         for_each_member_device(ca, c, i)
641                 cancel_work_sync(&ca->io_error_work);
642
643         cancel_work_sync(&c->read_only_work);
644 }
645
646 void bch2_fs_free(struct bch_fs *c)
647 {
648         unsigned i;
649
650         mutex_lock(&bch_fs_list_lock);
651         list_del(&c->list);
652         mutex_unlock(&bch_fs_list_lock);
653
654         closure_sync(&c->cl);
655         closure_debug_destroy(&c->cl);
656
657         for (i = 0; i < c->sb.nr_devices; i++) {
658                 struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
659
660                 if (ca) {
661                         bch2_free_super(&ca->disk_sb);
662                         bch2_dev_free(ca);
663                 }
664         }
665
666         bch_verbose(c, "shutdown complete");
667
668         kobject_put(&c->kobj);
669 }
670
671 void bch2_fs_stop(struct bch_fs *c)
672 {
673         __bch2_fs_stop(c);
674         bch2_fs_free(c);
675 }
676
677 static int bch2_fs_online(struct bch_fs *c)
678 {
679         struct bch_dev *ca;
680         unsigned i;
681         int ret = 0;
682
683         lockdep_assert_held(&bch_fs_list_lock);
684
685         if (__bch2_uuid_to_fs(c->sb.uuid)) {
686                 bch_err(c, "filesystem UUID already open");
687                 return -EINVAL;
688         }
689
690         ret = bch2_fs_chardev_init(c);
691         if (ret) {
692                 bch_err(c, "error creating character device");
693                 return ret;
694         }
695
696         bch2_fs_debug_init(c);
697
698         ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
699             kobject_add(&c->internal, &c->kobj, "internal") ?:
700             kobject_add(&c->opts_dir, &c->kobj, "options") ?:
701 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
702             kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
703 #endif
704             kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
705             bch2_opts_create_sysfs_files(&c->opts_dir);
706         if (ret) {
707                 bch_err(c, "error creating sysfs objects");
708                 return ret;
709         }
710
711         down_write(&c->state_lock);
712
713         for_each_member_device(ca, c, i) {
714                 ret = bch2_dev_sysfs_online(c, ca);
715                 if (ret) {
716                         bch_err(c, "error creating sysfs objects");
717                         percpu_ref_put(&ca->ref);
718                         goto err;
719                 }
720         }
721
722         BUG_ON(!list_empty(&c->list));
723         list_add(&c->list, &bch_fs_list);
724 err:
725         up_write(&c->state_lock);
726         return ret;
727 }
728
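/*
 * Allocate and initialize a new struct bch_fs from an on-disk superblock:
 * set up locks, subsystem state, workqueues and mempools, apply options from
 * the superblock and any caller-supplied options, allocate member devices,
 * and register the filesystem in sysfs and on bch_fs_list.
 */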
729 static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
730 {
731         struct bch_fs *c;
732         struct printbuf name = PRINTBUF;
733         unsigned i, iter_size;
734         int ret = 0;
735
736         c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
737         if (!c) {
738                 c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
739                 goto out;
740         }
741
742         c->output = (void *)(unsigned long) opts.log_output;
743
744         __module_get(THIS_MODULE);
745
746         closure_init(&c->cl, NULL);
747
748         c->kobj.kset = bcachefs_kset;
749         kobject_init(&c->kobj, &bch2_fs_ktype);
750         kobject_init(&c->internal, &bch2_fs_internal_ktype);
751         kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
752         kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
753         kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);
754
755         c->minor                = -1;
756         c->disk_sb.fs_sb        = true;
757
758         init_rwsem(&c->state_lock);
759         mutex_init(&c->sb_lock);
760         mutex_init(&c->replicas_gc_lock);
761         mutex_init(&c->btree_root_lock);
762         INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
763
764         refcount_set(&c->ro_ref, 1);
765         init_waitqueue_head(&c->ro_ref_wait);
766         sema_init(&c->online_fsck_mutex, 1);
767
768         init_rwsem(&c->gc_lock);
769         mutex_init(&c->gc_gens_lock);
770         atomic_set(&c->journal_keys.ref, 1);
771         c->journal_keys.initial_ref_held = true;
772
773         for (i = 0; i < BCH_TIME_STAT_NR; i++)
774                 bch2_time_stats_init(&c->times[i]);
775
776         bch2_fs_copygc_init(c);
777         bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
778         bch2_fs_btree_iter_init_early(c);
779         bch2_fs_btree_interior_update_init_early(c);
780         bch2_fs_allocator_background_init(c);
781         bch2_fs_allocator_foreground_init(c);
782         bch2_fs_rebalance_init(c);
783         bch2_fs_quota_init(c);
784         bch2_fs_ec_init_early(c);
785         bch2_fs_move_init(c);
786         bch2_fs_sb_errors_init_early(c);
787
788         INIT_LIST_HEAD(&c->list);
789
790         mutex_init(&c->usage_scratch_lock);
791
792         mutex_init(&c->bio_bounce_pages_lock);
793         mutex_init(&c->snapshot_table_lock);
794         init_rwsem(&c->snapshot_create_lock);
795
796         spin_lock_init(&c->btree_write_error_lock);
797
798         INIT_WORK(&c->journal_seq_blacklist_gc_work,
799                   bch2_blacklist_entries_gc);
800
801         INIT_LIST_HEAD(&c->journal_iters);
802
803         INIT_LIST_HEAD(&c->fsck_error_msgs);
804         mutex_init(&c->fsck_error_msgs_lock);
805
806         seqcount_init(&c->gc_pos_lock);
807
808         seqcount_init(&c->usage_lock);
809
810         sema_init(&c->io_in_flight, 128);
811
812         INIT_LIST_HEAD(&c->vfs_inodes_list);
813         mutex_init(&c->vfs_inodes_lock);
814
815         c->copy_gc_enabled              = 1;
816         c->rebalance.enabled            = 1;
817         c->promote_whole_extents        = true;
818
819         c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
820         c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
821         c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];
822
823         bch2_fs_btree_cache_init_early(&c->btree_cache);
824
825         mutex_init(&c->sectors_available_lock);
826
827         ret = percpu_init_rwsem(&c->mark_lock);
828         if (ret)
829                 goto err;
830
831         mutex_lock(&c->sb_lock);
832         ret = bch2_sb_to_fs(c, sb);
833         mutex_unlock(&c->sb_lock);
834
835         if (ret)
836                 goto err;
837
838         pr_uuid(&name, c->sb.user_uuid.b);
839         strscpy(c->name, name.buf, sizeof(c->name));
840         printbuf_exit(&name);
841
842         ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
843         if (ret)
844                 goto err;
845
846         /* Compat: */
847         if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
848             !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
849                 SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
850
851         if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
852             !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
853                 SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
854
855         c->opts = bch2_opts_default;
856         ret = bch2_opts_from_sb(&c->opts, sb);
857         if (ret)
858                 goto err;
859
860         bch2_opts_apply(&c->opts, opts);
861
862         c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
863         if (c->opts.inodes_use_key_cache)
864                 c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
865         c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;
866
867         c->block_bits           = ilog2(block_sectors(c));
868         c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
869
870         if (bch2_fs_init_fault("fs_alloc")) {
871                 bch_err(c, "fs_alloc fault injected");
872                 ret = -EFAULT;
873                 goto err;
874         }
875
876         iter_size = sizeof(struct sort_iter) +
877                 (btree_blocks(c) + 1) * 2 *
878                 sizeof(struct sort_iter_set);
879
880         c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
881
882         if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
883                                 WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
884             !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
885                                 WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
886             !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
887                                 WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
888             !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
889                                 WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
890             !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
891                                 WQ_FREEZABLE, 0)) ||
892 #ifndef BCH_WRITE_REF_DEBUG
893             percpu_ref_init(&c->writes, bch2_writes_disabled,
894                             PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
895 #endif
896             mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
897             bioset_init(&c->btree_bio, 1,
898                         max(offsetof(struct btree_read_bio, bio),
899                             offsetof(struct btree_write_bio, wbio.bio)),
900                         BIOSET_NEED_BVECS) ||
901             !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
902             !(c->online_reserved = alloc_percpu(u64)) ||
903             mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
904                                         btree_bytes(c)) ||
905             mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
906             !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
907                                               sizeof(u64), GFP_KERNEL))) {
908                 ret = -BCH_ERR_ENOMEM_fs_other_alloc;
909                 goto err;
910         }
911
912         ret = bch2_fs_counters_init(c) ?:
913             bch2_fs_sb_errors_init(c) ?:
914             bch2_io_clock_init(&c->io_clock[READ]) ?:
915             bch2_io_clock_init(&c->io_clock[WRITE]) ?:
916             bch2_fs_journal_init(&c->journal) ?:
917             bch2_fs_replicas_init(c) ?:
918             bch2_fs_btree_cache_init(c) ?:
919             bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
920             bch2_fs_btree_iter_init(c) ?:
921             bch2_fs_btree_interior_update_init(c) ?:
922             bch2_fs_buckets_waiting_for_journal_init(c) ?:
923             bch2_fs_btree_write_buffer_init(c) ?:
924             bch2_fs_subvolumes_init(c) ?:
925             bch2_fs_io_read_init(c) ?:
926             bch2_fs_io_write_init(c) ?:
927             bch2_fs_nocow_locking_init(c) ?:
928             bch2_fs_encryption_init(c) ?:
929             bch2_fs_compress_init(c) ?:
930             bch2_fs_ec_init(c) ?:
931             bch2_fs_fsio_init(c) ?:
932             bch2_fs_fs_io_buffered_init(c) ?:
933             bch2_fs_fs_io_direct_init(c);
934         if (ret)
935                 goto err;
936
937         for (i = 0; i < c->sb.nr_devices; i++)
938                 if (bch2_dev_exists(c->disk_sb.sb, i) &&
939                     bch2_dev_alloc(c, i)) {
940                         ret = -EEXIST;
941                         goto err;
942                 }
943
944         bch2_journal_entry_res_resize(&c->journal,
945                         &c->btree_root_journal_res,
946                         BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
947         bch2_dev_usage_journal_reserve(c);
948         bch2_journal_entry_res_resize(&c->journal,
949                         &c->clock_journal_res,
950                         (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
951
952         mutex_lock(&bch_fs_list_lock);
953         ret = bch2_fs_online(c);
954         mutex_unlock(&bch_fs_list_lock);
955
956         if (ret)
957                 goto err;
958 out:
959         return c;
960 err:
961         bch2_fs_free(c);
962         c = ERR_PTR(ret);
963         goto out;
964 }
965
966 noinline_for_stack
967 static void print_mount_opts(struct bch_fs *c)
968 {
969         enum bch_opt_id i;
970         struct printbuf p = PRINTBUF;
971         bool first = true;
972
973         prt_str(&p, "mounting version ");
974         bch2_version_to_text(&p, c->sb.version);
975
976         if (c->opts.read_only) {
977                 prt_str(&p, " opts=");
978                 first = false;
979                 prt_printf(&p, "ro");
980         }
981
982         for (i = 0; i < bch2_opts_nr; i++) {
983                 const struct bch_option *opt = &bch2_opt_table[i];
984                 u64 v = bch2_opt_get_by_id(&c->opts, i);
985
986                 if (!(opt->flags & OPT_MOUNT))
987                         continue;
988
989                 if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
990                         continue;
991
992                 prt_str(&p, first ? " opts=" : ",");
993                 first = false;
994                 bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
995         }
996
997         bch_info(c, "%s", p.buf);
998         printbuf_exit(&p);
999 }
1000
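/*
 * Bring a filesystem up after bch2_fs_alloc(): update member last_mount
 * times, run recovery (or initialize a fresh filesystem), then go read-write
 * unless mounted read-only or with nochanges.
 */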
1001 int bch2_fs_start(struct bch_fs *c)
1002 {
1003         struct bch_dev *ca;
1004         time64_t now = ktime_get_real_seconds();
1005         unsigned i;
1006         int ret;
1007
1008         print_mount_opts(c);
1009
1010         down_write(&c->state_lock);
1011
1012         BUG_ON(test_bit(BCH_FS_started, &c->flags));
1013
1014         mutex_lock(&c->sb_lock);
1015
1016         ret = bch2_sb_members_v2_init(c);
1017         if (ret) {
1018                 mutex_unlock(&c->sb_lock);
1019                 goto err;
1020         }
1021
1022         for_each_online_member(ca, c, i)
1023                 bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
1024
1025         mutex_unlock(&c->sb_lock);
1026
1027         for_each_rw_member(ca, c, i)
1028                 bch2_dev_allocator_add(c, ca);
1029         bch2_recalc_capacity(c);
1030
1031         ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
1032                 ? bch2_fs_recovery(c)
1033                 : bch2_fs_initialize(c);
1034         if (ret)
1035                 goto err;
1036
1037         ret = bch2_opts_check_may_set(c);
1038         if (ret)
1039                 goto err;
1040
1041         if (bch2_fs_init_fault("fs_start")) {
1042                 bch_err(c, "fs_start fault injected");
1043                 ret = -EINVAL;
1044                 goto err;
1045         }
1046
1047         set_bit(BCH_FS_started, &c->flags);
1048
1049         if (c->opts.read_only || c->opts.nochanges) {
1050                 bch2_fs_read_only(c);
1051         } else {
1052                 ret = !test_bit(BCH_FS_rw, &c->flags)
1053                         ? bch2_fs_read_write(c)
1054                         : bch2_fs_read_write_late(c);
1055                 if (ret)
1056                         goto err;
1057         }
1058
1059         ret = 0;
1060 out:
1061         up_write(&c->state_lock);
1062         return ret;
1063 err:
1064         bch_err_msg(c, ret, "starting filesystem");
1065         goto out;
1066 }
1067
1068 static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
1069 {
1070         struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
1071
1072         if (le16_to_cpu(sb->block_size) != block_sectors(c))
1073                 return -BCH_ERR_mismatched_block_size;
1074
1075         if (le16_to_cpu(m.bucket_size) <
1076             BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
1077                 return -BCH_ERR_bucket_size_too_small;
1078
1079         return 0;
1080 }
1081
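/*
 * Check whether a device superblock belongs to this filesystem and is still
 * a current member, treating whichever superblock has the newer sequence
 * number as authoritative.
 */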
1082 static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
1083 {
1084         struct bch_sb *newest =
1085                 le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
1086
1087         if (!uuid_equal(&fs->uuid, &sb->uuid))
1088                 return -BCH_ERR_device_not_a_member_of_filesystem;
1089
1090         if (!bch2_dev_exists(newest, sb->dev_idx))
1091                 return -BCH_ERR_device_has_been_removed;
1092
1093         if (fs->block_size != sb->block_size)
1094                 return -BCH_ERR_mismatched_block_size;
1095
1096         return 0;
1097 }
1098
1099 /* Device startup/shutdown: */
1100
1101 static void bch2_dev_release(struct kobject *kobj)
1102 {
1103         struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
1104
1105         kfree(ca);
1106 }
1107
1108 static void bch2_dev_free(struct bch_dev *ca)
1109 {
1110         cancel_work_sync(&ca->io_error_work);
1111
1112         if (ca->kobj.state_in_sysfs &&
1113             ca->disk_sb.bdev)
1114                 sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
1115
1116         if (ca->kobj.state_in_sysfs)
1117                 kobject_del(&ca->kobj);
1118
1119         bch2_free_super(&ca->disk_sb);
1120         bch2_dev_journal_exit(ca);
1121
1122         free_percpu(ca->io_done);
1123         bioset_exit(&ca->replica_set);
1124         bch2_dev_buckets_free(ca);
1125         free_page((unsigned long) ca->sb_read_scratch);
1126
1127         bch2_time_stats_exit(&ca->io_latency[WRITE]);
1128         bch2_time_stats_exit(&ca->io_latency[READ]);
1129
1130         percpu_ref_exit(&ca->io_ref);
1131         percpu_ref_exit(&ca->ref);
1132         kobject_put(&ca->kobj);
1133 }
1134
1135 static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
1136 {
1137
1138         lockdep_assert_held(&c->state_lock);
1139
1140         if (percpu_ref_is_zero(&ca->io_ref))
1141                 return;
1142
1143         __bch2_dev_read_only(c, ca);
1144
1145         reinit_completion(&ca->io_ref_completion);
1146         percpu_ref_kill(&ca->io_ref);
1147         wait_for_completion(&ca->io_ref_completion);
1148
1149         if (ca->kobj.state_in_sysfs) {
1150                 sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
1151                 sysfs_remove_link(&ca->kobj, "block");
1152         }
1153
1154         bch2_free_super(&ca->disk_sb);
1155         bch2_dev_journal_exit(ca);
1156 }
1157
1158 static void bch2_dev_ref_complete(struct percpu_ref *ref)
1159 {
1160         struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
1161
1162         complete(&ca->ref_completion);
1163 }
1164
1165 static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
1166 {
1167         struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
1168
1169         complete(&ca->io_ref_completion);
1170 }
1171
1172 static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
1173 {
1174         int ret;
1175
1176         if (!c->kobj.state_in_sysfs)
1177                 return 0;
1178
1179         if (!ca->kobj.state_in_sysfs) {
1180                 ret = kobject_add(&ca->kobj, &c->kobj,
1181                                   "dev-%u", ca->dev_idx);
1182                 if (ret)
1183                         return ret;
1184         }
1185
1186         if (ca->disk_sb.bdev) {
1187                 struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
1188
1189                 ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
1190                 if (ret)
1191                         return ret;
1192
1193                 ret = sysfs_create_link(&ca->kobj, block, "block");
1194                 if (ret)
1195                         return ret;
1196         }
1197
1198         return 0;
1199 }
1200
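/*
 * Allocate and initialize a struct bch_dev from its superblock member entry;
 * the device is not yet attached to a block device or to a filesystem slot.
 */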
1201 static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
1202                                         struct bch_member *member)
1203 {
1204         struct bch_dev *ca;
1205         unsigned i;
1206
1207         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1208         if (!ca)
1209                 return NULL;
1210
1211         kobject_init(&ca->kobj, &bch2_dev_ktype);
1212         init_completion(&ca->ref_completion);
1213         init_completion(&ca->io_ref_completion);
1214
1215         init_rwsem(&ca->bucket_lock);
1216
1217         INIT_WORK(&ca->io_error_work, bch2_io_error_work);
1218
1219         bch2_time_stats_init(&ca->io_latency[READ]);
1220         bch2_time_stats_init(&ca->io_latency[WRITE]);
1221
1222         ca->mi = bch2_mi_to_cpu(member);
1223
1224         for (i = 0; i < ARRAY_SIZE(member->errors); i++)
1225                 atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));
1226
1227         ca->uuid = member->uuid;
1228
1229         ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1230                              ca->mi.bucket_size / btree_sectors(c));
1231
1232         if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
1233                             0, GFP_KERNEL) ||
1234             percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
1235                             PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
1236             !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
1237             bch2_dev_buckets_alloc(c, ca) ||
1238             bioset_init(&ca->replica_set, 4,
1239                         offsetof(struct bch_write_bio, bio), 0) ||
1240             !(ca->io_done       = alloc_percpu(*ca->io_done)))
1241                 goto err;
1242
1243         return ca;
1244 err:
1245         bch2_dev_free(ca);
1246         return NULL;
1247 }
1248
1249 static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
1250                             unsigned dev_idx)
1251 {
1252         ca->dev_idx = dev_idx;
1253         __set_bit(ca->dev_idx, ca->self.d);
1254         scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
1255
1256         ca->fs = c;
1257         rcu_assign_pointer(c->devs[ca->dev_idx], ca);
1258
1259         if (bch2_dev_sysfs_online(c, ca))
1260                 pr_warn("error creating sysfs objects");
1261 }
1262
1263 static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
1264 {
1265         struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
1266         struct bch_dev *ca = NULL;
1267         int ret = 0;
1268
1269         if (bch2_fs_init_fault("dev_alloc"))
1270                 goto err;
1271
1272         ca = __bch2_dev_alloc(c, &member);
1273         if (!ca)
1274                 goto err;
1275
1276         ca->fs = c;
1277
1278         bch2_dev_attach(c, ca, dev_idx);
1279         return ret;
1280 err:
1281         if (ca)
1282                 bch2_dev_free(ca);
1283         return -BCH_ERR_ENOMEM_dev_alloc;
1284 }
1285
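/*
 * Attach an opened superblock/block device to a struct bch_dev: validate the
 * device size, initialize its journal, take ownership of the sb handle and
 * bring io_ref back up.
 */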
1286 static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
1287 {
1288         unsigned ret;
1289
1290         if (bch2_dev_is_online(ca)) {
1291                 bch_err(ca, "already have device online in slot %u",
1292                         sb->sb->dev_idx);
1293                 return -BCH_ERR_device_already_online;
1294         }
1295
1296         if (get_capacity(sb->bdev->bd_disk) <
1297             ca->mi.bucket_size * ca->mi.nbuckets) {
1298                 bch_err(ca, "cannot online: device too small");
1299                 return -BCH_ERR_device_size_too_small;
1300         }
1301
1302         BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
1303
1304         ret = bch2_dev_journal_init(ca, sb->sb);
1305         if (ret)
1306                 return ret;
1307
1308         /* Commit: */
1309         ca->disk_sb = *sb;
1310         memset(sb, 0, sizeof(*sb));
1311
1312         ca->dev = ca->disk_sb.bdev->bd_dev;
1313
1314         percpu_ref_reinit(&ca->io_ref);
1315
1316         return 0;
1317 }
1318
1319 static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
1320 {
1321         struct bch_dev *ca;
1322         int ret;
1323
1324         lockdep_assert_held(&c->state_lock);
1325
1326         if (le64_to_cpu(sb->sb->seq) >
1327             le64_to_cpu(c->disk_sb.sb->seq))
1328                 bch2_sb_to_fs(c, sb->sb);
1329
1330         BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
1331                !c->devs[sb->sb->dev_idx]);
1332
1333         ca = bch_dev_locked(c, sb->sb->dev_idx);
1334
1335         ret = __bch2_dev_attach_bdev(ca, sb);
1336         if (ret)
1337                 return ret;
1338
1339         bch2_dev_sysfs_online(c, ca);
1340
1341         if (c->sb.nr_devices == 1)
1342                 snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
1343         snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);
1344
1345         rebalance_wakeup(c);
1346         return 0;
1347 }
1348
1349 /* Device management: */
1350
1351 /*
1352  * Note: this function is also used by the error paths - when a particular
1353  * device sees an error, we call it to determine whether we can just set the
1354  * device RO, or - if this function returns false - we'll set the whole
1355  * filesystem RO:
1356  *
1357  * XXX: maybe we should be more explicit about whether we're changing state
1358  * because we got an error or what have you?
1359  */
1360 bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
1361                             enum bch_member_state new_state, int flags)
1362 {
1363         struct bch_devs_mask new_online_devs;
1364         struct bch_dev *ca2;
1365         int i, nr_rw = 0, required;
1366
1367         lockdep_assert_held(&c->state_lock);
1368
1369         switch (new_state) {
1370         case BCH_MEMBER_STATE_rw:
1371                 return true;
1372         case BCH_MEMBER_STATE_ro:
1373                 if (ca->mi.state != BCH_MEMBER_STATE_rw)
1374                         return true;
1375
1376                 /* do we have enough devices to write to?  */
1377                 for_each_member_device(ca2, c, i)
1378                         if (ca2 != ca)
1379                                 nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
1380
1381                 required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
1382                                ? c->opts.metadata_replicas
1383                                : c->opts.metadata_replicas_required,
1384                                !(flags & BCH_FORCE_IF_DATA_DEGRADED)
1385                                ? c->opts.data_replicas
1386                                : c->opts.data_replicas_required);
1387
1388                 return nr_rw >= required;
1389         case BCH_MEMBER_STATE_failed:
1390         case BCH_MEMBER_STATE_spare:
1391                 if (ca->mi.state != BCH_MEMBER_STATE_rw &&
1392                     ca->mi.state != BCH_MEMBER_STATE_ro)
1393                         return true;
1394
1395                 /* do we have enough devices to read from?  */
1396                 new_online_devs = bch2_online_devs(c);
1397                 __clear_bit(ca->dev_idx, new_online_devs.d);
1398
1399                 return bch2_have_enough_devs(c, new_online_devs, flags, false);
1400         default:
1401                 BUG();
1402         }
1403 }
1404
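/*
 * Check whether we have enough online devices to start: unless mounting
 * degraded, every rw/ro member must be present, and we must satisfy the
 * configured replication requirements.
 */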
1405 static bool bch2_fs_may_start(struct bch_fs *c)
1406 {
1407         struct bch_dev *ca;
1408         unsigned i, flags = 0;
1409
1410         if (c->opts.very_degraded)
1411                 flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;
1412
1413         if (c->opts.degraded)
1414                 flags |= BCH_FORCE_IF_DEGRADED;
1415
1416         if (!c->opts.degraded &&
1417             !c->opts.very_degraded) {
1418                 mutex_lock(&c->sb_lock);
1419
1420                 for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
1421                         if (!bch2_dev_exists(c->disk_sb.sb, i))
1422                                 continue;
1423
1424                         ca = bch_dev_locked(c, i);
1425
1426                         if (!bch2_dev_is_online(ca) &&
1427                             (ca->mi.state == BCH_MEMBER_STATE_rw ||
1428                              ca->mi.state == BCH_MEMBER_STATE_ro)) {
1429                                 mutex_unlock(&c->sb_lock);
1430                                 return false;
1431                         }
1432                 }
1433                 mutex_unlock(&c->sb_lock);
1434         }
1435
1436         return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
1437 }
1438
1439 static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
1440 {
1441         /*
1442          * The allocator thread itself allocates btree nodes, so stop it first:
1443          */
1444         bch2_dev_allocator_remove(c, ca);
1445         bch2_dev_journal_stop(&c->journal, ca);
1446 }
1447
1448 static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
1449 {
1450         lockdep_assert_held(&c->state_lock);
1451
1452         BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
1453
1454         bch2_dev_allocator_add(c, ca);
1455         bch2_recalc_capacity(c);
1456 }
1457
1458 int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
1459                          enum bch_member_state new_state, int flags)
1460 {
1461         struct bch_member *m;
1462         int ret = 0;
1463
1464         if (ca->mi.state == new_state)
1465                 return 0;
1466
1467         if (!bch2_dev_state_allowed(c, ca, new_state, flags))
1468                 return -BCH_ERR_device_state_not_allowed;
1469
1470         if (new_state != BCH_MEMBER_STATE_rw)
1471                 __bch2_dev_read_only(c, ca);
1472
1473         bch_notice(ca, "%s", bch2_member_states[new_state]);
1474
1475         mutex_lock(&c->sb_lock);
1476         m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
1477         SET_BCH_MEMBER_STATE(m, new_state);
1478         bch2_write_super(c);
1479         mutex_unlock(&c->sb_lock);
1480
1481         if (new_state == BCH_MEMBER_STATE_rw)
1482                 __bch2_dev_read_write(c, ca);
1483
1484         rebalance_wakeup(c);
1485
1486         return ret;
1487 }
1488
1489 int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
1490                        enum bch_member_state new_state, int flags)
1491 {
1492         int ret;
1493
1494         down_write(&c->state_lock);
1495         ret = __bch2_dev_set_state(c, ca, new_state, flags);
1496         up_write(&c->state_lock);
1497
1498         return ret;
1499 }
1500
1501 /* Device add/removal: */
1502
1503 static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
1504 {
1505         struct bpos start       = POS(ca->dev_idx, 0);
1506         struct bpos end         = POS(ca->dev_idx, U64_MAX);
1507         int ret;
1508
1509         /*
1510          * We clear the LRU and need_discard btrees first so that we don't race
1511          * with bch2_do_invalidates() and bch2_do_discards()
1512          */
1513         ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
1514                                         BTREE_TRIGGER_NORUN, NULL) ?:
1515                 bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
1516                                         BTREE_TRIGGER_NORUN, NULL) ?:
1517                 bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
1518                                         BTREE_TRIGGER_NORUN, NULL) ?:
1519                 bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
1520                                         BTREE_TRIGGER_NORUN, NULL) ?:
1521                 bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
1522                                         BTREE_TRIGGER_NORUN, NULL) ?:
1523                 bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
1524                                         BTREE_TRIGGER_NORUN, NULL);
1525         if (ret)
1526                 bch_err_msg(c, ret, "removing dev alloc info");
1527
1528         return ret;
1529 }
1530
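/*
 * Remove a member device from a running filesystem: migrate or drop its
 * data, delete its alloc info, flush journal references to it, then free its
 * superblock member slot.
 */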
1531 int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
1532 {
1533         struct bch_member *m;
1534         unsigned dev_idx = ca->dev_idx, data;
1535         int ret;
1536
1537         down_write(&c->state_lock);
1538
1539         /*
1540          * We consume a reference to ca->ref, regardless of whether we succeed
1541          * or fail:
1542          */
1543         percpu_ref_put(&ca->ref);
1544
1545         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1546                 bch_err(ca, "Cannot remove without losing data");
1547                 ret = -BCH_ERR_device_state_not_allowed;
1548                 goto err;
1549         }
1550
1551         __bch2_dev_read_only(c, ca);
1552
1553         ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
1554         if (ret) {
1555                 bch_err_msg(ca, ret, "dropping data");
1556                 goto err;
1557         }
1558
1559         ret = bch2_dev_remove_alloc(c, ca);
1560         if (ret) {
1561                 bch_err_msg(ca, ret, "deleting alloc info");
1562                 goto err;
1563         }
1564
1565         ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
1566         if (ret) {
1567                 bch_err_msg(ca, ret, "flushing journal");
1568                 goto err;
1569         }
1570
1571         ret = bch2_journal_flush(&c->journal);
1572         if (ret) {
1573                 bch_err(ca, "journal error");
1574                 goto err;
1575         }
1576
1577         ret = bch2_replicas_gc2(c);
1578         if (ret) {
1579                 bch_err_msg(ca, ret, "in replicas_gc2()");
1580                 goto err;
1581         }
1582
1583         data = bch2_dev_has_data(c, ca);
1584         if (data) {
1585                 struct printbuf data_has = PRINTBUF;
1586
1587                 prt_bitflags(&data_has, bch2_data_types, data);
1588                 bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
1589                 printbuf_exit(&data_has);
1590                 ret = -EBUSY;
1591                 goto err;
1592         }
1593
1594         __bch2_dev_offline(c, ca);
1595
1596         mutex_lock(&c->sb_lock);
1597         rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
1598         mutex_unlock(&c->sb_lock);
1599
1600         percpu_ref_kill(&ca->ref);
1601         wait_for_completion(&ca->ref_completion);
1602
1603         bch2_dev_free(ca);
1604
1605         /*
1606          * At this point the device object has been removed in-core, but the
1607          * on-disk journal might still refer to the device index via sb device
1608          * usage entries. Recovery fails if it sees usage information for an
1609          * invalid device. Flush journal pins to push the back of the journal
1610          * past now invalid device index references before we update the
1611          * superblock, but after the device object has been removed so any
1612          * further journal writes elide usage info for the device.
1613          */
1614         bch2_journal_flush_all_pins(&c->journal);
1615
1616         /*
1617          * Free this device's slot in the bch_member array - all pointers to
1618          * this device must be gone:
1619          */
1620         mutex_lock(&c->sb_lock);
1621         m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
1622         memset(&m->uuid, 0, sizeof(m->uuid));
1623
1624         bch2_write_super(c);
1625
1626         mutex_unlock(&c->sb_lock);
1627         up_write(&c->state_lock);
1628
1629         bch2_dev_usage_journal_reserve(c);
1630         return 0;
1631 err:
1632         if (ca->mi.state == BCH_MEMBER_STATE_rw &&
1633             !percpu_ref_is_zero(&ca->io_ref))
1634                 __bch2_dev_read_write(c, ca);
1635         up_write(&c->state_lock);
1636         return ret;
1637 }
1638
1639 /* Add new device to running filesystem: */
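/*
 * The new device's own superblock (read from @path) supplies its member
 * info; we then allocate a journal on it, add a member entry to this
 * filesystem's superblock, mark its superblock buckets, initialize
 * freespace, and finally bring it read-write if its member state allows.
 */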
1640 int bch2_dev_add(struct bch_fs *c, const char *path)
1641 {
1642         struct bch_opts opts = bch2_opts_empty();
1643         struct bch_sb_handle sb;
1644         struct bch_dev *ca = NULL;
1645         struct bch_sb_field_members_v2 *mi;
1646         struct bch_member dev_mi;
1647         unsigned dev_idx, nr_devices, u64s;
1648         struct printbuf errbuf = PRINTBUF;
1649         struct printbuf label = PRINTBUF;
1650         int ret;
1651
1652         ret = bch2_read_super(path, &opts, &sb);
1653         if (ret) {
1654                 bch_err_msg(c, ret, "reading super");
1655                 goto err;
1656         }
1657
1658         dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
1659
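        /*
         * If the new device's superblock names a disk label (group), render
         * the label path now; it is recreated in this filesystem via
         * __bch2_dev_group_set() once the member entry exists:
         */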
1660         if (BCH_MEMBER_GROUP(&dev_mi)) {
1661                 bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
1662                 if (label.allocation_failure) {
1663                         ret = -ENOMEM;
1664                         goto err;
1665                 }
1666         }
1667
1668         ret = bch2_dev_may_add(sb.sb, c);
1669         if (ret) {
1670                 bch_err_fn(c, ret);
1671                 goto err;
1672         }
1673
1674         ca = __bch2_dev_alloc(c, &dev_mi);
1675         if (!ca) {
1676                 ret = -ENOMEM;
1677                 goto err;
1678         }
1679
1680         bch2_dev_usage_init(ca);
1681
1682         ret = __bch2_dev_attach_bdev(ca, &sb);
1683         if (ret)
1684                 goto err;
1685
1686         ret = bch2_dev_journal_alloc(ca);
1687         if (ret) {
1688                 bch_err_msg(c, ret, "allocating journal");
1689                 goto err;
1690         }
1691
1692         down_write(&c->state_lock);
1693         mutex_lock(&c->sb_lock);
1694
1695         ret = bch2_sb_from_fs(c, ca);
1696         if (ret) {
1697                 bch_err_msg(c, ret, "setting up new superblock");
1698                 goto err_unlock;
1699         }
1700
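        /*
         * Find a free index in the member array for the new device;
         * dynamic_fault() lets testing simulate the no-free-slot case:
         */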
1701         if (dynamic_fault("bcachefs:add:no_slot"))
1702                 goto no_slot;
1703
1704         for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
1705                 if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
1706                         goto have_slot;
1707 no_slot:
1708         ret = -BCH_ERR_ENOSPC_sb_members;
1709         bch_err_msg(c, ret, "setting up new superblock");
1710         goto err_unlock;
1711
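        /*
         * Grow the members_v2 section to cover the new device index (u64s is
         * the section size in u64 units), then fill in the new member entry:
         */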
1712 have_slot:
1713         nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
1714
1715         mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
1716         u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
1717                             le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
1718
1719         mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
1720         if (!mi) {
1721                 ret = -BCH_ERR_ENOSPC_sb_members;
1722                 bch_err_msg(c, ret, "setting up new superblock");
1723                 goto err_unlock;
1724         }
1725         struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
1726
1727         /* success: */
1728
1729         *m = dev_mi;
1730         m->last_mount = cpu_to_le64(ktime_get_real_seconds());
1731         c->disk_sb.sb->nr_devices       = nr_devices;
1732
1733         ca->disk_sb.sb->dev_idx = dev_idx;
1734         bch2_dev_attach(c, ca, dev_idx);
1735
1736         if (BCH_MEMBER_GROUP(&dev_mi)) {
1737                 ret = __bch2_dev_group_set(c, ca, label.buf);
1738                 if (ret) {
1739                         bch_err_msg(c, ret, "creating new label");
1740                         goto err_unlock;
1741                 }
1742         }
1743
1744         bch2_write_super(c);
1745         mutex_unlock(&c->sb_lock);
1746
1747         bch2_dev_usage_journal_reserve(c);
1748
1749         ret = bch2_trans_mark_dev_sb(c, ca);
1750         if (ret) {
1751                 bch_err_msg(ca, ret, "marking new superblock");
1752                 goto err_late;
1753         }
1754
1755         ret = bch2_fs_freespace_init(c);
1756         if (ret) {
1757                 bch_err_msg(ca, ret, "initializing free space");
1758                 goto err_late;
1759         }
1760
1761         ca->new_fs_bucket_idx = 0;
1762
1763         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1764                 __bch2_dev_read_write(c, ca);
1765
1766         up_write(&c->state_lock);
1767         return 0;
1768
1769 err_unlock:
1770         mutex_unlock(&c->sb_lock);
1771         up_write(&c->state_lock);
1772 err:
1773         if (ca)
1774                 bch2_dev_free(ca);
1775         bch2_free_super(&sb);
1776         printbuf_exit(&label);
1777         printbuf_exit(&errbuf);
1778         return ret;
1779 err_late:
1780         up_write(&c->state_lock);
1781         ca = NULL;
1782         goto err;
1783 }
1784
1785 /* Hot add existing device to running filesystem: */
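/*
 * The device must already be a member: read its superblock, verify it
 * belongs to this filesystem, attach the block device, mark its superblock
 * buckets, and bring it read-write if its member state is rw. Freespace is
 * initialized and a journal allocated if the device has neither yet.
 */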
1786 int bch2_dev_online(struct bch_fs *c, const char *path)
1787 {
1788         struct bch_opts opts = bch2_opts_empty();
1789         struct bch_sb_handle sb = { NULL };
1790         struct bch_dev *ca;
1791         unsigned dev_idx;
1792         int ret;
1793
1794         down_write(&c->state_lock);
1795
1796         ret = bch2_read_super(path, &opts, &sb);
1797         if (ret) {
1798                 up_write(&c->state_lock);
1799                 return ret;
1800         }
1801
1802         dev_idx = sb.sb->dev_idx;
1803
1804         ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
1805         if (ret) {
1806                 bch_err_msg(c, ret, "bringing %s online", path);
1807                 goto err;
1808         }
1809
1810         ret = bch2_dev_attach_bdev(c, &sb);
1811         if (ret)
1812                 goto err;
1813
1814         ca = bch_dev_locked(c, dev_idx);
1815
1816         ret = bch2_trans_mark_dev_sb(c, ca);
1817         if (ret) {
1818                 bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
1819                 goto err;
1820         }
1821
1822         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1823                 __bch2_dev_read_write(c, ca);
1824
1825         if (!ca->mi.freespace_initialized) {
1826                 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
1827                 bch_err_msg(ca, ret, "initializing free space");
1828                 if (ret)
1829                         goto err;
1830         }
1831
1832         if (!ca->journal.nr) {
1833                 ret = bch2_dev_journal_alloc(ca);
1834                 bch_err_msg(ca, ret, "allocating journal");
1835                 if (ret)
1836                         goto err;
1837         }
1838
1839         mutex_lock(&c->sb_lock);
1840         bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
1841                 cpu_to_le64(ktime_get_real_seconds());
1842         bch2_write_super(c);
1843         mutex_unlock(&c->sb_lock);
1844
1845         up_write(&c->state_lock);
1846         return 0;
1847 err:
1848         up_write(&c->state_lock);
1849         bch2_free_super(&sb);
1850         return ret;
1851 }
1852
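/*
 * Take a member device offline (close its block device) without removing it
 * from the filesystem; refused, subject to @flags, if bch2_dev_state_allowed()
 * reports that going to the failed state would lose access to data.
 */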
1853 int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
1854 {
1855         down_write(&c->state_lock);
1856
1857         if (!bch2_dev_is_online(ca)) {
1858                 bch_err(ca, "Already offline");
1859                 up_write(&c->state_lock);
1860                 return 0;
1861         }
1862
1863         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1864                 bch_err(ca, "Cannot offline required disk");
1865                 up_write(&c->state_lock);
1866                 return -BCH_ERR_device_state_not_allowed;
1867         }
1868
1869         __bch2_dev_offline(c, ca);
1870
1871         up_write(&c->state_lock);
1872         return 0;
1873 }
1874
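/*
 * Resize a member device. Only growing is supported: the in-core bucket
 * arrays are resized, the new nbuckets is written to the superblock, and
 * freespace is initialized for the added range [old_nbuckets, nbuckets).
 */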
1875 int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1876 {
1877         struct bch_member *m;
1878         u64 old_nbuckets;
1879         int ret = 0;
1880
1881         down_write(&c->state_lock);
1882         old_nbuckets = ca->mi.nbuckets;
1883
1884         if (nbuckets < ca->mi.nbuckets) {
1885                 bch_err(ca, "Cannot shrink yet");
1886                 ret = -EINVAL;
1887                 goto err;
1888         }
1889
1890         if (bch2_dev_is_online(ca) &&
1891             get_capacity(ca->disk_sb.bdev->bd_disk) <
1892             ca->mi.bucket_size * nbuckets) {
1893                 bch_err(ca, "New size larger than device");
1894                 ret = -BCH_ERR_device_size_too_small;
1895                 goto err;
1896         }
1897
1898         ret = bch2_dev_buckets_resize(c, ca, nbuckets);
1899         if (ret) {
1900                 bch_err_msg(ca, ret, "resizing buckets");
1901                 goto err;
1902         }
1903
1904         ret = bch2_trans_mark_dev_sb(c, ca);
1905         if (ret)
1906                 goto err;
1907
1908         mutex_lock(&c->sb_lock);
1909         m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
1910         m->nbuckets = cpu_to_le64(nbuckets);
1911
1912         bch2_write_super(c);
1913         mutex_unlock(&c->sb_lock);
1914
1915         if (ca->mi.freespace_initialized) {
1916                 ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
1917                 if (ret)
1918                         goto err;
1919
1920                 /*
1921                  * XXX: this is all wrong transactionally - we'll be able to do
1922                  * this correctly after the disk space accounting rewrite
1923                  */
1924                 ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
1925         }
1926
1927         bch2_recalc_capacity(c);
1928 err:
1929         up_write(&c->state_lock);
1930         return ret;
1931 }
1932
1933 /* return with ref on ca->ref: */
1934 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
1935 {
1936         struct bch_dev *ca;
1937         unsigned i;
1938
1939         rcu_read_lock();
1940         for_each_member_device_rcu(ca, c, i, NULL)
1941                 if (!strcmp(name, ca->name))
1942                         goto found;
1943         ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
1944 found:
1945         rcu_read_unlock();
1946
1947         return ca;
1948 }
1949
1950 /* Filesystem open: */
1951
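/*
 * Open a filesystem from a set of member devices: read each device's
 * superblock, treat the one with the highest seq as most up to date, drop any
 * device that superblock no longer lists as a member, allocate the filesystem
 * from it, attach every block device, and start the filesystem unless
 * opts.nostart is set.
 *
 * Minimal usage sketch (illustrative, not taken from this file):
 *
 *	struct bch_opts opts = bch2_opts_empty();
 *	struct bch_fs *c = bch2_fs_open(devices, nr_devices, opts);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 */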
1952 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
1953                             struct bch_opts opts)
1954 {
1955         DARRAY(struct bch_sb_handle) sbs = { 0 };
1956         struct bch_fs *c = NULL;
1957         struct bch_sb_handle *sb, *best = NULL;
1958         struct printbuf errbuf = PRINTBUF;
1959         int ret = 0;
1960
1961         if (!try_module_get(THIS_MODULE))
1962                 return ERR_PTR(-ENODEV);
1963
1964         if (!nr_devices) {
1965                 ret = -EINVAL;
1966                 goto err;
1967         }
1968
1969         ret = darray_make_room(&sbs, nr_devices);
1970         if (ret)
1971                 goto err;
1972
1973         for (unsigned i = 0; i < nr_devices; i++) {
1974                 struct bch_sb_handle sb = { NULL };
1975
1976                 ret = bch2_read_super(devices[i], &opts, &sb);
1977                 if (ret)
1978                         goto err;
1979
1980                 BUG_ON(darray_push(&sbs, sb));
1981         }
1982
1983         darray_for_each(sbs, sb)
1984                 if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
1985                         best = sb;
1986
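        /*
         * Prune devices that the newest superblock no longer knows about; if
         * an entry before 'best' is removed, 'best' shifts down with the
         * array:
         */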
1987         darray_for_each_reverse(sbs, sb) {
1988                 if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
1989                         pr_info("%pg has been removed, skipping", sb->bdev);
1990                         bch2_free_super(sb);
1991                         darray_remove_item(&sbs, sb);
1992                         best -= best > sb;
1993                         continue;
1994                 }
1995
1996                 ret = bch2_dev_in_fs(best->sb, sb->sb);
1997                 if (ret)
1998                         goto err_print;
1999         }
2000
2001         c = bch2_fs_alloc(best->sb, opts);
2002         ret = PTR_ERR_OR_ZERO(c);
2003         if (ret)
2004                 goto err;
2005
2006         down_write(&c->state_lock);
2007         darray_for_each(sbs, sb) {
2008                 ret = bch2_dev_attach_bdev(c, sb);
2009                 if (ret) {
2010                         up_write(&c->state_lock);
2011                         goto err;
2012                 }
2013         }
2014         up_write(&c->state_lock);
2015
2016         if (!bch2_fs_may_start(c)) {
2017                 ret = -BCH_ERR_insufficient_devices_to_start;
2018                 goto err_print;
2019         }
2020
2021         if (!c->opts.nostart) {
2022                 ret = bch2_fs_start(c);
2023                 if (ret)
2024                         goto err;
2025         }
2026 out:
2027         darray_for_each(sbs, sb)
2028                 bch2_free_super(sb);
2029         darray_exit(&sbs);
2030         printbuf_exit(&errbuf);
2031         module_put(THIS_MODULE);
2032         return c;
2033 err_print:
2034         pr_err("bch_fs_open err opening %s: %s",
2035                devices[0], bch2_err_str(ret));
2036 err:
2037         if (!IS_ERR_OR_NULL(c))
2038                 bch2_fs_stop(c);
2039         c = ERR_PTR(ret);
2040         goto out;
2041 }
2042
2043 /* Global interfaces/init */
2044
2045 static void bcachefs_exit(void)
2046 {
2047         bch2_debug_exit();
2048         bch2_vfs_exit();
2049         bch2_chardev_exit();
2050         bch2_btree_key_cache_exit();
2051         if (bcachefs_kset)
2052                 kset_unregister(bcachefs_kset);
2053 }
2054
2055 static int __init bcachefs_init(void)
2056 {
2057         bch2_bkey_pack_test();
2058
2059         if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
2060             bch2_btree_key_cache_init() ||
2061             bch2_chardev_init() ||
2062             bch2_vfs_init() ||
2063             bch2_debug_init())
2064                 goto err;
2065
2066         return 0;
2067 err:
2068         bcachefs_exit();
2069         return -ENOMEM;
2070 }
2071
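/*
 * Expose each BCH_DEBUG_PARAMS() option as a writable module parameter, and
 * the current on-disk metadata version as a read-only one:
 */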
2072 #define BCH_DEBUG_PARAM(name, description)                      \
2073         bool bch2_##name;                                       \
2074         module_param_named(name, bch2_##name, bool, 0644);      \
2075         MODULE_PARM_DESC(name, description);
2076 BCH_DEBUG_PARAMS()
2077 #undef BCH_DEBUG_PARAM
2078
2079 __maybe_unused
2080 static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
2081 module_param_named(version, bch2_metadata_version, uint, 0400);
2082
2083 module_exit(bcachefs_exit);
2084 module_init(bcachefs_init);