// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "counters.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fsck.h"
#include "inode.h"
#include "io_read.h"
#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "trace.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");

#define KTYPE(type)                                                     \
static const struct attribute_group type ## _group = {                  \
        .attrs = type ## _files                                         \
};                                                                      \
                                                                        \
static const struct attribute_group *type ## _groups[] = {              \
        &type ## _group,                                                \
        NULL                                                            \
};                                                                      \
                                                                        \
static const struct kobj_type type ## _ktype = {                        \
        .release        = type ## _release,                             \
        .sysfs_ops      = &type ## _sysfs_ops,                          \
        .default_groups = type ## _groups                               \
}
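
/*
 * For example, KTYPE(bch2_fs) expands (roughly) to:
 *
 *	static const struct attribute_group bch2_fs_group = {
 *		.attrs = bch2_fs_files
 *	};
 *
 *	static const struct attribute_group *bch2_fs_groups[] = {
 *		&bch2_fs_group,
 *		NULL
 *	};
 *
 *	static const struct kobj_type bch2_fs_ktype = {
 *		.release	= bch2_fs_release,
 *		.sysfs_ops	= &bch2_fs_sysfs_ops,
 *		.default_groups	= bch2_fs_groups
 *	};
 *
 * with the _files and _sysfs_ops symbols coming from sysfs.h, and the
 * _release functions declared below (the invocation supplies the trailing
 * semicolon).
 */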

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);
static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
        struct bch_fs *c;
        struct bch_dev *ca;
        unsigned i;

        mutex_lock(&bch_fs_list_lock);
        rcu_read_lock();

        list_for_each_entry(c, &bch_fs_list, list)
                for_each_member_device_rcu(ca, c, i, NULL)
                        if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
                                closure_get(&c->cl);
                                goto found;
                        }
        c = NULL;
found:
        rcu_read_unlock();
        mutex_unlock(&bch_fs_list_lock);

        return c;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
        struct bch_fs *c;

        lockdep_assert_held(&bch_fs_list_lock);

        list_for_each_entry(c, &bch_fs_list, list)
                if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
                        return c;

        return NULL;
}

struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
        struct bch_fs *c;

        mutex_lock(&bch_fs_list_lock);
        c = __bch2_uuid_to_fs(uuid);
        if (c)
                closure_get(&c->cl);
        mutex_unlock(&bch_fs_list_lock);

        return c;
}

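/*
 * Each member device gets a dev_usage entry in every journal write: a
 * struct jset_entry_dev_usage header plus one jset_entry_dev_usage_type per
 * data type. Recompute the reservation, in u64s (the journal's reservation
 * unit), whenever the number of member devices changes:
 */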
static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i, nr = 0, u64s =
                ((sizeof(struct jset_entry_dev_usage) +
                  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
                sizeof(u64);

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i, NULL)
                nr++;
        rcu_read_unlock();

        bch2_journal_entry_res_resize(&c->journal,
                        &c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

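/*
 * Hence, shutdown proceeds top-down: __bch2_fs_read_only() first stops the
 * services that issue writes (erasure coding, rebalance, copygc, gc), then
 * flushes and stops the journal, and only then removes devices from the
 * allocator:
 */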
static void __bch2_fs_read_only(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i, clean_passes = 0;
        u64 seq = 0;

        bch2_fs_ec_stop(c);
        bch2_open_buckets_stop(c, NULL, true);
        bch2_rebalance_stop(c);
        bch2_copygc_stop(c);
        bch2_gc_thread_stop(c);
        bch2_fs_ec_flush(c);

        bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
                    journal_cur_seq(&c->journal));

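        /*
         * Flushing interior btree updates, journal pins and btree writes can
         * each generate more work for the others, so loop until two
         * consecutive passes find nothing to flush and the journal sequence
         * number has stopped advancing:
         */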
        do {
                clean_passes++;

                if (bch2_btree_interior_updates_flush(c) ||
                    bch2_journal_flush_all_pins(&c->journal) ||
                    bch2_btree_flush_all_writes(c) ||
                    seq != atomic64_read(&c->journal.seq)) {
                        seq = atomic64_read(&c->journal.seq);
                        clean_passes = 0;
                }
        } while (clean_passes < 2);

        bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
                    journal_cur_seq(&c->journal));

        if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
            !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
                set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
        bch2_fs_journal_stop(&c->journal);

        /*
         * After stopping journal:
         */
        for_each_member_device(ca, c, i)
                bch2_dev_allocator_remove(c, ca);
}

#ifndef BCH_WRITE_REF_DEBUG
static void bch2_writes_disabled(struct percpu_ref *writes)
{
        struct bch_fs *c = container_of(writes, struct bch_fs, writes);

        set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
        wake_up(&bch2_read_only_wait);
}
#endif

void bch2_fs_read_only(struct bch_fs *c)
{
        if (!test_bit(BCH_FS_RW, &c->flags)) {
                bch2_journal_reclaim_stop(&c->journal);
                return;
        }

        BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

        /*
         * Block new foreground-end write operations from starting - any new
         * writes will return -EROFS:
         */
        set_bit(BCH_FS_GOING_RO, &c->flags);
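
        /*
         * c->writes is normally a single percpu_ref; with BCH_WRITE_REF_DEBUG
         * it is instead an array of atomic counters, one per BCH_WRITE_REF_*
         * type (see __bch2_fs_read_write()), so write refs can be tracked
         * per user:
         */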
#ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_kill(&c->writes);
#else
        for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
                bch2_write_ref_put(c, i);
#endif

        /*
         * If we're not doing an emergency shutdown, we want to wait on
         * outstanding writes to complete so they don't see spurious errors due
         * to shutting down the allocator.
         *
         * If we are doing an emergency shutdown, outstanding writes may hang
         * until we shut down the allocator, so we don't want to wait on them
         * before shutting everything down - but we do need to wait on them
         * before returning and signalling that going RO is complete:
         */
        wait_event(bch2_read_only_wait,
                   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
                   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

        __bch2_fs_read_only(c);

        wait_event(bch2_read_only_wait,
                   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

        clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
        clear_bit(BCH_FS_GOING_RO, &c->flags);

        if (!bch2_journal_error(&c->journal) &&
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
            test_bit(BCH_FS_STARTED, &c->flags) &&
            test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
            !c->opts.norecovery) {
                BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
                BUG_ON(atomic_read(&c->btree_cache.dirty));
                BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
                BUG_ON(c->btree_write_buffer.state.nr);

                bch_verbose(c, "marking filesystem clean");
                bch2_fs_mark_clean(c);
        }

        clear_bit(BCH_FS_RW, &c->flags);
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
        struct bch_fs *c =
                container_of(work, struct bch_fs, read_only_work);

        down_write(&c->state_lock);
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
        queue_work(system_long_wq, &c->read_only_work);
}

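/*
 * Returns true if this call was the one that set BCH_FS_EMERGENCY_RO, i.e.
 * the caller is reporting a new error rather than one already being handled:
 */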
bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
        bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

        bch2_journal_halt(&c->journal);
        bch2_fs_read_only_async(c);

        wake_up(&bch2_read_only_wait);
        return ret;
}

static int bch2_fs_read_write_late(struct bch_fs *c)
{
        int ret;

        /*
         * Data move operations can't run until after check_snapshots has
         * completed, and bch2_snapshot_is_ancestor() is available.
         *
         * Ideally we'd start copygc/rebalance earlier instead of waiting for
         * all of recovery/fsck to complete:
         */
        ret = bch2_copygc_start(c);
        if (ret) {
                bch_err(c, "error starting copygc thread");
                return ret;
        }

        ret = bch2_rebalance_start(c);
        if (ret) {
                bch_err(c, "error starting rebalance thread");
                return ret;
        }

        return 0;
}

static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
        struct bch_dev *ca;
        unsigned i;
        int ret;

        if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
                bch_err(c, "cannot go rw, unfixed btree errors");
                return -BCH_ERR_erofs_unfixed_errors;
        }

        if (test_bit(BCH_FS_RW, &c->flags))
                return 0;

        if (c->opts.norecovery)
                return -BCH_ERR_erofs_norecovery;

        /*
         * nochanges is used for fsck -n mode - we have to allow going rw
         * during recovery for that to work:
         */
        if (c->opts.nochanges && (!early || c->opts.read_only))
                return -BCH_ERR_erofs_nochanges;

        bch_info(c, "going read-write");

        ret = bch2_sb_members_v2_init(c);
        if (ret)
                goto err;

        ret = bch2_fs_mark_dirty(c);
        if (ret)
                goto err;

        clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);

        /*
         * First journal write must be a flush write: after a clean shutdown we
         * don't read the journal, so the first journal write may end up
         * overwriting whatever was there previously, and there must always be
         * at least one non-flush write in the journal or recovery will fail:
         */
        set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

        for_each_rw_member(ca, c, i)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);

        ret = bch2_gc_thread_start(c);
        if (ret) {
                bch_err(c, "error starting gc thread");
                goto err;
        }

        ret = bch2_journal_reclaim_start(&c->journal);
        if (ret)
                goto err;

        if (!early) {
                ret = bch2_fs_read_write_late(c);
                if (ret)
                        goto err;
        }

#ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_reinit(&c->writes);
#else
        for (i = 0; i < BCH_WRITE_REF_NR; i++) {
                BUG_ON(atomic_long_read(&c->writes[i]));
                atomic_long_inc(&c->writes[i]);
        }
#endif
        set_bit(BCH_FS_RW, &c->flags);
        set_bit(BCH_FS_WAS_RW, &c->flags);

        bch2_do_discards(c);
        bch2_do_invalidates(c);
        bch2_do_stripe_deletes(c);
        bch2_do_pending_node_rewrites(c);
        return 0;
err:
        __bch2_fs_read_only(c);
        return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
        return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
        lockdep_assert_held(&c->state_lock);

        return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

static void __bch2_fs_free(struct bch_fs *c)
{
        unsigned i;

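        /* Tear down in (roughly) the reverse of the init order in bch2_fs_alloc(): */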
        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_exit(&c->times[i]);

        bch2_free_pending_node_rewrites(c);
        bch2_fs_sb_errors_exit(c);
        bch2_fs_counters_exit(c);
        bch2_fs_snapshots_exit(c);
        bch2_fs_quota_exit(c);
        bch2_fs_fs_io_direct_exit(c);
        bch2_fs_fs_io_buffered_exit(c);
        bch2_fs_fsio_exit(c);
        bch2_fs_ec_exit(c);
        bch2_fs_encryption_exit(c);
        bch2_fs_nocow_locking_exit(c);
        bch2_fs_io_write_exit(c);
        bch2_fs_io_read_exit(c);
        bch2_fs_buckets_waiting_for_journal_exit(c);
        bch2_fs_btree_interior_update_exit(c);
        bch2_fs_btree_iter_exit(c);
        bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
        bch2_fs_btree_cache_exit(c);
        bch2_fs_replicas_exit(c);
        bch2_fs_journal_exit(&c->journal);
        bch2_io_clock_exit(&c->io_clock[WRITE]);
        bch2_io_clock_exit(&c->io_clock[READ]);
        bch2_fs_compress_exit(c);
        bch2_journal_keys_free(&c->journal_keys);
        bch2_journal_entries_free(c);
        bch2_fs_btree_write_buffer_exit(c);
        percpu_free_rwsem(&c->mark_lock);
        free_percpu(c->online_reserved);

        darray_exit(&c->btree_roots_extra);
        free_percpu(c->pcpu);
        mempool_exit(&c->large_bkey_pool);
        mempool_exit(&c->btree_bounce_pool);
        bioset_exit(&c->btree_bio);
        mempool_exit(&c->fill_iter);
#ifndef BCH_WRITE_REF_DEBUG
        percpu_ref_exit(&c->writes);
#endif
        kfree(rcu_dereference_protected(c->disk_groups, 1));
        kfree(c->journal_seq_blacklist_table);
        kfree(c->unused_inode_hints);

        if (c->write_ref_wq)
                destroy_workqueue(c->write_ref_wq);
        if (c->io_complete_wq)
                destroy_workqueue(c->io_complete_wq);
        if (c->copygc_wq)
                destroy_workqueue(c->copygc_wq);
        if (c->btree_io_complete_wq)
                destroy_workqueue(c->btree_io_complete_wq);
        if (c->btree_update_wq)
                destroy_workqueue(c->btree_update_wq);

        bch2_free_super(&c->disk_sb);
        kvpfree(c, sizeof(*c));
        module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        __bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        bch_verbose(c, "shutting down");

        set_bit(BCH_FS_STOPPING, &c->flags);

        cancel_work_sync(&c->journal_seq_blacklist_gc_work);

        down_write(&c->state_lock);
        bch2_fs_read_only(c);
        up_write(&c->state_lock);

        for_each_member_device(ca, c, i)
                if (ca->kobj.state_in_sysfs &&
                    ca->disk_sb.bdev)
                        sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

        if (c->kobj.state_in_sysfs)
                kobject_del(&c->kobj);

        bch2_fs_debug_exit(c);
        bch2_fs_chardev_exit(c);

        kobject_put(&c->counters_kobj);
        kobject_put(&c->time_stats);
        kobject_put(&c->opts_dir);
        kobject_put(&c->internal);

        /* btree prefetch might have kicked off reads in the background: */
        bch2_btree_flush_all_reads(c);

        for_each_member_device(ca, c, i)
                cancel_work_sync(&ca->io_error_work);

        cancel_work_sync(&c->read_only_work);
}

void bch2_fs_free(struct bch_fs *c)
{
        unsigned i;

        mutex_lock(&bch_fs_list_lock);
        list_del(&c->list);
        mutex_unlock(&bch_fs_list_lock);

        closure_sync(&c->cl);
        closure_debug_destroy(&c->cl);

        for (i = 0; i < c->sb.nr_devices; i++) {
                struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

                if (ca) {
                        bch2_free_super(&ca->disk_sb);
                        bch2_dev_free(ca);
                }
        }

        bch_verbose(c, "shutdown complete");

        kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
        __bch2_fs_stop(c);
        bch2_fs_free(c);
}

static int bch2_fs_online(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;
        int ret = 0;

        lockdep_assert_held(&bch_fs_list_lock);

        if (__bch2_uuid_to_fs(c->sb.uuid)) {
                bch_err(c, "filesystem UUID already open");
                return -EINVAL;
        }

        ret = bch2_fs_chardev_init(c);
        if (ret) {
                bch_err(c, "error creating character device");
                return ret;
        }

        bch2_fs_debug_init(c);

        ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
            kobject_add(&c->internal, &c->kobj, "internal") ?:
            kobject_add(&c->opts_dir, &c->kobj, "options") ?:
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
            kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
#endif
            kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
            bch2_opts_create_sysfs_files(&c->opts_dir);
        if (ret) {
                bch_err(c, "error creating sysfs objects");
                return ret;
        }

        down_write(&c->state_lock);

        for_each_member_device(ca, c, i) {
                ret = bch2_dev_sysfs_online(c, ca);
                if (ret) {
                        bch_err(c, "error creating sysfs objects");
                        percpu_ref_put(&ca->ref);
                        goto err;
                }
        }

        BUG_ON(!list_empty(&c->list));
        list_add(&c->list, &bch_fs_list);
err:
        up_write(&c->state_lock);
        return ret;
}

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
        struct bch_fs *c;
        struct printbuf name = PRINTBUF;
        unsigned i, iter_size;
        int ret = 0;

        c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
        if (!c) {
                c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
                goto out;
        }

        __module_get(THIS_MODULE);

        closure_init(&c->cl, NULL);

        c->kobj.kset = bcachefs_kset;
        kobject_init(&c->kobj, &bch2_fs_ktype);
        kobject_init(&c->internal, &bch2_fs_internal_ktype);
        kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
        kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
        kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);

        c->minor                = -1;
        c->disk_sb.fs_sb        = true;

        init_rwsem(&c->state_lock);
        mutex_init(&c->sb_lock);
        mutex_init(&c->replicas_gc_lock);
        mutex_init(&c->btree_root_lock);
        INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

        init_rwsem(&c->gc_lock);
        mutex_init(&c->gc_gens_lock);

        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_init(&c->times[i]);

        bch2_fs_copygc_init(c);
        bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
        bch2_fs_btree_interior_update_init_early(c);
        bch2_fs_allocator_background_init(c);
        bch2_fs_allocator_foreground_init(c);
        bch2_fs_rebalance_init(c);
        bch2_fs_quota_init(c);
        bch2_fs_ec_init_early(c);
        bch2_fs_move_init(c);
        bch2_fs_sb_errors_init_early(c);

        INIT_LIST_HEAD(&c->list);

        mutex_init(&c->usage_scratch_lock);

        mutex_init(&c->bio_bounce_pages_lock);
        mutex_init(&c->snapshot_table_lock);
        init_rwsem(&c->snapshot_create_lock);

        spin_lock_init(&c->btree_write_error_lock);

        INIT_WORK(&c->journal_seq_blacklist_gc_work,
                  bch2_blacklist_entries_gc);

        INIT_LIST_HEAD(&c->journal_iters);

        INIT_LIST_HEAD(&c->fsck_error_msgs);
        mutex_init(&c->fsck_error_msgs_lock);

        seqcount_init(&c->gc_pos_lock);

        seqcount_init(&c->usage_lock);

        sema_init(&c->io_in_flight, 128);

        INIT_LIST_HEAD(&c->vfs_inodes_list);
        mutex_init(&c->vfs_inodes_lock);

        c->copy_gc_enabled              = 1;
        c->rebalance.enabled            = 1;
        c->promote_whole_extents        = true;

        c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
        c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
        c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];

        bch2_fs_btree_cache_init_early(&c->btree_cache);

        mutex_init(&c->sectors_available_lock);

        ret = percpu_init_rwsem(&c->mark_lock);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        ret = bch2_sb_to_fs(c, sb);
        mutex_unlock(&c->sb_lock);

        if (ret)
                goto err;

        pr_uuid(&name, c->sb.user_uuid.b);
        strscpy(c->name, name.buf, sizeof(c->name));
        printbuf_exit(&name);

        ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
        if (ret)
                goto err;

        /* Compat: */
        if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
            !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
                SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);

        if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
            !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
                SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

        c->opts = bch2_opts_default;
        ret = bch2_opts_from_sb(&c->opts, sb);
        if (ret)
                goto err;

        bch2_opts_apply(&c->opts, opts);

        c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
        if (c->opts.inodes_use_key_cache)
                c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
        c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;

        c->block_bits           = ilog2(block_sectors(c));
        c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

        if (bch2_fs_init_fault("fs_alloc")) {
                bch_err(c, "fs_alloc fault injected");
                ret = -EFAULT;
                goto err;
        }

        iter_size = sizeof(struct sort_iter) +
                (btree_blocks(c) + 1) * 2 *
                sizeof(struct sort_iter_set);

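        /* One inode hint shard per CPU, rounded up to a power of two: */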
        c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

        if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
                                WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
            !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
            !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
                                WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
            !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
                                WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
            percpu_ref_init(&c->writes, bch2_writes_disabled,
                            PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
#endif
            mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
            bioset_init(&c->btree_bio, 1,
                        max(offsetof(struct btree_read_bio, bio),
                            offsetof(struct btree_write_bio, wbio.bio)),
                        BIOSET_NEED_BVECS) ||
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
            mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
                                        btree_bytes(c)) ||
            mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
            !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
                                              sizeof(u64), GFP_KERNEL))) {
                ret = -BCH_ERR_ENOMEM_fs_other_alloc;
                goto err;
        }

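        /*
         * Subsystem init, chained with the gcc ?: extension: each call runs
         * only if every preceding call returned 0, and ret ends up holding
         * the first nonzero error code:
         */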
        ret = bch2_fs_counters_init(c) ?:
            bch2_fs_sb_errors_init(c) ?:
            bch2_io_clock_init(&c->io_clock[READ]) ?:
            bch2_io_clock_init(&c->io_clock[WRITE]) ?:
            bch2_fs_journal_init(&c->journal) ?:
            bch2_fs_replicas_init(c) ?:
            bch2_fs_btree_cache_init(c) ?:
            bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
            bch2_fs_btree_iter_init(c) ?:
            bch2_fs_btree_interior_update_init(c) ?:
            bch2_fs_buckets_waiting_for_journal_init(c) ?:
            bch2_fs_btree_write_buffer_init(c) ?:
            bch2_fs_subvolumes_init(c) ?:
            bch2_fs_io_read_init(c) ?:
            bch2_fs_io_write_init(c) ?:
            bch2_fs_nocow_locking_init(c) ?:
            bch2_fs_encryption_init(c) ?:
            bch2_fs_compress_init(c) ?:
            bch2_fs_ec_init(c) ?:
            bch2_fs_fsio_init(c) ?:
            bch2_fs_fs_io_buffered_init(c) ?:
            bch2_fs_fs_io_direct_init(c);
        if (ret)
                goto err;

        for (i = 0; i < c->sb.nr_devices; i++)
                if (bch2_dev_exists(c->disk_sb.sb, i) &&
                    bch2_dev_alloc(c, i)) {
                        ret = -EEXIST;
                        goto err;
                }

        bch2_journal_entry_res_resize(&c->journal,
                        &c->btree_root_journal_res,
                        BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
        bch2_dev_usage_journal_reserve(c);
        bch2_journal_entry_res_resize(&c->journal,
                        &c->clock_journal_res,
                        (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

        mutex_lock(&bch_fs_list_lock);
        ret = bch2_fs_online(c);
        mutex_unlock(&bch_fs_list_lock);

        if (ret)
                goto err;
out:
        return c;
err:
        bch2_fs_free(c);
        c = ERR_PTR(ret);
        goto out;
}

noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
        enum bch_opt_id i;
        struct printbuf p = PRINTBUF;
        bool first = true;

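        /*
         * Builds a message like "mounting version 1.3: ... opts=ro,degraded"
         * (hypothetical example; exact version formatting comes from
         * bch2_version_to_text()), listing only options that differ from the
         * defaults:
         */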
        prt_str(&p, "mounting version ");
        bch2_version_to_text(&p, c->sb.version);

        if (c->opts.read_only) {
                prt_str(&p, " opts=");
                first = false;
                prt_printf(&p, "ro");
        }

        for (i = 0; i < bch2_opts_nr; i++) {
                const struct bch_option *opt = &bch2_opt_table[i];
                u64 v = bch2_opt_get_by_id(&c->opts, i);

                if (!(opt->flags & OPT_MOUNT))
                        continue;

                if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
                        continue;

                prt_str(&p, first ? " opts=" : ",");
                first = false;
                bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
        }

        bch_info(c, "%s", p.buf);
        printbuf_exit(&p);
}

int bch2_fs_start(struct bch_fs *c)
{
        struct bch_dev *ca;
        time64_t now = ktime_get_real_seconds();
        unsigned i;
        int ret;

        print_mount_opts(c);

        down_write(&c->state_lock);

        BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));

        mutex_lock(&c->sb_lock);

        ret = bch2_sb_members_v2_init(c);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                goto err;
        }

        for_each_online_member(ca, c, i)
                bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);

        mutex_unlock(&c->sb_lock);

        for_each_rw_member(ca, c, i)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);

        ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
                ? bch2_fs_recovery(c)
                : bch2_fs_initialize(c);
        if (ret)
                goto err;

        ret = bch2_opts_check_may_set(c);
        if (ret)
                goto err;

        if (bch2_fs_init_fault("fs_start")) {
                bch_err(c, "fs_start fault injected");
                ret = -EINVAL;
                goto err;
        }

        set_bit(BCH_FS_STARTED, &c->flags);

        if (c->opts.read_only || c->opts.nochanges) {
                bch2_fs_read_only(c);
        } else {
                ret = !test_bit(BCH_FS_RW, &c->flags)
                        ? bch2_fs_read_write(c)
                        : bch2_fs_read_write_late(c);
                if (ret)
                        goto err;
        }

        ret = 0;
out:
        up_write(&c->state_lock);
        return ret;
err:
        bch_err_msg(c, ret, "starting filesystem");
        goto out;
}

static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
        struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);

        if (le16_to_cpu(sb->block_size) != block_sectors(c))
                return -BCH_ERR_mismatched_block_size;

        if (le16_to_cpu(m.bucket_size) <
            BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
                return -BCH_ERR_bucket_size_too_small;

        return 0;
}

static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
        struct bch_sb *newest =
                le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;

        if (!uuid_equal(&fs->uuid, &sb->uuid))
                return -BCH_ERR_device_not_a_member_of_filesystem;

        if (!bch2_dev_exists(newest, sb->dev_idx))
                return -BCH_ERR_device_has_been_removed;

        if (fs->block_size != sb->block_size)
                return -BCH_ERR_mismatched_block_size;

        return 0;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

        kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
        cancel_work_sync(&ca->io_error_work);

        if (ca->kobj.state_in_sysfs &&
            ca->disk_sb.bdev)
                sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

        if (ca->kobj.state_in_sysfs)
                kobject_del(&ca->kobj);

        bch2_free_super(&ca->disk_sb);
        bch2_dev_journal_exit(ca);

        free_percpu(ca->io_done);
        bioset_exit(&ca->replica_set);
        bch2_dev_buckets_free(ca);
        free_page((unsigned long) ca->sb_read_scratch);

        bch2_time_stats_exit(&ca->io_latency[WRITE]);
        bch2_time_stats_exit(&ca->io_latency[READ]);

        percpu_ref_exit(&ca->io_ref);
        percpu_ref_exit(&ca->ref);
        kobject_put(&ca->kobj);
}

static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
        lockdep_assert_held(&c->state_lock);

        if (percpu_ref_is_zero(&ca->io_ref))
                return;

        __bch2_dev_read_only(c, ca);

        reinit_completion(&ca->io_ref_completion);
        percpu_ref_kill(&ca->io_ref);
        wait_for_completion(&ca->io_ref_completion);

        if (ca->kobj.state_in_sysfs) {
                sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
                sysfs_remove_link(&ca->kobj, "block");
        }

        bch2_free_super(&ca->disk_sb);
        bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
        struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

        complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
        struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

        complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
        int ret;

        if (!c->kobj.state_in_sysfs)
                return 0;

        if (!ca->kobj.state_in_sysfs) {
                ret = kobject_add(&ca->kobj, &c->kobj,
                                  "dev-%u", ca->dev_idx);
                if (ret)
                        return ret;
        }

        if (ca->disk_sb.bdev) {
                struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

                ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
                if (ret)
                        return ret;

                ret = sysfs_create_link(&ca->kobj, block, "block");
                if (ret)
                        return ret;
        }

        return 0;
}

static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
                                        struct bch_member *member)
{
        struct bch_dev *ca;
        unsigned i;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                return NULL;

        kobject_init(&ca->kobj, &bch2_dev_ktype);
        init_completion(&ca->ref_completion);
        init_completion(&ca->io_ref_completion);

        init_rwsem(&ca->bucket_lock);

        INIT_WORK(&ca->io_error_work, bch2_io_error_work);

        bch2_time_stats_init(&ca->io_latency[READ]);
        bch2_time_stats_init(&ca->io_latency[WRITE]);

        ca->mi = bch2_mi_to_cpu(member);

        for (i = 0; i < ARRAY_SIZE(member->errors); i++)
                atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));

        ca->uuid = member->uuid;

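        /* Buckets to reserve for btree nodes, given how many nodes fit per bucket: */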
        ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
                             ca->mi.bucket_size / btree_sectors(c));

        if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
                            0, GFP_KERNEL) ||
            percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
                            PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
            !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
            bch2_dev_buckets_alloc(c, ca) ||
            bioset_init(&ca->replica_set, 4,
                        offsetof(struct bch_write_bio, bio), 0) ||
            !(ca->io_done       = alloc_percpu(*ca->io_done)))
                goto err;

        return ca;
err:
        bch2_dev_free(ca);
        return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
                            unsigned dev_idx)
{
        ca->dev_idx = dev_idx;
        __set_bit(ca->dev_idx, ca->self.d);
        scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

        ca->fs = c;
        rcu_assign_pointer(c->devs[ca->dev_idx], ca);

        if (bch2_dev_sysfs_online(c, ca))
                pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
        struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
        struct bch_dev *ca = NULL;
        int ret = 0;

        if (bch2_fs_init_fault("dev_alloc"))
                goto err;

        ca = __bch2_dev_alloc(c, &member);
        if (!ca)
                goto err;

        ca->fs = c;

        bch2_dev_attach(c, ca, dev_idx);
        return ret;
err:
        if (ca)
                bch2_dev_free(ca);
        return -BCH_ERR_ENOMEM_dev_alloc;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
        int ret;

        if (bch2_dev_is_online(ca)) {
                bch_err(ca, "already have device online in slot %u",
                        sb->sb->dev_idx);
                return -BCH_ERR_device_already_online;
        }

        if (get_capacity(sb->bdev->bd_disk) <
            ca->mi.bucket_size * ca->mi.nbuckets) {
                bch_err(ca, "cannot online: device too small");
                return -BCH_ERR_device_size_too_small;
        }

        BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

        ret = bch2_dev_journal_init(ca, sb->sb);
        if (ret)
                return ret;

        /* Commit: */
        ca->disk_sb = *sb;
        memset(sb, 0, sizeof(*sb));

        ca->dev = ca->disk_sb.bdev->bd_dev;

        percpu_ref_reinit(&ca->io_ref);

        return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
        struct bch_dev *ca;
        int ret;

        lockdep_assert_held(&c->state_lock);

        if (le64_to_cpu(sb->sb->seq) >
            le64_to_cpu(c->disk_sb.sb->seq))
                bch2_sb_to_fs(c, sb->sb);

        BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
               !c->devs[sb->sb->dev_idx]);

        ca = bch_dev_locked(c, sb->sb->dev_idx);

        ret = __bch2_dev_attach_bdev(ca, sb);
        if (ret)
                return ret;

        bch2_dev_sysfs_online(c, ca);

        if (c->sb.nr_devices == 1)
                snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
        snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);

        rebalance_wakeup(c);
        return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                            enum bch_member_state new_state, int flags)
{
        struct bch_devs_mask new_online_devs;
        struct bch_dev *ca2;
        int i, nr_rw = 0, required;

        lockdep_assert_held(&c->state_lock);

        switch (new_state) {
        case BCH_MEMBER_STATE_rw:
                return true;
        case BCH_MEMBER_STATE_ro:
                if (ca->mi.state != BCH_MEMBER_STATE_rw)
                        return true;

                /* do we have enough devices to write to?  */
                for_each_member_device(ca2, c, i)
                        if (ca2 != ca)
                                nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

                required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
                               ? c->opts.metadata_replicas
                               : c->opts.metadata_replicas_required,
                               !(flags & BCH_FORCE_IF_DATA_DEGRADED)
                               ? c->opts.data_replicas
                               : c->opts.data_replicas_required);

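                /*
                 * e.g. with metadata_replicas = data_replicas = 2 and no
                 * force flags, at least two other rw devices must remain:
                 */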
                return nr_rw >= required;
        case BCH_MEMBER_STATE_failed:
        case BCH_MEMBER_STATE_spare:
                if (ca->mi.state != BCH_MEMBER_STATE_rw &&
                    ca->mi.state != BCH_MEMBER_STATE_ro)
                        return true;

                /* do we have enough devices to read from?  */
                new_online_devs = bch2_online_devs(c);
                __clear_bit(ca->dev_idx, new_online_devs.d);

                return bch2_have_enough_devs(c, new_online_devs, flags, false);
        default:
                BUG();
        }
}

static bool bch2_fs_may_start(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i, flags = 0;

        if (c->opts.very_degraded)
                flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

        if (c->opts.degraded)
                flags |= BCH_FORCE_IF_DEGRADED;

        if (!c->opts.degraded &&
            !c->opts.very_degraded) {
                mutex_lock(&c->sb_lock);

                for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
                        if (!bch2_dev_exists(c->disk_sb.sb, i))
                                continue;

                        ca = bch_dev_locked(c, i);

                        if (!bch2_dev_is_online(ca) &&
                            (ca->mi.state == BCH_MEMBER_STATE_rw ||
                             ca->mi.state == BCH_MEMBER_STATE_ro)) {
                                mutex_unlock(&c->sb_lock);
                                return false;
                        }
                }
                mutex_unlock(&c->sb_lock);
        }

        return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
        /*
         * The allocator thread itself allocates btree nodes, so stop it first:
         */
        bch2_dev_allocator_remove(c, ca);
        bch2_dev_journal_stop(&c->journal, ca);
}

static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
        lockdep_assert_held(&c->state_lock);

        BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

        bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
                         enum bch_member_state new_state, int flags)
{
        struct bch_member *m;
        int ret = 0;

        if (ca->mi.state == new_state)
                return 0;

        if (!bch2_dev_state_allowed(c, ca, new_state, flags))
                return -BCH_ERR_device_state_not_allowed;

        if (new_state != BCH_MEMBER_STATE_rw)
                __bch2_dev_read_only(c, ca);

        bch_notice(ca, "%s", bch2_member_states[new_state]);

        mutex_lock(&c->sb_lock);
        m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
        SET_BCH_MEMBER_STATE(m, new_state);
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (new_state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);

        rebalance_wakeup(c);

        return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
                       enum bch_member_state new_state, int flags)
{
        int ret;

        down_write(&c->state_lock);
        ret = __bch2_dev_set_state(c, ca, new_state, flags);
        up_write(&c->state_lock);

        return ret;
}

/* Device add/removal: */

static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
        struct bpos start       = POS(ca->dev_idx, 0);
        struct bpos end         = POS(ca->dev_idx, U64_MAX);
        int ret;

        /*
         * We clear the LRU and need_discard btrees first so that we don't race
         * with bch2_do_invalidates() and bch2_do_discards()
         */
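        /*
         * All of this device's alloc keys are deleted with
         * BTREE_TRIGGER_NORUN, i.e. without running btree triggers -
         * presumably safe here since the device's alloc info is being
         * dropped wholesale:
         */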
1447         ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
1448                                         BTREE_TRIGGER_NORUN, NULL) ?:
1449                 bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
1450                                         BTREE_TRIGGER_NORUN, NULL) ?:
1451                 bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
1452                                         BTREE_TRIGGER_NORUN, NULL) ?:
1453                 bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
1454                                         BTREE_TRIGGER_NORUN, NULL) ?:
1455                 bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
1456                                         BTREE_TRIGGER_NORUN, NULL) ?:
1457                 bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
1458                                         BTREE_TRIGGER_NORUN, NULL);
1459         if (ret)
1460                 bch_err_msg(c, ret, "removing dev alloc info");
1461
1462         return ret;
1463 }
1464
1465 int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
1466 {
1467         struct bch_member *m;
1468         unsigned dev_idx = ca->dev_idx, data;
1469         int ret;
1470
1471         down_write(&c->state_lock);
1472
1473         /*
1474          * We consume a reference to ca->ref, regardless of whether we succeed
1475          * or fail:
1476          */
1477         percpu_ref_put(&ca->ref);
1478
1479         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1480                 bch_err(ca, "Cannot remove without losing data");
1481                 ret = -BCH_ERR_device_state_not_allowed;
1482                 goto err;
1483         }
1484
1485         __bch2_dev_read_only(c, ca);
1486
1487         ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
1488         if (ret) {
1489                 bch_err_msg(ca, ret, "dropping data");
1490                 goto err;
1491         }
1492
1493         ret = bch2_dev_remove_alloc(c, ca);
1494         if (ret) {
1495                 bch_err_msg(ca, ret, "deleting alloc info");
1496                 goto err;
1497         }
1498
1499         ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
1500         if (ret) {
1501                 bch_err_msg(ca, ret, "flushing journal");
1502                 goto err;
1503         }
1504
1505         ret = bch2_journal_flush(&c->journal);
1506         if (ret) {
1507                 bch_err(ca, "journal error");
1508                 goto err;
1509         }
1510
1511         ret = bch2_replicas_gc2(c);
1512         if (ret) {
1513                 bch_err_msg(ca, ret, "in replicas_gc2()");
1514                 goto err;
1515         }
1516
1517         data = bch2_dev_has_data(c, ca);
1518         if (data) {
1519                 struct printbuf data_has = PRINTBUF;
1520
1521                 prt_bitflags(&data_has, bch2_data_types, data);
1522                 bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
1523                 printbuf_exit(&data_has);
1524                 ret = -EBUSY;
1525                 goto err;
1526         }
1527
1528         __bch2_dev_offline(c, ca);
1529
1530         mutex_lock(&c->sb_lock);
1531         rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
1532         mutex_unlock(&c->sb_lock);
1533
1534         percpu_ref_kill(&ca->ref);
1535         wait_for_completion(&ca->ref_completion);
1536
1537         bch2_dev_free(ca);
1538
1539         /*
1540          * At this point the device object has been removed in-core, but the
1541          * on-disk journal might still refer to the device index via sb device
1542          * usage entries. Recovery fails if it sees usage information for an
1543          * invalid device. Flush journal pins to push the back of the journal
1544          * past now invalid device index references before we update the
1545          * superblock, but after the device object has been removed so any
1546          * further journal writes elide usage info for the device.
1547          */
1548         bch2_journal_flush_all_pins(&c->journal);
1549
1550         /*
1551          * Free this device's slot in the bch_member array - all pointers to
1552          * this device must be gone:
1553          */
1554         mutex_lock(&c->sb_lock);
1555         m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
1556         memset(&m->uuid, 0, sizeof(m->uuid));
1557
1558         bch2_write_super(c);
1559
1560         mutex_unlock(&c->sb_lock);
1561         up_write(&c->state_lock);
1562
1563         bch2_dev_usage_journal_reserve(c);
1564         return 0;
1565 err:
1566         if (ca->mi.state == BCH_MEMBER_STATE_rw &&
1567             !percpu_ref_is_zero(&ca->io_ref))
1568                 __bch2_dev_read_write(c, ca);
1569         up_write(&c->state_lock);
1570         return ret;
1571 }
1572
1573 /* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
        struct bch_opts opts = bch2_opts_empty();
        struct bch_sb_handle sb;
        struct bch_dev *ca = NULL;
        struct bch_sb_field_members_v2 *mi;
        struct bch_member dev_mi;
        unsigned dev_idx, nr_devices, u64s;
        struct printbuf errbuf = PRINTBUF;
        struct printbuf label = PRINTBUF;
        int ret;

        ret = bch2_read_super(path, &opts, &sb);
        if (ret) {
                bch_err_msg(c, ret, "reading super");
                goto err;
        }

        dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);

        if (BCH_MEMBER_GROUP(&dev_mi)) {
                bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
                if (label.allocation_failure) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        ret = bch2_dev_may_add(sb.sb, c);
        if (ret) {
                bch_err_fn(c, ret);
                goto err;
        }

        ca = __bch2_dev_alloc(c, &dev_mi);
        if (!ca) {
                ret = -ENOMEM;
                goto err;
        }

        bch2_dev_usage_init(ca);

        ret = __bch2_dev_attach_bdev(ca, &sb);
        if (ret)
                goto err;

        ret = bch2_dev_journal_alloc(ca);
        if (ret) {
                bch_err_msg(c, ret, "allocating journal");
                goto err;
        }

        down_write(&c->state_lock);
        mutex_lock(&c->sb_lock);

        ret = bch2_sb_from_fs(c, ca);
        if (ret) {
                bch_err_msg(c, ret, "setting up new superblock");
                goto err_unlock;
        }

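        /*
         * Find the first member slot not in use by an existing device
         * (dynamic_fault() lets tests exercise the no-free-slot path):
         */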
        if (dynamic_fault("bcachefs:add:no_slot"))
                goto no_slot;

        for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
                if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
                        goto have_slot;
no_slot:
        ret = -BCH_ERR_ENOSPC_sb_members;
        bch_err_msg(c, ret, "setting up new superblock");
        goto err_unlock;

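        /*
         * Resize the members_v2 section to hold nr_devices entries; u64s is
         * the new section size in units of u64, as superblock fields are
         * sized:
         */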
have_slot:
        nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);

        mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
        u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
                            le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));

        mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
        if (!mi) {
                ret = -BCH_ERR_ENOSPC_sb_members;
                bch_err_msg(c, ret, "setting up new superblock");
                goto err_unlock;
        }
        struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);

        /* success: */

        *m = dev_mi;
        m->last_mount = cpu_to_le64(ktime_get_real_seconds());
        c->disk_sb.sb->nr_devices       = nr_devices;

        ca->disk_sb.sb->dev_idx = dev_idx;
        bch2_dev_attach(c, ca, dev_idx);

        if (BCH_MEMBER_GROUP(&dev_mi)) {
                ret = __bch2_dev_group_set(c, ca, label.buf);
                if (ret) {
                        bch_err_msg(c, ret, "creating new label");
                        goto err_unlock;
                }
        }

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        bch2_dev_usage_journal_reserve(c);

        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret) {
                bch_err_msg(ca, ret, "marking new superblock");
                goto err_late;
        }

        ret = bch2_fs_freespace_init(c);
        if (ret) {
                bch_err_msg(ca, ret, "initializing free space");
                goto err_late;
        }

        ca->new_fs_bucket_idx = 0;

        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);

        up_write(&c->state_lock);

        /* the label/errbuf printbufs may hold allocations - free them on
         * success too, not just on the error paths below: */
        printbuf_exit(&label);
        printbuf_exit(&errbuf);
        return 0;

err_unlock:
        mutex_unlock(&c->sb_lock);
        up_write(&c->state_lock);
err:
        if (ca)
                bch2_dev_free(ca);
        bch2_free_super(&sb);
        printbuf_exit(&label);
        printbuf_exit(&errbuf);
        return ret;
err_late:
        up_write(&c->state_lock);
        ca = NULL;
        goto err;
}

/* Hot add existing device to running filesystem: */
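/*
 * The device is already a filesystem member here - reattach the block
 * device, replay superblock bucket marks, and bring it read-write; also
 * initialize freespace and allocate a journal if the device never got
 * them:
 */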
int bch2_dev_online(struct bch_fs *c, const char *path)
{
        struct bch_opts opts = bch2_opts_empty();
        struct bch_sb_handle sb = { NULL };
        struct bch_dev *ca;
        unsigned dev_idx;
        int ret;

        down_write(&c->state_lock);

        ret = bch2_read_super(path, &opts, &sb);
        if (ret) {
                up_write(&c->state_lock);
                return ret;
        }

        dev_idx = sb.sb->dev_idx;

        ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
        if (ret) {
                bch_err_msg(c, ret, "bringing %s online", path);
                goto err;
        }

        ret = bch2_dev_attach_bdev(c, &sb);
        if (ret)
                goto err;

        ca = bch_dev_locked(c, dev_idx);

        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret) {
                bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
                goto err;
        }

        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);

        if (!ca->mi.freespace_initialized) {
                ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
                bch_err_msg(ca, ret, "initializing free space");
                if (ret)
                        goto err;
        }

        if (!ca->journal.nr) {
                ret = bch2_dev_journal_alloc(ca);
                bch_err_msg(ca, ret, "allocating journal");
                if (ret)
                        goto err;
        }

        mutex_lock(&c->sb_lock);
        bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
                cpu_to_le64(ktime_get_real_seconds());
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        up_write(&c->state_lock);
        return 0;
err:
        up_write(&c->state_lock);
        bch2_free_super(&sb);
        return ret;
}

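/* Take a member device offline without removing it from the filesystem: */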
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
        down_write(&c->state_lock);

        if (!bch2_dev_is_online(ca)) {
                bch_err(ca, "Already offline");
                up_write(&c->state_lock);
                return 0;
        }

        if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
                bch_err(ca, "Cannot offline required disk");
                up_write(&c->state_lock);
                return -BCH_ERR_device_state_not_allowed;
        }

        __bch2_dev_offline(c, ca);

        up_write(&c->state_lock);
        return 0;
}

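/* Grow a member device to nbuckets buckets (shrinking isn't supported yet): */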
int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
        struct bch_member *m;
        u64 old_nbuckets;
        int ret = 0;

        down_write(&c->state_lock);
        old_nbuckets = ca->mi.nbuckets;

        if (nbuckets < ca->mi.nbuckets) {
                bch_err(ca, "Cannot shrink yet");
                ret = -EINVAL;
                goto err;
        }

        if (bch2_dev_is_online(ca) &&
            get_capacity(ca->disk_sb.bdev->bd_disk) <
            ca->mi.bucket_size * nbuckets) {
                bch_err(ca, "New size larger than device");
                ret = -BCH_ERR_device_size_too_small;
                goto err;
        }

        ret = bch2_dev_buckets_resize(c, ca, nbuckets);
        if (ret) {
                bch_err_msg(ca, ret, "resizing buckets");
                goto err;
        }

        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
        m->nbuckets = cpu_to_le64(nbuckets);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (ca->mi.freespace_initialized) {
                ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
                if (ret)
                        goto err;

                /*
                 * XXX: this is all wrong transactionally - we'll be able to do
                 * this correctly after the disk space accounting rewrite
                 */
                ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
        }

        bch2_recalc_capacity(c);
err:
        up_write(&c->state_lock);
        return ret;
}

/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
        struct bch_dev *ca;
        unsigned i;

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i, NULL)
                if (!strcmp(name, ca->name))
                        goto found;
        ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
found:
        rcu_read_unlock();

        return ca;
}

/* Filesystem open: */

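/*
 * Read the superblock from each device, take the one with the highest
 * sequence number as authoritative, drop devices it no longer lists as
 * members, then allocate the filesystem and attach the block devices:
 */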
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                            struct bch_opts opts)
{
        DARRAY(struct bch_sb_handle) sbs = { 0 };
        struct bch_fs *c = NULL;
        struct bch_sb_handle *sb, *best = NULL;
        struct printbuf errbuf = PRINTBUF;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-ENODEV);

        if (!nr_devices) {
                ret = -EINVAL;
                goto err;
        }

        ret = darray_make_room(&sbs, nr_devices);
        if (ret)
                goto err;

        for (unsigned i = 0; i < nr_devices; i++) {
                struct bch_sb_handle sb = { NULL };

                ret = bch2_read_super(devices[i], &opts, &sb);
                if (ret)
                        goto err;

                BUG_ON(darray_push(&sbs, sb));
        }

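        /* the superblock with the highest seq was written most recently: */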
        darray_for_each(sbs, sb)
                if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
                        best = sb;

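        /*
         * Drop devices that best's member list says were removed; deleting
         * an entry shifts the array down, so adjust best if it sat after the
         * deleted slot:
         */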
        darray_for_each_reverse(sbs, sb) {
                if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
                        pr_info("%pg has been removed, skipping", sb->bdev);
                        bch2_free_super(sb);
                        darray_remove_item(&sbs, sb);
                        best -= best > sb;
                        continue;
                }

                ret = bch2_dev_in_fs(best->sb, sb->sb);
                if (ret)
                        goto err_print;
        }

        c = bch2_fs_alloc(best->sb, opts);
        ret = PTR_ERR_OR_ZERO(c);
        if (ret)
                goto err;

        down_write(&c->state_lock);
        darray_for_each(sbs, sb) {
                ret = bch2_dev_attach_bdev(c, sb);
                if (ret) {
                        up_write(&c->state_lock);
                        goto err;
                }
        }
        up_write(&c->state_lock);

        if (!bch2_fs_may_start(c)) {
                ret = -BCH_ERR_insufficient_devices_to_start;
                goto err_print;
        }

        if (!c->opts.nostart) {
                ret = bch2_fs_start(c);
                if (ret)
                        goto err;
        }
out:
        darray_for_each(sbs, sb)
                bch2_free_super(sb);
        darray_exit(&sbs);
        printbuf_exit(&errbuf);
        module_put(THIS_MODULE);
        return c;
err_print:
        pr_err("bch_fs_open err opening %s: %s",
               devices[0], bch2_err_str(ret));
err:
        if (!IS_ERR_OR_NULL(c))
                bch2_fs_stop(c);
        c = ERR_PTR(ret);
        goto out;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
        bch2_debug_exit();
        bch2_vfs_exit();
        bch2_chardev_exit();
        bch2_btree_key_cache_exit();
        if (bcachefs_kset)
                kset_unregister(bcachefs_kset);
}

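/*
 * Register the bcachefs sysfs kset and the key cache, chardev, vfs and
 * debug subsystems; on any failure, tear down whatever did come up:
 */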
static int __init bcachefs_init(void)
{
        bch2_bkey_pack_test();

        if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
            bch2_btree_key_cache_init() ||
            bch2_chardev_init() ||
            bch2_vfs_init() ||
            bch2_debug_init())
                goto err;

        return 0;
err:
        bcachefs_exit();
        return -ENOMEM;
}

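/* expose each BCH_DEBUG_PARAMS() knob as a runtime-writable module parameter: */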
#define BCH_DEBUG_PARAM(name, description)                      \
        bool bch2_##name;                                       \
        module_param_named(name, bch2_##name, bool, 0644);      \
        MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);