libbcachefs/super.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs setup/teardown code, and some metadata io - read a superblock and
4  * figure out what to do with it.
5  *
6  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7  * Copyright 2012 Google, Inc.
8  */
9
10 #include "bcachefs.h"
11 #include "alloc_background.h"
12 #include "alloc_foreground.h"
13 #include "bkey_sort.h"
14 #include "btree_cache.h"
15 #include "btree_gc.h"
16 #include "btree_key_cache.h"
17 #include "btree_update_interior.h"
18 #include "btree_io.h"
19 #include "btree_write_buffer.h"
20 #include "buckets_waiting_for_journal.h"
21 #include "chardev.h"
22 #include "checksum.h"
23 #include "clock.h"
24 #include "compress.h"
25 #include "counters.h"
26 #include "debug.h"
27 #include "disk_groups.h"
28 #include "ec.h"
29 #include "errcode.h"
30 #include "error.h"
31 #include "fs.h"
32 #include "fs-io.h"
33 #include "fsck.h"
34 #include "inode.h"
35 #include "io.h"
36 #include "journal.h"
37 #include "journal_reclaim.h"
38 #include "journal_seq_blacklist.h"
39 #include "move.h"
40 #include "migrate.h"
41 #include "movinggc.h"
42 #include "nocow_locking.h"
43 #include "quota.h"
44 #include "rebalance.h"
45 #include "recovery.h"
46 #include "replicas.h"
47 #include "subvolume.h"
48 #include "super.h"
49 #include "super-io.h"
50 #include "sysfs.h"
51 #include "trace.h"
52
53 #include <linux/backing-dev.h>
54 #include <linux/blkdev.h>
55 #include <linux/debugfs.h>
56 #include <linux/device.h>
57 #include <linux/idr.h>
58 #include <linux/module.h>
59 #include <linux/percpu.h>
60 #include <linux/random.h>
61 #include <linux/sysfs.h>
62 #include <crypto/hash.h>
63
64 MODULE_LICENSE("GPL");
65 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
66
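/*
 * KTYPE(type) generates the sysfs boilerplate for one kobject type: an
 * attribute group wrapping type ## _files, a NULL-terminated list of groups,
 * and a kobj_type wiring up the release hook and sysfs_ops.
 */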
67 #define KTYPE(type)                                                     \
68 static const struct attribute_group type ## _group = {                  \
69         .attrs = type ## _files                                         \
70 };                                                                      \
71                                                                         \
72 static const struct attribute_group *type ## _groups[] = {              \
73         &type ## _group,                                                \
74         NULL                                                            \
75 };                                                                      \
76                                                                         \
77 static const struct kobj_type type ## _ktype = {                        \
78         .release        = type ## _release,                             \
79         .sysfs_ops      = &type ## _sysfs_ops,                          \
80         .default_groups = type ## _groups                               \
81 }
82
83 static void bch2_fs_release(struct kobject *);
84 static void bch2_dev_release(struct kobject *);
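/*
 * These kobjects are embedded in struct bch_fs and freed along with it, so
 * their release hooks have nothing to do:
 */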
85 static void bch2_fs_counters_release(struct kobject *k)
86 {
87 }
88
89 static void bch2_fs_internal_release(struct kobject *k)
90 {
91 }
92
93 static void bch2_fs_opts_dir_release(struct kobject *k)
94 {
95 }
96
97 static void bch2_fs_time_stats_release(struct kobject *k)
98 {
99 }
100
101 KTYPE(bch2_fs);
102 KTYPE(bch2_fs_counters);
103 KTYPE(bch2_fs_internal);
104 KTYPE(bch2_fs_opts_dir);
105 KTYPE(bch2_fs_time_stats);
106 KTYPE(bch2_dev);
107
108 static struct kset *bcachefs_kset;
109 static LIST_HEAD(bch_fs_list);
110 static DEFINE_MUTEX(bch_fs_list_lock);
111
112 DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);
113
114 static void bch2_dev_free(struct bch_dev *);
115 static int bch2_dev_alloc(struct bch_fs *, unsigned);
116 static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
117 static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
118
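/*
 * bch2_dev_to_fs() - find the filesystem that has @dev as a member device;
 * on success a ref on c->cl is taken, which the caller must drop.
 */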
119 struct bch_fs *bch2_dev_to_fs(dev_t dev)
120 {
121         struct bch_fs *c;
122         struct bch_dev *ca;
123         unsigned i;
124
125         mutex_lock(&bch_fs_list_lock);
126         rcu_read_lock();
127
128         list_for_each_entry(c, &bch_fs_list, list)
129                 for_each_member_device_rcu(ca, c, i, NULL)
130                         if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
131                                 closure_get(&c->cl);
132                                 goto found;
133                         }
134         c = NULL;
135 found:
136         rcu_read_unlock();
137         mutex_unlock(&bch_fs_list_lock);
138
139         return c;
140 }
141
142 static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
143 {
144         struct bch_fs *c;
145
146         lockdep_assert_held(&bch_fs_list_lock);
147
148         list_for_each_entry(c, &bch_fs_list, list)
149                 if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
150                         return c;
151
152         return NULL;
153 }
154
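/*
 * bch2_uuid_to_fs() - look up a filesystem by external UUID, taking a ref on
 * c->cl if found.
 */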
155 struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
156 {
157         struct bch_fs *c;
158
159         mutex_lock(&bch_fs_list_lock);
160         c = __bch2_uuid_to_fs(uuid);
161         if (c)
162                 closure_get(&c->cl);
163         mutex_unlock(&bch_fs_list_lock);
164
165         return c;
166 }
167
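/*
 * Resize the journal entry reservation for per-device usage entries to match
 * the current number of member devices.
 */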
168 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
169 {
170         struct bch_dev *ca;
171         unsigned i, nr = 0, u64s =
172                 ((sizeof(struct jset_entry_dev_usage) +
173                   sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
174                 sizeof(u64);
175
176         rcu_read_lock();
177         for_each_member_device_rcu(ca, c, i, NULL)
178                 nr++;
179         rcu_read_unlock();
180
181         bch2_journal_entry_res_resize(&c->journal,
182                         &c->dev_usage_journal_res, u64s * nr);
183 }
184
185 /* Filesystem RO/RW: */
186
187 /*
188  * For startup/shutdown of RW stuff, the dependencies are:
189  *
190  * - foreground writes depend on copygc and rebalance (to free up space)
191  *
192  * - copygc and rebalance depend on mark and sweep gc (they actually probably
193  *   don't because they either reserve ahead of time or don't block if
194  *   allocations fail, but allocations can require mark and sweep gc to run
195  *   because of generation number wraparound)
196  *
197  * - all of the above depends on the allocator threads
198  *
199  * - allocator depends on the journal (when it rewrites prios and gens)
200  */
201
202 static void __bch2_fs_read_only(struct bch_fs *c)
203 {
204         struct bch_dev *ca;
205         unsigned i, clean_passes = 0;
206         u64 seq = 0;
207
208         bch2_fs_ec_stop(c);
209         bch2_open_buckets_stop(c, NULL, true);
210         bch2_rebalance_stop(c);
211         bch2_copygc_stop(c);
212         bch2_gc_thread_stop(c);
213         bch2_fs_ec_flush(c);
214
215         bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
216                     journal_cur_seq(&c->journal));
217
218         do {
219                 clean_passes++;
220
221                 if (bch2_btree_interior_updates_flush(c) ||
222                     bch2_journal_flush_all_pins(&c->journal) ||
223                     bch2_btree_flush_all_writes(c) ||
224                     seq != atomic64_read(&c->journal.seq)) {
225                         seq = atomic64_read(&c->journal.seq);
226                         clean_passes = 0;
227                 }
228         } while (clean_passes < 2);
229
230         bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
231                     journal_cur_seq(&c->journal));
232
233         if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
234             !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
235                 set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
236         bch2_fs_journal_stop(&c->journal);
237
238         /*
239          * After stopping the journal, remove devices from the allocator:
240          */
241         for_each_member_device(ca, c, i)
242                 bch2_dev_allocator_remove(c, ca);
243 }
244
245 #ifndef BCH_WRITE_REF_DEBUG
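/* Called once c->writes hits zero, after percpu_ref_kill(): */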
246 static void bch2_writes_disabled(struct percpu_ref *writes)
247 {
248         struct bch_fs *c = container_of(writes, struct bch_fs, writes);
249
250         set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
251         wake_up(&bch2_read_only_wait);
252 }
253 #endif
254
255 void bch2_fs_read_only(struct bch_fs *c)
256 {
257         if (!test_bit(BCH_FS_RW, &c->flags)) {
258                 bch2_journal_reclaim_stop(&c->journal);
259                 return;
260         }
261
262         BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
263
264         /*
265          * Block new foreground-end write operations from starting - any new
266          * writes will return -EROFS:
267          */
268         set_bit(BCH_FS_GOING_RO, &c->flags);
269 #ifndef BCH_WRITE_REF_DEBUG
270         percpu_ref_kill(&c->writes);
271 #else
272         for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
273                 bch2_write_ref_put(c, i);
274 #endif
275
276         /*
277          * If we're not doing an emergency shutdown, we want to wait on
278          * outstanding writes to complete so they don't see spurious errors due
279          * to shutting down the allocator:
280          *
281          * If we are doing an emergency shutdown, outstanding writes may
282          * hang until we shut down the allocator, so we don't want to wait
283          * on outstanding writes before shutting everything down - but
284          * we do need to wait on them before returning and signalling
285          * that going RO is complete:
286          */
287         wait_event(bch2_read_only_wait,
288                    test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
289                    test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
290
291         __bch2_fs_read_only(c);
292
293         wait_event(bch2_read_only_wait,
294                    test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
295
296         clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
297         clear_bit(BCH_FS_GOING_RO, &c->flags);
298
299         if (!bch2_journal_error(&c->journal) &&
300             !test_bit(BCH_FS_ERROR, &c->flags) &&
301             !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
302             test_bit(BCH_FS_STARTED, &c->flags) &&
303             test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
304             !c->opts.norecovery) {
305                 BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
306                 BUG_ON(atomic_read(&c->btree_cache.dirty));
307                 BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
308                 BUG_ON(c->btree_write_buffer.state.nr);
309
310                 bch_verbose(c, "marking filesystem clean");
311                 bch2_fs_mark_clean(c);
312         }
313
314         clear_bit(BCH_FS_RW, &c->flags);
315 }
316
317 static void bch2_fs_read_only_work(struct work_struct *work)
318 {
319         struct bch_fs *c =
320                 container_of(work, struct bch_fs, read_only_work);
321
322         down_write(&c->state_lock);
323         bch2_fs_read_only(c);
324         up_write(&c->state_lock);
325 }
326
327 static void bch2_fs_read_only_async(struct bch_fs *c)
328 {
329         queue_work(system_long_wq, &c->read_only_work);
330 }
331
332 bool bch2_fs_emergency_read_only(struct bch_fs *c)
333 {
334         bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
335
336         bch2_journal_halt(&c->journal);
337         bch2_fs_read_only_async(c);
338
339         wake_up(&bch2_read_only_wait);
340         return ret;
341 }
342
343 static int bch2_fs_read_write_late(struct bch_fs *c)
344 {
345         int ret;
346
347         /*
348          * Data move operations can't run until after check_snapshots has
349          * completed, and bch2_snapshot_is_ancestor() is available.
350          *
351          * Ideally we'd start copygc/rebalance earlier instead of waiting for
352          * all of recovery/fsck to complete:
353          */
354         ret = bch2_copygc_start(c);
355         if (ret) {
356                 bch_err(c, "error starting copygc thread");
357                 return ret;
358         }
359
360         ret = bch2_rebalance_start(c);
361         if (ret) {
362                 bch_err(c, "error starting rebalance thread");
363                 return ret;
364         }
365
366         return 0;
367 }
368
369 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
370 {
371         struct bch_dev *ca;
372         unsigned i;
373         int ret;
374
375         if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
376                 bch_err(c, "cannot go rw, unfixed btree errors");
377                 return -BCH_ERR_erofs_unfixed_errors;
378         }
379
380         if (test_bit(BCH_FS_RW, &c->flags))
381                 return 0;
382
383         if (c->opts.norecovery)
384                 return -BCH_ERR_erofs_norecovery;
385
386         /*
387          * nochanges is used for fsck -n mode - we have to allow going rw
388          * during recovery for that to work:
389          */
390         if (c->opts.nochanges && (!early || c->opts.read_only))
391                 return -BCH_ERR_erofs_nochanges;
392
393         bch_info(c, "going read-write");
394
395         ret = bch2_fs_mark_dirty(c);
396         if (ret)
397                 goto err;
398
399         clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
400
401         /*
402          * First journal write must be a flush write: after a clean shutdown we
403          * don't read the journal, so the first journal write may end up
404          * overwriting whatever was there previously, and there must always be
405          * at least one non-flush write in the journal or recovery will fail:
406          */
407         set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
408
409         for_each_rw_member(ca, c, i)
410                 bch2_dev_allocator_add(c, ca);
411         bch2_recalc_capacity(c);
412
413         ret = bch2_gc_thread_start(c);
414         if (ret) {
415                 bch_err(c, "error starting gc thread");
416                 goto err;
417         }
418
419         if (!early) {
420                 ret = bch2_fs_read_write_late(c);
421                 if (ret)
422                         goto err;
423         }
424
425 #ifndef BCH_WRITE_REF_DEBUG
426         percpu_ref_reinit(&c->writes);
427 #else
428         for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
429                 BUG_ON(atomic_long_read(&c->writes[i]));
430                 atomic_long_inc(&c->writes[i]);
431         }
432 #endif
433         set_bit(BCH_FS_RW, &c->flags);
434         set_bit(BCH_FS_WAS_RW, &c->flags);
435
436         bch2_do_discards(c);
437         bch2_do_invalidates(c);
438         bch2_do_stripe_deletes(c);
439         bch2_do_pending_node_rewrites(c);
440         return 0;
441 err:
442         __bch2_fs_read_only(c);
443         return ret;
444 }
445
446 int bch2_fs_read_write(struct bch_fs *c)
447 {
448         return __bch2_fs_read_write(c, false);
449 }
450
451 int bch2_fs_read_write_early(struct bch_fs *c)
452 {
453         lockdep_assert_held(&c->state_lock);
454
455         return __bch2_fs_read_write(c, true);
456 }
457
458 /* Filesystem startup/shutdown: */
459
460 static void __bch2_fs_free(struct bch_fs *c)
461 {
462         unsigned i;
463         int cpu;
464
465         for (i = 0; i < BCH_TIME_STAT_NR; i++)
466                 bch2_time_stats_exit(&c->times[i]);
467
468         bch2_free_pending_node_rewrites(c);
469         bch2_fs_counters_exit(c);
470         bch2_fs_snapshots_exit(c);
471         bch2_fs_quota_exit(c);
472         bch2_fs_fsio_exit(c);
473         bch2_fs_ec_exit(c);
474         bch2_fs_encryption_exit(c);
475         bch2_fs_io_exit(c);
476         bch2_fs_buckets_waiting_for_journal_exit(c);
477         bch2_fs_btree_interior_update_exit(c);
478         bch2_fs_btree_iter_exit(c);
479         bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
480         bch2_fs_btree_cache_exit(c);
481         bch2_fs_replicas_exit(c);
482         bch2_fs_journal_exit(&c->journal);
483         bch2_io_clock_exit(&c->io_clock[WRITE]);
484         bch2_io_clock_exit(&c->io_clock[READ]);
485         bch2_fs_compress_exit(c);
486         bch2_journal_keys_free(&c->journal_keys);
487         bch2_journal_entries_free(c);
488         bch2_fs_btree_write_buffer_exit(c);
489         percpu_free_rwsem(&c->mark_lock);
490         free_percpu(c->online_reserved);
491
492         if (c->btree_paths_bufs)
493                 for_each_possible_cpu(cpu)
494                         kfree(per_cpu_ptr(c->btree_paths_bufs, cpu)->path);
495
496         darray_exit(&c->btree_roots_extra);
497         free_percpu(c->btree_paths_bufs);
498         free_percpu(c->pcpu);
499         mempool_exit(&c->large_bkey_pool);
500         mempool_exit(&c->btree_bounce_pool);
501         bioset_exit(&c->btree_bio);
502         mempool_exit(&c->fill_iter);
503 #ifndef BCH_WRITE_REF_DEBUG
504         percpu_ref_exit(&c->writes);
505 #endif
506         kfree(rcu_dereference_protected(c->disk_groups, 1));
507         kfree(c->journal_seq_blacklist_table);
508         kfree(c->unused_inode_hints);
509
510         if (c->write_ref_wq)
511                 destroy_workqueue(c->write_ref_wq);
512         if (c->io_complete_wq)
513                 destroy_workqueue(c->io_complete_wq);
514         if (c->copygc_wq)
515                 destroy_workqueue(c->copygc_wq);
516         if (c->btree_io_complete_wq)
517                 destroy_workqueue(c->btree_io_complete_wq);
518         if (c->btree_update_wq)
519                 destroy_workqueue(c->btree_update_wq);
520
521         bch2_free_super(&c->disk_sb);
522         kvpfree(c, sizeof(*c));
523         module_put(THIS_MODULE);
524 }
525
526 static void bch2_fs_release(struct kobject *kobj)
527 {
528         struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
529
530         __bch2_fs_free(c);
531 }
532
533 void __bch2_fs_stop(struct bch_fs *c)
534 {
535         struct bch_dev *ca;
536         unsigned i;
537
538         bch_verbose(c, "shutting down");
539
540         set_bit(BCH_FS_STOPPING, &c->flags);
541
542         cancel_work_sync(&c->journal_seq_blacklist_gc_work);
543
544         down_write(&c->state_lock);
545         bch2_fs_read_only(c);
546         up_write(&c->state_lock);
547
548         for_each_member_device(ca, c, i)
549                 if (ca->kobj.state_in_sysfs &&
550                     ca->disk_sb.bdev)
551                         sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
552
553         if (c->kobj.state_in_sysfs)
554                 kobject_del(&c->kobj);
555
556         bch2_fs_debug_exit(c);
557         bch2_fs_chardev_exit(c);
558
559         kobject_put(&c->counters_kobj);
560         kobject_put(&c->time_stats);
561         kobject_put(&c->opts_dir);
562         kobject_put(&c->internal);
563
564         /* btree prefetch might have kicked off reads in the background: */
565         bch2_btree_flush_all_reads(c);
566
567         for_each_member_device(ca, c, i)
568                 cancel_work_sync(&ca->io_error_work);
569
570         cancel_work_sync(&c->read_only_work);
571
572         for (i = 0; i < c->sb.nr_devices; i++) {
573                 struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
574
575                 if (ca)
576                         bch2_free_super(&ca->disk_sb);
577         }
578 }
579
580 void bch2_fs_free(struct bch_fs *c)
581 {
582         unsigned i;
583
584         mutex_lock(&bch_fs_list_lock);
585         list_del(&c->list);
586         mutex_unlock(&bch_fs_list_lock);
587
588         closure_sync(&c->cl);
589         closure_debug_destroy(&c->cl);
590
591         for (i = 0; i < c->sb.nr_devices; i++)
592                 if (c->devs[i])
593                         bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));
594
595         bch_verbose(c, "shutdown complete");
596
597         kobject_put(&c->kobj);
598 }
599
600 void bch2_fs_stop(struct bch_fs *c)
601 {
602         __bch2_fs_stop(c);
603         bch2_fs_free(c);
604 }
605
606 static int bch2_fs_online(struct bch_fs *c)
607 {
608         struct bch_dev *ca;
609         unsigned i;
610         int ret = 0;
611
612         lockdep_assert_held(&bch_fs_list_lock);
613
614         if (__bch2_uuid_to_fs(c->sb.uuid)) {
615                 bch_err(c, "filesystem UUID already open");
616                 return -EINVAL;
617         }
618
619         ret = bch2_fs_chardev_init(c);
620         if (ret) {
621                 bch_err(c, "error creating character device");
622                 return ret;
623         }
624
625         bch2_fs_debug_init(c);
626
627         ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
628             kobject_add(&c->internal, &c->kobj, "internal") ?:
629             kobject_add(&c->opts_dir, &c->kobj, "options") ?:
630             kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
631             kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
632             bch2_opts_create_sysfs_files(&c->opts_dir);
633         if (ret) {
634                 bch_err(c, "error creating sysfs objects");
635                 return ret;
636         }
637
638         down_write(&c->state_lock);
639
640         for_each_member_device(ca, c, i) {
641                 ret = bch2_dev_sysfs_online(c, ca);
642                 if (ret) {
643                         bch_err(c, "error creating sysfs objects");
644                         percpu_ref_put(&ca->ref);
645                         goto err;
646                 }
647         }
648
649         BUG_ON(!list_empty(&c->list));
650         list_add(&c->list, &bch_fs_list);
651 err:
652         up_write(&c->state_lock);
653         return ret;
654 }
655
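/*
 * Allocate and initialize an in-memory bch_fs from an on-disk superblock and
 * the given mount options; returns an ERR_PTR() on failure.
 */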
656 static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
657 {
658         struct bch_sb_field_members *mi;
659         struct bch_fs *c;
660         struct printbuf name = PRINTBUF;
661         unsigned i, iter_size;
662         int ret = 0;
663
664         c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
665         if (!c) {
666                 c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
667                 goto out;
668         }
669
670         __module_get(THIS_MODULE);
671
672         closure_init(&c->cl, NULL);
673
674         c->kobj.kset = bcachefs_kset;
675         kobject_init(&c->kobj, &bch2_fs_ktype);
676         kobject_init(&c->internal, &bch2_fs_internal_ktype);
677         kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
678         kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
679         kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);
680
681         c->minor                = -1;
682         c->disk_sb.fs_sb        = true;
683
684         init_rwsem(&c->state_lock);
685         mutex_init(&c->sb_lock);
686         mutex_init(&c->replicas_gc_lock);
687         mutex_init(&c->btree_root_lock);
688         INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
689
690         init_rwsem(&c->gc_lock);
691         mutex_init(&c->gc_gens_lock);
692
693         for (i = 0; i < BCH_TIME_STAT_NR; i++)
694                 bch2_time_stats_init(&c->times[i]);
695
696         bch2_fs_copygc_init(c);
697         bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
698         bch2_fs_btree_interior_update_init_early(c);
699         bch2_fs_allocator_background_init(c);
700         bch2_fs_allocator_foreground_init(c);
701         bch2_fs_rebalance_init(c);
702         bch2_fs_quota_init(c);
703         bch2_fs_ec_init_early(c);
704         bch2_fs_move_init(c);
705
706         INIT_LIST_HEAD(&c->list);
707
708         mutex_init(&c->usage_scratch_lock);
709
710         mutex_init(&c->bio_bounce_pages_lock);
711         mutex_init(&c->snapshot_table_lock);
712
713         spin_lock_init(&c->btree_write_error_lock);
714
715         INIT_WORK(&c->journal_seq_blacklist_gc_work,
716                   bch2_blacklist_entries_gc);
717
718         INIT_LIST_HEAD(&c->journal_iters);
719
720         INIT_LIST_HEAD(&c->fsck_errors);
721         mutex_init(&c->fsck_error_lock);
722
723         seqcount_init(&c->gc_pos_lock);
724
725         seqcount_init(&c->usage_lock);
726
727         sema_init(&c->io_in_flight, 128);
728
729         INIT_LIST_HEAD(&c->vfs_inodes_list);
730         mutex_init(&c->vfs_inodes_lock);
731
732         c->copy_gc_enabled              = 1;
733         c->rebalance.enabled            = 1;
734         c->promote_whole_extents        = true;
735
736         c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
737         c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
738         c->journal.blocked_time         = &c->times[BCH_TIME_blocked_journal];
739         c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];
740
741         bch2_fs_btree_cache_init_early(&c->btree_cache);
742
743         mutex_init(&c->sectors_available_lock);
744
745         ret = percpu_init_rwsem(&c->mark_lock);
746         if (ret)
747                 goto err;
748
749         mutex_lock(&c->sb_lock);
750         ret = bch2_sb_to_fs(c, sb);
751         mutex_unlock(&c->sb_lock);
752
753         if (ret)
754                 goto err;
755
756         pr_uuid(&name, c->sb.user_uuid.b);
757         strscpy(c->name, name.buf, sizeof(c->name));
758         printbuf_exit(&name);
759
760         ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
761         if (ret)
762                 goto err;
763
764         /* Compat: */
765         if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
766             !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
767                 SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
768
769         if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
770             !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
771                 SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
772
773         c->opts = bch2_opts_default;
774         ret = bch2_opts_from_sb(&c->opts, sb);
775         if (ret)
776                 goto err;
777
778         bch2_opts_apply(&c->opts, opts);
779
780         c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
781         if (c->opts.inodes_use_key_cache)
782                 c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
783
784         c->block_bits           = ilog2(block_sectors(c));
785         c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
786
787         if (bch2_fs_init_fault("fs_alloc")) {
788                 bch_err(c, "fs_alloc fault injected");
789                 ret = -EFAULT;
790                 goto err;
791         }
792
793         iter_size = sizeof(struct sort_iter) +
794                 (btree_blocks(c) + 1) * 2 *
795                 sizeof(struct sort_iter_set);
796
797         c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
798
799         if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
800                                 WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
801             !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
802                                 WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
803             !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
804                                 WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
805             !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
806                                 WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
807             !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
808                                 WQ_FREEZABLE, 0)) ||
809 #ifndef BCH_WRITE_REF_DEBUG
810             percpu_ref_init(&c->writes, bch2_writes_disabled,
811                             PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
812 #endif
813             mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
814             bioset_init(&c->btree_bio, 1,
815                         max(offsetof(struct btree_read_bio, bio),
816                             offsetof(struct btree_write_bio, wbio.bio)),
817                         BIOSET_NEED_BVECS) ||
818             !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
819             !(c->online_reserved = alloc_percpu(u64)) ||
820             !(c->btree_paths_bufs = alloc_percpu(struct btree_path_buf)) ||
821             mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
822                                         btree_bytes(c)) ||
823             mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
824             !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
825                                               sizeof(u64), GFP_KERNEL))) {
826                 ret = -BCH_ERR_ENOMEM_fs_other_alloc;
827                 goto err;
828         }
829
830         ret = bch2_fs_counters_init(c) ?:
831             bch2_io_clock_init(&c->io_clock[READ]) ?:
832             bch2_io_clock_init(&c->io_clock[WRITE]) ?:
833             bch2_fs_journal_init(&c->journal) ?:
834             bch2_fs_replicas_init(c) ?:
835             bch2_fs_btree_cache_init(c) ?:
836             bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
837             bch2_fs_btree_iter_init(c) ?:
838             bch2_fs_btree_interior_update_init(c) ?:
839             bch2_fs_buckets_waiting_for_journal_init(c) ?:
840             bch2_fs_btree_write_buffer_init(c) ?:
841             bch2_fs_subvolumes_init(c) ?:
842             bch2_fs_io_init(c) ?:
843             bch2_fs_nocow_locking_init(c) ?:
844             bch2_fs_encryption_init(c) ?:
845             bch2_fs_compress_init(c) ?:
846             bch2_fs_ec_init(c) ?:
847             bch2_fs_fsio_init(c);
848         if (ret)
849                 goto err;
850
851         mi = bch2_sb_get_members(c->disk_sb.sb);
852         for (i = 0; i < c->sb.nr_devices; i++)
853                 if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&
854                     bch2_dev_alloc(c, i)) {
855                         ret = -EEXIST;
856                         goto err;
857                 }
858
859         bch2_journal_entry_res_resize(&c->journal,
860                         &c->btree_root_journal_res,
861                         BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
862         bch2_dev_usage_journal_reserve(c);
863         bch2_journal_entry_res_resize(&c->journal,
864                         &c->clock_journal_res,
865                         (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
866
867         mutex_lock(&bch_fs_list_lock);
868         ret = bch2_fs_online(c);
869         mutex_unlock(&bch_fs_list_lock);
870
871         if (ret)
872                 goto err;
873 out:
874         return c;
875 err:
876         bch2_fs_free(c);
877         c = ERR_PTR(ret);
878         goto out;
879 }
880
881 noinline_for_stack
882 static void print_mount_opts(struct bch_fs *c)
883 {
884         enum bch_opt_id i;
885         struct printbuf p = PRINTBUF;
886         bool first = true;
887
888         prt_str(&p, "mounted version ");
889         bch2_version_to_text(&p, c->sb.version);
890
891         if (c->opts.read_only) {
892                 prt_str(&p, " opts=");
893                 first = false;
894                 prt_printf(&p, "ro");
895         }
896
897         for (i = 0; i < bch2_opts_nr; i++) {
898                 const struct bch_option *opt = &bch2_opt_table[i];
899                 u64 v = bch2_opt_get_by_id(&c->opts, i);
900
901                 if (!(opt->flags & OPT_MOUNT))
902                         continue;
903
904                 if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
905                         continue;
906
907                 prt_str(&p, first ? " opts=" : ",");
908                 first = false;
909                 bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
910         }
911
912         bch_info(c, "%s", p.buf);
913         printbuf_exit(&p);
914 }
915
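/*
 * bch2_fs_start() - run recovery (or initialize a brand new filesystem), then
 * bring the filesystem to its initial read-write or read-only state.
 */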
916 int bch2_fs_start(struct bch_fs *c)
917 {
918         struct bch_sb_field_members *mi;
919         struct bch_dev *ca;
920         time64_t now = ktime_get_real_seconds();
921         unsigned i;
922         int ret;
923
924         down_write(&c->state_lock);
925
926         BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
927
928         mutex_lock(&c->sb_lock);
929
930         for_each_online_member(ca, c, i)
931                 bch2_sb_from_fs(c, ca);
932
933         mi = bch2_sb_get_members(c->disk_sb.sb);
934         for_each_online_member(ca, c, i)
935                 mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);
936
937         mutex_unlock(&c->sb_lock);
938
939         for_each_rw_member(ca, c, i)
940                 bch2_dev_allocator_add(c, ca);
941         bch2_recalc_capacity(c);
942
943         for (i = 0; i < BCH_TRANSACTIONS_NR; i++) {
944                 mutex_lock(&c->btree_transaction_stats[i].lock);
945                 bch2_time_stats_init(&c->btree_transaction_stats[i].lock_hold_times);
946                 mutex_unlock(&c->btree_transaction_stats[i].lock);
947         }
948
949         ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
950                 ? bch2_fs_recovery(c)
951                 : bch2_fs_initialize(c);
952         if (ret)
953                 goto err;
954
955         ret = bch2_opts_check_may_set(c);
956         if (ret)
957                 goto err;
958
959         if (bch2_fs_init_fault("fs_start")) {
960                 bch_err(c, "fs_start fault injected");
961                 ret = -EINVAL;
962                 goto err;
963         }
964
965         set_bit(BCH_FS_STARTED, &c->flags);
966
967         if (c->opts.read_only || c->opts.nochanges) {
968                 bch2_fs_read_only(c);
969         } else {
970                 ret = !test_bit(BCH_FS_RW, &c->flags)
971                         ? bch2_fs_read_write(c)
972                         : bch2_fs_read_write_late(c);
973                 if (ret)
974                         goto err;
975         }
976
977         print_mount_opts(c);
978         ret = 0;
979 out:
980         up_write(&c->state_lock);
981         return ret;
982 err:
983         bch_err(c, "error starting filesystem: %s", bch2_err_str(ret));
984         goto out;
985 }
986
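/*
 * Check whether the device described by @sb may be added to @c: its block
 * size and bucket size must be compatible with the filesystem.
 */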
987 static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
988 {
989         struct bch_sb_field_members *sb_mi;
990
991         sb_mi = bch2_sb_get_members(sb);
992         if (!sb_mi)
993                 return -BCH_ERR_member_info_missing;
994
995         if (le16_to_cpu(sb->block_size) != block_sectors(c))
996                 return -BCH_ERR_mismatched_block_size;
997
998         if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
999             BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
1000                 return -BCH_ERR_bucket_size_too_small;
1001
1002         return 0;
1003 }
1004
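/*
 * Check that @sb is a member of the filesystem described by @fs, treating
 * whichever superblock has the higher sequence number as authoritative.
 */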
1005 static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
1006 {
1007         struct bch_sb *newest =
1008                 le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
1009         struct bch_sb_field_members *mi = bch2_sb_get_members(newest);
1010
1011         if (!uuid_equal(&fs->uuid, &sb->uuid))
1012                 return -BCH_ERR_device_not_a_member_of_filesystem;
1013
1014         if (!bch2_dev_exists(newest, mi, sb->dev_idx))
1015                 return -BCH_ERR_device_has_been_removed;
1016
1017         if (fs->block_size != sb->block_size)
1018                 return -BCH_ERR_mismatched_block_size;
1019
1020         return 0;
1021 }
1022
1023 /* Device startup/shutdown: */
1024
1025 static void bch2_dev_release(struct kobject *kobj)
1026 {
1027         struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
1028
1029         kfree(ca);
1030 }
1031
1032 static void bch2_dev_free(struct bch_dev *ca)
1033 {
1034         cancel_work_sync(&ca->io_error_work);
1035
1036         if (ca->kobj.state_in_sysfs &&
1037             ca->disk_sb.bdev)
1038                 sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
1039
1040         if (ca->kobj.state_in_sysfs)
1041                 kobject_del(&ca->kobj);
1042
1043         bch2_free_super(&ca->disk_sb);
1044         bch2_dev_journal_exit(ca);
1045
1046         free_percpu(ca->io_done);
1047         bioset_exit(&ca->replica_set);
1048         bch2_dev_buckets_free(ca);
1049         free_page((unsigned long) ca->sb_read_scratch);
1050
1051         bch2_time_stats_exit(&ca->io_latency[WRITE]);
1052         bch2_time_stats_exit(&ca->io_latency[READ]);
1053
1054         percpu_ref_exit(&ca->io_ref);
1055         percpu_ref_exit(&ca->ref);
1056         kobject_put(&ca->kobj);
1057 }
1058
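/*
 * Take a member device offline: stop doing I/O to it, drop its io_ref, and
 * release its superblock and journal resources. The device remains a member
 * of the filesystem.
 */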
1059 static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
1060 {
1061
1062         lockdep_assert_held(&c->state_lock);
1063
1064         if (percpu_ref_is_zero(&ca->io_ref))
1065                 return;
1066
1067         __bch2_dev_read_only(c, ca);
1068
1069         reinit_completion(&ca->io_ref_completion);
1070         percpu_ref_kill(&ca->io_ref);
1071         wait_for_completion(&ca->io_ref_completion);
1072
1073         if (ca->kobj.state_in_sysfs) {
1074                 sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
1075                 sysfs_remove_link(&ca->kobj, "block");
1076         }
1077
1078         bch2_free_super(&ca->disk_sb);
1079         bch2_dev_journal_exit(ca);
1080 }
1081
1082 static void bch2_dev_ref_complete(struct percpu_ref *ref)
1083 {
1084         struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
1085
1086         complete(&ca->ref_completion);
1087 }
1088
1089 static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
1090 {
1091         struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
1092
1093         complete(&ca->io_ref_completion);
1094 }
1095
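/*
 * Create the sysfs objects for @ca under the filesystem's kobject, plus the
 * "bcachefs"/"block" symlinks between the device and its block device.
 */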
1096 static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
1097 {
1098         int ret;
1099
1100         if (!c->kobj.state_in_sysfs)
1101                 return 0;
1102
1103         if (!ca->kobj.state_in_sysfs) {
1104                 ret = kobject_add(&ca->kobj, &c->kobj,
1105                                   "dev-%u", ca->dev_idx);
1106                 if (ret)
1107                         return ret;
1108         }
1109
1110         if (ca->disk_sb.bdev) {
1111                 struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
1112
1113                 ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
1114                 if (ret)
1115                         return ret;
1116
1117                 ret = sysfs_create_link(&ca->kobj, block, "block");
1118                 if (ret)
1119                         return ret;
1120         }
1121
1122         return 0;
1123 }
1124
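/*
 * Allocate and initialize an in-memory bch_dev from on-disk member info;
 * does not attach it to the filesystem.
 */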
1125 static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
1126                                         struct bch_member *member)
1127 {
1128         struct bch_dev *ca;
1129
1130         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1131         if (!ca)
1132                 return NULL;
1133
1134         kobject_init(&ca->kobj, &bch2_dev_ktype);
1135         init_completion(&ca->ref_completion);
1136         init_completion(&ca->io_ref_completion);
1137
1138         init_rwsem(&ca->bucket_lock);
1139
1140         INIT_WORK(&ca->io_error_work, bch2_io_error_work);
1141
1142         bch2_time_stats_init(&ca->io_latency[READ]);
1143         bch2_time_stats_init(&ca->io_latency[WRITE]);
1144
1145         ca->mi = bch2_mi_to_cpu(member);
1146         ca->uuid = member->uuid;
1147
1148         ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1149                              ca->mi.bucket_size / btree_sectors(c));
1150
1151         if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
1152                             0, GFP_KERNEL) ||
1153             percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
1154                             PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
1155             !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
1156             bch2_dev_buckets_alloc(c, ca) ||
1157             bioset_init(&ca->replica_set, 4,
1158                         offsetof(struct bch_write_bio, bio), 0) ||
1159             !(ca->io_done       = alloc_percpu(*ca->io_done)))
1160                 goto err;
1161
1162         return ca;
1163 err:
1164         bch2_dev_free(ca);
1165         return NULL;
1166 }
1167
1168 static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
1169                             unsigned dev_idx)
1170 {
1171         ca->dev_idx = dev_idx;
1172         __set_bit(ca->dev_idx, ca->self.d);
1173         scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
1174
1175         ca->fs = c;
1176         rcu_assign_pointer(c->devs[ca->dev_idx], ca);
1177
1178         if (bch2_dev_sysfs_online(c, ca))
1179                 pr_warn("error creating sysfs objects");
1180 }
1181
1182 static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
1183 {
1184         struct bch_member *member =
1185                 bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx;
1186         struct bch_dev *ca = NULL;
1187         int ret = 0;
1188
1189         if (bch2_fs_init_fault("dev_alloc"))
1190                 goto err;
1191
1192         ca = __bch2_dev_alloc(c, member);
1193         if (!ca)
1194                 goto err;
1195
1196         ca->fs = c;
1197
1198         bch2_dev_attach(c, ca, dev_idx);
1199         return ret;
1200 err:
1201         if (ca)
1202                 bch2_dev_free(ca);
1203         return -BCH_ERR_ENOMEM_dev_alloc;
1204 }
1205
1206 static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
1207 {
1208         int ret;
1209
1210         if (bch2_dev_is_online(ca)) {
1211                 bch_err(ca, "already have device online in slot %u",
1212                         sb->sb->dev_idx);
1213                 return -BCH_ERR_device_already_online;
1214         }
1215
1216         if (get_capacity(sb->bdev->bd_disk) <
1217             ca->mi.bucket_size * ca->mi.nbuckets) {
1218                 bch_err(ca, "cannot online: device too small");
1219                 return -BCH_ERR_device_size_too_small;
1220         }
1221
1222         BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
1223
1224         ret = bch2_dev_journal_init(ca, sb->sb);
1225         if (ret)
1226                 return ret;
1227
1228         /* Commit: */
1229         ca->disk_sb = *sb;
1230         if (sb->mode & FMODE_EXCL)
1231                 ca->disk_sb.bdev->bd_holder = ca;
1232         memset(sb, 0, sizeof(*sb));
1233
1234         ca->dev = ca->disk_sb.bdev->bd_dev;
1235
1236         percpu_ref_reinit(&ca->io_ref);
1237
1238         return 0;
1239 }
1240
1241 static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
1242 {
1243         struct bch_dev *ca;
1244         int ret;
1245
1246         lockdep_assert_held(&c->state_lock);
1247
1248         if (le64_to_cpu(sb->sb->seq) >
1249             le64_to_cpu(c->disk_sb.sb->seq))
1250                 bch2_sb_to_fs(c, sb->sb);
1251
1252         BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
1253                !c->devs[sb->sb->dev_idx]);
1254
1255         ca = bch_dev_locked(c, sb->sb->dev_idx);
1256
1257         ret = __bch2_dev_attach_bdev(ca, sb);
1258         if (ret)
1259                 return ret;
1260
1261         bch2_dev_sysfs_online(c, ca);
1262
1263         if (c->sb.nr_devices == 1)
1264                 snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
1265         snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);
1266
1267         rebalance_wakeup(c);
1268         return 0;
1269 }
1270
1271 /* Device management: */
1272
1273 /*
1274  * Note: this function is also used by the error paths - when a particular
1275  * device sees an error, we call it to determine whether we can just set the
1276  * device RO, or - if this function returns false - we'll set the whole
1277  * filesystem RO:
1278  *
1279  * XXX: maybe we should be more explicit about whether we're changing state
1280  * because we got an error or what have you?
1281  */
1282 bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
1283                             enum bch_member_state new_state, int flags)
1284 {
1285         struct bch_devs_mask new_online_devs;
1286         struct bch_dev *ca2;
1287         int i, nr_rw = 0, required;
1288
1289         lockdep_assert_held(&c->state_lock);
1290
1291         switch (new_state) {
1292         case BCH_MEMBER_STATE_rw:
1293                 return true;
1294         case BCH_MEMBER_STATE_ro:
1295                 if (ca->mi.state != BCH_MEMBER_STATE_rw)
1296                         return true;
1297
1298                 /* do we have enough devices to write to?  */
1299                 for_each_member_device(ca2, c, i)
1300                         if (ca2 != ca)
1301                                 nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
1302
1303                 required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
1304                                ? c->opts.metadata_replicas
1305                                : c->opts.metadata_replicas_required,
1306                                !(flags & BCH_FORCE_IF_DATA_DEGRADED)
1307                                ? c->opts.data_replicas
1308                                : c->opts.data_replicas_required);
1309
1310                 return nr_rw >= required;
1311         case BCH_MEMBER_STATE_failed:
1312         case BCH_MEMBER_STATE_spare:
1313                 if (ca->mi.state != BCH_MEMBER_STATE_rw &&
1314                     ca->mi.state != BCH_MEMBER_STATE_ro)
1315                         return true;
1316
1317                 /* do we have enough devices to read from?  */
1318                 new_online_devs = bch2_online_devs(c);
1319                 __clear_bit(ca->dev_idx, new_online_devs.d);
1320
1321                 return bch2_have_enough_devs(c, new_online_devs, flags, false);
1322         default:
1323                 BUG();
1324         }
1325 }
1326
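/*
 * Check whether we have enough online devices to start: unless -o degraded or
 * -o very_degraded was specified, every rw/ro member must be present.
 */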
1327 static bool bch2_fs_may_start(struct bch_fs *c)
1328 {
1329         struct bch_sb_field_members *mi;
1330         struct bch_dev *ca;
1331         unsigned i, flags = 0;
1332
1333         if (c->opts.very_degraded)
1334                 flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;
1335
1336         if (c->opts.degraded)
1337                 flags |= BCH_FORCE_IF_DEGRADED;
1338
1339         if (!c->opts.degraded &&
1340             !c->opts.very_degraded) {
1341                 mutex_lock(&c->sb_lock);
1342                 mi = bch2_sb_get_members(c->disk_sb.sb);
1343
1344                 for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
1345                         if (!bch2_dev_exists(c->disk_sb.sb, mi, i))
1346                                 continue;
1347
1348                         ca = bch_dev_locked(c, i);
1349
1350                         if (!bch2_dev_is_online(ca) &&
1351                             (ca->mi.state == BCH_MEMBER_STATE_rw ||
1352                              ca->mi.state == BCH_MEMBER_STATE_ro)) {
1353                                 mutex_unlock(&c->sb_lock);
1354                                 return false;
1355                         }
1356                 }
1357                 mutex_unlock(&c->sb_lock);
1358         }
1359
1360         return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
1361 }
1362
1363 static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
1364 {
1365         /*
1366          * The allocator thread itself allocates btree nodes, so stop it first:
1367          */
1368         bch2_dev_allocator_remove(c, ca);
1369         bch2_dev_journal_stop(&c->journal, ca);
1370 }
1371
1372 static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
1373 {
1374         lockdep_assert_held(&c->state_lock);
1375
1376         BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
1377
1378         bch2_dev_allocator_add(c, ca);
1379         bch2_recalc_capacity(c);
1380 }
1381
1382 int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
1383                          enum bch_member_state new_state, int flags)
1384 {
1385         struct bch_sb_field_members *mi;
1386         int ret = 0;
1387
1388         if (ca->mi.state == new_state)
1389                 return 0;
1390
1391         if (!bch2_dev_state_allowed(c, ca, new_state, flags))
1392                 return -BCH_ERR_device_state_not_allowed;
1393
1394         if (new_state != BCH_MEMBER_STATE_rw)
1395                 __bch2_dev_read_only(c, ca);
1396
1397         bch_notice(ca, "%s", bch2_member_states[new_state]);
1398
1399         mutex_lock(&c->sb_lock);
1400         mi = bch2_sb_get_members(c->disk_sb.sb);
1401         SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
1402         bch2_write_super(c);
1403         mutex_unlock(&c->sb_lock);
1404
1405         if (new_state == BCH_MEMBER_STATE_rw)
1406                 __bch2_dev_read_write(c, ca);
1407
1408         rebalance_wakeup(c);
1409
1410         return ret;
1411 }
1412
1413 int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
1414                        enum bch_member_state new_state, int flags)
1415 {
1416         int ret;
1417
1418         down_write(&c->state_lock);
1419         ret = __bch2_dev_set_state(c, ca, new_state, flags);
1420         up_write(&c->state_lock);
1421
1422         return ret;
1423 }
1424
1425 /* Device add/removal: */
1426
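/*
 * Delete all of a device's entries from the alloc info btrees, as part of
 * device removal:
 */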
1427 static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
1428 {
1429         struct bpos start       = POS(ca->dev_idx, 0);
1430         struct bpos end         = POS(ca->dev_idx, U64_MAX);
1431         int ret;
1432
1433         /*
1434          * We clear the LRU and need_discard btrees first so that we don't race
1435          * with bch2_do_invalidates() and bch2_do_discards()
1436          */
1437         ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
1438                                         BTREE_TRIGGER_NORUN, NULL) ?:
1439                 bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
1440                                         BTREE_TRIGGER_NORUN, NULL) ?:
1441                 bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
1442                                         BTREE_TRIGGER_NORUN, NULL) ?:
1443                 bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
1444                                         BTREE_TRIGGER_NORUN, NULL) ?:
1445                 bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
1446                                         BTREE_TRIGGER_NORUN, NULL) ?:
1447                 bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
1448                                         BTREE_TRIGGER_NORUN, NULL);
1449         if (ret)
1450                 bch_err(c, "error removing dev alloc info: %s", bch2_err_str(ret));
1451
1452         return ret;
1453 }
1454
1455 int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
1456 {
1457         struct bch_sb_field_members *mi;
1458         unsigned dev_idx = ca->dev_idx, data;
1459         int ret;
1460
1461         down_write(&c->state_lock);
1462
1463         /*
1464          * We consume a reference to ca->ref, regardless of whether we succeed
1465          * or fail:
1466          */
1467         percpu_ref_put(&ca->ref);
1468
1469         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1470                 bch_err(ca, "Cannot remove without losing data");
1471                 ret = -BCH_ERR_device_state_not_allowed;
1472                 goto err;
1473         }
1474
1475         __bch2_dev_read_only(c, ca);
1476
1477         ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
1478         if (ret) {
1479                 bch_err(ca, "Remove failed: error dropping data: %s", bch2_err_str(ret));
1480                 goto err;
1481         }
1482
1483         ret = bch2_dev_remove_alloc(c, ca);
1484         if (ret) {
1485                 bch_err(ca, "Remove failed, error deleting alloc info");
1486                 goto err;
1487         }
1488
1489         ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
1490         if (ret) {
1491                 bch_err(ca, "Remove failed: error flushing journal: %s", bch2_err_str(ret));
1492                 goto err;
1493         }
1494
1495         ret = bch2_journal_flush(&c->journal);
1496         if (ret) {
1497                 bch_err(ca, "Remove failed, journal error");
1498                 goto err;
1499         }
1500
1501         ret = bch2_replicas_gc2(c);
1502         if (ret) {
1503                 bch_err(ca, "Remove failed: error from replicas gc: %s", bch2_err_str(ret));
1504                 goto err;
1505         }
1506
1507         data = bch2_dev_has_data(c, ca);
1508         if (data) {
1509                 struct printbuf data_has = PRINTBUF;
1510
1511                 prt_bitflags(&data_has, bch2_data_types, data);
1512                 bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
1513                 printbuf_exit(&data_has);
1514                 ret = -EBUSY;
1515                 goto err;
1516         }
1517
1518         __bch2_dev_offline(c, ca);
1519
1520         mutex_lock(&c->sb_lock);
1521         rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
1522         mutex_unlock(&c->sb_lock);
1523
1524         percpu_ref_kill(&ca->ref);
1525         wait_for_completion(&ca->ref_completion);
1526
1527         bch2_dev_free(ca);
1528
1529         /*
1530          * At this point the device object has been removed in-core, but the
1531          * on-disk journal might still refer to the device index via sb device
1532          * usage entries. Recovery fails if it sees usage information for an
1533          * invalid device. Flush journal pins to push the back of the journal
1534          * past now invalid device index references before we update the
1535          * superblock, but after the device object has been removed so any
1536          * further journal writes elide usage info for the device.
1537          */
1538         bch2_journal_flush_all_pins(&c->journal);
1539
1540         /*
1541          * Free this device's slot in the bch_member array - all pointers to
1542          * this device must be gone:
1543          */
1544         mutex_lock(&c->sb_lock);
1545         mi = bch2_sb_get_members(c->disk_sb.sb);
1546         memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));
1547
1548         bch2_write_super(c);
1549
1550         mutex_unlock(&c->sb_lock);
1551         up_write(&c->state_lock);
1552
1553         bch2_dev_usage_journal_reserve(c);
1554         return 0;
1555 err:
1556         if (ca->mi.state == BCH_MEMBER_STATE_rw &&
1557             !percpu_ref_is_zero(&ca->io_ref))
1558                 __bch2_dev_read_write(c, ca);
1559         up_write(&c->state_lock);
1560         return ret;
1561 }
1562
1563 /* Add new device to running filesystem: */
1564 int bch2_dev_add(struct bch_fs *c, const char *path)
1565 {
1566         struct bch_opts opts = bch2_opts_empty();
1567         struct bch_sb_handle sb;
1568         struct bch_dev *ca = NULL;
1569         struct bch_sb_field_members *mi;
1570         struct bch_member dev_mi;
1571         unsigned dev_idx, nr_devices, u64s;
1572         struct printbuf errbuf = PRINTBUF;
1573         struct printbuf label = PRINTBUF;
1574         int ret;
1575
1576         ret = bch2_read_super(path, &opts, &sb);
1577         if (ret) {
1578                 bch_err(c, "device add error: error reading super: %s", bch2_err_str(ret));
1579                 goto err;
1580         }
1581
1582         dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];
1583
1584         if (BCH_MEMBER_GROUP(&dev_mi)) {
1585                 bch2_disk_path_to_text(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
1586                 if (label.allocation_failure) {
1587                         ret = -ENOMEM;
1588                         goto err;
1589                 }
1590         }
1591
1592         ret = bch2_dev_may_add(sb.sb, c);
1593         if (ret) {
1594                 bch_err(c, "device add error: %s", bch2_err_str(ret));
1595                 goto err;
1596         }
1597
1598         ca = __bch2_dev_alloc(c, &dev_mi);
1599         if (!ca) {
1600                 bch2_free_super(&sb);
1601                 ret = -ENOMEM;
1602                 goto err;
1603         }
1604
1605         bch2_dev_usage_init(ca);
1606
1607         ret = __bch2_dev_attach_bdev(ca, &sb);
1608         if (ret)
1609                 goto err;       /* ca is freed via the err label */
1612
1613         ret = bch2_dev_journal_alloc(ca);
1614         if (ret) {
1615                 bch_err(c, "device add error: journal alloc failed: %s", bch2_err_str(ret));
1616                 goto err;
1617         }
1618
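        /* Lock ordering: state_lock before sb_lock, as in the other device add/remove/online paths */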
1619         down_write(&c->state_lock);
1620         mutex_lock(&c->sb_lock);
1621
1622         ret = bch2_sb_from_fs(c, ca);
1623         if (ret) {
1624                 bch_err(c, "device add error: new device superblock too small");
1625                 goto err_unlock;
1626         }
1627
1628         mi = bch2_sb_get_members(ca->disk_sb.sb);
1629
1630         if (!bch2_sb_resize_members(&ca->disk_sb,
1631                                 le32_to_cpu(mi->field.u64s) +
1632                                 sizeof(dev_mi) / sizeof(u64))) {
1633                 bch_err(c, "device add error: new device superblock too small");
1634                 ret = -BCH_ERR_ENOSPC_sb_members;
1635                 goto err_unlock;
1636         }
1637
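        /*
         * dynamic_fault() is a fault-injection hook; "bcachefs:add:no_slot"
         * lets tests exercise the out-of-member-slots error path below.
         */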
1638         if (dynamic_fault("bcachefs:add:no_slot"))
1639                 goto no_slot;
1640
1641         mi = bch2_sb_get_members(c->disk_sb.sb);
1642         for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
1643                 if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
1644                         goto have_slot;
1645 no_slot:
1646         bch_err(c, "device add error: already have maximum number of devices");
1647         ret = -BCH_ERR_ENOSPC_sb_members;
1648         goto err_unlock;
1649
1650 have_slot:
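        /*
         * Grow the superblock members field so it has room for dev_idx;
         * superblock field sizes are in units of u64s, hence the conversion.
         */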
1651         nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
1652         u64s = (sizeof(struct bch_sb_field_members) +
1653                 sizeof(struct bch_member) * nr_devices) / sizeof(u64);
1654
1655         mi = bch2_sb_resize_members(&c->disk_sb, u64s);
1656         if (!mi) {
1657                 bch_err(c, "device add error: no room in superblock for member info");
1658                 ret = -BCH_ERR_ENOSPC_sb_members;
1659                 goto err_unlock;
1660         }
1661
1662         /* success: */
1663
1664         mi->members[dev_idx] = dev_mi;
1665         mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_real_seconds());
1666         c->disk_sb.sb->nr_devices       = nr_devices;
1667
1668         ca->disk_sb.sb->dev_idx = dev_idx;
1669         bch2_dev_attach(c, ca, dev_idx);
1670
1671         if (BCH_MEMBER_GROUP(&dev_mi)) {
1672                 ret = __bch2_dev_group_set(c, ca, label.buf);
1673                 if (ret) {
1674                         bch_err(c, "device add error: error setting label");
1675                         goto err_unlock;
1676                 }
1677         }
1678
1679         bch2_write_super(c);
1680         mutex_unlock(&c->sb_lock);
1681
1682         bch2_dev_usage_journal_reserve(c);
1683
1684         ret = bch2_trans_mark_dev_sb(c, ca);
1685         if (ret) {
1686                 bch_err(c, "device add error: error marking new superblock: %s", bch2_err_str(ret));
1687                 goto err_late;
1688         }
1689
1690         ret = bch2_fs_freespace_init(c);
1691         if (ret) {
1692                 bch_err(c, "device add error: error initializing free space: %s", bch2_err_str(ret));
1693                 goto err_late;
1694         }
1695
1696         ca->new_fs_bucket_idx = 0;
1697
1698         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1699                 __bch2_dev_read_write(c, ca);
1700
1701         up_write(&c->state_lock);
1702         return 0;
1703
1704 err_unlock:
1705         mutex_unlock(&c->sb_lock);
1706         up_write(&c->state_lock);
1707 err:
1708         if (ca)
1709                 bch2_dev_free(ca);
1710         bch2_free_super(&sb);
1711         printbuf_exit(&label);
1712         printbuf_exit(&errbuf);
1713         return ret;
1714 err_late:
1715         up_write(&c->state_lock);
1716         ca = NULL;
1717         goto err;
1718 }
1719
1720 /* Hot add existing device to running filesystem: */
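/*
 * Unlike bch2_dev_add(), the device is already a member of the filesystem
 * (it has a slot in the superblock member array) - we're only reattaching
 * its block device and bringing it back into service.
 */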
1721 int bch2_dev_online(struct bch_fs *c, const char *path)
1722 {
1723         struct bch_opts opts = bch2_opts_empty();
1724         struct bch_sb_handle sb = { NULL };
1725         struct bch_sb_field_members *mi;
1726         struct bch_dev *ca;
1727         unsigned dev_idx;
1728         int ret;
1729
1730         down_write(&c->state_lock);
1731
1732         ret = bch2_read_super(path, &opts, &sb);
1733         if (ret) {
1734                 up_write(&c->state_lock);
1735                 return ret;
1736         }
1737
1738         dev_idx = sb.sb->dev_idx;
1739
1740         ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
1741         if (ret) {
1742                 bch_err(c, "error bringing %s online: %s", path, bch2_err_str(ret));
1743                 goto err;
1744         }
1745
1746         ret = bch2_dev_attach_bdev(c, &sb);
1747         if (ret)
1748                 goto err;
1749
1750         ca = bch_dev_locked(c, dev_idx);
1751
1752         ret = bch2_trans_mark_dev_sb(c, ca);
1753         if (ret) {
1754                 bch_err(c, "error bringing %s online: error from bch2_trans_mark_dev_sb: %s",
1755                         path, bch2_err_str(ret));
1756                 goto err;
1757         }
1758
1759         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1760                 __bch2_dev_read_write(c, ca);
1761
1762         mutex_lock(&c->sb_lock);
1763         mi = bch2_sb_get_members(c->disk_sb.sb);
1764
1765         mi->members[ca->dev_idx].last_mount =
1766                 cpu_to_le64(ktime_get_real_seconds());
1767
1768         bch2_write_super(c);
1769         mutex_unlock(&c->sb_lock);
1770
1771         ret = bch2_fs_freespace_init(c);
1772         if (ret)
1773                 bch_err(c, "error bringing %s online: error initializing free space: %s", path, bch2_err_str(ret));
1774
1775         up_write(&c->state_lock);
1776         return 0;
1777 err:
1778         up_write(&c->state_lock);
1779         bch2_free_super(&sb);
1780         return ret;
1781 }
1782
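/*
 * Take a member device offline without removing it: its superblock member
 * entry is kept, so it can be brought back later with bch2_dev_online().
 */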
1783 int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
1784 {
1785         down_write(&c->state_lock);
1786
1787         if (!bch2_dev_is_online(ca)) {
1788                 bch_err(ca, "Already offline");
1789                 up_write(&c->state_lock);
1790                 return 0;
1791         }
1792
1793         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1794                 bch_err(ca, "Cannot offline required disk");
1795                 up_write(&c->state_lock);
1796                 return -BCH_ERR_device_state_not_allowed;
1797         }
1798
1799         __bch2_dev_offline(c, ca);
1800
1801         up_write(&c->state_lock);
1802         return 0;
1803 }
1804
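/*
 * Grow a member device to nbuckets buckets (shrinking isn't supported yet).
 * nbuckets is the new size in units of the device's bucket size; this is
 * reached from the disk resize ioctl path (BCH_IOCTL_DISK_RESIZE).
 */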
1805 int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1806 {
1807         struct bch_member *mi;
1808         int ret = 0;
1809
1810         down_write(&c->state_lock);
1811
1812         if (nbuckets < ca->mi.nbuckets) {
1813                 bch_err(ca, "Cannot shrink yet");
1814                 ret = -EINVAL;
1815                 goto err;
1816         }
1817
1818         if (bch2_dev_is_online(ca) &&
1819             get_capacity(ca->disk_sb.bdev->bd_disk) <
1820             ca->mi.bucket_size * nbuckets) {
1821                 bch_err(ca, "New size larger than device");
1822                 ret = -BCH_ERR_device_size_too_small;
1823                 goto err;
1824         }
1825
1826         ret = bch2_dev_buckets_resize(c, ca, nbuckets);
1827         if (ret) {
1828                 bch_err(ca, "Resize error: %s", bch2_err_str(ret));
1829                 goto err;
1830         }
1831
1832         ret = bch2_trans_mark_dev_sb(c, ca);
1833         if (ret)
1834                 goto err;
1835
1836         mutex_lock(&c->sb_lock);
1837         mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
1838         mi->nbuckets = cpu_to_le64(nbuckets);
1839
1840         bch2_write_super(c);
1841         mutex_unlock(&c->sb_lock);
1842
1843         bch2_recalc_capacity(c);
1844 err:
1845         up_write(&c->state_lock);
1846         return ret;
1847 }
1848
1849 /* return with ref on ca->ref: */
1850 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
1851 {
1852         struct bch_dev *ca;
1853         unsigned i;
1854
1855         rcu_read_lock();
1856         for_each_member_device_rcu(ca, c, i, NULL)
1857                 if (!strcmp(name, ca->name))
1858                         goto found;
1859         ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
1860 found:
1861         rcu_read_unlock();
1862
1863         return ca;
1864 }
1865
1866 /* Filesystem open: */
1867
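/*
 * Open a filesystem from a set of device paths: read every superblock, treat
 * the one with the highest seq as most current, drop devices that it says
 * have been removed, check that the rest belong to the same filesystem, then
 * allocate the fs, attach the block devices, and (unless opts.nostart) start
 * it.
 */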
1868 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
1869                             struct bch_opts opts)
1870 {
1871         struct bch_sb_handle *sb = NULL;
1872         struct bch_fs *c = NULL;
1873         struct bch_sb_field_members *mi;
1874         unsigned i, best_sb = 0;
1875         struct printbuf errbuf = PRINTBUF;
1876         int ret = 0;
1877
1878         if (!try_module_get(THIS_MODULE))
1879                 return ERR_PTR(-ENODEV);
1880
1881         if (!nr_devices) {
1882                 ret = -EINVAL;
1883                 goto err;
1884         }
1885
1886         sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
1887         if (!sb) {
1888                 ret = -ENOMEM;
1889                 goto err;
1890         }
1891
1892         for (i = 0; i < nr_devices; i++) {
1893                 ret = bch2_read_super(devices[i], &opts, &sb[i]);
1894                 if (ret)
1895                         goto err;
1897         }
1898
1899         for (i = 1; i < nr_devices; i++)
1900                 if (le64_to_cpu(sb[i].sb->seq) >
1901                     le64_to_cpu(sb[best_sb].sb->seq))
1902                         best_sb = i;
1903
1904         mi = bch2_sb_get_members(sb[best_sb].sb);
1905
1906         i = 0;
1907         while (i < nr_devices) {
1908                 if (i != best_sb &&
1909                     !bch2_dev_exists(sb[best_sb].sb, mi, sb[i].sb->dev_idx)) {
1910                         pr_info("%pg has been removed, skipping", sb[i].bdev);
1911                         bch2_free_super(&sb[i]);
1912                         array_remove_item(sb, nr_devices, i);
1913                         continue;
1914                 }
1915
1916                 ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
1917                 if (ret)
1918                         goto err_print;
1919                 i++;
1920         }
1921
1922         c = bch2_fs_alloc(sb[best_sb].sb, opts);
1923         if (IS_ERR(c)) {
1924                 ret = PTR_ERR(c);
1925                 goto err;
1926         }
1927
1928         down_write(&c->state_lock);
1929         for (i = 0; i < nr_devices; i++) {
1930                 ret = bch2_dev_attach_bdev(c, &sb[i]);
1931                 if (ret) {
1932                         up_write(&c->state_lock);
1933                         goto err;
1934                 }
1935         }
1936         up_write(&c->state_lock);
1937
1938         if (!bch2_fs_may_start(c)) {
1939                 ret = -BCH_ERR_insufficient_devices_to_start;
1940                 goto err_print;
1941         }
1942
1943         if (!c->opts.nostart) {
1944                 ret = bch2_fs_start(c);
1945                 if (ret)
1946                         goto err;
1947         }
1948 out:
1949         kfree(sb);
1950         printbuf_exit(&errbuf);
1951         module_put(THIS_MODULE);
1952         return c;
1953 err_print:
1954         pr_err("bch_fs_open err opening %s: %s",
1955                devices[0], bch2_err_str(ret));
1956 err:
1957         if (!IS_ERR_OR_NULL(c))
1958                 bch2_fs_stop(c);
1959         if (sb)
1960                 for (i = 0; i < nr_devices; i++)
1961                         bch2_free_super(&sb[i]);
1962         c = ERR_PTR(ret);
1963         goto out;
1964 }
1965
1966 /* Global interfaces/init */
1967
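/*
 * Module teardown, in the reverse order of bcachefs_init(); also used by
 * bcachefs_init() itself to unwind after a partial failure.
 */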
1968 static void bcachefs_exit(void)
1969 {
1970         bch2_debug_exit();
1971         bch2_vfs_exit();
1972         bch2_chardev_exit();
1973         bch2_btree_key_cache_exit();
1974         if (bcachefs_kset)
1975                 kset_unregister(bcachefs_kset);
1976 }
1977
1978 static int __init bcachefs_init(void)
1979 {
1980         bch2_bkey_pack_test();
1981
1982         if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
1983             bch2_btree_key_cache_init() ||
1984             bch2_chardev_init() ||
1985             bch2_vfs_init() ||
1986             bch2_debug_init())
1987                 goto err;
1988
1989         return 0;
1990 err:
1991         bcachefs_exit();
1992         return -ENOMEM;
1993 }
1994
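/*
 * Each entry of BCH_DEBUG_PARAMS() becomes a module parameter. Illustrative
 * expansion for a hypothetical entry named "expensive_debug_checks" (the real
 * entry names come from the BCH_DEBUG_PARAMS() list, not from here):
 *
 *	bool bch2_expensive_debug_checks;
 *	module_param_named(expensive_debug_checks, bch2_expensive_debug_checks, bool, 0644);
 *	MODULE_PARM_DESC(expensive_debug_checks, "<description string>");
 */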
1995 #define BCH_DEBUG_PARAM(name, description)                      \
1996         bool bch2_##name;                                       \
1997         module_param_named(name, bch2_##name, bool, 0644);      \
1998         MODULE_PARM_DESC(name, description);
1999 BCH_DEBUG_PARAMS()
2000 #undef BCH_DEBUG_PARAM
2001
2002 static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
2003 module_param_named(version, bch2_metadata_version, uint, 0400);
2004
2005 module_exit(bcachefs_exit);
2006 module_init(bcachefs_init);