 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

#include <trace/events/bcachefs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

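/*
 * KTYPE(type) builds a sysfs kobj_type by token-pasting the type name into
 * its release hook, sysfs ops and attribute list; e.g. KTYPE(bch2_fs)
 * expands to roughly:
 *
 *	static struct kobj_type bch2_fs_ktype = {
 *		.release	= bch2_fs_release,
 *		.sysfs_ops	= &bch2_fs_sysfs_ops,
 *		.default_attrs	= bch2_fs_files,
 *	};
 */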
#define KTYPE(type)							\
struct kobj_type type ## _ktype = {					\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_attrs	= type ## _files				\
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

static void bch2_fs_internal_release(struct kobject *k)

static void bch2_fs_opts_dir_release(struct kobject *k)

static void bch2_fs_time_stats_release(struct kobject *k)

static KTYPE(bch2_fs);
static KTYPE(bch2_fs_internal);
static KTYPE(bch2_fs_opts_dir);
static KTYPE(bch2_fs_time_stats);
static KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

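/*
 * Look up the filesystem (if any) that has @bdev open as a member device, by
 * walking every filesystem on bch_fs_list and each of its member devices:
 */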
struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev)

	mutex_lock(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
			if (ca->disk_sb.bdev == bdev) {

	mutex_unlock(&bch_fs_list_lock);

static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb->uuid, &uuid, sizeof(uuid_le)))

struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);

	mutex_unlock(&bch_fs_list_lock);

int bch2_congested(struct bch_fs *c, int bdi_bits)

	struct backing_dev_info *bdi;

	if (bdi_bits & (1 << WB_sync_congested)) {
		/* Reads - check all devices: */
		for_each_readable_member(ca, c, i) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {

		/* Writes prefer fastest tier: */
		struct bch_tier *tier = READ_ONCE(c->fastest_tier);
		struct bch_devs_mask *devs =
			tier ? &tier->devs : &c->rw_devs[BCH_DATA_USER];

		for_each_member_device_rcu(ca, c, i, devs) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {

static int bch2_congested_fn(void *data, int bdi_bits)

	struct bch_fs *c = data;

	return bch2_congested(c, bdi_bits);

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and tiering (to free up space)
 *
 * - copygc and tiering depend on mark and sweep gc (they actually probably
 *   don't, because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depend on the allocator threads
 *
 * - the allocator depends on the journal (when it rewrites prios and gens)
 */

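/*
 * __bch2_fs_read_only() below stops things in roughly the reverse of that
 * dependency order: tiering and copygc first, then gc, then the allocator
 * threads, and finally the journal.
 */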
static void __bch2_fs_read_only(struct bch_fs *c)

	bch2_tiering_stop(c);

	for_each_member_device(ca, c, i)
		bch2_moving_gc_stop(ca);

	bch2_gc_thread_stop(c);

	/*
	 * Flush journal before stopping allocators, because flushing journal
	 * blacklist entries involves allocating new btree nodes:
	 */
	bch2_journal_flush_pins(&c->journal, U64_MAX);

	if (!bch2_journal_error(&c->journal))
		bch2_btree_verify_flushed(c);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_stop(ca);

	bch2_fs_journal_stop(&c->journal);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);

static void bch2_writes_disabled(struct percpu_ref *writes)

	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	wake_up(&bch_read_only_wait);

void bch2_fs_read_only(struct bch_fs *c)

	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RW)

	if (test_bit(BCH_FS_ERROR, &c->flags))

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 *
	 * (This really blocks new _allocations_; writes to previously
	 * allocated space can still happen until the allocator is stopped in
	 * bch2_dev_allocator_stop()).
	 */
	percpu_ref_kill(&c->writes);

	del_timer(&c->foreground_write_wakeup);
	cancel_delayed_work(&c->pd_controllers_update);

	c->foreground_write_pd.rate.rate = UINT_MAX;
	bch2_wake_delayed_writes((unsigned long) c);

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator.
	 *
	 * If we are doing an emergency shutdown, outstanding writes may hang
	 * until we shut down the allocator, so we don't want to wait on them
	 * before shutting everything down - but we do need to wait on them
	 * before returning and signalling that going RO is complete:
	 */
	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
		   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

	__bch2_fs_read_only(c);

	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		mutex_lock(&c->sb_lock);
		SET_BCH_SB_CLEAN(c->disk_sb, true);

		mutex_unlock(&c->sb_lock);

	c->state = BCH_FS_RO;

static void bch2_fs_read_only_work(struct work_struct *work)

		container_of(work, struct bch_fs, read_only_work);

	mutex_lock(&c->state_lock);
	bch2_fs_read_only(c);
	mutex_unlock(&c->state_lock);

static void bch2_fs_read_only_async(struct bch_fs *c)

	queue_work(system_long_wq, &c->read_only_work);

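/*
 * Emergency shutdown: asynchronously kick off the transition to read-only
 * and halt the journal, so no further updates reach disk. Returns true if
 * this call was the one that set BCH_FS_EMERGENCY_RO:
 */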
bool bch2_fs_emergency_read_only(struct bch_fs *c)

	bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

	bch2_fs_read_only_async(c);
	bch2_journal_halt(&c->journal);

	wake_up(&bch_read_only_wait);

const char *bch2_fs_read_write(struct bch_fs *c)

	const char *err = NULL;

	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RO)

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	err = "error starting allocator thread";
	for_each_rw_member(ca, c, i)
		if (bch2_dev_allocator_start(ca)) {
			percpu_ref_put(&ca->io_ref);

	err = "error starting btree GC thread";
	if (bch2_gc_thread_start(c))

	err = "error starting moving GC thread";
	for_each_rw_member(ca, c, i)
		if (bch2_moving_gc_start(ca)) {
			percpu_ref_put(&ca->io_ref);

	err = "error starting tiering thread";
	if (bch2_tiering_start(c))

	schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);

	if (c->state != BCH_FS_STARTING)
		percpu_ref_reinit(&c->writes);

	c->state = BCH_FS_RW;

	__bch2_fs_read_only(c);

/* Filesystem startup/shutdown: */

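/*
 * Frees everything bch2_fs_alloc() set up, in roughly the reverse order it
 * was initialized:
 */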
static void bch2_fs_free(struct bch_fs *c)

	bch2_fs_encryption_exit(c);
	bch2_fs_btree_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	if (c->bdi.bdi_list.next)
		bdi_destroy(&c->bdi);
	lg_lock_free(&c->usage_lock);
	free_percpu(c->usage_percpu);
	mempool_exit(&c->btree_bounce_pool);
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
	bioset_exit(&c->btree_read_bio);
	mempool_exit(&c->btree_interior_update_pool);
	mempool_exit(&c->btree_reserve_pool);
	mempool_exit(&c->fill_iter);
	percpu_ref_exit(&c->writes);

		destroy_workqueue(c->copygc_wq);

		destroy_workqueue(c->wq);

	free_pages((unsigned long) c->disk_sb, c->disk_sb_order);

	module_put(THIS_MODULE);

static void bch2_fs_exit(struct bch_fs *c)

	del_timer_sync(&c->foreground_write_wakeup);
	cancel_delayed_work_sync(&c->pd_controllers_update);
	cancel_work_sync(&c->read_only_work);

	for (i = 0; i < c->sb.nr_devices; i++)

			bch2_dev_free(c->devs[i]);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);

static void bch2_fs_offline(struct bch_fs *c)

	mutex_lock(&bch_fs_list_lock);

	mutex_unlock(&bch_fs_list_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&

			sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	mutex_lock(&c->state_lock);
	__bch2_fs_read_only(c);
	mutex_unlock(&c->state_lock);

static void bch2_fs_release(struct kobject *kobj)

	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

void bch2_fs_stop(struct bch_fs *c)

	mutex_lock(&c->state_lock);
	BUG_ON(c->state == BCH_FS_STOPPING);
	c->state = BCH_FS_STOPPING;
	mutex_unlock(&c->state_lock);
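
	/* closure_sync() blocks until the last reference to c->cl is dropped: */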
	closure_sync(&c->cl);

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)

	struct bch_sb_field_members *mi;

	unsigned i, iter_size;

	c = kzalloc(sizeof(struct bch_fs), GFP_KERNEL);

	__module_get(THIS_MODULE);

	mutex_init(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_cache_lock);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);

#define BCH_TIME_STAT(name, frequency_units, duration_units)		\
	spin_lock_init(&c->name##_time.lock);

	bch2_fs_allocator_init(c);
	bch2_fs_tiering_init(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);

	INIT_LIST_HEAD(&c->btree_interior_update_list);
	mutex_init(&c->btree_reserve_cache_lock);
	mutex_init(&c->btree_interior_update_lock);

	mutex_init(&c->bio_bounce_pages_lock);
	mutex_init(&c->zlib_workspace_lock);

	bio_list_init(&c->btree_write_error_list);
	spin_lock_init(&c->btree_write_error_lock);
	INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);

	INIT_LIST_HEAD(&c->fsck_errors);
	mutex_init(&c->fsck_error_lock);

	seqcount_init(&c->gc_pos_lock);

	c->prio_clock[READ].hand = 1;
	c->prio_clock[READ].min_prio = 0;
	c->prio_clock[WRITE].hand = 1;
	c->prio_clock[WRITE].min_prio = 0;

	init_waitqueue_head(&c->writeback_wait);
	c->writeback_pages_max = (256 << 10) / PAGE_SIZE;

	c->copy_gc_enabled = 1;
	c->tiering_enabled = 1;
	c->tiering_percent = 10;

	c->foreground_target_percent = 20;

	c->journal.write_time = &c->journal_write_time;
	c->journal.delay_time = &c->journal_delay_time;
	c->journal.blocked_time = &c->journal_blocked_time;
	c->journal.flush_seq_time = &c->journal_flush_seq_time;

	mutex_lock(&c->sb_lock);

	if (bch2_sb_to_fs(c, sb)) {
		mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->sb_lock);

	scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);

	c->opts = bch2_opts_default;
	bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb));
	bch2_opts_apply(&c->opts, opts);

	c->block_bits = ilog2(c->opts.block_size);

	c->opts.nochanges |= c->opts.noreplay;
	c->opts.read_only |= c->opts.nochanges;

	if (bch2_fs_init_fault("fs_alloc"))

	iter_size = (btree_blocks(c) + 1) * 2 *
		sizeof(struct btree_node_iter_set);
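
	/*
	 * fill_iter sizing: a btree node iterator keeps one iter_set per bset
	 * it walks; (btree_blocks(c) + 1) * 2 appears to be a worst-case bound
	 * on the number of bsets in a node being read. Everything below that
	 * can fail is allocated in one big conditional chain, so a single
	 * branch handles every failure:
	 */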
	if (!(c->wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcache_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) ||
	    mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
				      sizeof(struct btree_reserve)) ||
	    mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
				      sizeof(struct btree_update)) ||
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_read_bio, 1,
			offsetof(struct btree_read_bio, bio)) ||
	    bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio)) ||
	    bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio)) ||
	    bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio)) ||
	    mempool_init_page_pool(&c->bio_bounce_pages,

				   c->opts.btree_node_size,
				   c->sb.encoded_extent_max) /

	    !(c->usage_percpu = alloc_percpu(struct bch_fs_usage)) ||
	    lg_lock_init(&c->usage_lock) ||
	    mempool_init_vp_pool(&c->btree_bounce_pool, 1, btree_bytes(c)) ||
	    bdi_setup_and_register(&c->bdi, "bcachefs") ||
	    bch2_io_clock_init(&c->io_clock[READ]) ||
	    bch2_io_clock_init(&c->io_clock[WRITE]) ||
	    bch2_fs_journal_init(&c->journal) ||
	    bch2_fs_btree_init(c) ||
	    bch2_fs_encryption_init(c) ||
	    bch2_fs_compress_init(c) ||
	    bch2_check_set_has_compressed_data(c, c->opts.compression))

	c->bdi.ra_pages		= VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
	c->bdi.congested_fn	= bch2_congested_fn;
	c->bdi.congested_data	= c;

	mi = bch2_sb_get_members(c->disk_sb);
	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb, mi, i) &&
		    bch2_dev_alloc(c, i))

	/*
	 * Now that all allocations have succeeded, init various refcounty
	 * things that let us shutdown:
	 */
	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);

static const char *__bch2_fs_online(struct bch_fs *c)

	const char *err = NULL;

	lockdep_assert_held(&bch_fs_list_lock);

	if (!list_empty(&c->list))

	if (__bch2_uuid_to_fs(c->sb.uuid))
		return "filesystem UUID already open";

	ret = bch2_fs_chardev_init(c);

		return "error creating character device";

	bch2_fs_debug_init(c);

	if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
	    kobject_add(&c->internal, &c->kobj, "internal") ||
	    kobject_add(&c->opts_dir, &c->kobj, "options") ||
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ||
	    bch2_opts_create_sysfs_files(&c->opts_dir))
		return "error creating sysfs objects";

	mutex_lock(&c->state_lock);

	err = "error creating sysfs objects";
	__for_each_member_device(ca, c, i, NULL)
		if (bch2_dev_sysfs_online(ca))

	list_add(&c->list, &bch_fs_list);

	mutex_unlock(&c->state_lock);

static const char *bch2_fs_online(struct bch_fs *c)

	mutex_lock(&bch_fs_list_lock);
	err = __bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);

static const char *__bch2_fs_start(struct bch_fs *c)

	const char *err = "cannot allocate memory";
	struct bch_sb_field_members *mi;

	closure_init_stack(&cl);

	mutex_lock(&c->state_lock);

	BUG_ON(c->state != BCH_FS_STARTING);

	mutex_lock(&c->sb_lock);
	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);
	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (BCH_SB_INITIALIZED(c->disk_sb)) {
		ret = bch2_journal_read(c, &journal);

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		c->prio_clock[READ].hand = le16_to_cpu(j->read_clock);
		c->prio_clock[WRITE].hand = le16_to_cpu(j->write_clock);

		for (i = 0; i < BTREE_ID_NR; i++) {

			err = "missing btree root";
			k = bch2_journal_find_btree_root(c, j, i, &level);
			if (!k && i < BTREE_ID_ALLOC)

			err = "error reading btree root";
			if (bch2_btree_root_read(c, i, k, level))

		err = "error reading allocation information";
		ret = bch2_alloc_read(c, &journal);

		set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

		bch_verbose(c, "starting mark and sweep:");
		err = "error in recovery";
		ret = bch2_initial_gc(c, &journal);

		bch_verbose(c, "mark and sweep done");

		if (c->opts.noreplay)

		err = "cannot allocate new btree root";
		for (i = 0; i < BTREE_ID_NR; i++)
			if (!c->btree_roots[i].b &&
			    bch2_btree_root_alloc(c, i, &cl))

		/*
		 * bch2_journal_start() can't happen sooner, or btree_gc_finish()
		 * will give spurious errors about oldest_gen > bucket_gen -
		 * this is a hack but oh well.
		 */
		bch2_journal_start(c);

		err = "error starting allocator thread";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_allocator_start(ca)) {
				percpu_ref_put(&ca->io_ref);

		bch_verbose(c, "starting journal replay:");
		err = "journal replay failed";
		ret = bch2_journal_replay(c, &journal);

		bch_verbose(c, "journal replay done");

		if (c->opts.norecovery)

		bch_verbose(c, "starting fsck:");
		err = "error in fsck";
		ret = bch2_fsck(c, !c->opts.nofsck);

		bch_verbose(c, "fsck done");

		struct bch_inode_unpacked inode;
		struct bkey_inode_buf packed_inode;

		bch_notice(c, "initializing new filesystem");

		set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

		ret = bch2_initial_gc(c, &journal);

		err = "unable to allocate journal buckets";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_journal_alloc(ca)) {
				percpu_ref_put(&ca->io_ref);

		err = "cannot allocate new btree root";
		for (i = 0; i < BTREE_ID_NR; i++)
			if (bch2_btree_root_alloc(c, i, &cl))

		/*
		 * journal_res_get() will crash if called before this has
		 * set up the journal.pin FIFO and journal.cur pointer:
		 */
		bch2_journal_start(c);
		bch2_journal_set_replay_done(&c->journal);

		err = "error starting allocator thread";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_allocator_start(ca)) {
				percpu_ref_put(&ca->io_ref);

		/* Wait for new btree roots to be written: */

		bch2_inode_init(c, &inode, 0, 0,
				S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
		inode.bi_inum = BCACHEFS_ROOT_INO;

		bch2_inode_pack(&packed_inode, &inode);

		err = "error creating root directory";
		if (bch2_btree_insert(c, BTREE_ID_INODES,
				      &packed_inode.inode.k_i,
				      NULL, NULL, NULL, 0))

		err = "error writing first journal entry";
		if (bch2_journal_meta(&c->journal))

	err = "dynamic fault";
	if (bch2_fs_init_fault("fs_start"))

	if (c->opts.read_only) {
		bch2_fs_read_only(c);

		err = bch2_fs_read_write(c);

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	now = ktime_get_seconds();

	for_each_member_device(ca, c, i)
		mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);

	SET_BCH_SB_INITIALIZED(c->disk_sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb, false);

	mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->state_lock);
	bch2_journal_entries_free(&journal);

	case BCH_FSCK_ERRORS_NOT_FIXED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("mount with -o fix_errors to repair\n");

	case BCH_FSCK_REPAIR_UNIMPLEMENTED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("repair unimplemented: inform the developers so that it can be added\n");

	case BCH_FSCK_REPAIR_IMPOSSIBLE:
		bch_err(c, "filesystem contains errors, but repair impossible");

	case BCH_FSCK_UNKNOWN_VERSION:
		err = "unknown metadata version";

		err = "cannot allocate memory";

	set_bit(BCH_FS_ERROR, &c->flags);

const char *bch2_fs_start(struct bch_fs *c)

	return __bch2_fs_start(c) ?: bch2_fs_online(c);

static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)

	struct bch_sb_field_members *sb_mi;

	sb_mi = bch2_sb_get_members(sb);

		return "Invalid superblock: member info area missing";

	if (le16_to_cpu(sb->block_size) != c->opts.block_size)
		return "mismatched block size";

	if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb))
		return "new cache bucket size is too small";

static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)

	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

	if (uuid_le_cmp(fs->uuid, sb->uuid))
		return "device not a member of filesystem";

	if (!bch2_dev_exists(newest, mi, sb->dev_idx))
		return "device has been removed";

	if (fs->block_size != sb->block_size)
		return "mismatched block size";

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)

	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

static void bch2_dev_free(struct bch_dev *ca)

	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&

		sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	free_percpu(ca->usage_percpu);
	kvpfree(ca->bucket_dirty, BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(ca->buckets, ca->mi.nbuckets * sizeof(struct bucket));
	kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
	free_heap(&ca->copygc_heap);
	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);

static void bch2_dev_io_ref_release(struct percpu_ref *ref)

	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->offline_complete);

static void __bch2_dev_offline(struct bch_dev *ca)

	struct bch_fs *c = ca->fs;

	lockdep_assert_held(&c->state_lock);

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->offline_complete);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->offline_complete);

	if (ca->kobj.state_in_sysfs) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		sysfs_remove_link(block, "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

static void bch2_dev_ref_release(struct percpu_ref *ref)

	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->stop_complete);

static void bch2_dev_stop(struct bch_dev *ca)

	struct bch_fs *c = ca->fs;

	lockdep_assert_held(&c->state_lock);

	BUG_ON(rcu_access_pointer(c->devs[ca->dev_idx]) != ca);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);

	reinit_completion(&ca->stop_complete);
	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->stop_complete);

static int bch2_dev_sysfs_online(struct bch_dev *ca)

	struct bch_fs *c = ca->fs;

	if (!c->kobj.state_in_sysfs)

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);

	if (ca->disk_sb.bdev) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");

		ret = sysfs_create_link(&ca->kobj, block, "block");

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)

	struct bch_member *member;
	size_t reserve_none, movinggc_reserve, free_inc_reserve, total_reserve;

	unsigned i, btree_node_reserve_buckets;

	if (bch2_fs_init_fault("dev_alloc"))

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->stop_complete);
	init_completion(&ca->offline_complete);

	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);

	ca->copygc_write_point.type = BCH_DATA_USER;

	spin_lock_init(&ca->freelist_lock);
	bch2_dev_moving_gc_init(ca);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	if (bch2_fs_init_fault("dev_alloc"))

	member = bch2_sb_get_members(c->disk_sb)->members + dev_idx;

	ca->mi = bch2_mi_to_cpu(member);
	ca->uuid = member->uuid;
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	/* XXX: tune these */
	movinggc_reserve = max_t(size_t, 16, ca->mi.nbuckets >> 7);
	reserve_none = max_t(size_t, 4, ca->mi.nbuckets >> 9);
	/*
	 * free_inc must be smaller than the copygc reserve: if it were bigger,
	 * one copygc iteration might not make enough buckets available to fill
	 * up free_inc and allow the allocator to make forward progress
	 */
	free_inc_reserve = movinggc_reserve / 2;
	heap_size = movinggc_reserve * 8;
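
	/*
	 * e.g. with ca->mi.nbuckets = 1 << 20: movinggc_reserve = 8192,
	 * reserve_none = 2048, free_inc_reserve = 4096, heap_size = 65536.
	 */

	/*
	 * A bucket can hold bucket_size / btree_node_size btree nodes, so
	 * reserve enough buckets for BTREE_NODE_RESERVE worth of nodes:
	 */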
	btree_node_reserve_buckets =
		DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->opts.btree_node_size);

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_release,

	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_release,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_BTREE], btree_node_reserve_buckets,

	    !init_fifo(&ca->free[RESERVE_MOVINGGC],
		       movinggc_reserve, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free_inc_reserve, GFP_KERNEL) ||
	    !init_heap(&ca->alloc_heap, free_inc_reserve, GFP_KERNEL) ||
	    !init_heap(&ca->copygc_heap, heap_size, GFP_KERNEL) ||
	    !(ca->oldest_gens = kvpmalloc(ca->mi.nbuckets *

					  GFP_KERNEL|__GFP_ZERO)) ||
	    !(ca->buckets = kvpmalloc(ca->mi.nbuckets *
				      sizeof(struct bucket),
				      GFP_KERNEL|__GFP_ZERO)) ||
	    !(ca->bucket_dirty = kvpmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
					   sizeof(unsigned long),
					   GFP_KERNEL|__GFP_ZERO)) ||
	    !(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio)) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))

	total_reserve = ca->free_inc.size;
	for (i = 0; i < RESERVE_NR; i++)
		total_reserve += ca->free[i].size;

	ca->copygc_write_point.group = &ca->self;

	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(ca))
		pr_warn("error creating sysfs objects");

static int __bch2_dev_online(struct bch_fs *c, struct bch_sb_handle *sb)

	lockdep_assert_held(&c->sb_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = c->devs[sb->sb->dev_idx];
	if (ca->disk_sb.bdev) {
		bch_err(c, "already have device online in slot %u",

	ret = bch2_dev_journal_init(ca, sb->sb);

	/*
	 * Increase journal write timeout if flushes to this device are
	 * expensive:
	 */
	if (!blk_queue_nonrot(bdev_get_queue(sb->bdev)) &&
	    journal_flushes_device(ca))
		c->journal.write_delay_ms =
			max(c->journal.write_delay_ms, 1000U);

	if (sb->mode & FMODE_EXCL)
		ca->disk_sb.bdev->bd_holder = ca;
	memset(sb, 0, sizeof(*sb));

	if (c->sb.nr_devices == 1)
		bdevname(ca->disk_sb.bdev, c->name);
	bdevname(ca->disk_sb.bdev, ca->name);

	if (bch2_dev_sysfs_online(ca))
		pr_warn("error creating sysfs objects");

	lg_local_lock(&c->usage_lock);
	if (!gc_will_visit(c, gc_phase(GC_PHASE_SB_METADATA)))
		bch2_mark_dev_metadata(c, ca);
	lg_local_unlock(&c->usage_lock);

	if (ca->mi.state == BCH_MEMBER_STATE_RW)
		bch2_dev_allocator_add(c, ca);

	percpu_ref_reinit(&ca->io_ref);

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)

	struct bch_devs_mask new_online_devs;
	struct replicas_status s;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_RW:

	case BCH_MEMBER_STATE_RO:
		if (ca->mi.state != BCH_MEMBER_STATE_RW)

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;
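
		/*
		 * If the caller passed a force flag, only the hard minimum
		 * (*_replicas_required) is needed; otherwise insist on the
		 * fully configured replication level:
		 */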
		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw - 1 <= required;
	case BCH_MEMBER_STATE_FAILED:
	case BCH_MEMBER_STATE_SPARE:
		if (ca->mi.state != BCH_MEMBER_STATE_RW &&
		    ca->mi.state != BCH_MEMBER_STATE_RO)

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		s = __bch2_replicas_status(c, new_online_devs);

		return bch2_have_enough_devs(c, s, flags);

static bool bch2_fs_may_start(struct bch_fs *c)

	struct replicas_status s;
	struct bch_sb_field_members *mi;
	unsigned i, flags = c->opts.degraded
		? BCH_FORCE_IF_DEGRADED

	if (!c->opts.degraded) {
		mutex_lock(&c->sb_lock);
		mi = bch2_sb_get_members(c->disk_sb);

		for (i = 0; i < c->disk_sb->nr_devices; i++)
			if (bch2_dev_exists(c->disk_sb, mi, i) &&
			    !bch2_dev_is_online(c->devs[i]) &&
			    (c->devs[i]->mi.state == BCH_MEMBER_STATE_RW ||
			     c->devs[i]->mi.state == BCH_MEMBER_STATE_RO)) {
				mutex_unlock(&c->sb_lock);

		mutex_unlock(&c->sb_lock);

	s = bch2_replicas_status(c);

	return bch2_have_enough_devs(c, s, flags);

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)

	bch2_moving_gc_stop(ca);

	/*
	 * This stops new data writes (e.g. to existing open data
	 * buckets) and then waits for all existing writes to
	 * complete:
	 */
	bch2_dev_allocator_stop(ca);
	bch2_dev_allocator_remove(c, ca);

static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)

	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (bch2_dev_allocator_start(ca))
		return "error starting allocator thread";

	if (bch2_moving_gc_start(ca))
		return "error starting moving GC thread";

	if (bch2_tiering_start(c))
		return "error starting tiering thread";

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)

	struct bch_sb_field_members *mi;

	if (ca->mi.state == new_state)

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))

	if (new_state == BCH_MEMBER_STATE_RW) {
		if (__bch2_dev_read_write(c, ca))

		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_dev_state[new_state]);

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)

	mutex_lock(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	mutex_unlock(&c->state_lock);

/* Device add/removal: */

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)

	struct bch_sb_field_members *mi;
	unsigned dev_idx = ca->dev_idx, data;

	mutex_lock(&c->state_lock);

	percpu_ref_put(&ca->ref); /* XXX */

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		bch_err(ca, "Cannot remove RW device");

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot remove without losing data");

	/*
	 * XXX: verify that dev_idx is really not in use anymore, anywhere
	 *
	 * flag_data_bad() does not check btree pointers
	 */
	ret = bch2_flag_data_bad(ca);

		bch_err(ca, "Remove failed");

	data = bch2_dev_has_data(c, ca);

		bch_err(ca, "Remove failed, still has data (%x)", data);

	bch2_journal_meta(&c->journal);

	__bch2_dev_offline(ca);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	mutex_unlock(&c->state_lock);

	mutex_unlock(&c->state_lock);

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)

	struct bch_sb_handle sb;

	struct bch_dev *ca = NULL;
	struct bch_sb_field_members *mi, *dev_mi;
	struct bch_member saved_mi;
	unsigned dev_idx, nr_devices, u64s;

	err = bch2_read_super(path, bch2_opts_empty(), &sb);

	err = bch2_sb_validate(&sb);

	err = bch2_dev_may_add(sb.sb, c);

	mutex_lock(&c->state_lock);
	mutex_lock(&c->sb_lock);

	/*
	 * Preserve the old cache member information (esp. tier)
	 * before we start bashing the disk stuff.
	 */
	dev_mi = bch2_sb_get_members(sb.sb);
	saved_mi = dev_mi->members[sb.sb->dev_idx];
	saved_mi.last_mount = cpu_to_le64(ktime_get_seconds());

	if (dynamic_fault("bcachefs:add:no_slot"))

	mi = bch2_sb_get_members(c->disk_sb);
	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb, mi, dev_idx))

	err = "no slots available in superblock";

	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
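
	/* superblock field sizes are expressed in units of u64s: */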
	u64s = (sizeof(struct bch_sb_field_members) +
		sizeof(struct bch_member) * nr_devices) / sizeof(u64);
	err = "no space in superblock for member info";

	mi = bch2_fs_sb_resize_members(c, u64s);

	dev_mi = bch2_sb_resize_members(&sb, u64s);

	memcpy(dev_mi, mi, u64s * sizeof(u64));
	dev_mi->members[dev_idx] = saved_mi;

	sb.sb->uuid = c->disk_sb->uuid;
	sb.sb->dev_idx = dev_idx;
	sb.sb->nr_devices = nr_devices;

	/* commit new member info */
	memcpy(mi, dev_mi, u64s * sizeof(u64));
	c->disk_sb->nr_devices = nr_devices;
	c->sb.nr_devices = nr_devices;

	if (bch2_dev_alloc(c, dev_idx)) {
		err = "cannot allocate memory";

	if (__bch2_dev_online(c, &sb)) {
		err = "bch2_dev_online() error";

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	ca = c->devs[dev_idx];
	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = "journal alloc failed";
		if (bch2_dev_journal_alloc(ca))

		err = __bch2_dev_read_write(c, ca);

	mutex_unlock(&c->state_lock);

	mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->state_lock);
	bch2_free_super(&sb);

	bch_err(c, "Unable to add device: %s", err);
	return ret ?: -EINVAL;

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)

	struct bch_sb_handle sb = { 0 };

	mutex_lock(&c->state_lock);

	err = bch2_read_super(path, bch2_opts_empty(), &sb);

	dev_idx = sb.sb->dev_idx;

	err = bch2_dev_in_fs(c->disk_sb, sb.sb);

	mutex_lock(&c->sb_lock);
	if (__bch2_dev_online(c, &sb)) {
		err = "__bch2_dev_online() error";
		mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->sb_lock);

	ca = c->devs[dev_idx];
	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = __bch2_dev_read_write(c, ca);

	mutex_unlock(&c->state_lock);

	mutex_unlock(&c->state_lock);
	bch2_free_super(&sb);
	bch_err(c, "error bringing %s online: %s", path, err);

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)

	mutex_lock(&c->state_lock);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot offline required disk");
		mutex_unlock(&c->state_lock);

	__bch2_dev_read_only(c, ca);
	__bch2_dev_offline(ca);

	mutex_unlock(&c->state_lock);

int bch2_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)

	mutex_lock(&c->state_lock);

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		bch_err(ca, "Cannot migrate data off RW device");
		mutex_unlock(&c->state_lock);

	mutex_unlock(&c->state_lock);

	ret = bch2_move_data_off_device(ca);

		bch_err(ca, "Error migrating data: %i", ret);

	ret = bch2_move_metadata_off_device(ca);

		bch_err(ca, "Error migrating metadata: %i", ret);

	data = bch2_dev_has_data(c, ca);

		bch_err(ca, "Migrate error: data still present (%x)", data);

/* Filesystem open: */

const char *bch2_fs_open(char * const *devices, unsigned nr_devices,
			 struct bch_opts opts, struct bch_fs **ret)

	struct bch_fs *c = NULL;
	struct bch_sb_handle *sb;
	unsigned i, best_sb = 0;

		return "need at least one device";

	if (!try_module_get(THIS_MODULE))
		return "module unloading";

	err = "cannot allocate memory";
	sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);

	for (i = 0; i < nr_devices; i++) {
		err = bch2_read_super(devices[i], opts, &sb[i]);

		err = bch2_sb_validate(&sb[i]);
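
	/* the superblock with the highest sequence number is the current one: */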
	for (i = 1; i < nr_devices; i++)
		if (le64_to_cpu(sb[i].sb->seq) >
		    le64_to_cpu(sb[best_sb].sb->seq))

	for (i = 0; i < nr_devices; i++) {
		err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);

	err = "cannot allocate memory";
	c = bch2_fs_alloc(sb[best_sb].sb, opts);

	err = "bch2_dev_online() error";
	mutex_lock(&c->sb_lock);
	for (i = 0; i < nr_devices; i++)
		if (__bch2_dev_online(c, &sb[i])) {
			mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->sb_lock);

	err = "insufficient devices";
	if (!bch2_fs_may_start(c))

	if (!c->opts.nostart) {
		err = __bch2_fs_start(c);

		err = bch2_fs_online(c);

	closure_put(&c->cl);

	module_put(THIS_MODULE);

	for (i = 0; i < nr_devices; i++)
		bch2_free_super(&sb[i]);

static const char *__bch2_fs_open_incremental(struct bch_sb_handle *sb,
					      struct bch_opts opts)

	bool allocated_fs = false;

	err = bch2_sb_validate(sb);

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(sb->sb->uuid);

		closure_get(&c->cl);

		err = bch2_dev_in_fs(c->disk_sb, sb->sb);

		c = bch2_fs_alloc(sb->sb, opts);
		err = "cannot allocate memory";

		allocated_fs = true;

	err = "bch2_dev_online() error";

	mutex_lock(&c->sb_lock);
	if (__bch2_dev_online(c, sb)) {
		mutex_unlock(&c->sb_lock);

	mutex_unlock(&c->sb_lock);

	if (!c->opts.nostart && bch2_fs_may_start(c)) {
		err = __bch2_fs_start(c);

		err = __bch2_fs_online(c);

	closure_put(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	mutex_unlock(&bch_fs_list_lock);

	closure_put(&c->cl);

const char *bch2_fs_open_incremental(const char *path)

	struct bch_sb_handle sb;
	struct bch_opts opts = bch2_opts_empty();

	err = bch2_read_super(path, opts, &sb);

	err = __bch2_fs_open_incremental(&sb, opts);
	bch2_free_super(&sb);

/* Global interfaces/init */

static void bcachefs_exit(void)

	bch2_chardev_exit();

	kset_unregister(bcachefs_kset);

static int __init bcachefs_init(void)

	bch2_bkey_pack_test();
	bch2_inode_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_chardev_init() ||

#define BCH_DEBUG_PARAM(name, description)				\

	module_param_named(name, bch2_##name, bool, 0644);		\
	MODULE_PARM_DESC(name, description);

#undef BCH_DEBUG_PARAM

module_exit(bcachefs_exit);
module_init(bcachefs_init);