/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "btree_cache.h"
#include "btree_update.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

#include <trace/events/bcachefs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

#define KTYPE(type)							\
struct kobj_type type ## _ktype = {					\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_attrs	= type ## _files				\
}
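
/*
 * As a rough illustration (the sysfs_ops and attribute arrays are assumed to
 * be defined elsewhere, in sysfs.c), "static KTYPE(bch2_fs);" below expands
 * to approximately:
 *
 *	static struct kobj_type bch2_fs_ktype = {
 *		.release	= bch2_fs_release,
 *		.sysfs_ops	= &bch2_fs_sysfs_ops,
 *		.default_attrs	= bch2_fs_files,
 *	};
 */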

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

static KTYPE(bch2_fs);
static KTYPE(bch2_fs_internal);
static KTYPE(bch2_fs_opts_dir);
static KTYPE(bch2_fs_time_stats);
static KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i)
			if (ca->disk_sb.bdev == bdev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb->uuid, &uuid, sizeof(uuid_le)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}
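
/*
 * Congestion hook for the filesystem's backing_dev_info: for sync (read)
 * congestion every readable member is consulted, since reads can be serviced
 * by any device holding a copy; for writes only the fastest tier matters,
 * since that's where foreground writes land first.
 */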

int bch2_congested(struct bch_fs *c, int bdi_bits)
{
	struct backing_dev_info *bdi;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	if (bdi_bits & (1 << WB_sync_congested)) {
		/* Reads - check all devices: */
		for_each_readable_member(ca, c, i) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {
				percpu_ref_put(&ca->io_ref);
				ret = 1;
				break;
			}
		}
	} else {
		/* Writes prefer fastest tier: */
		struct bch_tier *tier = READ_ONCE(c->fastest_tier);
		struct dev_group *grp = tier ? &tier->devs : &c->all_devs;

		rcu_read_lock();
		group_for_each_dev(ca, grp, i) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {
				ret = 1;
				break;
			}
		}
		rcu_read_unlock();
	}

	return ret;
}

static int bch2_congested_fn(void *data, int bdi_bits)
{
	struct bch_fs *c = data;

	return bch2_congested(c, bdi_bits);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and tiering (to free up space)
 *
 * - copygc and tiering depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch2_tiering_stop(c);

	for_each_member_device(ca, c, i)
		bch2_moving_gc_stop(ca);

	bch2_gc_thread_stop(c);

	/*
	 * Flush journal before stopping allocators, because flushing journal
	 * blacklist entries involves allocating new btree nodes:
	 */
	bch2_journal_flush_pins(&c->journal, U64_MAX);

	if (!bch2_journal_error(&c->journal))
		bch2_btree_verify_flushed(c);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_stop(ca);

	bch2_fs_journal_stop(&c->journal);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);
}
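
/* Release callback for c->writes: runs once the last outstanding write
 * completes after percpu_ref_kill(): */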

static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	wake_up(&bch_read_only_wait);
}

void bch2_fs_read_only(struct bch_fs *c)
{
	mutex_lock(&c->state_lock);
	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RW)
		goto out;

	if (test_bit(BCH_FS_ERROR, &c->flags))
		goto out;

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 *
	 * (This is really blocking new _allocations_, writes to previously
	 * allocated space can still happen until stopping the allocator in
	 * bch2_dev_allocator_stop()).
	 */
	percpu_ref_kill(&c->writes);

	del_timer(&c->foreground_write_wakeup);
	cancel_delayed_work(&c->pd_controllers_update);

	c->foreground_write_pd.rate.rate = UINT_MAX;
	bch2_wake_delayed_writes((unsigned long) c);

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown outstanding writes may
	 * hang until we shutdown the allocator so we don't want to wait
	 * on outstanding writes before shutting everything down - but
	 * we do need to wait on them before returning and signalling
	 * that going RO is complete:
	 */
	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
		   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

	__bch2_fs_read_only(c);

	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		mutex_lock(&c->sb_lock);
		SET_BCH_SB_CLEAN(c->disk_sb, true);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	c->state = BCH_FS_RO;
out:
	mutex_unlock(&c->state_lock);
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	bch2_fs_read_only(c);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}
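
/*
 * Emergency shutdown: halt the journal so nothing further reaches disk, then
 * go read only asynchronously. Returns true if this call was the one that set
 * BCH_FS_EMERGENCY_RO.
 */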

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

	bch2_fs_read_only_async(c);
	bch2_journal_halt(&c->journal);

	wake_up(&bch_read_only_wait);
	return ret;
}

const char *bch2_fs_read_write(struct bch_fs *c)
{
	struct bch_dev *ca;
	const char *err = NULL;
	unsigned i;

	mutex_lock(&c->state_lock);
	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RO)
		goto out;

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	err = "error starting allocator thread";
	for_each_rw_member(ca, c, i)
		if (bch2_dev_allocator_start(ca)) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}

	err = "error starting btree GC thread";
	if (bch2_gc_thread_start(c))
		goto err;

	err = "error starting moving GC thread";
	for_each_rw_member(ca, c, i)
		if (bch2_moving_gc_start(ca)) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}

	err = "error starting tiering thread";
	if (bch2_tiering_start(c))
		goto err;

	schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);

	if (c->state != BCH_FS_STARTING)
		percpu_ref_reinit(&c->writes);

	c->state = BCH_FS_RW;
	err = NULL;
out:
	mutex_unlock(&c->state_lock);
	return err;
err:
	__bch2_fs_read_only(c);
	goto out;
}

/* Filesystem startup/shutdown: */
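
/*
 * Teardown is split across several helpers: bch2_fs_stop() marks the fs
 * STOPPING, takes it offline and waits for outstanding references via the
 * closure; bch2_fs_exit() then tears down the member devices, and
 * bch2_fs_free() runs from bch2_fs_release() when the last kobject reference
 * is dropped.
 */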

static void bch2_fs_free(struct bch_fs *c)
{
	bch2_fs_encryption_exit(c);
	bch2_fs_btree_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	if (c->bdi.bdi_list.next)
		bdi_destroy(&c->bdi);
	lg_lock_free(&c->usage_lock);
	free_percpu(c->usage_percpu);
	mempool_exit(&c->btree_bounce_pool);
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
	bioset_exit(&c->btree_read_bio);
	mempool_exit(&c->btree_interior_update_pool);
	mempool_exit(&c->btree_reserve_pool);
	mempool_exit(&c->fill_iter);
	percpu_ref_exit(&c->writes);

	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->wq)
		destroy_workqueue(c->wq);

	free_pages((unsigned long) c->disk_sb, c->disk_sb_order);
	kfree(c);
	module_put(THIS_MODULE);
}

static void bch2_fs_exit(struct bch_fs *c)
{
	unsigned i;

	del_timer_sync(&c->foreground_write_wakeup);
	cancel_delayed_work_sync(&c->pd_controllers_update);
	cancel_work_sync(&c->read_only_work);
	cancel_work_sync(&c->read_retry_work);

	for (i = 0; i < c->sb.nr_devices; i++)
		if (c->devs[i])
			bch2_dev_free(c->devs[i]);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void bch2_fs_offline(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
					  "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	mutex_lock(&c->state_lock);
	__bch2_fs_read_only(c);
	mutex_unlock(&c->state_lock);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	bch2_fs_free(c);
}

void bch2_fs_stop(struct bch_fs *c)
{
	mutex_lock(&c->state_lock);
	BUG_ON(c->state == BCH_FS_STOPPING);
	c->state = BCH_FS_STOPPING;
	mutex_unlock(&c->state_lock);

	bch2_fs_offline(c);

	closure_sync(&c->cl);

	bch2_fs_exit(c);
}
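
/*
 * Allocate and initialize the in-memory filesystem from an already-read
 * superblock; this does no IO, and the filesystem isn't visible in sysfs
 * until bch2_fs_online().
 */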

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_sb_field_members *mi;
	struct bch_fs *c;
	unsigned i, iter_size;

	c = kzalloc(sizeof(struct bch_fs), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);

	c->minor = -1;

	mutex_init(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_cache_lock);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);

#define BCH_TIME_STAT(name, frequency_units, duration_units)		\
	spin_lock_init(&c->name##_time.lock);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	bch2_fs_allocator_init(c);
	bch2_fs_tiering_init(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);

	INIT_LIST_HEAD(&c->btree_interior_update_list);
	mutex_init(&c->btree_reserve_cache_lock);
	mutex_init(&c->btree_interior_update_lock);

	mutex_init(&c->bio_bounce_pages_lock);
	mutex_init(&c->zlib_workspace_lock);

	bio_list_init(&c->read_retry_list);
	spin_lock_init(&c->read_retry_lock);
	INIT_WORK(&c->read_retry_work, bch2_read_retry_work);

	bio_list_init(&c->btree_write_error_list);
	spin_lock_init(&c->btree_write_error_lock);
	INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);

	INIT_LIST_HEAD(&c->fsck_errors);
	mutex_init(&c->fsck_error_lock);

	seqcount_init(&c->gc_pos_lock);

	c->prio_clock[READ].hand = 1;
	c->prio_clock[READ].min_prio = 0;
	c->prio_clock[WRITE].hand = 1;
	c->prio_clock[WRITE].min_prio = 0;

	init_waitqueue_head(&c->writeback_wait);
	c->writeback_pages_max = (256 << 10) / PAGE_SIZE;

	c->copy_gc_enabled = 1;
	c->tiering_enabled = 1;
	c->tiering_percent = 10;

	c->foreground_target_percent = 20;

	c->journal.write_time	= &c->journal_write_time;
	c->journal.delay_time	= &c->journal_delay_time;
	c->journal.blocked_time	= &c->journal_blocked_time;
	c->journal.flush_seq_time = &c->journal_flush_seq_time;

	mutex_lock(&c->sb_lock);

	if (bch2_sb_to_fs(c, sb)) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	mutex_unlock(&c->sb_lock);

	scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);

	bch2_opts_apply(&c->opts, bch2_sb_opts(sb));
	bch2_opts_apply(&c->opts, opts);

	c->opts.nochanges	|= c->opts.noreplay;
	c->opts.read_only	|= c->opts.nochanges;

	c->block_bits = ilog2(c->sb.block_size);

	if (bch2_fs_init_fault("fs_alloc"))
		goto err;

	iter_size = (btree_blocks(c) + 1) * 2 *
		sizeof(struct btree_node_iter_set);

	if (!(c->wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcache_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) ||
	    mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
				      sizeof(struct btree_reserve)) ||
	    mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
				      sizeof(struct btree_interior_update)) ||
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_read_bio, 1,
			offsetof(struct btree_read_bio, bio)) ||
	    bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio)) ||
	    bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio)) ||
	    bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio)) ||
	    mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->sb.btree_node_size,
					 BCH_ENCODED_EXTENT_MAX) /
				   PAGE_SECTORS, 0) ||
	    !(c->usage_percpu = alloc_percpu(struct bch_fs_usage)) ||
	    lg_lock_init(&c->usage_lock) ||
	    mempool_init_vp_pool(&c->btree_bounce_pool, 1, btree_bytes(c)) ||
	    bdi_setup_and_register(&c->bdi, "bcachefs") ||
	    bch2_io_clock_init(&c->io_clock[READ]) ||
	    bch2_io_clock_init(&c->io_clock[WRITE]) ||
	    bch2_fs_journal_init(&c->journal) ||
	    bch2_fs_btree_init(c) ||
	    bch2_fs_encryption_init(c) ||
	    bch2_fs_compress_init(c) ||
	    bch2_check_set_has_compressed_data(c, c->opts.compression))
		goto err;

	c->bdi.ra_pages		= VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
	c->bdi.congested_fn	= bch2_congested_fn;
	c->bdi.congested_data	= c;

	mi = bch2_sb_get_members(c->disk_sb);
	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb, mi, i) &&
		    bch2_dev_alloc(c, i))
			goto err;

	/*
	 * Now that all allocations have succeeded, init various refcounty
	 * things that let us shutdown:
	 */
	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
	return c;
err:
	bch2_fs_free(c);
	return NULL;
}

static const char *__bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	const char *err = NULL;
	unsigned i;
	int ret;

	lockdep_assert_held(&bch_fs_list_lock);

	if (!list_empty(&c->list))
		return NULL;

	if (__bch2_uuid_to_fs(c->sb.uuid))
		return "filesystem UUID already open";

	ret = bch2_fs_chardev_init(c);
	if (ret)
		return "error creating character device";

	bch2_fs_debug_init(c);

	if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
	    kobject_add(&c->internal, &c->kobj, "internal") ||
	    kobject_add(&c->opts_dir, &c->kobj, "options") ||
	    kobject_add(&c->time_stats, &c->kobj, "time_stats"))
		return "error creating sysfs objects";

	mutex_lock(&c->state_lock);

	err = "error creating sysfs objects";
	__for_each_member_device(ca, c, i)
		if (bch2_dev_sysfs_online(ca))
			goto err;

	list_add(&c->list, &bch_fs_list);
	err = NULL;
err:
	mutex_unlock(&c->state_lock);
	return err;
}

static const char *bch2_fs_online(struct bch_fs *c)
{
	const char *err;

	mutex_lock(&bch_fs_list_lock);
	err = __bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);

	return err;
}
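
/*
 * Bring the filesystem up: for an existing filesystem, read the journal,
 * recover btree roots, run initial gc (and fsck, unless disabled), then
 * replay the journal; for a brand new one, allocate journal buckets and btree
 * roots and create the root directory.
 */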

static const char *__bch2_fs_start(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	LIST_HEAD(journal);
	struct jset *j;
	struct closure cl;
	time64_t now;
	unsigned i;
	u64 journal_seq = 0;
	int ret = -EINVAL;

	closure_init_stack(&cl);

	BUG_ON(c->state != BCH_FS_STARTING);

	mutex_lock(&c->sb_lock);
	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);
	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (BCH_SB_INITIALIZED(c->disk_sb)) {
		ret = bch2_journal_read(c, &journal);
		if (ret)
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		c->prio_clock[READ].hand = le16_to_cpu(j->read_clock);
		c->prio_clock[WRITE].hand = le16_to_cpu(j->write_clock);

		for (i = 0; i < BTREE_ID_NR; i++) {
			unsigned level;
			struct bkey_i *k;

			err = "missing btree root";
			k = bch2_journal_find_btree_root(c, j, i, &level);
			if (!k && i < BTREE_ID_ALLOC)
				goto err;
			if (!k)
				continue;

			err = "error reading btree root";
			if (bch2_btree_root_read(c, i, k, level))
				goto err;
		}

		err = "error reading allocation information";
		ret = bch2_alloc_read(c, &journal);
		if (ret)
			goto err;

		bch_verbose(c, "starting mark and sweep:");
		err = "error in recovery";
		ret = bch2_initial_gc(c, &journal);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");

		if (c->opts.noreplay)
			goto recovery_done;

		err = "cannot allocate new btree root";
		for (i = 0; i < BTREE_ID_NR; i++)
			if (!c->btree_roots[i].b &&
			    bch2_btree_root_alloc(c, i, &cl))
				goto err;

		/*
		 * bch2_journal_start() can't happen sooner, or btree_gc_finish()
		 * will give spurious errors about oldest_gen > bucket_gen -
		 * this is a hack but oh well.
		 */
		bch2_journal_start(c);

		err = "error starting allocator thread";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_allocator_start(ca)) {
				percpu_ref_put(&ca->io_ref);
				goto err;
			}

		bch_verbose(c, "starting journal replay:");
		err = "journal replay failed";
		ret = bch2_journal_replay(c, &journal);
		if (ret)
			goto err;
		bch_verbose(c, "journal replay done");

		if (c->opts.norecovery)
			goto recovery_done;

		bch_verbose(c, "starting fsck:");
		err = "error in fsck";
		ret = bch2_fsck(c, !c->opts.nofsck);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");

		for_each_rw_member(ca, c, i)
			if (ca->need_alloc_write) {
				ret = bch2_alloc_write(c, ca, &journal_seq);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					goto err;
				}
			}

		bch2_journal_flush_seq(&c->journal, journal_seq);
	} else {
		struct bch_inode_unpacked inode;
		struct bkey_inode_buf packed_inode;

		bch_notice(c, "initializing new filesystem");

		ret = bch2_initial_gc(c, &journal);
		if (ret)
			goto err;

		err = "unable to allocate journal buckets";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_journal_alloc(ca)) {
				percpu_ref_put(&ca->io_ref);
				goto err;
			}

		err = "cannot allocate new btree root";
		for (i = 0; i < BTREE_ID_NR; i++)
			if (bch2_btree_root_alloc(c, i, &cl))
				goto err;

		/*
		 * journal_res_get() will crash if called before this has
		 * set up the journal.pin FIFO and journal.cur pointer:
		 */
		bch2_journal_start(c);
		bch2_journal_set_replay_done(&c->journal);

		err = "error starting allocator thread";
		for_each_rw_member(ca, c, i)
			if (bch2_dev_allocator_start(ca)) {
				percpu_ref_put(&ca->io_ref);
				goto err;
			}

		/* Wait for new btree roots to be written: */
		closure_sync(&cl);

		bch2_inode_init(c, &inode, 0, 0,
				S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
		inode.inum = BCACHE_ROOT_INO;

		bch2_inode_pack(&packed_inode, &inode);

		err = "error creating root directory";
		if (bch2_btree_insert(c, BTREE_ID_INODES,
				      &packed_inode.inode.k_i,
				      NULL, NULL, NULL, 0))
			goto err;

		err = "error writing first journal entry";
		if (bch2_journal_meta(&c->journal))
			goto err;
	}
recovery_done:
	err = "dynamic fault";
	if (bch2_fs_init_fault("fs_start"))
		goto err;

	if (c->opts.read_only) {
		bch2_fs_read_only(c);
	} else {
		err = bch2_fs_read_write(c);
		if (err)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	now = ktime_get_seconds();

	for_each_member_device(ca, c, i)
		mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);

	SET_BCH_SB_INITIALIZED(c->disk_sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb, false);
	c->disk_sb->version = BCACHE_SB_VERSION_CDEV;

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	err = NULL;
out:
	bch2_journal_entries_free(&journal);
	return err;
err:
	switch (ret) {
	case BCH_FSCK_ERRORS_NOT_FIXED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("mount with -o fix_errors to repair\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_UNIMPLEMENTED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("repair unimplemented: inform the developers so that it can be added\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_IMPOSSIBLE:
		bch_err(c, "filesystem contains errors, but repair impossible");
		err = "fsck error";
		break;
	case BCH_FSCK_UNKNOWN_VERSION:
		err = "unknown metadata version";
		break;
	case -ENOMEM:
		err = "cannot allocate memory";
		break;
	case -EIO:
		err = "IO error";
		break;
	}

	BUG_ON(!err);
	set_bit(BCH_FS_ERROR, &c->flags);
	goto out;
}

const char *bch2_fs_start(struct bch_fs *c)
{
	return __bch2_fs_start(c) ?: bch2_fs_online(c);
}

static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_sb_field_members *sb_mi;

	sb_mi = bch2_sb_get_members(sb);
	if (!sb_mi)
		return "Invalid superblock: member info area missing";

	if (le16_to_cpu(sb->block_size) != c->sb.block_size)
		return "mismatched block size";

	if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb))
		return "new cache bucket size is too small";

	return NULL;
}

static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

	if (uuid_le_cmp(fs->uuid, sb->uuid))
		return "device not a member of filesystem";

	if (!bch2_dev_exists(newest, mi, sb->dev_idx))
		return "device has been removed";

	if (fs->block_size != sb->block_size)
		return "mismatched block size";

	return NULL;
}

/* Device startup/shutdown: */
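
/*
 * A device carries two percpu refcounts: ca->io_ref, killed when the device
 * goes offline (no more IO to the block device), and ca->ref, killed when the
 * device is removed from the filesystem entirely.
 */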

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	unsigned i;

	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
				  "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->sectors_written);
	bioset_exit(&ca->replica_set);
	free_percpu(ca->usage_percpu);
	kvpfree(ca->buckets, ca->mi.nbuckets * sizeof(struct bucket));
	kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
	free_heap(&ca->copygc_heap);
	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

static void bch2_dev_io_ref_release(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->offline_complete);
}

static void __bch2_dev_offline(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	lockdep_assert_held(&c->state_lock);

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->offline_complete);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->offline_complete);

	if (ca->kobj.state_in_sysfs) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		sysfs_remove_link(block, "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_release(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->stop_complete);
}

static void bch2_dev_stop(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	lockdep_assert_held(&c->state_lock);

	BUG_ON(rcu_access_pointer(c->devs[ca->dev_idx]) != ca);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);

	synchronize_rcu();

	reinit_completion(&ca->stop_complete);
	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->stop_complete);
}

static int bch2_dev_sysfs_online(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;
		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}
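
/*
 * Allocate the in-memory struct bch_dev and its freelists, heaps and bucket
 * arrays for one member device; the device has no block device attached until
 * __bch2_dev_online().
 */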

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member *member;
	size_t reserve_none, movinggc_reserve, free_inc_reserve, total_reserve;
	size_t heap_size;
	unsigned i, btree_node_reserve_buckets;
	struct bch_dev *ca;

	if (bch2_fs_init_fault("dev_alloc"))
		return -ENOMEM;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return -ENOMEM;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->stop_complete);
	init_completion(&ca->offline_complete);

	spin_lock_init(&ca->self.lock);
	ca->self.nr = 1;
	rcu_assign_pointer(ca->self.d[0].dev, ca);
	ca->dev_idx = dev_idx;

	spin_lock_init(&ca->freelist_lock);
	bch2_dev_moving_gc_init(ca);

	INIT_WORK(&ca->io_error_work, bch2_nonfatal_io_error_work);

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	member = bch2_sb_get_members(c->disk_sb)->members + dev_idx;

	ca->mi = bch2_mi_to_cpu(member);
	ca->uuid = member->uuid;
	ca->bucket_bits = ilog2(ca->mi.bucket_size);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	/* XXX: tune these */
	movinggc_reserve = max_t(size_t, 16, ca->mi.nbuckets >> 7);
	reserve_none = max_t(size_t, 4, ca->mi.nbuckets >> 9);
	/*
	 * free_inc must be smaller than the copygc reserve: if it was bigger,
	 * one copygc iteration might not make enough buckets available to fill
	 * up free_inc and allow the allocator to make forward progress
	 */
	free_inc_reserve = movinggc_reserve / 2;
	heap_size = movinggc_reserve * 8;

	btree_node_reserve_buckets =
		DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->sb.btree_node_size);

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_release,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_release,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_BTREE], btree_node_reserve_buckets,
		       GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC],
		       movinggc_reserve, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free_inc_reserve, GFP_KERNEL) ||
	    !init_heap(&ca->alloc_heap, free_inc_reserve, GFP_KERNEL) ||
	    !init_heap(&ca->copygc_heap, heap_size, GFP_KERNEL) ||
	    !(ca->oldest_gens = kvpmalloc(ca->mi.nbuckets *
					  sizeof(u8),
					  GFP_KERNEL|__GFP_ZERO)) ||
	    !(ca->buckets = kvpmalloc(ca->mi.nbuckets *
				      sizeof(struct bucket),
				      GFP_KERNEL|__GFP_ZERO)) ||
	    !(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio)) ||
	    !(ca->sectors_written = alloc_percpu(*ca->sectors_written)))
		goto err;

	total_reserve = ca->free_inc.size;
	for (i = 0; i < RESERVE_NR; i++)
		total_reserve += ca->free[i].size;

	ca->copygc_write_point.group = &ca->self;
	ca->tiering_write_point.group = &ca->self;

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(ca))
		pr_warn("error creating sysfs objects");

	return 0;
err:
	bch2_dev_free(ca);
	return -ENOMEM;
}

static int __bch2_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->sb_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = c->devs[sb->sb->dev_idx];
	if (ca->disk_sb.bdev) {
		bch_err(c, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -EINVAL;
	}

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/*
	 * Increase journal write timeout if flushes to this device are
	 * expensive:
	 */
	if (!blk_queue_nonrot(bdev_get_queue(sb->bdev)) &&
	    journal_flushes_device(ca))
		c->journal.write_delay_ms =
			max(c->journal.write_delay_ms, 1000U);

	/* Commit: */
	ca->disk_sb = *sb;
	if (sb->mode & FMODE_EXCL)
		ca->disk_sb.bdev->bd_holder = ca;
	memset(sb, 0, sizeof(*sb));

	if (c->sb.nr_devices == 1)
		bdevname(ca->disk_sb.bdev, c->name);
	bdevname(ca->disk_sb.bdev, ca->name);

	if (bch2_dev_sysfs_online(ca))
		pr_warn("error creating sysfs objects");

	lg_local_lock(&c->usage_lock);
	if (!gc_will_visit(c, gc_phase(GC_PHASE_SB_METADATA)))
		bch2_mark_dev_metadata(c, ca);
	lg_local_unlock(&c->usage_lock);

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		struct bch_sb_field_journal *journal_buckets =
			bch2_sb_get_journal(ca->disk_sb.sb);
		bool has_journal =
			bch2_nr_journal_buckets(journal_buckets) >=
			BCH_JOURNAL_BUCKETS_MIN;

		bch2_dev_group_add(&c->tiers[ca->mi.tier].devs, ca);
		bch2_dev_group_add(&c->all_devs, ca);

		if (has_journal)
			bch2_dev_group_add(&c->journal.devs, ca);
	}

	percpu_ref_reinit(&ca->io_ref);
	return 0;
}

/* Device management: */
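
/*
 * Decide whether we can run (or keep running) degraded, given the replicas
 * status and the BCH_FORCE_IF_* flags: degraded or entirely lost
 * metadata/user data is only tolerated when the corresponding force flag was
 * passed.
 */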

static bool have_enough_devs(struct bch_fs *c,
			     struct replicas_status s,
			     unsigned flags)
{
	if ((s.replicas[BCH_DATA_JOURNAL].nr_offline ||
	     s.replicas[BCH_DATA_BTREE].nr_offline) &&
	    !(flags & BCH_FORCE_IF_METADATA_DEGRADED))
		return false;

	if ((!s.replicas[BCH_DATA_JOURNAL].nr_online ||
	     !s.replicas[BCH_DATA_BTREE].nr_online) &&
	    !(flags & BCH_FORCE_IF_METADATA_LOST))
		return false;

	if (s.replicas[BCH_DATA_USER].nr_offline &&
	    !(flags & BCH_FORCE_IF_DATA_DEGRADED))
		return false;

	if (!s.replicas[BCH_DATA_USER].nr_online &&
	    !(flags & BCH_FORCE_IF_DATA_LOST))
		return false;

	return true;
}

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct replicas_status s;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_RW:
		return true;
	case BCH_MEMBER_STATE_RO:
		if (ca->mi.state != BCH_MEMBER_STATE_RW)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw - 1 <= required;
	case BCH_MEMBER_STATE_FAILED:
	case BCH_MEMBER_STATE_SPARE:
		if (ca->mi.state != BCH_MEMBER_STATE_RW &&
		    ca->mi.state != BCH_MEMBER_STATE_RO)
			return true;

		/* do we have enough devices to read from? */
		s = __bch2_replicas_status(c, ca);

		pr_info("replicas: j %u %u b %u %u d %u %u",
			s.replicas[BCH_DATA_JOURNAL].nr_online,
			s.replicas[BCH_DATA_JOURNAL].nr_offline,
			s.replicas[BCH_DATA_BTREE].nr_online,
			s.replicas[BCH_DATA_BTREE].nr_offline,
			s.replicas[BCH_DATA_USER].nr_online,
			s.replicas[BCH_DATA_USER].nr_offline);

		return have_enough_devs(c, s, flags);
	default:
		BUG();
	}
}

static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct replicas_status s;
	struct bch_sb_field_members *mi;
	unsigned i, flags = c->opts.degraded
		? BCH_FORCE_IF_DEGRADED
		: 0;

	if (!c->opts.degraded) {
		mutex_lock(&c->sb_lock);
		mi = bch2_sb_get_members(c->disk_sb);

		for (i = 0; i < c->disk_sb->nr_devices; i++)
			if (bch2_dev_exists(c->disk_sb, mi, i) &&
			    !bch2_dev_is_online(c->devs[i]) &&
			    (c->devs[i]->mi.state == BCH_MEMBER_STATE_RW ||
			     c->devs[i]->mi.state == BCH_MEMBER_STATE_RO)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		mutex_unlock(&c->sb_lock);
	}

	s = bch2_replicas_status(c);

	return have_enough_devs(c, s, flags);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	bch2_moving_gc_stop(ca);

	/*
	 * This stops new data writes (e.g. to existing open data
	 * buckets) and then waits for all existing writes to
	 * complete.
	 */
	bch2_dev_allocator_stop(ca);
	bch2_dev_allocator_remove(c, ca);
}

static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (bch2_dev_allocator_start(ca))
		return "error starting allocator thread";

	if (bch2_moving_gc_start(ca))
		return "error starting moving GC thread";

	if (bch2_tiering_start(c))
		return "error starting tiering thread";

	return NULL;
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_sb_field_members *mi;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -EINVAL;

	if (new_state == BCH_MEMBER_STATE_RW) {
		if (__bch2_dev_read_write(c, ca))
			return -ENOMEM;
	} else {
		__bch2_dev_read_only(c, ca);
	}

	bch_notice(ca, "%s", bch2_dev_state[new_state]);

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	mutex_lock(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	mutex_unlock(&c->state_lock);

	return ret;
}

/* Device add/removal: */
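
/*
 * bch2_dev_remove() requires the device to already be RO and the remaining
 * devices to hold enough replicas; the member's slot in the superblock is
 * only zeroed once all pointers to the device are gone.
 */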

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_sb_field_members *mi;
	unsigned dev_idx = ca->dev_idx, data;
	int ret = -EINVAL;

	mutex_lock(&c->state_lock);

	percpu_ref_put(&ca->ref); /* XXX */

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		bch_err(ca, "Cannot remove RW device");
		goto err;
	}

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		goto err;
	}

	/*
	 * XXX: verify that dev_idx is really not in use anymore, anywhere
	 *
	 * flag_data_bad() does not check btree pointers
	 */
	ret = bch2_flag_data_bad(ca);
	if (ret) {
		bch_err(ca, "Remove failed");
		goto err;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
		bch_err(ca, "Remove failed, still has data (%x)", data);
		goto err;
	}

	bch2_journal_meta(&c->journal);

	__bch2_dev_offline(ca);
	bch2_dev_stop(ca);
	bch2_dev_free(ca);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	mutex_unlock(&c->state_lock);
	return 0;
err:
	mutex_unlock(&c->state_lock);
	return ret;
}

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bcache_superblock sb;
	const char *err;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members *mi, *dev_mi;
	struct bch_member saved_mi;
	unsigned dev_idx, nr_devices, u64s;
	int ret = -EINVAL;

	err = bch2_read_super(&sb, bch2_opts_empty(), path);
	if (err)
		return -EINVAL;

	err = bch2_sb_validate(&sb);
	if (err)
		return -EINVAL;

	err = bch2_dev_may_add(sb.sb, c);
	if (err)
		return -EINVAL;

	mutex_lock(&c->state_lock);
	mutex_lock(&c->sb_lock);

	/*
	 * Preserve the old cache member information (esp. tier)
	 * before we start bashing the disk stuff.
	 */
	dev_mi = bch2_sb_get_members(sb.sb);
	saved_mi = dev_mi->members[sb.sb->dev_idx];
	saved_mi.last_mount = cpu_to_le64(ktime_get_seconds());

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;

	mi = bch2_sb_get_members(c->disk_sb);
	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb, mi, dev_idx))
			goto have_slot;
no_slot:
	err = "no slots available in superblock";
	ret = -ENOSPC;
	goto err_unlock;

have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
	u64s = (sizeof(struct bch_sb_field_members) +
		sizeof(struct bch_member) * nr_devices) / sizeof(u64);
	err = "no space in superblock for member info";

	mi = bch2_fs_sb_resize_members(c, u64s);
	if (!mi)
		goto err_unlock;

	dev_mi = bch2_sb_resize_members(&sb, u64s);
	if (!dev_mi)
		goto err_unlock;

	memcpy(dev_mi, mi, u64s * sizeof(u64));
	dev_mi->members[dev_idx] = saved_mi;

	sb.sb->uuid		= c->disk_sb->uuid;
	sb.sb->dev_idx		= dev_idx;
	sb.sb->nr_devices	= nr_devices;

	/* commit new member info */
	memcpy(mi, dev_mi, u64s * sizeof(u64));
	c->disk_sb->nr_devices	= nr_devices;
	c->sb.nr_devices	= nr_devices;

	if (bch2_dev_alloc(c, dev_idx)) {
		err = "cannot allocate memory";
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (__bch2_dev_online(c, &sb)) {
		err = "bch2_dev_online() error";
		ret = -ENOMEM;
		goto err_unlock;
	}

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	ca = c->devs[dev_idx];
	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = "journal alloc failed";
		if (bch2_dev_journal_alloc(ca))
			goto err;

		err = __bch2_dev_read_write(c, ca);
		if (err)
			goto err;
	}

	mutex_unlock(&c->state_lock);
	return 0;
err_unlock:
	mutex_unlock(&c->sb_lock);
err:
	mutex_unlock(&c->state_lock);
	bch2_free_super(&sb);

	bch_err(c, "Unable to add device: %s", err);
	return ret ?: -EINVAL;
}

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bcache_superblock sb = { 0 };
	struct bch_dev *ca;
	unsigned dev_idx;
	const char *err;

	mutex_lock(&c->state_lock);

	err = bch2_read_super(&sb, bch2_opts_empty(), path);
	if (err)
		goto err;

	dev_idx = sb.sb->dev_idx;

	err = bch2_dev_in_fs(c->disk_sb, sb.sb);
	if (err)
		goto err;

	mutex_lock(&c->sb_lock);
	if (__bch2_dev_online(c, &sb)) {
		err = "__bch2_dev_online() error";
		mutex_unlock(&c->sb_lock);
		goto err;
	}
	mutex_unlock(&c->sb_lock);

	ca = c->devs[dev_idx];
	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = __bch2_dev_read_write(c, ca);
		if (err)
			goto err;
	}

	mutex_unlock(&c->state_lock);
	return 0;
err:
	mutex_unlock(&c->state_lock);
	bch2_free_super(&sb);
	bch_err(c, "error bringing %s online: %s", path, err);
	return -EINVAL;
}

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	mutex_lock(&c->state_lock);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot offline required disk");
		mutex_unlock(&c->state_lock);
		return -EINVAL;
	}

	__bch2_dev_read_only(c, ca);
	__bch2_dev_offline(ca);

	mutex_unlock(&c->state_lock);
	return 0;
}

int bch2_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned data;
	int ret;

	mutex_lock(&c->state_lock);

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		bch_err(ca, "Cannot migrate data off RW device");
		mutex_unlock(&c->state_lock);
		return -EINVAL;
	}

	mutex_unlock(&c->state_lock);

	ret = bch2_move_data_off_device(ca);
	if (ret) {
		bch_err(ca, "Error migrating data: %i", ret);
		return ret;
	}

	ret = bch2_move_metadata_off_device(ca);
	if (ret) {
		bch_err(ca, "Error migrating metadata: %i", ret);
		return ret;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
		bch_err(ca, "Migrate error: data still present (%x)", data);
		return -EINVAL;
	}

	return 0;
}

/* Filesystem open: */
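
/*
 * Open a filesystem from a set of device paths: read and validate every
 * superblock, treat the one with the highest seq as authoritative, bring each
 * device online, and start the filesystem if we have enough devices.
 */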
const char *bch2_fs_open(char * const *devices, unsigned nr_devices,
			 struct bch_opts opts, struct bch_fs **ret)
{
	const char *err;
	struct bch_fs *c = NULL;
	struct bcache_superblock *sb;
	unsigned i, best_sb = 0;

	if (!nr_devices)
		return "need at least one device";

	if (!try_module_get(THIS_MODULE))
		return "module unloading";

	err = "cannot allocate memory";
	sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto err;

	for (i = 0; i < nr_devices; i++) {
		err = bch2_read_super(&sb[i], opts, devices[i]);
		if (err)
			goto err;

		err = bch2_sb_validate(&sb[i]);
		if (err)
			goto err;
	}

	for (i = 1; i < nr_devices; i++)
		if (le64_to_cpu(sb[i].sb->seq) >
		    le64_to_cpu(sb[best_sb].sb->seq))
			best_sb = i;

	for (i = 0; i < nr_devices; i++) {
		err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
		if (err)
			goto err;
	}

	err = "cannot allocate memory";
	c = bch2_fs_alloc(sb[best_sb].sb, opts);
	if (!c)
		goto err;

	err = "bch2_dev_online() error";
	mutex_lock(&c->sb_lock);
	for (i = 0; i < nr_devices; i++)
		if (__bch2_dev_online(c, &sb[i])) {
			mutex_unlock(&c->sb_lock);
			goto err;
		}
	mutex_unlock(&c->sb_lock);

	err = "insufficient devices";
	if (!bch2_fs_may_start(c))
		goto err;

	if (!c->opts.nostart) {
		err = __bch2_fs_start(c);
		if (err)
			goto err;
	}

	err = bch2_fs_online(c);
	if (err)
		goto err;

	if (ret)
		*ret = c;
	else
		closure_put(&c->cl);

	err = NULL;
out:
	kfree(sb);
	module_put(THIS_MODULE);
	return err;
err:
	if (c)
		bch2_fs_stop(c);

	for (i = 0; i < nr_devices; i++)
		bch2_free_super(&sb[i]);
	goto out;
}

static const char *__bch2_fs_open_incremental(struct bcache_superblock *sb,
					      struct bch_opts opts)
{
	const char *err;
	struct bch_fs *c;
	bool allocated_fs = false;

	err = bch2_sb_validate(sb);
	if (err)
		return err;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(sb->sb->uuid);
	if (c) {
		closure_get(&c->cl);

		err = bch2_dev_in_fs(c->disk_sb, sb->sb);
		if (err)
			goto err;
	} else {
		c = bch2_fs_alloc(sb->sb, opts);
		err = "cannot allocate memory";
		if (!c)
			goto err;

		allocated_fs = true;
	}

	err = "bch2_dev_online() error";

	mutex_lock(&c->sb_lock);
	if (__bch2_dev_online(c, sb)) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}
	mutex_unlock(&c->sb_lock);

	if (!c->opts.nostart && bch2_fs_may_start(c)) {
		err = __bch2_fs_start(c);
		if (err)
			goto err;
	}

	err = __bch2_fs_online(c);
	if (err)
		goto err;

	closure_put(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return NULL;
err:
	mutex_unlock(&bch_fs_list_lock);

	if (allocated_fs)
		bch2_fs_stop(c);
	else if (c)
		closure_put(&c->cl);

	return err;
}

const char *bch2_fs_open_incremental(const char *path)
{
	struct bcache_superblock sb;
	struct bch_opts opts = bch2_opts_empty();
	const char *err;

	err = bch2_read_super(&sb, opts, path);
	if (err)
		return err;

	err = __bch2_fs_open_incremental(&sb, opts);
	bch2_free_super(&sb);

	return err;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();
	bch2_inode_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}

#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

module_exit(bcachefs_exit);
module_init(bcachefs_init);