// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"
#include "disk_groups.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "subvolume.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");

const char * const bch2_fs_flag_strs[] = {
#define x(n)		#n,
	BCH_FS_FLAGS()
#undef x
	NULL
};

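/*
 * If c->output is set, log messages go to the in-memory buffer it points to,
 * where a userspace reader can pick them up, instead of to the kernel log via
 * vprintk():
 */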
void __bch2_print(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (likely(!c->output)) {
		vprintk(fmt, args);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&c->output->lock, flags);
		prt_vprintf(&c->output->buf, fmt, args);
		spin_unlock_irqrestore(&c->output->lock, flags);

		wake_up(&c->output->wait);
	}
	va_end(args);
}

#define KTYPE(type)							\
static const struct attribute_group type ## _group = {			\
	.attrs = type ## _files						\
};									\
									\
static const struct attribute_group *type ## _groups[] = {		\
	&type ## _group,						\
	NULL								\
};									\
									\
static const struct kobj_type type ## _ktype = {			\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_groups = type ## _groups				\
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

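/*
 * Look up a filesystem by the block device of one of its member devices; on
 * success, a ref on the filesystem's closure is taken which the caller is
 * responsible for dropping:
 */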
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

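/*
 * Each journal entry reserves space for per-device usage information; the
 * reservation has to be resized whenever the number of member devices
 * changes:
 */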
static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, u64s =
		((sizeof(struct jset_entry_dev_usage) +
		  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
		sizeof(u64);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		nr++;
	rcu_read_unlock();

	bch2_journal_entry_res_resize(&c->journal,
			&c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, clean_passes = 0;
	u64 seq = 0;

	bch2_fs_ec_stop(c);
	bch2_open_buckets_stop(c, NULL, true);
	bch2_rebalance_stop(c);
	bch2_copygc_stop(c);
	bch2_gc_thread_stop(c);
	bch2_fs_ec_flush(c);

	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
		    journal_cur_seq(&c->journal));
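
	/*
	 * Flushing interior btree updates, journal pins and btree writes can
	 * each generate more work for the others, so keep looping until we've
	 * made two consecutive passes with nothing left to flush and a stable
	 * journal sequence number:
	 */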
	do {
		clean_passes++;

		if (bch2_btree_interior_updates_flush(c) ||
		    bch2_journal_flush_all_pins(&c->journal) ||
		    bch2_btree_flush_all_writes(c) ||
		    seq != atomic64_read(&c->journal.seq)) {
			seq = atomic64_read(&c->journal.seq);
			clean_passes = 0;
		}
	} while (clean_passes < 2);

	bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
		    journal_cur_seq(&c->journal));

	if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags))
		set_bit(BCH_FS_clean_shutdown, &c->flags);
	bch2_fs_journal_stop(&c->journal);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);
}

#ifndef BCH_WRITE_REF_DEBUG
static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_write_disable_complete, &c->flags);
	wake_up(&bch2_read_only_wait);
}
#endif

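/*
 * Transition to read-only: block new writes (they'll fail with -EROFS), wait
 * for outstanding writes to complete, then flush and shut down the journal and
 * allocators; if this was an orderly shutdown, the superblock is also marked
 * clean so the next mount can skip journal replay:
 */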
void bch2_fs_read_only(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_rw, &c->flags)) {
		bch2_journal_reclaim_stop(&c->journal);
		return;
	}

	BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));

	bch_verbose(c, "going read-only");

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 */
	set_bit(BCH_FS_going_ro, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_kill(&c->writes);
#else
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
		bch2_write_ref_put(c, i);
#endif

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown outstanding writes may
	 * hang until we shutdown the allocator so we don't want to wait
	 * on outstanding writes before shutting everything down - but
	 * we do need to wait on them before returning and signalling
	 * that going RO is complete:
	 */
	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags) ||
		   test_bit(BCH_FS_emergency_ro, &c->flags));

	bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
	if (writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	__bch2_fs_read_only(c);

	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags));

	if (!writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	clear_bit(BCH_FS_write_disable_complete, &c->flags);
	clear_bit(BCH_FS_going_ro, &c->flags);
	clear_bit(BCH_FS_rw, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags) &&
	    test_bit(BCH_FS_started, &c->flags) &&
	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
	    !c->opts.norecovery) {
		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
		BUG_ON(atomic_read(&c->btree_cache.dirty));
		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
		BUG_ON(c->btree_write_buffer.state.nr);

		bch_verbose(c, "marking filesystem clean");
		bch2_fs_mark_clean(c);
	} else {
		bch_verbose(c, "done going read-only, filesystem not clean");
	}
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);

	bch2_journal_halt(&c->journal);
	bch2_fs_read_only_async(c);

	wake_up(&bch2_read_only_wait);
	return ret;
}

static int bch2_fs_read_write_late(struct bch_fs *c)
{
	int ret;

	/*
	 * Data move operations can't run until after check_snapshots has
	 * completed, and bch2_snapshot_is_ancestor() is available.
	 *
	 * Ideally we'd start copygc/rebalance earlier instead of waiting for
	 * all of recovery/fsck to complete:
	 */
	ret = bch2_copygc_start(c);
	if (ret) {
		bch_err(c, "error starting copygc thread");
		return ret;
	}

	ret = bch2_rebalance_start(c);
	if (ret) {
		bch_err(c, "error starting rebalance thread");
		return ret;
	}

	return 0;
}

static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
		bch_err(c, "cannot go rw, unfixed btree errors");
		return -BCH_ERR_erofs_unfixed_errors;
	}

	if (test_bit(BCH_FS_rw, &c->flags))
		return 0;

	if (c->opts.norecovery)
		return -BCH_ERR_erofs_norecovery;

	/*
	 * nochanges is used for fsck -n mode - we have to allow going rw
	 * during recovery for that to work:
	 */
	if (c->opts.nochanges && (!early || c->opts.read_only))
		return -BCH_ERR_erofs_nochanges;

	bch_info(c, "going read-write");

	ret = bch2_sb_members_v2_init(c);
	if (ret)
		goto err;

	ret = bch2_fs_mark_dirty(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_clean_shutdown, &c->flags);

	/*
	 * First journal write must be a flush write: after a clean shutdown we
	 * don't read the journal, so the first journal write may end up
	 * overwriting whatever was there previously, and there must always be
	 * at least one non-flush write in the journal or recovery will fail:
	 */
	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	set_bit(BCH_FS_rw, &c->flags);
	set_bit(BCH_FS_was_rw, &c->flags);

#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_reinit(&c->writes);
#else
	for (i = 0; i < BCH_WRITE_REF_NR; i++) {
		BUG_ON(atomic_long_read(&c->writes[i]));
		atomic_long_inc(&c->writes[i]);
	}
#endif

	ret = bch2_gc_thread_start(c);
	if (ret) {
		bch_err(c, "error starting gc thread");
		return ret;
	}

	ret = bch2_journal_reclaim_start(&c->journal);
	if (ret)
		goto err;

	if (!early) {
		ret = bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	bch2_do_discards(c);
	bch2_do_invalidates(c);
	bch2_do_stripe_deletes(c);
	bch2_do_pending_node_rewrites(c);
	return 0;
err:
	if (test_bit(BCH_FS_rw, &c->flags))
		bch2_fs_read_only(c);
	else
		__bch2_fs_read_only(c);
	return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
	return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
	lockdep_assert_held(&c->state_lock);

	return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

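/*
 * Final teardown of the in-memory filesystem: subsystems are shut down in
 * roughly the reverse of the order they were initialized in bch2_fs_alloc():
 */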
static void __bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_exit(&c->times[i]);

	bch2_free_pending_node_rewrites(c);
	bch2_fs_sb_errors_exit(c);
	bch2_fs_counters_exit(c);
	bch2_fs_snapshots_exit(c);
	bch2_fs_quota_exit(c);
	bch2_fs_fs_io_direct_exit(c);
	bch2_fs_fs_io_buffered_exit(c);
	bch2_fs_fsio_exit(c);
	bch2_fs_ec_exit(c);
	bch2_fs_encryption_exit(c);
	bch2_fs_nocow_locking_exit(c);
	bch2_fs_io_write_exit(c);
	bch2_fs_io_read_exit(c);
	bch2_fs_buckets_waiting_for_journal_exit(c);
	bch2_fs_btree_interior_update_exit(c);
	bch2_fs_btree_iter_exit(c);
	bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
	bch2_fs_btree_cache_exit(c);
	bch2_fs_replicas_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	bch2_journal_keys_put_initial(c);
	BUG_ON(atomic_read(&c->journal_keys.ref));
	bch2_fs_btree_write_buffer_exit(c);
	percpu_free_rwsem(&c->mark_lock);
	free_percpu(c->online_reserved);

	darray_exit(&c->btree_roots_extra);
	free_percpu(c->pcpu);
	mempool_exit(&c->large_bkey_pool);
	mempool_exit(&c->btree_bounce_pool);
	bioset_exit(&c->btree_bio);
	mempool_exit(&c->fill_iter);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_exit(&c->writes);
#endif
	kfree(rcu_dereference_protected(c->disk_groups, 1));
	kfree(c->journal_seq_blacklist_table);
	kfree(c->unused_inode_hints);

	if (c->write_ref_wq)
		destroy_workqueue(c->write_ref_wq);
	if (c->io_complete_wq)
		destroy_workqueue(c->io_complete_wq);
	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->btree_io_complete_wq)
		destroy_workqueue(c->btree_io_complete_wq);
	if (c->btree_update_wq)
		destroy_workqueue(c->btree_update_wq);

	bch2_free_super(&c->disk_sb);
	kvpfree(c, sizeof(*c));
	module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	__bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_stopping, &c->flags);

	cancel_work_sync(&c->journal_seq_blacklist_gc_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->counters_kobj);
	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(ca, c, i)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->read_only_work);
}

void bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	for (i = 0; i < c->sb.nr_devices; i++) {
		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

		if (ca) {
			bch2_free_super(&ca->disk_sb);
			bch2_dev_free(ca);
		}
	}

	bch_verbose(c, "shutdown complete");

	kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
	__bch2_fs_stop(c);
	bch2_fs_free(c);
}

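/*
 * Make a new filesystem visible to the rest of the system: register the
 * character device and sysfs objects and add it to bch_fs_list, provided no
 * filesystem with the same UUID is already open:
 */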
static int bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	lockdep_assert_held(&bch_fs_list_lock);

	if (__bch2_uuid_to_fs(c->sb.uuid)) {
		bch_err(c, "filesystem UUID already open");
		return -EINVAL;
	}

	ret = bch2_fs_chardev_init(c);
	if (ret) {
		bch_err(c, "error creating character device");
		return ret;
	}

	bch2_fs_debug_init(c);

	ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
	    kobject_add(&c->internal, &c->kobj, "internal") ?:
	    kobject_add(&c->opts_dir, &c->kobj, "options") ?:
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
#endif
	    kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
	    bch2_opts_create_sysfs_files(&c->opts_dir);
	if (ret) {
		bch_err(c, "error creating sysfs objects");
		return ret;
	}

	down_write(&c->state_lock);

	for_each_member_device(ca, c, i) {
		ret = bch2_dev_sysfs_online(c, ca);
		if (ret) {
			bch_err(c, "error creating sysfs objects");
			percpu_ref_put(&ca->ref);
			goto err;
		}
	}

	BUG_ON(!list_empty(&c->list));
	list_add(&c->list, &bch_fs_list);
err:
	up_write(&c->state_lock);
	return ret;
}

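/*
 * Allocate and initialize the in-memory representation of a filesystem from
 * its superblock; on success the filesystem is online (visible in sysfs and on
 * bch_fs_list) but not yet started - see bch2_fs_start():
 */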
static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_fs *c;
	struct printbuf name = PRINTBUF;
	unsigned i, iter_size;
	int ret = 0;

	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c) {
		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
		goto out;
	}

	c->output = (void *)(unsigned long) opts.log_output;

	__module_get(THIS_MODULE);

	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
	kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);

	c->minor		= -1;
	c->disk_sb.fs_sb	= true;

	init_rwsem(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);
	mutex_init(&c->gc_gens_lock);
	atomic_set(&c->journal_keys.ref, 1);
	c->journal_keys.initial_ref_held = true;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_init(&c->times[i]);

	bch2_fs_copygc_init(c);
	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
	bch2_fs_btree_interior_update_init_early(c);
	bch2_fs_allocator_background_init(c);
	bch2_fs_allocator_foreground_init(c);
	bch2_fs_rebalance_init(c);
	bch2_fs_quota_init(c);
	bch2_fs_ec_init_early(c);
	bch2_fs_move_init(c);
	bch2_fs_sb_errors_init_early(c);

	INIT_LIST_HEAD(&c->list);

	mutex_init(&c->usage_scratch_lock);

	mutex_init(&c->bio_bounce_pages_lock);
	mutex_init(&c->snapshot_table_lock);
	init_rwsem(&c->snapshot_create_lock);

	spin_lock_init(&c->btree_write_error_lock);

	INIT_WORK(&c->journal_seq_blacklist_gc_work,
		  bch2_blacklist_entries_gc);

	INIT_LIST_HEAD(&c->journal_iters);

	INIT_LIST_HEAD(&c->fsck_error_msgs);
	mutex_init(&c->fsck_error_msgs_lock);

	seqcount_init(&c->gc_pos_lock);

	seqcount_init(&c->usage_lock);

	sema_init(&c->io_in_flight, 128);

	INIT_LIST_HEAD(&c->vfs_inodes_list);
	mutex_init(&c->vfs_inodes_lock);

	c->copy_gc_enabled	= 1;
	c->rebalance.enabled	= 1;
	c->promote_whole_extents = true;

	c->journal.flush_write_time	= &c->times[BCH_TIME_journal_flush_write];
	c->journal.noflush_write_time	= &c->times[BCH_TIME_journal_noflush_write];
	c->journal.flush_seq_time	= &c->times[BCH_TIME_journal_flush_seq];

	bch2_fs_btree_cache_init_early(&c->btree_cache);

	mutex_init(&c->sectors_available_lock);

	ret = percpu_init_rwsem(&c->mark_lock);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_to_fs(c, sb);
	mutex_unlock(&c->sb_lock);

	if (ret)
		goto err;

	pr_uuid(&name, c->sb.user_uuid.b);
	strscpy(c->name, name.buf, sizeof(c->name));
	printbuf_exit(&name);

	ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
	if (ret)
		goto err;

	/* Compat: */
	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
		SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);

	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
		SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

	c->opts = bch2_opts_default;
	ret = bch2_opts_from_sb(&c->opts, sb);
	if (ret)
		goto err;

	bch2_opts_apply(&c->opts, opts);

	c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
	if (c->opts.inodes_use_key_cache)
		c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
	c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;

	c->block_bits		= ilog2(block_sectors(c));
	c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

	if (bch2_fs_init_fault("fs_alloc")) {
		bch_err(c, "fs_alloc fault injected");
		ret = -EFAULT;
		goto err;
	}
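
	/*
	 * fill_iter is used for sorting bsets when reading btree nodes; size
	 * it for the worst case number of bsets a node can contain:
	 */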
	iter_size = sizeof(struct sort_iter) +
		(btree_blocks(c) + 1) * 2 *
		sizeof(struct sort_iter_set);

	c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

	if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
	    !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
				WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
	    !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
				WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
	    !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
				WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
	    percpu_ref_init(&c->writes, bch2_writes_disabled,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
#endif
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_bio, 1,
			max(offsetof(struct btree_read_bio, bio),
			    offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
	    !(c->online_reserved = alloc_percpu(u64)) ||
	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					c->opts.btree_node_size) ||
	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
					      sizeof(u64), GFP_KERNEL))) {
		ret = -BCH_ERR_ENOMEM_fs_other_alloc;
		goto err;
	}

	ret = bch2_fs_counters_init(c) ?:
	    bch2_fs_sb_errors_init(c) ?:
	    bch2_io_clock_init(&c->io_clock[READ]) ?:
	    bch2_io_clock_init(&c->io_clock[WRITE]) ?:
	    bch2_fs_journal_init(&c->journal) ?:
	    bch2_fs_replicas_init(c) ?:
	    bch2_fs_btree_cache_init(c) ?:
	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
	    bch2_fs_btree_iter_init(c) ?:
	    bch2_fs_btree_interior_update_init(c) ?:
	    bch2_fs_buckets_waiting_for_journal_init(c) ?:
	    bch2_fs_btree_write_buffer_init(c) ?:
	    bch2_fs_subvolumes_init(c) ?:
	    bch2_fs_io_read_init(c) ?:
	    bch2_fs_io_write_init(c) ?:
	    bch2_fs_nocow_locking_init(c) ?:
	    bch2_fs_encryption_init(c) ?:
	    bch2_fs_compress_init(c) ?:
	    bch2_fs_ec_init(c) ?:
	    bch2_fs_fsio_init(c) ?:
	    bch2_fs_fs_io_buffered_init(c) ?:
	    bch2_fs_fs_io_direct_init(c);
	if (ret)
		goto err;

	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, i) &&
		    bch2_dev_alloc(c, i)) {
			ret = -EEXIST;
			goto err;
		}

	bch2_journal_entry_res_resize(&c->journal,
			&c->btree_root_journal_res,
			BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
	bch2_dev_usage_journal_reserve(c);
	bch2_journal_entry_res_resize(&c->journal,
			&c->clock_journal_res,
			(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

	mutex_lock(&bch_fs_list_lock);
	ret = bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);

	if (ret)
		goto err;
out:
	return c;
err:
	bch2_fs_free(c);
	c = ERR_PTR(ret);
	goto out;
}

static void print_mount_opts(struct bch_fs *c)
{
	enum bch_opt_id i;
	struct printbuf p = PRINTBUF;
	bool first = true;

	prt_str(&p, "mounting version ");
	bch2_version_to_text(&p, c->sb.version);

	if (c->opts.read_only) {
		prt_str(&p, " opts=");
		first = false;
		prt_printf(&p, "ro");
	}

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->flags & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		prt_str(&p, first ? " opts=" : ",");
		first = false;
		bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
	}

	bch_info(c, "%s", p.buf);
	printbuf_exit(&p);
}

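/*
 * Bring an allocated filesystem fully up: run recovery (or initialize a brand
 * new filesystem), then go read-write unless the read_only or nochanges
 * options were given:
 */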
int bch2_fs_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	time64_t now = ktime_get_real_seconds();
	unsigned i;
	int ret;

	print_mount_opts(c);

	down_write(&c->state_lock);

	BUG_ON(test_bit(BCH_FS_started, &c->flags));

	mutex_lock(&c->sb_lock);

	ret = bch2_sb_members_v2_init(c);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	for_each_online_member(ca, c, i)
		bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);

	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
		? bch2_fs_recovery(c)
		: bch2_fs_initialize(c);
	if (ret)
		goto err;

	ret = bch2_opts_check_may_set(c);
	if (ret)
		goto err;

	if (bch2_fs_init_fault("fs_start")) {
		bch_err(c, "fs_start fault injected");
		ret = -EINVAL;
		goto err;
	}

	set_bit(BCH_FS_started, &c->flags);

	if (c->opts.read_only || c->opts.nochanges) {
		bch2_fs_read_only(c);
	} else {
		ret = !test_bit(BCH_FS_rw, &c->flags)
			? bch2_fs_read_write(c)
			: bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	ret = 0;
out:
	up_write(&c->state_lock);
	return ret;
err:
	bch_err_msg(c, ret, "starting filesystem");
	goto out;
}

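/*
 * Compatibility checks that must pass before a device's superblock may join a
 * filesystem:
 */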
static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);

	if (le16_to_cpu(sb->block_size) != block_sectors(c))
		return -BCH_ERR_mismatched_block_size;

	if (le16_to_cpu(m.bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
		return -BCH_ERR_bucket_size_too_small;

	return 0;
}

static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;

	if (!uuid_equal(&fs->uuid, &sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;

	if (!bch2_dev_exists(newest, sb->dev_idx))
		return -BCH_ERR_device_has_been_removed;

	if (fs->block_size != sb->block_size)
		return -BCH_ERR_mismatched_block_size;

	return 0;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	bch2_dev_buckets_free(ca);
	free_page((unsigned long) ca->sb_read_scratch);

	bch2_time_stats_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

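/*
 * Stop IO to a member device and release its block device and journal, while
 * keeping the bch_dev object itself alive:
 */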
static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	if (ca->kobj.state_in_sysfs) {
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;

		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}

static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;
	unsigned i;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_init(&ca->io_latency[READ]);
	bch2_time_stats_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);

	for (i = 0; i < ARRAY_SIZE(member->errors); i++)
		atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));

	ca->uuid = member->uuid;

	ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / btree_sectors(c));

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio), 0) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}

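/*
 * Link a newly allocated bch_dev into the filesystem's device array; lookups
 * of c->devs[] are done under RCU:
 */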
static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
			    unsigned dev_idx)
{
	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(c, ca))
		pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
	struct bch_dev *ca = NULL;
	int ret = 0;

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, &member);
	if (!ca)
		goto err;

	ca->fs = c;

	bch2_dev_attach(c, ca, dev_idx);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	return -BCH_ERR_ENOMEM_dev_alloc;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	int ret;

	if (bch2_dev_is_online(ca)) {
		bch_err(ca, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -BCH_ERR_device_already_online;
	}

	if (get_capacity(sb->bdev->bd_disk) <
	    ca->mi.bucket_size * ca->mi.nbuckets) {
		bch_err(ca, "cannot online: device too small");
		return -BCH_ERR_device_size_too_small;
	}

	BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/* Commit: */
	ca->disk_sb = *sb;
	memset(sb, 0, sizeof(*sb));

	ca->dev = ca->disk_sb.bdev->bd_dev;

	percpu_ref_reinit(&ca->io_ref);

	return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = bch_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	bch2_dev_sysfs_online(c, ca);

	if (c->sb.nr_devices == 1)
		snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
	snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);

	rebalance_wakeup(c);
	return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_rw:
		return true;
	case BCH_MEMBER_STATE_ro:
		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw >= required;
	case BCH_MEMBER_STATE_failed:
	case BCH_MEMBER_STATE_spare:
		if (ca->mi.state != BCH_MEMBER_STATE_rw &&
		    ca->mi.state != BCH_MEMBER_STATE_ro)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		return bch2_have_enough_devs(c, new_online_devs, flags, false);
	default:
		BUG();
	}
}

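/*
 * Unless the degraded or very_degraded options were given, we can only start
 * if every device the superblock lists as rw or ro is actually present:
 */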
static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, flags = 0;

	if (c->opts.very_degraded)
		flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

	if (c->opts.degraded)
		flags |= BCH_FORCE_IF_DEGRADED;

	if (!c->opts.degraded &&
	    !c->opts.very_degraded) {
		mutex_lock(&c->sb_lock);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, i))
				continue;

			ca = bch_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||
			     ca->mi.state == BCH_MEMBER_STATE_ro)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_remove(c, ca);
	bch2_dev_journal_stop(&c->journal, ca);
}

static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_member *m;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -BCH_ERR_device_state_not_allowed;

	if (new_state != BCH_MEMBER_STATE_rw)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_member_states[new_state]);

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_STATE(m, new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (new_state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	rebalance_wakeup(c);

	return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	down_write(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	up_write(&c->state_lock);

	return ret;
}

/* Device add/removal: */

static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start	= POS(ca->dev_idx, 0);
	struct bpos end		= POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_NORUN, NULL);
	bch_err_msg(c, ret, "removing dev alloc info");
	return ret;
}

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_member *m;
	unsigned dev_idx = ca->dev_idx, data;
	int ret;

	down_write(&c->state_lock);

	/*
	 * We consume a reference to ca->ref, regardless of whether we succeed
	 * or fail:
	 */
	percpu_ref_put(&ca->ref);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		ret = -BCH_ERR_device_state_not_allowed;
		goto err;
	}

	__bch2_dev_read_only(c, ca);

	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	if (ret) {
		bch_err_msg(ca, ret, "dropping data");
		goto err;
	}

	ret = bch2_dev_remove_alloc(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "deleting alloc info");
		goto err;
	}

	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	if (ret) {
		bch_err_msg(ca, ret, "flushing journal");
		goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	if (ret) {
		bch_err(ca, "journal error");
		goto err;
	}

	ret = bch2_replicas_gc2(c);
	if (ret) {
		bch_err_msg(ca, ret, "in replicas_gc2()");
		goto err;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
		struct printbuf data_has = PRINTBUF;

		prt_bitflags(&data_has, bch2_data_types, data);
		bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
		printbuf_exit(&data_has);
		ret = -EBUSY;
		goto err;
	}

	__bch2_dev_offline(c, ca);

	mutex_lock(&c->sb_lock);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
	mutex_unlock(&c->sb_lock);

	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->ref_completion);

	bch2_dev_free(ca);

	/*
	 * At this point the device object has been removed in-core, but the
	 * on-disk journal might still refer to the device index via sb device
	 * usage entries. Recovery fails if it sees usage information for an
	 * invalid device. Flush journal pins to push the back of the journal
	 * past now invalid device index references before we update the
	 * superblock, but after the device object has been removed so any
	 * further journal writes elide usage info for the device.
	 */
	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
	memset(&m->uuid, 0, sizeof(m->uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);

	bch2_dev_usage_journal_reserve(c);
	return 0;
err:
	if (ca->mi.state == BCH_MEMBER_STATE_rw &&
	    !percpu_ref_is_zero(&ca->io_ref))
		__bch2_dev_read_write(c, ca);
	up_write(&c->state_lock);
	return ret;
}

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members_v2 *mi;
	struct bch_member dev_mi;
	unsigned dev_idx, nr_devices, u64s;
	struct printbuf errbuf = PRINTBUF;
	struct printbuf label = PRINTBUF;
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		bch_err_msg(c, ret, "reading super");
		goto err;
	}

	dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
		if (label.allocation_failure) {
			ret = -ENOMEM;
			goto err;
		}
	}

	ret = bch2_dev_may_add(sb.sb, c);
	if (ret)
		goto err;

	ca = __bch2_dev_alloc(c, &dev_mi);
	if (!ca) {
		ret = -ENOMEM;
		goto err;
	}

	bch2_dev_usage_init(ca);

	ret = __bch2_dev_attach_bdev(ca, &sb);
	if (ret)
		goto err;

	ret = bch2_dev_journal_alloc(ca);
	if (ret) {
		bch_err_msg(c, ret, "allocating journal");
		goto err;
	}

	down_write(&c->state_lock);
	mutex_lock(&c->sb_lock);

	ret = bch2_sb_from_fs(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "setting up new superblock");
		goto err_unlock;
	}

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;

	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
			goto have_slot;
no_slot:
	ret = -BCH_ERR_ENOSPC_sb_members;
	bch_err_msg(c, ret, "setting up new superblock");
	goto err_unlock;

have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);

	mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
	u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
			    le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));

	mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
	if (!mi) {
		ret = -BCH_ERR_ENOSPC_sb_members;
		bch_err_msg(c, ret, "setting up new superblock");
		goto err_unlock;
	}
	struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);

	*m = dev_mi;
	m->last_mount = cpu_to_le64(ktime_get_real_seconds());
	c->disk_sb.sb->nr_devices	= nr_devices;

	ca->disk_sb.sb->dev_idx	= dev_idx;
	bch2_dev_attach(c, ca, dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		ret = __bch2_dev_group_set(c, ca, label.buf);
		if (ret) {
			bch_err_msg(c, ret, "creating new label");
			goto err_unlock;
		}
	}

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch2_dev_usage_journal_reserve(c);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "marking new superblock");
		goto err_late;
	}

	ret = bch2_fs_freespace_init(c);
	if (ret) {
		bch_err_msg(ca, ret, "initializing free space");
		goto err_late;
	}

	ca->new_fs_bucket_idx = 0;

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	up_write(&c->state_lock);
	return 0;

err_unlock:
	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);
err:
	if (ca)
		bch2_dev_free(ca);
	bch2_free_super(&sb);
	printbuf_exit(&label);
	printbuf_exit(&errbuf);
	return ret;
err_late:
	up_write(&c->state_lock);
	ca = NULL;
	goto err;
}

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb = { NULL };
	struct bch_dev *ca;
	unsigned dev_idx;
	int ret;

	down_write(&c->state_lock);

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		up_write(&c->state_lock);
		return ret;
	}

	dev_idx = sb.sb->dev_idx;

	ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online", path);
		goto err;
	}

	ret = bch2_dev_attach_bdev(c, &sb);
	if (ret)
		goto err;

	ca = bch_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
		goto err;
	}

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	if (!ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		bch_err_msg(ca, ret, "initializing free space");
		if (ret)
			goto err;
	}

	if (!ca->journal.nr) {
		ret = bch2_dev_journal_alloc(ca);
		bch_err_msg(ca, ret, "allocating journal");
		if (ret)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
		cpu_to_le64(ktime_get_real_seconds());
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	up_write(&c->state_lock);
	return 0;
err:
	up_write(&c->state_lock);
	bch2_free_super(&sb);
	return ret;
}

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	down_write(&c->state_lock);

	if (!bch2_dev_is_online(ca)) {
		bch_err(ca, "Already offline");
		up_write(&c->state_lock);
		return 0;
	}

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot offline required disk");
		up_write(&c->state_lock);
		return -BCH_ERR_device_state_not_allowed;
	}

	__bch2_dev_offline(c, ca);

	up_write(&c->state_lock);
	return 0;
}

int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bch_member *m;
	u64 old_nbuckets;
	int ret = 0;

	down_write(&c->state_lock);
	old_nbuckets = ca->mi.nbuckets;

	if (nbuckets < ca->mi.nbuckets) {
		bch_err(ca, "Cannot shrink yet");
		ret = -EINVAL;
		goto err;
	}

	if (bch2_dev_is_online(ca) &&
	    get_capacity(ca->disk_sb.bdev->bd_disk) <
	    ca->mi.bucket_size * nbuckets) {
		bch_err(ca, "New size larger than device");
		ret = -BCH_ERR_device_size_too_small;
		goto err;
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	if (ret) {
		bch_err_msg(ca, ret, "resizing buckets");
		goto err;
	}

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	m->nbuckets = cpu_to_le64(nbuckets);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
		if (ret)
			goto err;

		/*
		 * XXX: this is all wrong transactionally - we'll be able to do
		 * this correctly after the disk space accounting rewrite
		 */
		ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
	}

	bch2_recalc_capacity(c);
err:
	up_write(&c->state_lock);
	return ret;
}

/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		if (!strcmp(name, ca->name))
			goto found;
	ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
found:
	rcu_read_unlock();

	return ca;
}

/* Filesystem open: */

struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
			    struct bch_opts opts)
{
	DARRAY(struct bch_sb_handle) sbs = { 0 };
	struct bch_fs *c = NULL;
	struct bch_sb_handle *sb, *best = NULL;
	struct printbuf errbuf = PRINTBUF;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	if (!nr_devices) {
		ret = -EINVAL;
		goto err;
	}

	ret = darray_make_room(&sbs, nr_devices);
	if (ret)
		goto err;

	for (unsigned i = 0; i < nr_devices; i++) {
		struct bch_sb_handle sb = { NULL };

		ret = bch2_read_super(devices[i], &opts, &sb);
		if (ret)
			goto err;

		BUG_ON(darray_push(&sbs, sb));
	}
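
	/*
	 * The superblock with the highest sequence number is authoritative:
	 * devices it no longer lists as members have been removed and can be
	 * skipped:
	 */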
	darray_for_each(sbs, sb)
		if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
			best = sb;

	darray_for_each_reverse(sbs, sb) {
		if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
			pr_info("%pg has been removed, skipping", sb->bdev);
			bch2_free_super(sb);
			darray_remove_item(&sbs, sb);
			best -= best > sb;
			continue;
		}

		ret = bch2_dev_in_fs(best->sb, sb->sb);
		if (ret)
			goto err_print;
	}

	c = bch2_fs_alloc(best->sb, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	down_write(&c->state_lock);
	darray_for_each(sbs, sb) {
		ret = bch2_dev_attach_bdev(c, sb);
		if (ret) {
			up_write(&c->state_lock);
			goto err;
		}
	}
	up_write(&c->state_lock);

	if (!bch2_fs_may_start(c)) {
		ret = -BCH_ERR_insufficient_devices_to_start;
		goto err_print;
	}

	if (!c->opts.nostart) {
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}
out:
	darray_for_each(sbs, sb)
		bch2_free_super(sb);
	darray_exit(&sbs);
	printbuf_exit(&errbuf);
	module_put(THIS_MODULE);
	return c;
err_print:
	pr_err("bch_fs_open err opening %s: %s",
	       devices[0], bch2_err_str(ret));
err:
	if (!IS_ERR_OR_NULL(c))
		bch2_fs_stop(c);
	c = ERR_PTR(ret);
	goto out;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	bch2_btree_key_cache_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_btree_key_cache_init() ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}

#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);