// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets_waiting_for_journal.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "counters.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/pretty-printers.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

#include <trace/events/bcachefs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

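/*
 * KTYPE(type) expands to the sysfs boilerplate for one kobject type: an
 * attribute_group wrapping the type ## _files attribute array, a
 * NULL-terminated groups list, and a kobj_type wiring up type ## _release
 * and type ## _sysfs_ops. E.g. KTYPE(bch2_fs) defines bch2_fs_ktype from
 * bch2_fs_files, bch2_fs_release and bch2_fs_sysfs_ops.
 */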
#define KTYPE(type)                                                     \
static const struct attribute_group type ## _group = {                  \
        .attrs = type ## _files                                         \
};                                                                      \
                                                                        \
static const struct attribute_group *type ## _groups[] = {              \
        &type ## _group,                                                \
        NULL                                                            \
};                                                                      \
                                                                        \
static const struct kobj_type type ## _ktype = {                        \
        .release        = type ## _release,                             \
        .sysfs_ops      = &type ## _sysfs_ops,                          \
        .default_groups = type ## _groups                               \
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);
static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

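/*
 * Look up the open filesystem that a block device (by dev_t) belongs to. On
 * success a ref on the filesystem's closure is taken; the caller is expected
 * to drop it with closure_put(&c->cl).
 */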
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
        struct bch_fs *c;
        struct bch_dev *ca;
        unsigned i;

        mutex_lock(&bch_fs_list_lock);
        rcu_read_lock();

        list_for_each_entry(c, &bch_fs_list, list)
                for_each_member_device_rcu(ca, c, i, NULL)
                        if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
                                closure_get(&c->cl);
                                goto found;
                        }
        c = NULL;
found:
        rcu_read_unlock();
        mutex_unlock(&bch_fs_list_lock);

        return c;
}

static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
{
        struct bch_fs *c;

        lockdep_assert_held(&bch_fs_list_lock);

        list_for_each_entry(c, &bch_fs_list, list)
                if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid_le)))
                        return c;

        return NULL;
}

struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
{
        struct bch_fs *c;

        mutex_lock(&bch_fs_list_lock);
        c = __bch2_uuid_to_fs(uuid);
        if (c)
                closure_get(&c->cl);
        mutex_unlock(&bch_fs_list_lock);

        return c;
}

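/*
 * Resize the journal entry reservation that holds per-device usage: each
 * device needs one jset_entry_dev_usage header plus one
 * jset_entry_dev_usage_type per data type, expressed in u64s; the total is
 * that size times the current number of member devices, so it must be
 * recomputed whenever a device is added or removed.
 */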
static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i, nr = 0, u64s =
                ((sizeof(struct jset_entry_dev_usage) +
                  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
                sizeof(u64);

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i, NULL)
                nr++;
        rcu_read_unlock();

        bch2_journal_entry_res_resize(&c->journal,
                        &c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

static void __bch2_fs_read_only(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i, clean_passes = 0;
        u64 seq = 0;

        bch2_rebalance_stop(c);
        bch2_copygc_stop(c);
        bch2_gc_thread_stop(c);

        bch_verbose(c, "flushing journal and stopping allocators");

        /*
         * Keep flushing until we get two consecutive passes where nothing was
         * flushed and the journal sequence number didn't advance:
         */
        do {
                clean_passes++;

                if (bch2_btree_interior_updates_flush(c) ||
                    bch2_journal_flush_all_pins(&c->journal) ||
                    bch2_btree_flush_all_writes(c) ||
                    seq != atomic64_read(&c->journal.seq)) {
                        seq = atomic64_read(&c->journal.seq);
                        clean_passes = 0;
                }
        } while (clean_passes < 2);

        bch_verbose(c, "flushing journal and stopping allocators complete");

        if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
            !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
                set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
        bch2_fs_journal_stop(&c->journal);

        /*
         * After stopping journal:
         */
        for_each_member_device(ca, c, i)
                bch2_dev_allocator_remove(c, ca);
}

static void bch2_writes_disabled(struct percpu_ref *writes)
{
        struct bch_fs *c = container_of(writes, struct bch_fs, writes);

        set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
        wake_up(&bch_read_only_wait);
}

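/*
 * bch2_fs_read_only() runs in three steps: kill the writes ref so new
 * foreground writes fail with -EROFS, wait for in-flight writes to drain (or
 * for an emergency RO to be signalled), then tear down the RW machinery via
 * __bch2_fs_read_only(). If everything shut down cleanly and there were no
 * errors, the superblock is marked clean.
 */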
void bch2_fs_read_only(struct bch_fs *c)
{
        if (!test_bit(BCH_FS_RW, &c->flags)) {
                bch2_journal_reclaim_stop(&c->journal);
                return;
        }

        BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

        /*
         * Block new foreground-end write operations from starting - any new
         * writes will return -EROFS:
         */
        percpu_ref_kill(&c->writes);

        cancel_work_sync(&c->ec_stripe_delete_work);

        /*
         * If we're not doing an emergency shutdown, we want to wait on
         * outstanding writes to complete so they don't see spurious errors due
         * to shutting down the allocator:
         *
         * If we are doing an emergency shutdown, outstanding writes may hang
         * until we shut down the allocator, so we don't want to wait on
         * outstanding writes before shutting everything down - but we do need
         * to wait on them before returning and signalling that going RO is
         * complete:
         */
        wait_event(bch_read_only_wait,
                   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
                   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

        __bch2_fs_read_only(c);

        wait_event(bch_read_only_wait,
                   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

        clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);

        if (!bch2_journal_error(&c->journal) &&
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
            test_bit(BCH_FS_STARTED, &c->flags) &&
            test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
            !c->opts.norecovery) {
                bch_verbose(c, "marking filesystem clean");
                bch2_fs_mark_clean(c);
        }

        clear_bit(BCH_FS_RW, &c->flags);
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
        struct bch_fs *c =
                container_of(work, struct bch_fs, read_only_work);

        down_write(&c->state_lock);
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
        queue_work(system_long_wq, &c->read_only_work);
}

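/*
 * Returns true if this call was the one that flipped the filesystem to
 * emergency RO. The journal is halted first so nothing further reaches disk;
 * the actual transition to read-only then happens asynchronously.
 */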
bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
        bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

        bch2_journal_halt(&c->journal);
        bch2_fs_read_only_async(c);

        wake_up(&bch_read_only_wait);
        return ret;
}

static int bch2_fs_read_write_late(struct bch_fs *c)
{
        int ret;

        ret = bch2_gc_thread_start(c);
        if (ret) {
                bch_err(c, "error starting gc thread");
                return ret;
        }

        ret = bch2_copygc_start(c);
        if (ret) {
                bch_err(c, "error starting copygc thread");
                return ret;
        }

        ret = bch2_rebalance_start(c);
        if (ret) {
                bch_err(c, "error starting rebalance thread");
                return ret;
        }

        schedule_work(&c->ec_stripe_delete_work);

        return 0;
}

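/*
 * Going RW is split in two: __bch2_fs_read_write(c, true) is the early path,
 * used during recovery before the filesystem is fully up; the gc, copygc and
 * rebalance threads are only started by the late path, via
 * bch2_fs_read_write_late().
 */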
static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
        struct bch_dev *ca;
        unsigned i;
        int ret;

        if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
                bch_err(c, "cannot go rw, unfixed btree errors");
                return -EROFS;
        }

        if (test_bit(BCH_FS_RW, &c->flags))
                return 0;

        /*
         * nochanges is used for fsck -n mode - we have to allow going rw
         * during recovery for that to work:
         */
        if (c->opts.norecovery ||
            (c->opts.nochanges &&
             (!early || c->opts.read_only)))
                return -EROFS;

        bch_info(c, "going read-write");

        ret = bch2_fs_mark_dirty(c);
        if (ret)
                goto err;

        clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);

        for_each_rw_member(ca, c, i)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);

        bch2_do_discards(c);
        bch2_do_invalidates(c);

        if (!early) {
                ret = bch2_fs_read_write_late(c);
                if (ret)
                        goto err;
        }

        percpu_ref_reinit(&c->writes);
        set_bit(BCH_FS_RW, &c->flags);
        set_bit(BCH_FS_WAS_RW, &c->flags);
        return 0;
err:
        __bch2_fs_read_only(c);
        return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
        return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
        lockdep_assert_held(&c->state_lock);

        return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

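/*
 * Free everything allocated by bch2_fs_alloc(), in roughly the reverse order
 * it was set up; this runs from the kobject release path, once the last
 * reference to the filesystem kobject is gone.
 */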
static void __bch2_fs_free(struct bch_fs *c)
{
        unsigned i;
        int cpu;

        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_exit(&c->times[i]);

        bch2_fs_counters_exit(c);
        bch2_fs_snapshots_exit(c);
        bch2_fs_quota_exit(c);
        bch2_fs_fsio_exit(c);
        bch2_fs_ec_exit(c);
        bch2_fs_encryption_exit(c);
        bch2_fs_io_exit(c);
        bch2_fs_buckets_waiting_for_journal_exit(c);
        bch2_fs_btree_interior_update_exit(c);
        bch2_fs_btree_iter_exit(c);
        bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
        bch2_fs_btree_cache_exit(c);
        bch2_fs_replicas_exit(c);
        bch2_fs_journal_exit(&c->journal);
        bch2_io_clock_exit(&c->io_clock[WRITE]);
        bch2_io_clock_exit(&c->io_clock[READ]);
        bch2_fs_compress_exit(c);
        bch2_journal_keys_free(&c->journal_keys);
        bch2_journal_entries_free(c);
        percpu_free_rwsem(&c->mark_lock);

        if (c->btree_paths_bufs)
                for_each_possible_cpu(cpu)
                        kfree(per_cpu_ptr(c->btree_paths_bufs, cpu)->path);

        free_percpu(c->online_reserved);
        free_percpu(c->btree_paths_bufs);
        free_percpu(c->pcpu);
        mempool_exit(&c->large_bkey_pool);
        mempool_exit(&c->btree_bounce_pool);
        bioset_exit(&c->btree_bio);
        mempool_exit(&c->fill_iter);
        percpu_ref_exit(&c->writes);
        kfree(rcu_dereference_protected(c->disk_groups, 1));
        kfree(c->journal_seq_blacklist_table);
        kfree(c->unused_inode_hints);
        free_heap(&c->copygc_heap);

        if (c->io_complete_wq)
                destroy_workqueue(c->io_complete_wq);
        if (c->copygc_wq)
                destroy_workqueue(c->copygc_wq);
        if (c->btree_io_complete_wq)
                destroy_workqueue(c->btree_io_complete_wq);
        if (c->btree_update_wq)
                destroy_workqueue(c->btree_update_wq);

        bch2_free_super(&c->disk_sb);
        kvpfree(c, sizeof(*c));
        module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        __bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;

        bch_verbose(c, "shutting down");

        set_bit(BCH_FS_STOPPING, &c->flags);

        cancel_work_sync(&c->journal_seq_blacklist_gc_work);

        down_write(&c->state_lock);
        bch2_fs_read_only(c);
        up_write(&c->state_lock);

        for_each_member_device(ca, c, i)
                if (ca->kobj.state_in_sysfs &&
                    ca->disk_sb.bdev)
                        sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

        if (c->kobj.state_in_sysfs)
                kobject_del(&c->kobj);

        bch2_fs_debug_exit(c);
        bch2_fs_chardev_exit(c);

        kobject_put(&c->counters_kobj);
        kobject_put(&c->time_stats);
        kobject_put(&c->opts_dir);
        kobject_put(&c->internal);

        /* btree prefetch might have kicked off reads in the background: */
        bch2_btree_flush_all_reads(c);

        for_each_member_device(ca, c, i)
                cancel_work_sync(&ca->io_error_work);

        cancel_work_sync(&c->read_only_work);

        for (i = 0; i < c->sb.nr_devices; i++)
                if (c->devs[i])
                        bch2_free_super(&c->devs[i]->disk_sb);
}

void bch2_fs_free(struct bch_fs *c)
{
        unsigned i;

        mutex_lock(&bch_fs_list_lock);
        list_del(&c->list);
        mutex_unlock(&bch_fs_list_lock);

        closure_sync(&c->cl);
        closure_debug_destroy(&c->cl);

        for (i = 0; i < c->sb.nr_devices; i++)
                if (c->devs[i])
                        bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));

        bch_verbose(c, "shutdown complete");

        kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
        __bch2_fs_stop(c);
        bch2_fs_free(c);
}

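/*
 * Register the filesystem in sysfs, debugfs and the character device, and add
 * it to bch_fs_list; the caller must hold bch_fs_list_lock so the UUID
 * uniqueness check and the list insertion are atomic.
 */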
static int bch2_fs_online(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;
        int ret = 0;

        lockdep_assert_held(&bch_fs_list_lock);

        if (__bch2_uuid_to_fs(c->sb.uuid)) {
                bch_err(c, "filesystem UUID already open");
                return -EINVAL;
        }

        ret = bch2_fs_chardev_init(c);
        if (ret) {
                bch_err(c, "error creating character device");
                return ret;
        }

        bch2_fs_debug_init(c);

        ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
            kobject_add(&c->internal, &c->kobj, "internal") ?:
            kobject_add(&c->opts_dir, &c->kobj, "options") ?:
            kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
            kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
            bch2_opts_create_sysfs_files(&c->opts_dir);
        if (ret) {
                bch_err(c, "error creating sysfs objects");
                return ret;
        }

        down_write(&c->state_lock);

        for_each_member_device(ca, c, i) {
                ret = bch2_dev_sysfs_online(c, ca);
                if (ret) {
                        bch_err(c, "error creating sysfs objects");
                        percpu_ref_put(&ca->ref);
                        goto err;
                }
        }

        BUG_ON(!list_empty(&c->list));
        list_add(&c->list, &bch_fs_list);
err:
        up_write(&c->state_lock);
        return ret;
}

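/*
 * Allocate and initialize a struct bch_fs from an on-disk superblock: init
 * locks and subsystem state, apply superblock and mount options, allocate
 * workqueues, mempools and per-cpu state, then init each subsystem and
 * allocate a struct bch_dev per member device. Returns an ERR_PTR on failure.
 */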
static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
        struct bch_sb_field_members *mi;
        struct bch_fs *c;
        struct printbuf name = PRINTBUF;
        unsigned i, iter_size;
        int ret = 0;

        pr_verbose_init(opts, "");

        c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
        if (!c) {
                c = ERR_PTR(-ENOMEM);
                goto out;
        }

        __module_get(THIS_MODULE);

        closure_init(&c->cl, NULL);

        c->kobj.kset = bcachefs_kset;
        kobject_init(&c->kobj, &bch2_fs_ktype);
        kobject_init(&c->internal, &bch2_fs_internal_ktype);
        kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
        kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
        kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);

        c->minor                = -1;
        c->disk_sb.fs_sb        = true;

        init_rwsem(&c->state_lock);
        mutex_init(&c->sb_lock);
        mutex_init(&c->replicas_gc_lock);
        mutex_init(&c->btree_root_lock);
        INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

        init_rwsem(&c->gc_lock);
        mutex_init(&c->gc_gens_lock);

        for (i = 0; i < BCH_TIME_STAT_NR; i++)
                bch2_time_stats_init(&c->times[i]);

        bch2_fs_copygc_init(c);
        bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
        bch2_fs_allocator_background_init(c);
        bch2_fs_allocator_foreground_init(c);
        bch2_fs_rebalance_init(c);
        bch2_fs_quota_init(c);
        bch2_fs_ec_init_early(c);

        INIT_LIST_HEAD(&c->list);

        mutex_init(&c->usage_scratch_lock);

        mutex_init(&c->bio_bounce_pages_lock);
        mutex_init(&c->snapshot_table_lock);

        spin_lock_init(&c->btree_write_error_lock);

        INIT_WORK(&c->journal_seq_blacklist_gc_work,
                  bch2_blacklist_entries_gc);

        INIT_LIST_HEAD(&c->journal_iters);

        INIT_LIST_HEAD(&c->fsck_errors);
        mutex_init(&c->fsck_error_lock);

        INIT_LIST_HEAD(&c->ec_stripe_head_list);
        mutex_init(&c->ec_stripe_head_lock);

        INIT_LIST_HEAD(&c->ec_stripe_new_list);
        mutex_init(&c->ec_stripe_new_lock);

        INIT_LIST_HEAD(&c->data_progress_list);
        mutex_init(&c->data_progress_lock);

        spin_lock_init(&c->ec_stripes_heap_lock);

        seqcount_init(&c->gc_pos_lock);

        seqcount_init(&c->usage_lock);

        c->copy_gc_enabled              = 1;
        c->rebalance.enabled            = 1;
        c->promote_whole_extents        = true;

        c->journal.flush_write_time     = &c->times[BCH_TIME_journal_flush_write];
        c->journal.noflush_write_time   = &c->times[BCH_TIME_journal_noflush_write];
        c->journal.blocked_time         = &c->times[BCH_TIME_blocked_journal];
        c->journal.flush_seq_time       = &c->times[BCH_TIME_journal_flush_seq];

        bch2_fs_btree_cache_init_early(&c->btree_cache);

        mutex_init(&c->sectors_available_lock);

        ret = percpu_init_rwsem(&c->mark_lock);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        ret = bch2_sb_to_fs(c, sb);
        mutex_unlock(&c->sb_lock);

        if (ret)
                goto err;

        pr_uuid(&name, c->sb.user_uuid.b);
        strlcpy(c->name, name.buf, sizeof(c->name));
        printbuf_exit(&name);

        ret = name.allocation_failure ? -ENOMEM : 0;
        if (ret)
                goto err;

        /* Compat: */
        if (sb->version <= bcachefs_metadata_version_inode_v2 &&
            !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
                SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);

        if (sb->version <= bcachefs_metadata_version_inode_v2 &&
            !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
                SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

        c->opts = bch2_opts_default;
        ret = bch2_opts_from_sb(&c->opts, sb);
        if (ret)
                goto err;

        bch2_opts_apply(&c->opts, opts);

        /* key cache currently disabled for inodes, because of snapshots: */
        c->opts.inodes_use_key_cache = 0;

        c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
        if (c->opts.inodes_use_key_cache)
                c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;

        c->block_bits           = ilog2(block_sectors(c));
        c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

        if (bch2_fs_init_fault("fs_alloc")) {
                bch_err(c, "fs_alloc fault injected");
                ret = -EFAULT;
                goto err;
        }

        iter_size = sizeof(struct sort_iter) +
                (btree_blocks(c) + 1) * 2 *
                sizeof(struct sort_iter_set);

        c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

        if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
                                WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
            !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
                                WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
            percpu_ref_init(&c->writes, bch2_writes_disabled,
                            PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
            mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
            bioset_init(&c->btree_bio, 1,
                        max(offsetof(struct btree_read_bio, bio),
                            offsetof(struct btree_write_bio, wbio.bio)),
                        BIOSET_NEED_BVECS) ||
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
            !(c->btree_paths_bufs = alloc_percpu(struct btree_path_buf)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
            mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
                                        btree_bytes(c)) ||
            mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
            !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
                                              sizeof(u64), GFP_KERNEL))) {
                ret = -ENOMEM;
                goto err;
        }

        ret = bch2_fs_counters_init(c) ?:
            bch2_io_clock_init(&c->io_clock[READ]) ?:
            bch2_io_clock_init(&c->io_clock[WRITE]) ?:
            bch2_fs_journal_init(&c->journal) ?:
            bch2_fs_replicas_init(c) ?:
            bch2_fs_btree_cache_init(c) ?:
            bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
            bch2_fs_btree_iter_init(c) ?:
            bch2_fs_btree_interior_update_init(c) ?:
            bch2_fs_buckets_waiting_for_journal_init(c) ?:
            bch2_fs_subvolumes_init(c) ?:
            bch2_fs_io_init(c) ?:
            bch2_fs_encryption_init(c) ?:
            bch2_fs_compress_init(c) ?:
            bch2_fs_ec_init(c) ?:
            bch2_fs_fsio_init(c);
        if (ret)
                goto err;

        mi = bch2_sb_get_members(c->disk_sb.sb);
        for (i = 0; i < c->sb.nr_devices; i++)
                if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&
                    bch2_dev_alloc(c, i)) {
                        ret = -EEXIST;
                        goto err;
                }

        bch2_journal_entry_res_resize(&c->journal,
                        &c->btree_root_journal_res,
                        BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
        bch2_dev_usage_journal_reserve(c);
        bch2_journal_entry_res_resize(&c->journal,
                        &c->clock_journal_res,
                        (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

        mutex_lock(&bch_fs_list_lock);
        ret = bch2_fs_online(c);
        mutex_unlock(&bch_fs_list_lock);

        if (ret)
                goto err;
out:
        pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
        return c;
err:
        bch2_fs_free(c);
        c = ERR_PTR(ret);
        goto out;
}

noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
        enum bch_opt_id i;
        struct printbuf p = PRINTBUF;
        bool first = true;

        if (c->opts.read_only) {
                prt_printf(&p, "ro");
                first = false;
        }

        for (i = 0; i < bch2_opts_nr; i++) {
                const struct bch_option *opt = &bch2_opt_table[i];
                u64 v = bch2_opt_get_by_id(&c->opts, i);

                if (!(opt->flags & OPT_MOUNT))
                        continue;

                if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
                        continue;

                if (!first)
                        prt_printf(&p, ",");
                first = false;
                bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
        }

        if (!p.pos)
                prt_printf(&p, "(null)");

        bch_info(c, "mounted version=%s opts=%s", bch2_metadata_versions[c->sb.version], p.buf);
        printbuf_exit(&p);
}

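/*
 * bch2_fs_start() stamps last_mount into each online member, then either
 * replays the journal (bch2_fs_recovery()) for an existing filesystem or
 * formats a new one (bch2_fs_initialize()), and finally transitions to
 * read-write unless -o read_only or -o nochanges was given.
 */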
int bch2_fs_start(struct bch_fs *c)
{
        struct bch_sb_field_members *mi;
        struct bch_dev *ca;
        time64_t now = ktime_get_real_seconds();
        unsigned i;
        int ret = -EINVAL;

        down_write(&c->state_lock);

        BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));

        mutex_lock(&c->sb_lock);

        for_each_online_member(ca, c, i)
                bch2_sb_from_fs(c, ca);

        mi = bch2_sb_get_members(c->disk_sb.sb);
        for_each_online_member(ca, c, i)
                mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);

        mutex_unlock(&c->sb_lock);

        for_each_rw_member(ca, c, i)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);

        for (i = 0; i < BCH_TRANSACTIONS_NR; i++) {
                mutex_lock(&c->btree_transaction_stats[i].lock);
                bch2_time_stats_init(&c->btree_transaction_stats[i].lock_hold_times);
                mutex_unlock(&c->btree_transaction_stats[i].lock);
        }

        ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
                ? bch2_fs_recovery(c)
                : bch2_fs_initialize(c);
        if (ret)
                goto err;

        ret = bch2_opts_check_may_set(c);
        if (ret)
                goto err;

        ret = -EINVAL;
        if (bch2_fs_init_fault("fs_start")) {
                bch_err(c, "fs_start fault injected");
                goto err;
        }

        set_bit(BCH_FS_STARTED, &c->flags);

        if (c->opts.read_only || c->opts.nochanges) {
                bch2_fs_read_only(c);
        } else {
                ret = !test_bit(BCH_FS_RW, &c->flags)
                        ? bch2_fs_read_write(c)
                        : bch2_fs_read_write_late(c);
                if (ret)
                        goto err;
        }

        print_mount_opts(c);
        ret = 0;
out:
        up_write(&c->state_lock);
        return ret;
err:
        bch_err(c, "error starting filesystem: %s", bch2_err_str(ret));

        if (ret < -BCH_ERR_START)
                ret = -EINVAL;
        goto out;
}

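/*
 * The next two helpers validate a device's superblock against a running
 * filesystem (bch2_dev_may_add()) or against the filesystem's own superblock
 * (bch2_dev_in_fs()); both return a human-readable error string, or NULL if
 * the device is acceptable.
 */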
static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
        struct bch_sb_field_members *sb_mi;

        sb_mi = bch2_sb_get_members(sb);
        if (!sb_mi)
                return "Invalid superblock: member info area missing";

        if (le16_to_cpu(sb->block_size) != block_sectors(c))
                return "mismatched block size";

        if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
            BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
                return "new cache bucket size is too small";

        return NULL;
}

static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
        struct bch_sb *newest =
                le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
        struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

        if (uuid_le_cmp(fs->uuid, sb->uuid))
                return "device not a member of filesystem";

        if (!bch2_dev_exists(newest, mi, sb->dev_idx))
                return "device has been removed";

        if (fs->block_size != sb->block_size)
                return "mismatched block size";

        return NULL;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

        kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
        cancel_work_sync(&ca->io_error_work);

        if (ca->kobj.state_in_sysfs &&
            ca->disk_sb.bdev)
                sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

        if (ca->kobj.state_in_sysfs)
                kobject_del(&ca->kobj);

        bch2_free_super(&ca->disk_sb);
        bch2_dev_journal_exit(ca);

        free_percpu(ca->io_done);
        bioset_exit(&ca->replica_set);
        bch2_dev_buckets_free(ca);
        free_page((unsigned long) ca->sb_read_scratch);

        bch2_time_stats_exit(&ca->io_latency[WRITE]);
        bch2_time_stats_exit(&ca->io_latency[READ]);

        percpu_ref_exit(&ca->io_ref);
        percpu_ref_exit(&ca->ref);
        kobject_put(&ca->kobj);
}

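/*
 * Take a device offline without removing it from the filesystem: stop using
 * it for allocations, kill io_ref and wait for in-flight IO to drain, then
 * release its superblock and journal resources.
 */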
static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
        lockdep_assert_held(&c->state_lock);

        if (percpu_ref_is_zero(&ca->io_ref))
                return;

        __bch2_dev_read_only(c, ca);

        reinit_completion(&ca->io_ref_completion);
        percpu_ref_kill(&ca->io_ref);
        wait_for_completion(&ca->io_ref_completion);

        if (ca->kobj.state_in_sysfs) {
                sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
                sysfs_remove_link(&ca->kobj, "block");
        }

        bch2_free_super(&ca->disk_sb);
        bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
        struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

        complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
        struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

        complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
        int ret;

        if (!c->kobj.state_in_sysfs)
                return 0;

        if (!ca->kobj.state_in_sysfs) {
                ret = kobject_add(&ca->kobj, &c->kobj,
                                  "dev-%u", ca->dev_idx);
                if (ret)
                        return ret;
        }

        if (ca->disk_sb.bdev) {
                struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

                ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
                if (ret)
                        return ret;

                ret = sysfs_create_link(&ca->kobj, block, "block");
                if (ret)
                        return ret;
        }

        return 0;
}

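/*
 * A bch_dev has two percpu refs: ca->ref counts the device's existence within
 * the filesystem, while ca->io_ref counts it being online and usable for IO.
 * io_ref is initialized dead here and only brought up once a block device is
 * attached in __bch2_dev_attach_bdev().
 */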
static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
                                        struct bch_member *member)
{
        struct bch_dev *ca;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                return NULL;

        kobject_init(&ca->kobj, &bch2_dev_ktype);
        init_completion(&ca->ref_completion);
        init_completion(&ca->io_ref_completion);

        init_rwsem(&ca->bucket_lock);

        INIT_WORK(&ca->io_error_work, bch2_io_error_work);

        bch2_time_stats_init(&ca->io_latency[READ]);
        bch2_time_stats_init(&ca->io_latency[WRITE]);

        ca->mi = bch2_mi_to_cpu(member);
        ca->uuid = member->uuid;

        ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
                             ca->mi.bucket_size / btree_sectors(c));

        if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
                            0, GFP_KERNEL) ||
            percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
                            PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
            !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
            bch2_dev_buckets_alloc(c, ca) ||
            bioset_init(&ca->replica_set, 4,
                        offsetof(struct bch_write_bio, bio), 0) ||
            !(ca->io_done       = alloc_percpu(*ca->io_done)))
                goto err;

        return ca;
err:
        bch2_dev_free(ca);
        return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
                            unsigned dev_idx)
{
        ca->dev_idx = dev_idx;
        __set_bit(ca->dev_idx, ca->self.d);
        scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

        ca->fs = c;
        rcu_assign_pointer(c->devs[ca->dev_idx], ca);

        if (bch2_dev_sysfs_online(c, ca))
                pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
        struct bch_member *member =
                bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx;
        struct bch_dev *ca = NULL;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        if (bch2_fs_init_fault("dev_alloc"))
                goto err;

        ca = __bch2_dev_alloc(c, member);
        if (!ca)
                goto err;

        ca->fs = c;

        bch2_dev_attach(c, ca, dev_idx);
out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
err:
        if (ca)
                bch2_dev_free(ca);
        ret = -ENOMEM;
        goto out;
}

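/*
 * Attach an opened superblock handle to a bch_dev. On success we take
 * ownership of the handle (the caller's copy is zeroed out) and bring up
 * ca->io_ref, marking the device online.
 */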
static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
        unsigned ret;

        if (bch2_dev_is_online(ca)) {
                bch_err(ca, "already have device online in slot %u",
                        sb->sb->dev_idx);
                return -EINVAL;
        }

        if (get_capacity(sb->bdev->bd_disk) <
            ca->mi.bucket_size * ca->mi.nbuckets) {
                bch_err(ca, "cannot online: device too small");
                return -EINVAL;
        }

        BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

        ret = bch2_dev_journal_init(ca, sb->sb);
        if (ret)
                return ret;

        /* Commit: */
        ca->disk_sb = *sb;
        if (sb->mode & FMODE_EXCL)
                ca->disk_sb.bdev->bd_holder = ca;
        memset(sb, 0, sizeof(*sb));

        ca->dev = ca->disk_sb.bdev->bd_dev;

        percpu_ref_reinit(&ca->io_ref);

        return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
        struct bch_dev *ca;
        int ret;

        lockdep_assert_held(&c->state_lock);

        if (le64_to_cpu(sb->sb->seq) >
            le64_to_cpu(c->disk_sb.sb->seq))
                bch2_sb_to_fs(c, sb->sb);

        BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
               !c->devs[sb->sb->dev_idx]);

        ca = bch_dev_locked(c, sb->sb->dev_idx);

        ret = __bch2_dev_attach_bdev(ca, sb);
        if (ret)
                return ret;

        bch2_dev_sysfs_online(c, ca);

        if (c->sb.nr_devices == 1)
                snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
        snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);

        rebalance_wakeup(c);
        return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
                            enum bch_member_state new_state, int flags)
{
        struct bch_devs_mask new_online_devs;
        struct bch_dev *ca2;
        int i, nr_rw = 0, required;

        lockdep_assert_held(&c->state_lock);

        switch (new_state) {
        case BCH_MEMBER_STATE_rw:
                return true;
        case BCH_MEMBER_STATE_ro:
                if (ca->mi.state != BCH_MEMBER_STATE_rw)
                        return true;

                /* do we have enough devices to write to? */
                for_each_member_device(ca2, c, i)
                        if (ca2 != ca)
                                nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

                required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
                               ? c->opts.metadata_replicas
                               : c->opts.metadata_replicas_required,
                               !(flags & BCH_FORCE_IF_DATA_DEGRADED)
                               ? c->opts.data_replicas
                               : c->opts.data_replicas_required);

                return nr_rw >= required;
        case BCH_MEMBER_STATE_failed:
        case BCH_MEMBER_STATE_spare:
                if (ca->mi.state != BCH_MEMBER_STATE_rw &&
                    ca->mi.state != BCH_MEMBER_STATE_ro)
                        return true;

                /* do we have enough devices to read from? */
                new_online_devs = bch2_online_devs(c);
                __clear_bit(ca->dev_idx, new_online_devs.d);

                return bch2_have_enough_devs(c, new_online_devs, flags, false);
        default:
                BUG();
        }
}

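/*
 * Check whether we can start with the set of devices currently online: unless
 * -o degraded or -o very_degraded was given, every rw/ro member recorded in
 * the superblock must be present, and we must have enough devices for the
 * configured replication levels.
 */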
static bool bch2_fs_may_start(struct bch_fs *c)
{
        struct bch_sb_field_members *mi;
        struct bch_dev *ca;
        unsigned i, flags = 0;

        if (c->opts.very_degraded)
                flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

        if (c->opts.degraded)
                flags |= BCH_FORCE_IF_DEGRADED;

        if (!c->opts.degraded &&
            !c->opts.very_degraded) {
                mutex_lock(&c->sb_lock);
                mi = bch2_sb_get_members(c->disk_sb.sb);

                for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
                        if (!bch2_dev_exists(c->disk_sb.sb, mi, i))
                                continue;

                        ca = bch_dev_locked(c, i);

                        if (!bch2_dev_is_online(ca) &&
                            (ca->mi.state == BCH_MEMBER_STATE_rw ||
                             ca->mi.state == BCH_MEMBER_STATE_ro)) {
                                mutex_unlock(&c->sb_lock);
                                return false;
                        }
                }
                mutex_unlock(&c->sb_lock);
        }

        return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
        /*
         * The allocator thread itself allocates btree nodes, so stop it first:
         */
        bch2_dev_allocator_remove(c, ca);
        bch2_dev_journal_stop(&c->journal, ca);
}

static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
        lockdep_assert_held(&c->state_lock);

        BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

        bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
                         enum bch_member_state new_state, int flags)
{
        struct bch_sb_field_members *mi;
        int ret = 0;

        if (ca->mi.state == new_state)
                return 0;

        if (!bch2_dev_state_allowed(c, ca, new_state, flags))
                return -EINVAL;

        if (new_state != BCH_MEMBER_STATE_rw)
                __bch2_dev_read_only(c, ca);

        bch_notice(ca, "%s", bch2_member_states[new_state]);

        mutex_lock(&c->sb_lock);
        mi = bch2_sb_get_members(c->disk_sb.sb);
        SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (new_state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);

        rebalance_wakeup(c);

        return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
                       enum bch_member_state new_state, int flags)
{
        int ret;

        down_write(&c->state_lock);
        ret = __bch2_dev_set_state(c, ca, new_state, flags);
        up_write(&c->state_lock);

        return ret;
}

/* Device add/removal: */

static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
        struct bpos start       = POS(ca->dev_idx, 0);
        struct bpos end         = POS(ca->dev_idx, U64_MAX);
        int ret;

        /*
         * We clear the LRU and need_discard btrees first so that we don't race
         * with bch2_do_invalidates() and bch2_do_discards()
         */
        ret =   bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
                                        BTREE_TRIGGER_NORUN, NULL) ?:
                bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
                                        BTREE_TRIGGER_NORUN, NULL);
        if (ret)
                bch_err(c, "error removing dev alloc info: %s", bch2_err_str(ret));

        return ret;
}

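/*
 * Removing a device: set it read-only, migrate all of its data off, delete
 * its alloc info, flush any journal entries pinning it, verify nothing still
 * references it, then free the bch_dev and zero its slot in the member array.
 */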
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
        struct bch_sb_field_members *mi;
        unsigned dev_idx = ca->dev_idx, data;
        int ret = -EINVAL;

        down_write(&c->state_lock);

        /*
         * We consume a reference to ca->ref, regardless of whether we succeed
         * or fail:
         */
        percpu_ref_put(&ca->ref);

        if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
                bch_err(ca, "Cannot remove without losing data");
                goto err;
        }

        __bch2_dev_read_only(c, ca);

        ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
        if (ret) {
                bch_err(ca, "Remove failed: error dropping data: %s", bch2_err_str(ret));
                goto err;
        }

        ret = bch2_dev_remove_alloc(c, ca);
        if (ret) {
                bch_err(ca, "Remove failed, error deleting alloc info");
                goto err;
        }

        ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
        if (ret) {
                bch_err(ca, "Remove failed: error flushing journal: %s", bch2_err_str(ret));
                goto err;
        }

        ret = bch2_journal_flush(&c->journal);
        if (ret) {
                bch_err(ca, "Remove failed, journal error");
                goto err;
        }

        ret = bch2_replicas_gc2(c);
        if (ret) {
                bch_err(ca, "Remove failed: error from replicas gc: %s", bch2_err_str(ret));
                goto err;
        }

        data = bch2_dev_has_data(c, ca);
        if (data) {
                struct printbuf data_has = PRINTBUF;

                prt_bitflags(&data_has, bch2_data_types, data);
                bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
                printbuf_exit(&data_has);
                ret = -EBUSY;
                goto err;
        }

        __bch2_dev_offline(c, ca);

        mutex_lock(&c->sb_lock);
        rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
        mutex_unlock(&c->sb_lock);

        percpu_ref_kill(&ca->ref);
        wait_for_completion(&ca->ref_completion);

        bch2_dev_free(ca);

        /*
         * Free this device's slot in the bch_member array - all pointers to
         * this device must be gone:
         */
        mutex_lock(&c->sb_lock);
        mi = bch2_sb_get_members(c->disk_sb.sb);
        memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));

        bch2_write_super(c);

        mutex_unlock(&c->sb_lock);
        up_write(&c->state_lock);

        bch2_dev_usage_journal_reserve(c);
        return 0;
err:
        if (ca->mi.state == BCH_MEMBER_STATE_rw &&
            !percpu_ref_is_zero(&ca->io_ref))
                __bch2_dev_read_write(c, ca);
        up_write(&c->state_lock);
        return ret;
}

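/*
 * Adding a brand new device: validate and attach its superblock, allocate a
 * journal on it, grow the member info section of the filesystem superblock,
 * find a free device slot, then write the superblock and bring the device up
 * read-write if its member state allows it.
 */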
1518 /* Add new device to running filesystem: */
1519 int bch2_dev_add(struct bch_fs *c, const char *path)
1520 {
1521         struct bch_opts opts = bch2_opts_empty();
1522         struct bch_sb_handle sb;
1523         const char *err;
1524         struct bch_dev *ca = NULL;
1525         struct bch_sb_field_members *mi;
1526         struct bch_member dev_mi;
1527         unsigned dev_idx, nr_devices, u64s;
1528         struct printbuf errbuf = PRINTBUF;
1529         struct printbuf label = PRINTBUF;
1530         int ret;
1531
1532         ret = bch2_read_super(path, &opts, &sb);
1533         if (ret) {
1534                 bch_err(c, "device add error: error reading super: %s", bch2_err_str(ret));
1535                 goto err;
1536         }
1537
1538         dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];
1539
1540         if (BCH_MEMBER_GROUP(&dev_mi)) {
1541                 bch2_disk_path_to_text(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
1542                 if (label.allocation_failure) {
1543                         ret = -ENOMEM;
1544                         goto err;
1545                 }
1546         }
1547
1548         err = bch2_dev_may_add(sb.sb, c);
1549         if (err) {
1550                 bch_err(c, "device add error: %s", err);
1551                 ret = -EINVAL;
1552                 goto err;
1553         }
1554
1555         ca = __bch2_dev_alloc(c, &dev_mi);
1556         if (!ca) {
1557                 bch2_free_super(&sb);
1558                 ret = -ENOMEM;
1559                 goto err;
1560         }
1561
1562         bch2_dev_usage_init(ca);
1563
1564         ret = __bch2_dev_attach_bdev(ca, &sb);
1565         if (ret) {
1566                 bch2_dev_free(ca);
1567                 goto err;
1568         }
1569
1570         ret = bch2_dev_journal_alloc(ca);
1571         if (ret) {
1572                 bch_err(c, "device add error: journal alloc failed");
1573                 goto err;
1574         }
1575
1576         down_write(&c->state_lock);
1577         mutex_lock(&c->sb_lock);
1578
1579         ret = bch2_sb_from_fs(c, ca);
1580         if (ret) {
1581                 bch_err(c, "device add error: new device superblock too small");
1582                 goto err_unlock;
1583         }
1584
1585         mi = bch2_sb_get_members(ca->disk_sb.sb);
1586
1587         if (!bch2_sb_resize_members(&ca->disk_sb,
1588                                 le32_to_cpu(mi->field.u64s) +
1589                                 sizeof(dev_mi) / sizeof(u64))) {
1590                 bch_err(c, "device add error: new device superblock too small");
1591                 ret = -BCH_ERR_ENOSPC_sb_members;
1592                 goto err_unlock;
1593         }
1594
1595         if (dynamic_fault("bcachefs:add:no_slot"))
1596                 goto no_slot;
1597
1598         mi = bch2_sb_get_members(c->disk_sb.sb);
1599         for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
1600                 if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
1601                         goto have_slot;
1602 no_slot:
1603         bch_err(c, "device add error: already have maximum number of devices");
1604         ret = -BCH_ERR_ENOSPC_sb_members;
1605         goto err_unlock;
1606
1607 have_slot:
1608         nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
1609         u64s = (sizeof(struct bch_sb_field_members) +
1610                 sizeof(struct bch_member) * nr_devices) / sizeof(u64);
1611
1612         mi = bch2_sb_resize_members(&c->disk_sb, u64s);
1613         if (!mi) {
1614                 bch_err(c, "device add error: no room in superblock for member info");
1615                 ret = -BCH_ERR_ENOSPC_sb_members;
1616                 goto err_unlock;
1617         }
1618
1619         /* success: */
1620
1621         mi->members[dev_idx] = dev_mi;
1622         mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_real_seconds());
1623         c->disk_sb.sb->nr_devices       = nr_devices;
1624
1625         ca->disk_sb.sb->dev_idx = dev_idx;
1626         bch2_dev_attach(c, ca, dev_idx);
1627
1628         if (BCH_MEMBER_GROUP(&dev_mi)) {
1629                 ret = __bch2_dev_group_set(c, ca, label.buf);
1630                 if (ret) {
1631                         bch_err(c, "device add error: error setting label: %s", bch2_err_str(ret));
1632                         goto err_unlock;
1633                 }
1634         }
1635
1636         bch2_write_super(c);
1637         mutex_unlock(&c->sb_lock);
1638
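        /* nr_devices grew; re-reserve journal space for per-device usage: */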
1639         bch2_dev_usage_journal_reserve(c);
1640
1641         ret = bch2_trans_mark_dev_sb(c, ca);
1642         if (ret) {
1643                 bch_err(c, "device add error: error marking new superblock: %s", bch2_err_str(ret));
1644                 goto err_late;
1645         }
1646
1647         ret = bch2_fs_freespace_init(c);
1648         if (ret) {
1649                 bch_err(c, "device add error: error initializing free space: %s", bch2_err_str(ret));
1650                 goto err_late;
1651         }
1652
1653         ca->new_fs_bucket_idx = 0;
1654
1655         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1656                 __bch2_dev_read_write(c, ca);
1657
1658         up_write(&c->state_lock);
1659         return 0;
1660
1661 err_unlock:
1662         mutex_unlock(&c->sb_lock);
1663         up_write(&c->state_lock);
1664 err:
1665         if (ca)
1666                 bch2_dev_free(ca);
1667         bch2_free_super(&sb);
1668         printbuf_exit(&label);
1669         printbuf_exit(&errbuf);
1670         return ret;
1671 err_late:
1672         up_write(&c->state_lock);
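        /* ca now belongs to the fs and is freed at fs shutdown, not here: */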
1673         ca = NULL;
1674         goto err;
1675 }
1676
1677 /* Hot add existing device to running filesystem: */
1678 int bch2_dev_online(struct bch_fs *c, const char *path)
1679 {
1680         struct bch_opts opts = bch2_opts_empty();
1681         struct bch_sb_handle sb = { NULL };
1682         struct bch_sb_field_members *mi;
1683         struct bch_dev *ca;
1684         unsigned dev_idx;
1685         const char *err;
1686         int ret;
1687
1688         down_write(&c->state_lock);
1689
1690         ret = bch2_read_super(path, &opts, &sb);
1691         if (ret) {
1692                 up_write(&c->state_lock);
1693                 return ret;
1694         }
1695
1696         dev_idx = sb.sb->dev_idx;
1697
1698         err = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
1699         if (err) {
1700                 bch_err(c, "error bringing %s online: %s", path, err);
1701                 goto err;
1702         }
1703
1704         ret = bch2_dev_attach_bdev(c, &sb);
1705         if (ret)
1706                 goto err;
1707
1708         ca = bch_dev_locked(c, dev_idx);
1709
1710         ret = bch2_trans_mark_dev_sb(c, ca);
1711         if (ret) {
1712                 bch_err(c, "error bringing %s online: error from bch2_trans_mark_dev_sb: %s",
1713                         path, bch2_err_str(ret));
1714                 goto err;
1715         }
1716
1717         if (ca->mi.state == BCH_MEMBER_STATE_rw)
1718                 __bch2_dev_read_write(c, ca);
1719
1720         mutex_lock(&c->sb_lock);
1721         mi = bch2_sb_get_members(c->disk_sb.sb);
1722
1723         mi->members[ca->dev_idx].last_mount =
1724                 cpu_to_le64(ktime_get_real_seconds());
1725
1726         bch2_write_super(c);
1727         mutex_unlock(&c->sb_lock);
1728
1729         up_write(&c->state_lock);
1730         return 0;
1731 err:
1732         up_write(&c->state_lock);
1733         bch2_free_super(&sb);
1734         return ret ?: -EINVAL;
1735 }
1736
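/* Take a device offline, without removing it from the filesystem: */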
1737 int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
1738 {
1739         down_write(&c->state_lock);
1740
1741         if (!bch2_dev_is_online(ca)) {
1742                 bch_err(ca, "Already offline");
1743                 up_write(&c->state_lock);
1744                 return 0;
1745         }
1746
1747         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
1748                 bch_err(ca, "Cannot offline required disk");
1749                 up_write(&c->state_lock);
1750                 return -EINVAL;
1751         }
1752
1753         __bch2_dev_offline(c, ca);
1754
1755         up_write(&c->state_lock);
1756         return 0;
1757 }
1758
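/* Grow a member device to @nbuckets buckets; shrinking isn't supported yet: */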
1759 int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1760 {
1761         struct bch_member *mi;
1762         int ret = 0;
1763
1764         down_write(&c->state_lock);
1765
1766         if (nbuckets < ca->mi.nbuckets) {
1767                 bch_err(ca, "Cannot shrink yet");
1768                 ret = -EINVAL;
1769                 goto err;
1770         }
1771
1772         if (bch2_dev_is_online(ca) &&
1773             get_capacity(ca->disk_sb.bdev->bd_disk) <
1774             ca->mi.bucket_size * nbuckets) {
1775                 bch_err(ca, "New size larger than device");
1776                 ret = -EINVAL;
1777                 goto err;
1778         }
1779
1780         ret = bch2_dev_buckets_resize(c, ca, nbuckets);
1781         if (ret) {
1782                 bch_err(ca, "Resize error: %s", bch2_err_str(ret));
1783                 goto err;
1784         }
1785
1786         ret = bch2_trans_mark_dev_sb(c, ca);
1787         if (ret)
1788                 goto err;
1790
1791         mutex_lock(&c->sb_lock);
1792         mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
1793         mi->nbuckets = cpu_to_le64(nbuckets);
1794
1795         bch2_write_super(c);
1796         mutex_unlock(&c->sb_lock);
1797
1798         bch2_recalc_capacity(c);
1799 err:
1800         up_write(&c->state_lock);
1801         return ret;
1802 }
1803
1804 /* Look up a member device by name; caller must drop the ref taken on ca->ref: */
1805 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
1806 {
1807         struct bch_dev *ca;
1808         unsigned i;
1809
1810         rcu_read_lock();
1811         for_each_member_device_rcu(ca, c, i, NULL)
1812                 if (!strcmp(name, ca->name)) {
                        /* take the ref promised by the comment above */
                        percpu_ref_get(&ca->ref);
                        goto found;
                }
1814         ca = ERR_PTR(-ENOENT);
1815 found:
1816         rcu_read_unlock();
1817
1818         return ca;
1819 }
1820
1821 /* Filesystem open: */
1822
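/*
 * Open a filesystem from a set of member devices: read every superblock,
 * treat the one with the highest seq as most recent, drop any device it no
 * longer lists as a member, then allocate the fs and attach the devices.
 */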
1823 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
1824                             struct bch_opts opts)
1825 {
1826         struct bch_sb_handle *sb = NULL;
1827         struct bch_fs *c = NULL;
1828         struct bch_sb_field_members *mi;
1829         unsigned i, best_sb = 0;
1830         const char *err;
1831         struct printbuf errbuf = PRINTBUF;
1832         int ret = 0;
1833
1834         if (!try_module_get(THIS_MODULE))
1835                 return ERR_PTR(-ENODEV);
1836
1837         pr_verbose_init(opts, "");
1838
1839         if (!nr_devices) {
1840                 ret = -EINVAL;
1841                 goto err;
1842         }
1843
1844         sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
1845         if (!sb) {
1846                 ret = -ENOMEM;
1847                 goto err;
1848         }
1849
1850         for (i = 0; i < nr_devices; i++) {
1851                 ret = bch2_read_super(devices[i], &opts, &sb[i]);
1852                 if (ret)
1853                         goto err;
1855         }
1856
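        /* The superblock with the highest seq is the most recent one: */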
1857         for (i = 1; i < nr_devices; i++)
1858                 if (le64_to_cpu(sb[i].sb->seq) >
1859                     le64_to_cpu(sb[best_sb].sb->seq))
1860                         best_sb = i;
1861
1862         mi = bch2_sb_get_members(sb[best_sb].sb);
1863
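        /* Drop devices the most recent superblock no longer lists as members: */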
1864         i = 0;
1865         while (i < nr_devices) {
1866                 if (i != best_sb &&
1867                     !bch2_dev_exists(sb[best_sb].sb, mi, sb[i].sb->dev_idx)) {
1868                         pr_info("%pg has been removed, skipping", sb[i].bdev);
1869                         bch2_free_super(&sb[i]);
1870                         array_remove_item(sb, nr_devices, i);
1871                         continue;
1872                 }
1873
1874                 err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
1875                 if (err)
1876                         goto err_print;
1877                 i++;
1878         }
1879
1880         c = bch2_fs_alloc(sb[best_sb].sb, opts);
1881         if (IS_ERR(c)) {
1882                 ret = PTR_ERR(c);
1883                 goto err;
1884         }
1885
1886         down_write(&c->state_lock);
1887         for (i = 0; i < nr_devices; i++) {
1888                 ret = bch2_dev_attach_bdev(c, &sb[i]);
1889                 if (ret) {
1890                         up_write(&c->state_lock);
1891                         goto err;
1892                 }
1893         }
1894         up_write(&c->state_lock);
1895
1896         err = "insufficient devices";
1897         if (!bch2_fs_may_start(c))
1898                 goto err_print;
1899
1900         if (!c->opts.nostart) {
1901                 ret = bch2_fs_start(c);
1902                 if (ret)
1903                         goto err;
1904         }
1905 out:
1906         kfree(sb);
1907         printbuf_exit(&errbuf);
1908         module_put(THIS_MODULE);
1909         pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
1910         return c;
1911 err_print:
1912         pr_err("bch_fs_open: error opening %s: %s",
1913                devices[0], err);
1914         ret = -EINVAL;
1915 err:
1916         if (!IS_ERR_OR_NULL(c))
1917                 bch2_fs_stop(c);
1918         if (sb)
1919                 for (i = 0; i < nr_devices; i++)
1920                         bch2_free_super(&sb[i]);
1921         c = ERR_PTR(ret);
1922         goto out;
1923 }
1924
1925 /* Global interfaces/init */
1926
1927 static void bcachefs_exit(void)
1928 {
1929         bch2_debug_exit();
1930         bch2_vfs_exit();
1931         bch2_chardev_exit();
1932         bch2_btree_key_cache_exit();
1933         if (bcachefs_kset)
1934                 kset_unregister(bcachefs_kset);
1935 }
1936
1937 static int __init bcachefs_init(void)
1938 {
1939         bch2_bkey_pack_test();
1940
1941         if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
1942             bch2_btree_key_cache_init() ||
1943             bch2_chardev_init() ||
1944             bch2_vfs_init() ||
1945             bch2_debug_init())
1946                 goto err;
1947
1948         return 0;
1949 err:
1950         bcachefs_exit();
1951         return -ENOMEM;
1952 }
1953
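/* Expose each debug option from BCH_DEBUG_PARAMS() as a writable module param: */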
1954 #define BCH_DEBUG_PARAM(name, description)                      \
1955         bool bch2_##name;                                       \
1956         module_param_named(name, bch2_##name, bool, 0644);      \
1957         MODULE_PARM_DESC(name, description);
1958 BCH_DEBUG_PARAMS()
1959 #undef BCH_DEBUG_PARAM
1960
1961 module_exit(bcachefs_exit);
1962 module_init(bcachefs_init);