/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "quota.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "tier.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

#include <trace/events/bcachefs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

#define KTYPE(type)							\
struct kobj_type type ## _ktype = {					\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_attrs	= type ## _files				\
}
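
/*
 * For reference, the KTYPE() macro expands mechanically, so
 *
 *	static KTYPE(bch2_fs);
 *
 * becomes
 *
 *	static struct kobj_type bch2_fs_ktype = {
 *		.release	= bch2_fs_release,
 *		.sysfs_ops	= &bch2_fs_sysfs_ops,
 *		.default_attrs	= bch2_fs_files
 *	};
 *
 * with the _release() functions defined below, and the _sysfs_ops/_files
 * tables declared in sysfs.h.
 */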

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

static KTYPE(bch2_fs);
static KTYPE(bch2_fs_internal);
static KTYPE(bch2_fs_opts_dir);
static KTYPE(bch2_fs_time_stats);
static KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
			if (ca->disk_sb.bdev == bdev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}
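
/*
 * Illustrative usage (a sketch, not taken from this file): the returned fs
 * has a reference held on its closure, which the caller is expected to drop
 * when done:
 *
 *	struct bch_fs *c = bch2_bdev_to_fs(bdev);
 *
 *	if (c) {
 *		...
 *		closure_put(&c->cl);
 *	}
 */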

static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid_le)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

int bch2_congested(void *data, int bdi_bits)
{
	struct bch_fs *c = data;
	struct backing_dev_info *bdi;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	rcu_read_lock();
	if (bdi_bits & (1 << WB_sync_congested)) {
		/* Reads - check all devices: */
		for_each_readable_member(ca, c, i) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {
				ret = 1;
				break;
			}
		}
	} else {
		unsigned target = READ_ONCE(c->opts.foreground_target);
		const struct bch_devs_mask *devs = target
			? bch2_target_to_mask(c, target)
			: &c->rw_devs[BCH_DATA_USER];

		for_each_member_device_rcu(ca, c, i, devs) {
			bdi = ca->disk_sb.bdev->bd_bdi;

			if (bdi_congested(bdi, bdi_bits)) {
				ret = 1;
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */
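
/*
 * Accordingly, __bch2_fs_read_only() below tears things down in roughly
 * reverse dependency order: rebalance and copygc first, then the gc thread,
 * then the allocator threads, and finally the journal.
 */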

static void bch_fs_mark_clean(struct bch_fs *c)
{
	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) {
		mutex_lock(&c->sb_lock);
		SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}
}

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch2_rebalance_stop(c);

	for_each_member_device(ca, c, i)
		bch2_copygc_stop(ca);

	bch2_gc_thread_stop(c);

	/*
	 * Flush journal before stopping allocators, because flushing journal
	 * blacklist entries involves allocating new btree nodes:
	 */
	bch2_journal_flush_pins(&c->journal, U64_MAX - 1);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_stop(ca);

	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * We need to explicitly wait on btree interior updates to complete
	 * before stopping the journal; flushing all journal pins isn't
	 * sufficient, because in the BTREE_INTERIOR_UPDATING_ROOT case btree
	 * interior updates have to drop their journal pin before they're
	 * fully complete:
	 */
	closure_wait_event(&c->btree_interior_update_wait,
			   !bch2_btree_interior_updates_nr_pending(c));

	if (!test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
		bch2_btree_verify_flushed(c);

	bch2_fs_journal_stop(&c->journal);

	/*
	 * the journal kicks off btree writes via reclaim - wait for in flight
	 * writes after stopping journal:
	 */
	if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
		bch2_btree_flush_all_writes(c);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);
}

static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	wake_up(&bch_read_only_wait);
}

void bch2_fs_read_only(struct bch_fs *c)
{
	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RW)
		return;

	if (test_bit(BCH_FS_ERROR, &c->flags))
		return;

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 *
	 * (This is really blocking new _allocations_; writes to previously
	 * allocated space can still happen until stopping the allocator in
	 * bch2_dev_allocator_stop()).
	 */
	percpu_ref_kill(&c->writes);

	cancel_delayed_work(&c->pd_controllers_update);

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown, outstanding writes may hang
	 * until we shut down the allocator, so we don't want to wait on
	 * outstanding writes before shutting everything down - but we do need
	 * to wait on them before returning and signalling that going RO is
	 * complete:
	 */
	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
		   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

	__bch2_fs_read_only(c);

	bch_fs_mark_clean(c);

	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	c->state = BCH_FS_RO;
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	mutex_lock(&c->state_lock);
	bch2_fs_read_only(c);
	mutex_unlock(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

	bch2_fs_read_only_async(c);
	bch2_journal_halt(&c->journal);

	wake_up(&bch_read_only_wait);
	return ret;
}

const char *bch2_fs_read_write(struct bch_fs *c)
{
	struct bch_dev *ca;
	const char *err = NULL;
	unsigned i;

	if (c->state != BCH_FS_STARTING &&
	    c->state != BCH_FS_RO)
		return NULL;

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	err = "error starting allocator thread";
	for_each_rw_member(ca, c, i)
		if (bch2_dev_allocator_start(ca)) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}

	err = "error starting btree GC thread";
	if (bch2_gc_thread_start(c))
		goto err;

	err = "error starting copygc thread";
	for_each_rw_member(ca, c, i)
		if (bch2_copygc_start(c, ca)) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}

	err = "error starting rebalance thread";
	if (bch2_rebalance_start(c))
		goto err;

	schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);

	if (c->state != BCH_FS_STARTING)
		percpu_ref_reinit(&c->writes);

	c->state = BCH_FS_RW;
	return NULL;
err:
	__bch2_fs_read_only(c);
	return err;
}
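
/*
 * Note: like the other const char * returning functions here,
 * bch2_fs_read_write() returns NULL on success and a human-readable error
 * string on failure; on failure it has already unwound via
 * __bch2_fs_read_only(), so the filesystem is left read-only.
 */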

/* Filesystem startup/shutdown: */

static void bch2_fs_free(struct bch_fs *c)
{
#define BCH_TIME_STAT(name)				\
	bch2_time_stats_exit(&c->name##_time);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	bch2_fs_quota_exit(c);
	bch2_fs_fsio_exit(c);
	bch2_fs_encryption_exit(c);
	bch2_fs_io_exit(c);
	bch2_fs_btree_cache_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	lg_lock_free(&c->usage_lock);
	free_percpu(c->usage_percpu);
	mempool_exit(&c->btree_bounce_pool);
	bioset_exit(&c->btree_bio);
	mempool_exit(&c->btree_interior_update_pool);
	mempool_exit(&c->btree_reserve_pool);
	mempool_exit(&c->fill_iter);
	percpu_ref_exit(&c->writes);
	kfree(rcu_dereference_protected(c->replicas, 1));
	kfree(rcu_dereference_protected(c->disk_groups, 1));

	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->wq)
		destroy_workqueue(c->wq);

	free_pages((unsigned long) c->disk_sb.sb,
		   c->disk_sb.page_order);
	kvpfree(c, sizeof(*c));
	module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	bch2_fs_free(c);
}

void bch2_fs_stop(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&c->state_lock);
	BUG_ON(c->state == BCH_FS_STOPPING);
	c->state = BCH_FS_STOPPING;
	mutex_unlock(&c->state_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
					  "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	mutex_lock(&c->state_lock);
	__bch2_fs_read_only(c);
	mutex_unlock(&c->state_lock);

	bch_fs_mark_clean(c);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(ca, c, i)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->btree_write_error_work);
	cancel_delayed_work_sync(&c->pd_controllers_update);
	cancel_work_sync(&c->read_only_work);

	for (i = 0; i < c->sb.nr_devices; i++)
		if (c->devs[i])
			bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));

	kobject_put(&c->kobj);
}

static const char *bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	const char *err = NULL;
	unsigned i;
	int ret;

	lockdep_assert_held(&bch_fs_list_lock);

	if (!list_empty(&c->list))
		return NULL;

	if (__bch2_uuid_to_fs(c->sb.uuid))
		return "filesystem UUID already open";

	ret = bch2_fs_chardev_init(c);
	if (ret)
		return "error creating character device";

	bch2_fs_debug_init(c);

	if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
	    kobject_add(&c->internal, &c->kobj, "internal") ||
	    kobject_add(&c->opts_dir, &c->kobj, "options") ||
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ||
	    bch2_opts_create_sysfs_files(&c->opts_dir))
		return "error creating sysfs objects";

	mutex_lock(&c->state_lock);

	err = "error creating sysfs objects";
	__for_each_member_device(ca, c, i, NULL)
		if (bch2_dev_sysfs_online(c, ca))
			goto err;

	list_add(&c->list, &bch_fs_list);
	err = NULL;
err:
	mutex_unlock(&c->state_lock);
	return err;
}

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_sb_field_members *mi;
	struct bch_fs *c;
	unsigned i, iter_size;
	const char *err;

	pr_verbose_init(opts, "");

	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c)
		goto out;

	__module_get(THIS_MODULE);

	c->minor		= -1;
	c->disk_sb.fs_sb	= true;

	mutex_init(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);

#define BCH_TIME_STAT(name)				\
	bch2_time_stats_init(&c->name##_time);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	bch2_fs_allocator_init(c);
	bch2_fs_rebalance_init(c);
	bch2_fs_quota_init(c);

	INIT_LIST_HEAD(&c->list);

	INIT_LIST_HEAD(&c->btree_interior_update_list);
	mutex_init(&c->btree_reserve_cache_lock);
	mutex_init(&c->btree_interior_update_lock);

	mutex_init(&c->bio_bounce_pages_lock);

	bio_list_init(&c->btree_write_error_list);
	spin_lock_init(&c->btree_write_error_lock);
	INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);

	INIT_LIST_HEAD(&c->fsck_errors);
	mutex_init(&c->fsck_error_lock);

	seqcount_init(&c->gc_pos_lock);

	c->copy_gc_enabled		= 1;
	c->rebalance_enabled		= 1;
	c->rebalance_percent		= 10;
	c->promote_whole_extents	= true;

	c->journal.write_time	= &c->journal_write_time;
	c->journal.delay_time	= &c->journal_delay_time;
	c->journal.blocked_time	= &c->journal_blocked_time;
	c->journal.flush_seq_time = &c->journal_flush_seq_time;

	bch2_fs_btree_cache_init_early(&c->btree_cache);

	mutex_lock(&c->sb_lock);

	if (bch2_sb_to_fs(c, sb)) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	mutex_unlock(&c->sb_lock);

	scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);

	c->opts = bch2_opts_default;
	bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb));
	bch2_opts_apply(&c->opts, opts);

	c->block_bits		= ilog2(c->opts.block_size);
	c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

	c->opts.nochanges	|= c->opts.noreplay;
	c->opts.read_only	|= c->opts.nochanges;

	if (bch2_fs_init_fault("fs_alloc"))
		goto err;

	iter_size = sizeof(struct btree_node_iter_large) +
		(btree_blocks(c) + 1) * 2 *
		sizeof(struct btree_node_iter_set);

	if (!(c->wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcache_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
	    percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) ||
	    mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
				      sizeof(struct btree_reserve)) ||
	    mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
				      sizeof(struct btree_update)) ||
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_bio, 1,
			max(offsetof(struct btree_read_bio, bio),
			    offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->usage_percpu = alloc_percpu(struct bch_fs_usage)) ||
	    lg_lock_init(&c->usage_lock) ||
	    mempool_init_vp_pool(&c->btree_bounce_pool, 1, btree_bytes(c)) ||
	    bch2_io_clock_init(&c->io_clock[READ]) ||
	    bch2_io_clock_init(&c->io_clock[WRITE]) ||
	    bch2_fs_journal_init(&c->journal) ||
	    bch2_fs_btree_cache_init(c) ||
	    bch2_fs_io_init(c) ||
	    bch2_fs_encryption_init(c) ||
	    bch2_fs_compress_init(c) ||
	    bch2_fs_fsio_init(c))
		goto err;

	mi = bch2_sb_get_members(c->disk_sb.sb);
	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&
		    bch2_dev_alloc(c, i))
			goto err;

	/*
	 * Now that all allocations have succeeded, init various refcounty
	 * things that let us shutdown:
	 */
	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);

	mutex_lock(&bch_fs_list_lock);
	err = bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);
	if (err) {
		bch_err(c, "bch2_fs_online() error: %s", err);
		goto err;
	}
out:
	pr_verbose_init(opts, "ret %i", c ? 0 : -ENOMEM);
	return c;
err:
	bch2_fs_free(c);
	c = NULL;
	goto out;
}

const char *bch2_fs_start(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	LIST_HEAD(journal);
	struct jset *j;
	time64_t now;
	unsigned i;
	int ret = -EINVAL;

	mutex_lock(&c->state_lock);

	BUG_ON(c->state != BCH_FS_STARTING);

	mutex_lock(&c->sb_lock);
	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);
	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (BCH_SB_INITIALIZED(c->disk_sb.sb)) {
		ret = bch2_journal_read(c, &journal);
		if (ret)
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
		c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);

		for (i = 0; i < BTREE_ID_NR; i++) {
			unsigned level;
			struct bkey_i *k;

			k = bch2_journal_find_btree_root(c, j, i, &level);
			if (!k)
				continue;

			err = "invalid btree root pointer";
			if (IS_ERR(k))
				goto err;

			err = "error reading btree root";
			if (bch2_btree_root_read(c, i, k, level)) {
				if (i != BTREE_ID_ALLOC)
					goto err;

				mustfix_fsck_err(c, "error reading btree root");
			}
		}

		for (i = 0; i < BTREE_ID_NR; i++)
			if (!c->btree_roots[i].b)
				bch2_btree_root_alloc(c, i);

		err = "error reading allocation information";
		ret = bch2_alloc_read(c, &journal);
		if (ret)
			goto err;

		set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

		bch_verbose(c, "starting mark and sweep:");
		err = "error in recovery";
		ret = bch2_initial_gc(c, &journal);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");

		if (c->opts.noreplay)
			goto recovery_done;

		/*
		 * bch2_fs_journal_start() can't happen sooner, or btree_gc_finish()
		 * will give spurious errors about oldest_gen > bucket_gen -
		 * this is a hack but oh well.
		 */
		bch2_fs_journal_start(&c->journal);

		err = "error starting allocator";
		if (bch2_fs_allocator_start(c))
			goto err;

		bch_verbose(c, "starting journal replay:");
		err = "journal replay failed";
		ret = bch2_journal_replay(c, &journal);
		if (ret)
			goto err;
		bch_verbose(c, "journal replay done");

		if (c->opts.norecovery)
			goto recovery_done;

		bch_verbose(c, "starting fsck:");
		err = "error in fsck";
		ret = bch2_fsck(c, !c->opts.nofsck);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");

		if (enabled_qtypes(c)) {
			bch_verbose(c, "reading quotas:");
			ret = bch2_fs_quota_read(c);
			if (ret)
				goto err;
			bch_verbose(c, "quotas done");
		}
	} else {
		struct bch_inode_unpacked inode;
		struct bkey_inode_buf packed_inode;

		bch_notice(c, "initializing new filesystem");

		set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

		ret = bch2_initial_gc(c, &journal);
		if (ret)
			goto err;

		err = "unable to allocate journal buckets";
		for_each_online_member(ca, c, i)
			if (bch2_dev_journal_alloc(ca)) {
				percpu_ref_put(&ca->io_ref);
				goto err;
			}

		for (i = 0; i < BTREE_ID_NR; i++)
			bch2_btree_root_alloc(c, i);

		/*
		 * journal_res_get() will crash if called before this has
		 * set up the journal.pin FIFO and journal.cur pointer:
		 */
		bch2_fs_journal_start(&c->journal);
		bch2_journal_set_replay_done(&c->journal);

		err = "error starting allocator";
		if (bch2_fs_allocator_start(c))
			goto err;

		bch2_inode_init(c, &inode, 0, 0,
			       S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
		inode.bi_inum = BCACHEFS_ROOT_INO;

		bch2_inode_pack(&packed_inode, &inode);

		err = "error creating root directory";
		if (bch2_btree_insert(c, BTREE_ID_INODES,
				     &packed_inode.inode.k_i,
				     NULL, NULL, NULL, 0))
			goto err;

		if (enabled_qtypes(c)) {
			ret = bch2_fs_quota_read(c);
			if (ret)
				goto err;
		}

		err = "error writing first journal entry";
		if (bch2_journal_meta(&c->journal))
			goto err;
	}
recovery_done:
	err = "dynamic fault";
	if (bch2_fs_init_fault("fs_start"))
		goto err;

	if (c->opts.read_only) {
		bch2_fs_read_only(c);
	} else {
		err = bch2_fs_read_write(c);
		if (err)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb.sb);
	now = ktime_get_seconds();

	for_each_member_device(ca, c, i)
		mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);

	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_STARTED, &c->flags);

	err = NULL;
out:
	mutex_unlock(&c->state_lock);
	bch2_journal_entries_free(&journal);
	return err;
err:
fsck_err:
	switch (ret) {
	case BCH_FSCK_ERRORS_NOT_FIXED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("mount with -o fix_errors to repair\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_UNIMPLEMENTED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("repair unimplemented: inform the developers so that it can be added\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_IMPOSSIBLE:
		bch_err(c, "filesystem contains errors, but repair impossible");
		err = "fsck error";
		break;
	case BCH_FSCK_UNKNOWN_VERSION:
		err = "unknown metadata version";
		break;
	case -ENOMEM:
		err = "cannot allocate memory";
		break;
	case -EIO:
		err = "IO error";
		break;
	}

	BUG_ON(!err);
	set_bit(BCH_FS_ERROR, &c->flags);
	goto out;
}

static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_sb_field_members *sb_mi;

	sb_mi = bch2_sb_get_members(sb);
	if (!sb_mi)
		return "Invalid superblock: member info area missing";

	if (le16_to_cpu(sb->block_size) != c->opts.block_size)
		return "mismatched block size";

	if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
		return "new cache bucket size is too small";

	return NULL;
}

static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

	if (uuid_le_cmp(fs->uuid, sb->uuid))
		return "device not a member of filesystem";

	if (!bch2_dev_exists(newest, mi, sb->dev_idx))
		return "device has been removed";

	if (fs->block_size != sb->block_size)
		return "mismatched block size";

	return NULL;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
				  "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	bch2_dev_buckets_free(ca);

	bch2_time_stats_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	if (ca->kobj.state_in_sysfs) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		sysfs_remove_link(block, "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block =
			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;
		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}

static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);

	spin_lock_init(&ca->freelist_lock);
	bch2_dev_copygc_init(ca);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_init(&ca->io_latency[READ]);
	bch2_time_stats_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);
	ca->uuid = member->uuid;

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio), 0) ||
	    !(ca->io_done	= alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
			    unsigned dev_idx)
{
	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(c, ca))
		pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member *member =
		bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx;
	struct bch_dev *ca = NULL;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, member);
	if (!ca)
		goto err;

	bch2_dev_attach(c, ca, dev_idx);
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	ret = -ENOMEM;
	goto out;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	unsigned ret;

	if (bch2_dev_is_online(ca)) {
		bch_err(ca, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -EINVAL;
	}

	if (get_capacity(sb->bdev->bd_disk) <
	    ca->mi.bucket_size * ca->mi.nbuckets) {
		bch_err(ca, "cannot online: device too small");
		return -EINVAL;
	}

	BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/* Commit: */
	ca->disk_sb = *sb;
	if (sb->mode & FMODE_EXCL)
		ca->disk_sb.bdev->bd_holder = ca;
	memset(sb, 0, sizeof(*sb));
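	/*
	 * The handle's resources (block device, superblock buffer) now belong
	 * to ca->disk_sb; zeroing *sb means a later bch2_free_super() by the
	 * caller is a harmless no-op.
	 */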

	if (ca->fs)
		mutex_lock(&ca->fs->sb_lock);

	bch2_mark_dev_superblock(ca->fs, ca, BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE);

	if (ca->fs)
		mutex_unlock(&ca->fs->sb_lock);

	percpu_ref_reinit(&ca->io_ref);

	return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = bch_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	if (c->sb.nr_devices == 1)
		bdevname(ca->disk_sb.bdev, c->name);
	bdevname(ca->disk_sb.bdev, ca->name);

	rebalance_wakeup(c);
	return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	struct replicas_status s;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_RW:
		return true;
	case BCH_MEMBER_STATE_RO:
		if (ca->mi.state != BCH_MEMBER_STATE_RW)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw >= required;
	case BCH_MEMBER_STATE_FAILED:
	case BCH_MEMBER_STATE_SPARE:
		if (ca->mi.state != BCH_MEMBER_STATE_RW &&
		    ca->mi.state != BCH_MEMBER_STATE_RO)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		s = __bch2_replicas_status(c, new_online_devs);

		return bch2_have_enough_devs(s, flags);
	default:
		BUG();
	}
}

static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct replicas_status s;
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	unsigned i, flags = c->opts.degraded
		? BCH_FORCE_IF_DEGRADED
		: 0;

	if (!c->opts.degraded) {
		mutex_lock(&c->sb_lock);
		mi = bch2_sb_get_members(c->disk_sb.sb);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, mi, i))
				continue;

			ca = bch_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_RW ||
			     ca->mi.state == BCH_MEMBER_STATE_RO)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	s = bch2_replicas_status(c);

	return bch2_have_enough_devs(s, flags);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	bch2_copygc_stop(ca);

	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_stop(ca);
	bch2_dev_allocator_remove(c, ca);
	bch2_dev_journal_stop(&c->journal, ca);
}

static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (bch2_dev_allocator_start(ca))
		return "error starting allocator thread";

	if (bch2_copygc_start(c, ca))
		return "error starting copygc thread";

	return NULL;
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_sb_field_members *mi;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -EINVAL;

	if (new_state != BCH_MEMBER_STATE_RW)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_dev_state[new_state]);

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb.sb);
	SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (new_state == BCH_MEMBER_STATE_RW &&
	    __bch2_dev_read_write(c, ca))
		ret = -ENOMEM;

	rebalance_wakeup(c);

	return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	mutex_lock(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	mutex_unlock(&c->state_lock);

	return ret;
}

/* Device add/removal: */

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_sb_field_members *mi;
	unsigned dev_idx = ca->dev_idx, data;
	int ret = -EINVAL;

	mutex_lock(&c->state_lock);

	percpu_ref_put(&ca->ref); /* XXX */

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		goto err;
	}

	__bch2_dev_read_only(c, ca);

	/*
	 * XXX: verify that dev_idx is really not in use anymore, anywhere
	 *
	 * flag_data_bad() does not check btree pointers
	 */
	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	if (ret) {
		bch_err(ca, "Remove failed: error %i dropping data", ret);
		goto err;
	}

	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	if (ret) {
		bch_err(ca, "Remove failed: error %i flushing journal", ret);
		goto err;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
		char data_has_str[100];

		bch2_scnprint_flag_list(data_has_str,
					sizeof(data_has_str),
					bch2_data_types,
					data);
		bch_err(ca, "Remove failed, still has data (%s)", data_has_str);
		ret = -EBUSY;
		goto err;
	}

	ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC,
				      POS(ca->dev_idx, 0),
				      POS(ca->dev_idx + 1, 0),
				      ZERO_VERSION,
				      NULL, NULL, NULL);
	if (ret) {
		bch_err(ca, "Remove failed, error deleting alloc info");
		goto err;
	}

	/*
	 * must flush all existing journal entries, they might have
	 * (overwritten) keys that point to the device we're removing:
	 */
	ret = bch2_journal_flush_all_pins(&c->journal);
	if (ret) {
		bch_err(ca, "Remove failed, journal error");
		goto err;
	}

	__bch2_dev_offline(c, ca);

	mutex_lock(&c->sb_lock);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
	mutex_unlock(&c->sb_lock);

	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->ref_completion);

	bch2_dev_free(ca);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb.sb);
	memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	mutex_unlock(&c->state_lock);
	return 0;
err:
	if (ca->mi.state == BCH_MEMBER_STATE_RW)
		__bch2_dev_read_write(c, ca);
	mutex_unlock(&c->state_lock);
	return ret;
}

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb;
	const char *err;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members *mi;
	struct bch_member dev_mi;
	unsigned dev_idx, nr_devices, u64s;
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	if (ret)
		return ret;

	err = bch2_sb_validate(&sb);
	if (err) {
		bch2_free_super(&sb);
		return -EINVAL;
	}

	dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];

	err = bch2_dev_may_add(sb.sb, c);
	if (err) {
		bch2_free_super(&sb);
		return -EINVAL;
	}
1519
1520         ca = __bch2_dev_alloc(c, &dev_mi);
1521         if (!ca) {
1522                 bch2_free_super(&sb);
1523                 return -ENOMEM;
1524         }
1525
1526         ret = __bch2_dev_attach_bdev(ca, &sb);
1527         if (ret) {
1528                 bch2_dev_free(ca);
1529                 return ret;
1530         }
1531
1532         err = "journal alloc failed";
1533         ret = bch2_dev_journal_alloc(ca);
1534         if (ret)
1535                 goto err;
1536
1537         mutex_lock(&c->state_lock);
1538         mutex_lock(&c->sb_lock);
1539
1540         err = "insufficient space in new superblock";
1541         ret = bch2_sb_from_fs(c, ca);
1542         if (ret)
1543                 goto err_unlock;
1544
1545         mi = bch2_sb_get_members(ca->disk_sb.sb);
1546
1547         if (!bch2_sb_resize_members(&ca->disk_sb,
1548                                 le32_to_cpu(mi->field.u64s) +
1549                                 sizeof(dev_mi) / sizeof(u64))) {
1550                 ret = -ENOSPC;
1551                 goto err_unlock;
1552         }
1553
1554         if (dynamic_fault("bcachefs:add:no_slot"))
1555                 goto no_slot;
1556
1557         mi = bch2_sb_get_members(c->disk_sb.sb);
1558         for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
1559                 if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
1560                         goto have_slot;
1561 no_slot:
1562         err = "no slots available in superblock";
1563         ret = -ENOSPC;
1564         goto err_unlock;
1565
1566 have_slot:
1567         nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
1568         u64s = (sizeof(struct bch_sb_field_members) +
1569                 sizeof(struct bch_member) * nr_devices) / sizeof(u64);
1570
1571         err = "no space in superblock for member info";
1572         ret = -ENOSPC;
1573
1574         mi = bch2_sb_resize_members(&c->disk_sb, u64s);
1575         if (!mi)
1576                 goto err_unlock;
1577
1578         /* success: */
1579
1580         mi->members[dev_idx] = dev_mi;
1581         mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_seconds());
1582         c->disk_sb.sb->nr_devices       = nr_devices;
1583
1584         ca->disk_sb.sb->dev_idx = dev_idx;
1585         bch2_dev_attach(c, ca, dev_idx);
1586
1587         bch2_write_super(c);
1588         mutex_unlock(&c->sb_lock);
1589
1590         if (ca->mi.state == BCH_MEMBER_STATE_RW) {
1591                 err = __bch2_dev_read_write(c, ca);
1592                 if (err)
1593                         goto err_late;
1594         }
1595
1596         mutex_unlock(&c->state_lock);
1597         return 0;
1598
1599 err_unlock:
1600         mutex_unlock(&c->sb_lock);
1601         mutex_unlock(&c->state_lock);
1602 err:
1603         if (ca)
1604                 bch2_dev_free(ca);
1605         bch2_free_super(&sb);
1606         bch_err(c, "Unable to add device: %s", err);
1607         return ret;
1608 err_late:
1609         bch_err(c, "Error going rw after adding device: %s", err);
1610         return -EINVAL;
1611 }

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
        struct bch_opts opts = bch2_opts_empty();
        struct bch_sb_handle sb = { NULL };
        struct bch_dev *ca;
        unsigned dev_idx;
        const char *err;
        int ret;

        mutex_lock(&c->state_lock);

        ret = bch2_read_super(path, &opts, &sb);
        if (ret) {
                mutex_unlock(&c->state_lock);
                return ret;
        }

        dev_idx = sb.sb->dev_idx;

        err = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
        if (err)
                goto err;

        if (bch2_dev_attach_bdev(c, &sb)) {
                err = "bch2_dev_attach_bdev() error";
                goto err;
        }

        ca = bch_dev_locked(c, dev_idx);
        if (ca->mi.state == BCH_MEMBER_STATE_RW) {
                err = __bch2_dev_read_write(c, ca);
                if (err)
                        goto err;
        }

        mutex_unlock(&c->state_lock);
        return 0;
err:
        mutex_unlock(&c->state_lock);
        bch2_free_super(&sb);
        bch_err(c, "error bringing %s online: %s", path, err);
        return -EINVAL;
}

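/* Take an online member device offline without removing it from the fs: */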
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
        mutex_lock(&c->state_lock);

        if (!bch2_dev_is_online(ca)) {
                bch_err(ca, "Already offline");
                mutex_unlock(&c->state_lock);
                return 0;
        }

        if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
                bch_err(ca, "Cannot offline required disk");
                mutex_unlock(&c->state_lock);
                return -EINVAL;
        }

        __bch2_dev_offline(c, ca);

        mutex_unlock(&c->state_lock);
        return 0;
}

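/* Grow a member device to @nbuckets buckets; shrinking isn't supported yet: */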
int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
        struct bch_member *mi;
        int ret = 0;

        mutex_lock(&c->state_lock);

        if (nbuckets < ca->mi.nbuckets) {
                bch_err(ca, "Cannot shrink yet");
                ret = -EINVAL;
                goto err;
        }

        if (bch2_dev_is_online(ca) &&
            get_capacity(ca->disk_sb.bdev->bd_disk) <
            ca->mi.bucket_size * nbuckets) {
                bch_err(ca, "New size larger than device");
                ret = -EINVAL;
                goto err;
        }

        ret = bch2_dev_buckets_resize(c, ca, nbuckets);
        if (ret) {
                bch_err(ca, "Resize error: %i", ret);
                goto err;
        }

        mutex_lock(&c->sb_lock);
        mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
        mi->nbuckets = cpu_to_le64(nbuckets);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        bch2_recalc_capacity(c);
err:
        mutex_unlock(&c->state_lock);
        return ret;
}


/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path)
{
        struct block_device *bdev = lookup_bdev(path);
        struct bch_dev *ca;
        unsigned i;

        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        for_each_member_device(ca, c, i)
                if (ca->disk_sb.bdev == bdev)
                        goto found;

        ca = ERR_PTR(-ENOENT);
found:
        bdput(bdev);
        return ca;
}

/* Filesystem open: */

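/*
 * Open a filesystem from a list of device paths; typically reached from
 * the mount path with the colon-separated device list already split.
 *
 * Usage sketch (hypothetical caller):
 *
 *      char *devs[] = { "/dev/sda", "/dev/sdb" };
 *      struct bch_fs *c = bch2_fs_open(devs, 2, bch2_opts_empty());
 *
 *      if (IS_ERR(c))
 *              return PTR_ERR(c);
 */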
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                            struct bch_opts opts)
{
        struct bch_sb_handle *sb = NULL;
        struct bch_fs *c = NULL;
        unsigned i, best_sb = 0;
        const char *err;
        int ret = -ENOMEM;

        pr_verbose_init(opts, "");

        if (!nr_devices) {
                c = ERR_PTR(-EINVAL);
                goto out2;
        }

        if (!try_module_get(THIS_MODULE)) {
                c = ERR_PTR(-ENODEV);
                goto out2;
        }

        sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
        if (!sb)
                goto err;

        for (i = 0; i < nr_devices; i++) {
                ret = bch2_read_super(devices[i], &opts, &sb[i]);
                if (ret)
                        goto err;

                err = bch2_sb_validate(&sb[i]);
                if (err)
                        goto err_print;
        }

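        /* Use the superblock with the highest seq as the authoritative copy: */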
        for (i = 1; i < nr_devices; i++)
                if (le64_to_cpu(sb[i].sb->seq) >
                    le64_to_cpu(sb[best_sb].sb->seq))
                        best_sb = i;

        for (i = 0; i < nr_devices; i++) {
                err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
                if (err)
                        goto err_print;
        }

        ret = -ENOMEM;
        c = bch2_fs_alloc(sb[best_sb].sb, opts);
        if (!c)
                goto err;

        err = "bch2_dev_online() error";
        mutex_lock(&c->state_lock);
        for (i = 0; i < nr_devices; i++)
                if (bch2_dev_attach_bdev(c, &sb[i])) {
                        mutex_unlock(&c->state_lock);
                        goto err_print;
                }
        mutex_unlock(&c->state_lock);

        err = "insufficient devices";
        if (!bch2_fs_may_start(c))
                goto err_print;

        if (!c->opts.nostart) {
                err = bch2_fs_start(c);
                if (err)
                        goto err_print;
        }
out:
        kfree(sb);
        module_put(THIS_MODULE);
out2:
        pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
        return c;
err_print:
        pr_err("bch2_fs_open err opening %s: %s",
               devices[0], err);
        ret = -EINVAL;
err:
        if (c)
                bch2_fs_stop(c);
        for (i = 0; i < nr_devices; i++)
                bch2_free_super(&sb[i]);
        c = ERR_PTR(ret);
        goto out;
}

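/*
 * Open or assemble a filesystem one device at a time: allocate the
 * struct bch_fs on the first device seen, attach subsequent devices as
 * they appear, and start the filesystem once enough members are present:
 */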
static const char *__bch2_fs_open_incremental(struct bch_sb_handle *sb,
                                              struct bch_opts opts)
{
        const char *err;
        struct bch_fs *c;
        bool allocated_fs = false;

        err = bch2_sb_validate(sb);
        if (err)
                return err;

        mutex_lock(&bch_fs_list_lock);
        c = __bch2_uuid_to_fs(sb->sb->uuid);
        if (c) {
                closure_get(&c->cl);

                err = bch2_dev_in_fs(c->disk_sb.sb, sb->sb);
                if (err)
                        goto err;
        } else {
                c = bch2_fs_alloc(sb->sb, opts);
                err = "cannot allocate memory";
                if (!c)
                        goto err;

                allocated_fs = true;
        }

        err = "bch2_dev_online() error";

        mutex_lock(&c->sb_lock);
        if (bch2_dev_attach_bdev(c, sb)) {
                mutex_unlock(&c->sb_lock);
                goto err;
        }
        mutex_unlock(&c->sb_lock);

        if (!c->opts.nostart && bch2_fs_may_start(c)) {
                err = bch2_fs_start(c);
                if (err)
                        goto err;
        }

        closure_put(&c->cl);
        mutex_unlock(&bch_fs_list_lock);

        return NULL;
err:
        mutex_unlock(&bch_fs_list_lock);

        if (allocated_fs)
                bch2_fs_stop(c);
        else if (c)
                closure_put(&c->cl);

        return err;
}

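/* Convenience wrapper: read the superblock from @path, then open incrementally: */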
const char *bch2_fs_open_incremental(const char *path)
{
        struct bch_sb_handle sb;
        struct bch_opts opts = bch2_opts_empty();
        const char *err;

        if (bch2_read_super(path, &opts, &sb))
                return "error reading superblock";

        err = __bch2_fs_open_incremental(&sb, opts);
        bch2_free_super(&sb);

        return err;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
        bch2_debug_exit();
        bch2_vfs_exit();
        bch2_chardev_exit();
        if (bcachefs_kset)
                kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
        bch2_bkey_pack_test();
        bch2_inode_pack_test();

        if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
            bch2_chardev_init() ||
            bch2_vfs_init() ||
            bch2_debug_init())
                goto err;

        return 0;
err:
        bcachefs_exit();
        return -ENOMEM;
}

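/* Define a writable module parameter for each BCH_DEBUG_PARAMS() option: */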
#define BCH_DEBUG_PARAM(name, description)                      \
        bool bch2_##name;                                       \
        module_param_named(name, bch2_##name, bool, 0644);      \
        MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

module_exit(bcachefs_exit);
module_init(bcachefs_init);