#include "bcachefs.h"
#include "checksum.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "super-io.h"
#include "super.h"
#include "vstructs.h"

#include <linux/backing-dev.h>
#include <linux/sort.h>

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
static const char *bch2_sb_validate_replicas(struct bch_sb *);

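/*
 * The superblock layout is read and written as a single 512 byte sector;
 * this compile-time assertion keeps struct bch_sb_layout from growing past
 * that:
 */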
static inline void __bch2_sb_layout_size_assert(void)
{
        BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}

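/*
 * Optional superblock fields live as a list of variable length structs after
 * the fixed part of the superblock; scan the list for the requested type:
 */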
struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
                                      enum bch_sb_field_type type)
{
        struct bch_sb_field *f;

        /* XXX: need locking around superblock to access optional fields */

        vstruct_for_each(sb, f)
                if (le32_to_cpu(f->type) == type)
                        return f;
        return NULL;
}

void bch2_free_super(struct bcache_superblock *sb)
{
        if (sb->bio)
                bio_put(sb->bio);
        if (!IS_ERR_OR_NULL(sb->bdev))
                blkdev_put(sb->bdev, sb->mode);

        free_pages((unsigned long) sb->sb, sb->page_order);
        memset(sb, 0, sizeof(*sb));
}

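/*
 * (Re)allocate the in-memory superblock buffer and the bio used to read and
 * write it, preserving any existing superblock contents:
 */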
static int __bch2_super_realloc(struct bcache_superblock *sb, unsigned order)
{
        struct bch_sb *new_sb;
        struct bio *bio;

        if (sb->page_order >= order && sb->sb)
                return 0;

        if (dynamic_fault("bcachefs:add:super_realloc"))
                return -ENOMEM;

        bio = bio_kmalloc(GFP_KERNEL, 1 << order);
        if (!bio)
                return -ENOMEM;

        if (sb->bio)
                bio_put(sb->bio);
        sb->bio = bio;

        new_sb = (void *) __get_free_pages(GFP_KERNEL, order);
        if (!new_sb)
                return -ENOMEM;

        if (sb->sb)
                memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);

        free_pages((unsigned long) sb->sb, sb->page_order);
        sb->sb = new_sb;

        sb->page_order = order;

        return 0;
}

static int bch2_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
{
        u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s);
        u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

        if (new_bytes > max_bytes) {
                char buf[BDEVNAME_SIZE];

                pr_err("%s: superblock too big: want %llu but have %llu",
                       bdevname(sb->bdev, buf), new_bytes, max_bytes);
                return -ENOSPC;
        }

        return __bch2_super_realloc(sb, get_order(new_bytes));
}

static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
{
        u64 bytes = __vstruct_bytes(struct bch_sb, u64s);
        struct bch_sb *sb;
        unsigned order = get_order(bytes);

        if (c->disk_sb && order <= c->disk_sb_order)
                return 0;

        sb = (void *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!sb)
                return -ENOMEM;

        if (c->disk_sb)
                memcpy(sb, c->disk_sb, PAGE_SIZE << c->disk_sb_order);

        free_pages((unsigned long) c->disk_sb, c->disk_sb_order);

        c->disk_sb = sb;
        c->disk_sb_order = order;
        return 0;
}

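/*
 * Resize an optional field in place: fields after it are shifted to make
 * room (or close the gap), and any newly exposed space is zeroed. The
 * caller must already have reallocated the superblock buffer to fit:
 */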
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
                                                  struct bch_sb_field *f,
                                                  unsigned u64s)
{
        unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;

        if (!f) {
                f = vstruct_last(sb);
                memset(f, 0, sizeof(u64) * u64s);
                f->u64s = cpu_to_le32(u64s);
                f->type = 0;
        } else {
                void *src, *dst;

                src = vstruct_end(f);
                f->u64s = cpu_to_le32(u64s);
                dst = vstruct_end(f);

                memmove(dst, src, vstruct_end(sb) - src);

                if (dst > src)
                        memset(src, 0, dst - src);
        }

        le32_add_cpu(&sb->u64s, u64s - old_u64s);

        return f;
}

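/*
 * Usage sketch (hypothetical caller, error handling abbreviated) - create or
 * grow the journal field on a member device's superblock:
 *
 *      struct bch_sb_field *f =
 *              bch2_sb_field_resize(&ca->disk_sb, BCH_SB_FIELD_journal, u64s);
 *      if (!f)
 *              return -ENOMEM;
 */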
struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *sb,
                                         enum bch_sb_field_type type,
                                         unsigned u64s)
{
        struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
        ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
        ssize_t d = -old_u64s + u64s;

        if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
                return NULL;

        /* the superblock buffer may have moved: */
        f = bch2_sb_field_get(sb->sb, type);
        f = __bch2_sb_field_resize(sb->sb, f, u64s);
        f->type = cpu_to_le32(type);
        return f;
}

struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
                                            enum bch_sb_field_type type,
                                            unsigned u64s)
{
        struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type);
        ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
        ssize_t d = -old_u64s + u64s;
        struct bch_dev *ca;
        unsigned i;

        lockdep_assert_held(&c->sb_lock);

        if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
                return NULL;

        /* XXX: we're not checking that offline devices have enough space */

        for_each_online_member(ca, c, i) {
                struct bcache_superblock *sb = &ca->disk_sb;

                if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
                        percpu_ref_put(&ca->ref);
                        return NULL;
                }
        }

        /* the superblock buffer may have moved: */
        f = bch2_sb_field_get(c->disk_sb, type);
        f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
        f->type = cpu_to_le32(type);
        return f;
}

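/*
 * The layout sector records where every copy of the superblock lives; verify
 * the magic and make sure the listed copies don't overlap:
 */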
static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
        u64 offset, prev_offset, max_sectors;
        unsigned i;

        if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
                return "Not a bcachefs superblock layout";

        if (layout->layout_type != 0)
                return "Invalid superblock layout type";

        if (!layout->nr_superblocks)
                return "Invalid superblock layout: no superblocks";

        if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
                return "Invalid superblock layout: too many superblocks";

        max_sectors = 1 << layout->sb_max_size_bits;

        prev_offset = le64_to_cpu(layout->sb_offset[0]);

        for (i = 1; i < layout->nr_superblocks; i++) {
                offset = le64_to_cpu(layout->sb_offset[i]);

                if (offset < prev_offset + max_sectors)
                        return "Invalid superblock layout: superblocks overlap";
                prev_offset = offset;
        }

        return NULL;
}

static int u64_cmp(const void *_l, const void *_r)
{
        u64 l = *((const u64 *) _l), r = *((const u64 *) _r);

        return l < r ? -1 : l > r ? 1 : 0;
}

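/*
 * Validate the journal bucket list against the owning device's member info:
 * buckets are copied to a scratch buffer and sorted, so that duplicates and
 * out of range buckets are easy to check for:
 */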
const char *bch2_sb_validate_journal(struct bch_sb *sb,
                                     struct bch_member_cpu mi)
{
        struct bch_sb_field_journal *journal;
        const char *err;
        unsigned nr;
        unsigned i;
        u64 *b;

        journal = bch2_sb_get_journal(sb);
        if (!journal)
                return NULL;

        nr = bch2_nr_journal_buckets(journal);
        if (!nr)
                return NULL;

        b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
        if (!b)
                return "cannot allocate memory";

        for (i = 0; i < nr; i++)
                b[i] = le64_to_cpu(journal->buckets[i]);

        sort(b, nr, sizeof(u64), u64_cmp, NULL);

        err = "journal bucket at sector 0";
        if (!b[0])
                goto err;

        err = "journal bucket before first bucket";
        if (b[0] < mi.first_bucket)
                goto err;

        err = "journal bucket past end of device";
        if (b[nr - 1] >= mi.nbuckets)
                goto err;

        err = "duplicate journal buckets";
        for (i = 0; i + 1 < nr; i++)
                if (b[i] == b[i + 1])
                        goto err;

        err = NULL;
err:
        kfree(b);
        return err;
}

static const char *bch2_sb_validate_members(struct bch_sb *sb)
{
        struct bch_sb_field_members *mi;
        unsigned i;

        mi = bch2_sb_get_members(sb);
        if (!mi)
                return "Invalid superblock: member info area missing";

        if ((void *) (mi->members + sb->nr_devices) >
            vstruct_end(&mi->field))
                return "Invalid superblock: bad member info";

        for (i = 0; i < sb->nr_devices; i++) {
                if (!bch2_dev_exists(sb, mi, i))
                        continue;

                if (le16_to_cpu(mi->members[i].bucket_size) <
                    BCH_SB_BTREE_NODE_SIZE(sb))
                        return "bucket size smaller than btree node size";
        }

        return NULL;
}

const char *bch2_sb_validate(struct bcache_superblock *disk_sb)
{
        struct bch_sb *sb = disk_sb->sb;
        struct bch_sb_field *f;
        struct bch_sb_field_members *sb_mi;
        struct bch_member_cpu mi;
        const char *err;
        u16 block_size;

        switch (le64_to_cpu(sb->version)) {
        case BCACHE_SB_VERSION_CDEV_V4:
                break;
        default:
                return "Unsupported superblock version";
        }

        if (BCH_SB_INITIALIZED(sb) &&
            le64_to_cpu(sb->version) != BCACHE_SB_VERSION_CDEV_V4)
                return "Unsupported superblock version";

        block_size = le16_to_cpu(sb->block_size);

        if (!is_power_of_2(block_size) ||
            block_size > PAGE_SECTORS)
                return "Bad block size";

        if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
                return "Bad user UUID";

        if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
                return "Bad internal UUID";

        if (!sb->nr_devices ||
            sb->nr_devices <= sb->dev_idx ||
            sb->nr_devices > BCH_SB_MEMBERS_MAX)
                return "Bad cache device number in set";

        if (!BCH_SB_META_REPLICAS_WANT(sb) ||
            BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
                return "Invalid number of metadata replicas";

        if (!BCH_SB_META_REPLICAS_REQ(sb) ||
            BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
                return "Invalid number of metadata replicas";

        if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
            BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
                return "Invalid number of data replicas";

        if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
            BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
                return "Invalid number of data replicas";

        if (!BCH_SB_BTREE_NODE_SIZE(sb))
                return "Btree node size not set";

        if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
                return "Btree node size not a power of two";

        if (BCH_SB_BTREE_NODE_SIZE(sb) > BTREE_NODE_SIZE_MAX)
                return "Btree node size too large";

        if (BCH_SB_GC_RESERVE(sb) < 5)
                return "gc reserve percentage too small";

        if (!sb->time_precision ||
            le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
                return "invalid time precision";

        /* validate layout */
        err = validate_sb_layout(&sb->layout);
        if (err)
                return err;

        vstruct_for_each(sb, f) {
                if (!f->u64s)
                        return "Invalid superblock: invalid optional field";

                if (vstruct_next(f) > vstruct_last(sb))
                        return "Invalid superblock: invalid optional field";

                if (le32_to_cpu(f->type) >= BCH_SB_FIELD_NR)
                        return "Invalid superblock: unknown optional field type";
        }

        err = bch2_sb_validate_members(sb);
        if (err)
                return err;

        sb_mi = bch2_sb_get_members(sb);
        mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx);

        if (mi.nbuckets > LONG_MAX)
                return "Too many buckets";

        if (mi.nbuckets - mi.first_bucket < (1 << 10))
                return "Not enough buckets";

        if (!is_power_of_2(mi.bucket_size) ||
            mi.bucket_size < PAGE_SECTORS ||
            mi.bucket_size < block_size)
                return "Bad bucket size";

        if (get_capacity(disk_sb->bdev->bd_disk) <
            mi.bucket_size * mi.nbuckets)
                return "Invalid superblock: device too small";

        err = bch2_sb_validate_journal(sb, mi);
        if (err)
                return err;

        err = bch2_sb_validate_replicas(sb);
        if (err)
                return err;

        return NULL;
}

/* device open: */

static const char *bch2_blkdev_open(const char *path, fmode_t mode,
                                   void *holder, struct block_device **ret)
{
        struct block_device *bdev;

        *ret = NULL;
        bdev = blkdev_get_by_path(path, mode, holder);
        if (bdev == ERR_PTR(-EBUSY))
                return "device busy";

        if (IS_ERR(bdev))
                return "failed to open device";

        if (mode & FMODE_WRITE)
                bdev_get_queue(bdev)->backing_dev_info->capabilities
                        |= BDI_CAP_STABLE_WRITES;

        *ret = bdev;
        return NULL;
}

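/*
 * Cache commonly used superblock fields in native endianness in struct
 * bch_fs, and refresh each member device's cached member info:
 */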
static void bch2_sb_update(struct bch_fs *c)
{
        struct bch_sb *src = c->disk_sb;
        struct bch_sb_field_members *mi = bch2_sb_get_members(src);
        struct bch_dev *ca;
        unsigned i;

        lockdep_assert_held(&c->sb_lock);

        c->sb.uuid              = src->uuid;
        c->sb.user_uuid         = src->user_uuid;
        c->sb.block_size        = le16_to_cpu(src->block_size);
        c->sb.btree_node_size   = BCH_SB_BTREE_NODE_SIZE(src);
        c->sb.nr_devices        = src->nr_devices;
        c->sb.clean             = BCH_SB_CLEAN(src);
        c->sb.str_hash_type     = BCH_SB_STR_HASH_TYPE(src);
        c->sb.encryption_type   = BCH_SB_ENCRYPTION_TYPE(src);
        c->sb.time_base_lo      = le64_to_cpu(src->time_base_lo);
        c->sb.time_base_hi      = le32_to_cpu(src->time_base_hi);
        c->sb.time_precision    = le32_to_cpu(src->time_precision);

        for_each_member_device(ca, c, i)
                ca->mi = bch2_mi_to_cpu(mi->members + i);
}

/* doesn't copy member info */
static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
{
        struct bch_sb_field *src_f, *dst_f;

        dst->version            = src->version;
        dst->seq                = src->seq;
        dst->uuid               = src->uuid;
        dst->user_uuid          = src->user_uuid;
        memcpy(dst->label,      src->label, sizeof(dst->label));

        dst->block_size         = src->block_size;
        dst->nr_devices         = src->nr_devices;

        dst->time_base_lo       = src->time_base_lo;
        dst->time_base_hi       = src->time_base_hi;
        dst->time_precision     = src->time_precision;

        memcpy(dst->flags,      src->flags,     sizeof(dst->flags));
        memcpy(dst->features,   src->features,  sizeof(dst->features));
        memcpy(dst->compat,     src->compat,    sizeof(dst->compat));

        vstruct_for_each(src, src_f) {
                if (le32_to_cpu(src_f->type) == BCH_SB_FIELD_journal)
                        continue;

                dst_f = bch2_sb_field_get(dst, le32_to_cpu(src_f->type));
                dst_f = __bch2_sb_field_resize(dst, dst_f,
                                le32_to_cpu(src_f->u64s));

                memcpy(dst_f, src_f, vstruct_bytes(src_f));
        }
}

int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(src);
        unsigned journal_u64s = journal_buckets
                ? le32_to_cpu(journal_buckets->field.u64s)
                : 0;
        int ret;

        lockdep_assert_held(&c->sb_lock);

        if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
                return -ENOMEM;

        __copy_super(c->disk_sb, src);

        ret = bch2_sb_replicas_to_cpu_replicas(c);
        if (ret)
                return ret;

        bch2_sb_update(c);
        return 0;
}

int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(dst);
        unsigned journal_u64s = journal_buckets
                ? le32_to_cpu(journal_buckets->field.u64s)
                : 0;
        unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
        int ret;

        ret = bch2_sb_realloc(&ca->disk_sb, u64s);
        if (ret)
                return ret;

        __copy_super(dst, src);

        return 0;
}

/* read superblock: */

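/*
 * Read a single superblock copy at @offset. We start with a one page buffer;
 * if the superblock turns out to be bigger, grow the buffer and reread:
 */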
static const char *read_one_super(struct bcache_superblock *sb, u64 offset)
{
        struct bch_csum csum;
        size_t bytes;
        unsigned order;
reread:
        bio_reset(sb->bio);
        sb->bio->bi_bdev = sb->bdev;
        sb->bio->bi_iter.bi_sector = offset;
        sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
        bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
        bch2_bio_map(sb->bio, sb->sb);

        if (submit_bio_wait(sb->bio))
                return "IO error";

        if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
                return "Not a bcachefs superblock";

        if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4)
                return "Unsupported superblock version";

        bytes = vstruct_bytes(sb->sb);

        if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
                return "Bad superblock: too big";

        order = get_order(bytes);
        if (order > sb->page_order) {
                if (__bch2_super_realloc(sb, order))
                        return "cannot allocate memory";
                goto reread;
        }

        if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
                return "unknown csum type";

        /* XXX: verify MACs */
        csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
                            (struct nonce) { 0 }, sb->sb);

        if (bch2_crc_cmp(csum, sb->sb->csum))
                return "bad checksum reading superblock";

        return NULL;
}

const char *bch2_read_super(struct bcache_superblock *sb,
                           struct bch_opts opts,
                           const char *path)
{
        u64 offset = opt_defined(opts.sb) ? opts.sb : BCH_SB_SECTOR;
        struct bch_sb_layout layout;
        const char *err;
        unsigned i;

        memset(sb, 0, sizeof(*sb));
        sb->mode = FMODE_READ;

        if (!(opt_defined(opts.noexcl) && opts.noexcl))
                sb->mode |= FMODE_EXCL;

        if (!(opt_defined(opts.nochanges) && opts.nochanges))
                sb->mode |= FMODE_WRITE;

        err = bch2_blkdev_open(path, sb->mode, sb, &sb->bdev);
        if (err)
                return err;

        err = "cannot allocate memory";
        if (__bch2_super_realloc(sb, 0))
                goto err;

        err = "dynamic fault";
        if (bch2_fs_init_fault("read_super"))
                goto err;

        err = read_one_super(sb, offset);
        if (!err)
                goto got_super;

        if (offset != BCH_SB_SECTOR) {
                pr_err("error reading superblock: %s", err);
                goto err;
        }

        pr_err("error reading default superblock: %s", err);

        /*
         * Error reading primary superblock - read location of backup
         * superblocks:
         */
        bio_reset(sb->bio);
        sb->bio->bi_bdev = sb->bdev;
        sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
        sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
        bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
        /*
         * use sb buffer to read layout, since sb buffer is page aligned but
         * layout won't be:
         */
        bch2_bio_map(sb->bio, sb->sb);

        err = "IO error";
        if (submit_bio_wait(sb->bio))
                goto err;

        memcpy(&layout, sb->sb, sizeof(layout));
        err = validate_sb_layout(&layout);
        if (err)
                goto err;

        for (i = 0; i < layout.nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout.sb_offset[i]);

                if (offset == BCH_SB_SECTOR)
                        continue;

                err = read_one_super(sb, offset);
                if (!err)
                        goto got_super;
        }
        goto err;
got_super:
        pr_debug("read sb version %llu, flags %llu, seq %llu, u64s %u",
                 le64_to_cpu(sb->sb->version),
                 le64_to_cpu(sb->sb->flags),
                 le64_to_cpu(sb->sb->seq),
                 le32_to_cpu(sb->sb->u64s));

        err = "Superblock block size smaller than device block size";
        if (le16_to_cpu(sb->sb->block_size) << 9 <
            bdev_logical_block_size(sb->bdev))
                goto err;

        return NULL;
err:
        bch2_free_super(sb);
        return err;
}

/* write superblock: */

static void write_super_endio(struct bio *bio)
{
        struct bch_dev *ca = bio->bi_private;

        /* XXX: return errors directly */

        bch2_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");

        closure_put(&ca->fs->sb_write);
        percpu_ref_put(&ca->io_ref);
}

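/*
 * Queue a write of superblock copy @idx on @ca; returns false if @idx is
 * past the number of copies in the layout or the device's io ref can't be
 * taken. The checksum is regenerated before the write is submitted:
 */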
static bool write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
        struct bch_sb *sb = ca->disk_sb.sb;
        struct bio *bio = ca->disk_sb.bio;

        if (idx >= sb->layout.nr_superblocks)
                return false;

        if (!percpu_ref_tryget(&ca->io_ref))
                return false;

        sb->offset = sb->layout.sb_offset[idx];

        SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
        sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
                                (struct nonce) { 0 }, sb);

        bio_reset(bio);
        bio->bi_bdev            = ca->disk_sb.bdev;
        bio->bi_iter.bi_sector  = le64_to_cpu(sb->offset);
        bio->bi_iter.bi_size    =
                roundup(vstruct_bytes(sb),
                        bdev_logical_block_size(ca->disk_sb.bdev));
        bio->bi_end_io          = write_super_endio;
        bio->bi_private         = ca;
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
        bch2_bio_map(bio, sb);

        closure_bio_submit(bio, &c->sb_write);
        return true;
}

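/*
 * Write the superblock to every online device: each member's copy is
 * regenerated from the filesystem superblock, revalidated, then written one
 * copy index at a time across all devices, syncing between passes:
 */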
void bch2_write_super(struct bch_fs *c)
{
        struct closure *cl = &c->sb_write;
        struct bch_dev *ca;
        unsigned i, super_idx = 0;
        const char *err;
        bool wrote;

        lockdep_assert_held(&c->sb_lock);

        closure_init_stack(cl);

        le64_add_cpu(&c->disk_sb->seq, 1);

        for_each_online_member(ca, c, i)
                bch2_sb_from_fs(c, ca);

        for_each_online_member(ca, c, i) {
                err = bch2_sb_validate(&ca->disk_sb);
                if (err) {
                        bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
                        goto out;
                }
        }

        if (c->opts.nochanges ||
            test_bit(BCH_FS_ERROR, &c->flags))
                goto out;

        do {
                wrote = false;
                for_each_online_member(ca, c, i)
                        if (write_one_super(c, ca, super_idx))
                                wrote = true;

                closure_sync(cl);
                super_idx++;
        } while (wrote);
out:
        /* Make new options visible after they're persistent: */
        bch2_sb_update(c);
}

/* replica information: */

static inline struct bch_replicas_entry *
replicas_entry_next(struct bch_replicas_entry *i)
{
        return (void *) i + offsetof(struct bch_replicas_entry, devs) + i->nr;
}

#define for_each_replicas_entry(_r, _i)                                 \
        for (_i = (_r)->entries;                                        \
             (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
             (_i) = replicas_entry_next(_i))

static void bch2_sb_replicas_nr_entries(struct bch_sb_field_replicas *r,
                                        unsigned *nr,
                                        unsigned *bytes,
                                        unsigned *max_dev)
{
        struct bch_replicas_entry *i;
        unsigned j;

        *nr     = 0;
        *bytes  = sizeof(*r);
        *max_dev = 0;

        if (!r)
                return;

        for_each_replicas_entry(r, i) {
                for (j = 0; j < i->nr; j++)
                        *max_dev = max_t(unsigned, *max_dev, i->devs[j]);
                (*nr)++;
        }

        *bytes = (void *) i - (void *) r;
}

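/*
 * Convert the on disk replicas entries (variable length device lists) to the
 * fixed size in-memory representation (one device bitmap per entry), sorted
 * for eytzinger lookup:
 */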
static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
        struct bch_replicas_cpu *cpu_r;
        unsigned i, nr, bytes, max_dev, entry_size;

        bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);

        entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
                DIV_ROUND_UP(max_dev + 1, 8);

        cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
                        nr * entry_size, GFP_NOIO);
        if (!cpu_r)
                return NULL;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        if (nr) {
                struct bch_replicas_cpu_entry *dst =
                        cpu_replicas_entry(cpu_r, 0);
                struct bch_replicas_entry *src = sb_r->entries;

                while (dst < cpu_replicas_entry(cpu_r, nr)) {
                        dst->data_type = src->data_type;
                        for (i = 0; i < src->nr; i++)
                                replicas_set_dev(dst, src->devs[i]);

                        src     = replicas_entry_next(src);
                        dst     = (void *) dst + entry_size;
                }
        }

        eytzinger0_sort(cpu_r->entries,
                        cpu_r->nr,
                        cpu_r->entry_size,
                        memcmp, NULL);
        return cpu_r;
}

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_cpu *cpu_r, *old_r;

        lockdep_assert_held(&c->sb_lock);

        sb_r    = bch2_sb_get_replicas(c->disk_sb);
        cpu_r   = __bch2_sb_replicas_to_cpu_replicas(sb_r);
        if (!cpu_r)
                return -ENOMEM;

        old_r = c->replicas;
        rcu_assign_pointer(c->replicas, cpu_r);
        if (old_r)
                kfree_rcu(old_r, rcu);

        return 0;
}

/*
 * for when gc of replica information is in progress:
 */
static int bch2_update_gc_replicas(struct bch_fs *c,
                                   struct bch_replicas_cpu *gc_r,
                                   struct bkey_s_c_extent e,
                                   enum bch_data_types data_type)
{
        const struct bch_extent_ptr *ptr;
        struct bch_replicas_cpu_entry *new_e;
        struct bch_replicas_cpu *new;
        unsigned i, nr, entry_size, max_dev = 0;

        extent_for_each_ptr(e, ptr)
                if (!ptr->cached)
                        max_dev = max_t(unsigned, max_dev, ptr->dev);

        entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
                DIV_ROUND_UP(max_dev + 1, 8);
        entry_size = max(entry_size, gc_r->entry_size);
        nr = gc_r->nr + 1;

        new = kzalloc(sizeof(struct bch_replicas_cpu) +
                      nr * entry_size, GFP_NOIO);
        if (!new)
                return -ENOMEM;

        new->nr         = nr;
        new->entry_size = entry_size;

        for (i = 0; i < gc_r->nr; i++)
                memcpy(cpu_replicas_entry(new, i),
                       cpu_replicas_entry(gc_r, i),
                       gc_r->entry_size);

        new_e = cpu_replicas_entry(new, nr - 1);
        new_e->data_type = data_type;

        extent_for_each_ptr(e, ptr)
                if (!ptr->cached)
                        replicas_set_dev(new_e, ptr->dev);

        eytzinger0_sort(new->entries,
                        new->nr,
                        new->entry_size,
                        memcmp, NULL);

        rcu_assign_pointer(c->replicas_gc, new);
        kfree_rcu(gc_r, rcu);
        return 0;
}

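/*
 * Slowpath for marking an extent's replicas in the superblock: append a new
 * entry to the replicas section, rebuild the in-memory copy from it, and
 * write the superblock before returning:
 */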
int bch2_check_mark_super_slowpath(struct bch_fs *c, struct bkey_s_c_extent e,
                                   enum bch_data_types data_type)
{
        struct bch_replicas_cpu *gc_r;
        const struct bch_extent_ptr *ptr;
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *new_entry;
        unsigned new_entry_bytes, nr, bytes, max_dev;
        int ret = 0;

        mutex_lock(&c->sb_lock);

        gc_r = rcu_dereference_protected(c->replicas_gc,
                                         lockdep_is_held(&c->sb_lock));
        if (gc_r &&
            !replicas_has_extent(gc_r, e, data_type)) {
                ret = bch2_update_gc_replicas(c, gc_r, e, data_type);
                if (ret)
                        goto err;
        }

        /* recheck, might have raced */
        if (bch2_sb_has_replicas(c, e, data_type)) {
                mutex_unlock(&c->sb_lock);
                return 0;
        }

        new_entry_bytes = sizeof(struct bch_replicas_entry) +
                bch2_extent_nr_dirty_ptrs(e.s_c);

        sb_r = bch2_sb_get_replicas(c->disk_sb);

        bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);

        sb_r = bch2_fs_sb_resize_replicas(c,
                        DIV_ROUND_UP(sizeof(*sb_r) + bytes + new_entry_bytes,
                                     sizeof(u64)));
        if (!sb_r) {
                ret = -ENOSPC;
                goto err;
        }

        new_entry = (void *) sb_r + bytes;
        new_entry->data_type = data_type;
        new_entry->nr = 0;

        extent_for_each_ptr(e, ptr)
                if (!ptr->cached)
                        new_entry->devs[new_entry->nr++] = ptr->dev;

        ret = bch2_sb_replicas_to_cpu_replicas(c);
        if (ret) {
                memset(new_entry, 0,
                       vstruct_end(&sb_r->field) - (void *) new_entry);
                goto err;
        }

        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        return ret;
}

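/*
 * For each data type, compute the minimum number of online replicas and the
 * maximum number of offline replicas across all replicas entries, optionally
 * treating @dev_to_offline as if it were already offline:
 */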
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
                                              struct bch_dev *dev_to_offline)
{
        struct bch_replicas_cpu_entry *e;
        struct bch_replicas_cpu *r;
        unsigned i, dev, dev_slots, nr_online, nr_offline;
        struct replicas_status ret;

        memset(&ret, 0, sizeof(ret));

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                ret.replicas[i].nr_online = UINT_MAX;

        rcu_read_lock();
        r = rcu_dereference(c->replicas);
        dev_slots = min_t(unsigned, replicas_dev_slots(r), c->sb.nr_devices);

        for (i = 0; i < r->nr; i++) {
                e = cpu_replicas_entry(r, i);

                BUG_ON(e->data_type >= ARRAY_SIZE(ret.replicas));

                nr_online = nr_offline = 0;

                for (dev = 0; dev < dev_slots; dev++) {
                        if (!replicas_test_dev(e, dev))
                                continue;

                        if (bch2_dev_is_online(c->devs[dev]) &&
                            c->devs[dev] != dev_to_offline)
                                nr_online++;
                        else
                                nr_offline++;
                }

                ret.replicas[e->data_type].nr_online =
                        min(ret.replicas[e->data_type].nr_online,
                            nr_online);

                ret.replicas[e->data_type].nr_offline =
                        max(ret.replicas[e->data_type].nr_offline,
                            nr_offline);
        }

        rcu_read_unlock();

        return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
        return __bch2_replicas_status(c, NULL);
}

unsigned bch2_replicas_online(struct bch_fs *c, bool meta)
{
        struct replicas_status s = bch2_replicas_status(c);

        return meta
                ? min(s.replicas[BCH_DATA_JOURNAL].nr_online,
                      s.replicas[BCH_DATA_BTREE].nr_online)
                : s.replicas[BCH_DATA_USER].nr_online;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_cpu_entry *e;
        struct bch_replicas_cpu *r;
        unsigned i, ret = 0;

        rcu_read_lock();
        r = rcu_dereference(c->replicas);

        if (ca->dev_idx >= replicas_dev_slots(r))
                goto out;

        for (i = 0; i < r->nr; i++) {
                e = cpu_replicas_entry(r, i);

                if (replicas_test_dev(e, ca->dev_idx))
                        ret |= 1 << e->data_type;
        }
out:
        rcu_read_unlock();

        return ret;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb)
{
        struct bch_sb_field_members *mi;
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_cpu *cpu_r = NULL;
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        mi      = bch2_sb_get_members(sb);
        sb_r    = bch2_sb_get_replicas(sb);
        if (!sb_r)
                return NULL;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: too many devices";
                if (e->nr >= BCH_REPLICAS_MAX)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
        if (!cpu_r)
                goto err;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_cpu_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_cpu_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                err = "duplicate replicas entry";
                if (!memcmp(l, r, cpu_r->entry_size))
                        goto err;
        }

        err = NULL;
err:
        kfree(cpu_r);
        return err;
}

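/*
 * Finish a replicas gc pass: serialize the gc copy back into the superblock
 * replicas section, make it the live in-memory copy, and write the
 * superblock:
 */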
int bch2_replicas_gc_end(struct bch_fs *c, int err)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_cpu *r, *old_r;
        struct bch_replicas_entry *dst_e;
        size_t i, j, bytes, dev_slots;
        int ret = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);

        r = rcu_dereference_protected(c->replicas_gc,
                                      lockdep_is_held(&c->sb_lock));

        if (err) {
                rcu_assign_pointer(c->replicas_gc, NULL);
                kfree_rcu(r, rcu);
                goto err;
        }

        dev_slots = replicas_dev_slots(r);

        bytes = sizeof(struct bch_sb_field_replicas);

        for (i = 0; i < r->nr; i++) {
                struct bch_replicas_cpu_entry *e =
                        cpu_replicas_entry(r, i);

                bytes += sizeof(struct bch_replicas_entry);
                for (j = 0; j < r->entry_size - 1; j++)
                        bytes += hweight8(e->devs[j]);
        }

        sb_r = bch2_fs_sb_resize_replicas(c,
                        DIV_ROUND_UP(sizeof(*sb_r) + bytes, sizeof(u64)));
        if (!sb_r) {
                ret = -ENOSPC;
                goto err;
        }

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst_e = sb_r->entries;
        for (i = 0; i < r->nr; i++) {
                struct bch_replicas_cpu_entry *src_e =
                        cpu_replicas_entry(r, i);

                dst_e->data_type = src_e->data_type;

                for (j = 0; j < dev_slots; j++)
                        if (replicas_test_dev(src_e, j))
                                dst_e->devs[dst_e->nr++] = j;

                dst_e = replicas_entry_next(dst_e);
        }

        old_r = rcu_dereference_protected(c->replicas,
                                          lockdep_is_held(&c->sb_lock));
        rcu_assign_pointer(c->replicas, r);
        rcu_assign_pointer(c->replicas_gc, NULL);
        kfree_rcu(old_r, rcu);

        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        return ret;
}

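/*
 * Start a replicas gc pass: the gc copy starts out holding only the entries
 * whose data types are _not_ being gc'd; entries for the types in @typemask
 * are re-added as gc finds them (see bch2_check_mark_super_slowpath()):
 */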
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_cpu *r, *src;
        unsigned i;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc);

        src = rcu_dereference_protected(c->replicas,
                                        lockdep_is_held(&c->sb_lock));

        r = kzalloc(sizeof(struct bch_replicas_cpu) +
                    src->nr * src->entry_size, GFP_NOIO);
        if (!r) {
                mutex_unlock(&c->sb_lock);
                return -ENOMEM;
        }

        r->entry_size = src->entry_size;
        r->nr = 0;

        for (i = 0; i < src->nr; i++) {
                struct bch_replicas_cpu_entry *dst_e =
                        cpu_replicas_entry(r, r->nr);
                struct bch_replicas_cpu_entry *src_e =
                        cpu_replicas_entry(src, i);
                if (!((1 << src_e->data_type) & typemask)) {
                        memcpy(dst_e, src_e, r->entry_size);
                        r->nr++;
                }
        }

        eytzinger0_sort(r->entries,
                        r->nr,
                        r->entry_size,
                        memcmp, NULL);

        rcu_assign_pointer(c->replicas_gc, r);
        mutex_unlock(&c->sb_lock);

        return 0;
}