#include <linux/backing-dev.h>
#include <linux/sort.h>

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);
static int bch2_sb_disk_groups_to_cpu(struct bch_fs *);

/* superblock fields (optional/variable size sections): */

const char * const bch2_sb_fields[] = {
#define x(name, nr)	#name,
	BCH_SB_FIELDS()
#undef x
	NULL
};

#define x(f, nr)					\
	static const char *bch2_sb_validate_##f(struct bch_sb *, struct bch_sb_field *);
	BCH_SB_FIELDS()
#undef x

struct bch_sb_field_ops {
	const char * (*validate)(struct bch_sb *, struct bch_sb_field *);
};

static const struct bch_sb_field_ops bch2_sb_field_ops[] = {
#define x(f, nr)					\
	[BCH_SB_FIELD_##f] = {				\
		.validate = bch2_sb_validate_##f,	\
	},
	BCH_SB_FIELDS()
#undef x
};

static const char *bch2_sb_field_validate(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);

	return type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type].validate(sb, f)
		: NULL;
}

struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
				       enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
		if (le32_to_cpu(f->type) == type)
			return f;
	return NULL;
}

static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
						   struct bch_sb_field *f,
						   unsigned u64s)
{
	unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;

	if (!f) {
		/* new field: append it at the end of the superblock */
		f = vstruct_last(sb);
		memset(f, 0, sizeof(u64) * u64s);
		f->u64s = cpu_to_le32(u64s);
		f->type = 0;
	} else {
		/* existing field: shift everything after it up or down */
		void *src, *dst;

		src = vstruct_end(f);
		f->u64s = cpu_to_le32(u64s);
		dst = vstruct_end(f);

		memmove(dst, src, vstruct_end(sb) - src);

		if (dst > src)
			memset(src, 0, dst - src);
	}

	le32_add_cpu(&sb->u64s, u64s - old_u64s);

	return f;
}

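/*
 * Note (added commentary, not in the original file): superblock fields are a
 * packed sequence of variable-size vstructs, so resizing one field means
 * shifting every field after it.  E.g. growing the middle field of
 * [members][crypt][replicas] by 2 u64s memmove()s the replicas section up by
 * 16 bytes and zeroes the gap.
 */
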
/* Superblock realloc/free: */

void bch2_free_super(struct bch_sb_handle *sb)
{
	if (sb->bio)
		bio_put(sb->bio);
	if (!IS_ERR_OR_NULL(sb->bdev))
		blkdev_put(sb->bdev, sb->mode);

	free_pages((unsigned long) sb->sb, sb->page_order);
	memset(sb, 0, sizeof(*sb));
}

static int __bch2_super_realloc(struct bch_sb_handle *sb, unsigned order)
{
	struct bch_sb *new_sb;
	struct bio *bio;

	if (sb->page_order >= order && sb->sb)
		return 0;

	if (dynamic_fault("bcachefs:add:super_realloc"))
		return -ENOMEM;

	bio = bio_kmalloc(GFP_KERNEL, 1 << order);
	if (!bio)
		return -ENOMEM;

	if (sb->bio)
		bio_put(sb->bio);
	sb->bio = bio;

	new_sb = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!new_sb)
		return -ENOMEM;

	if (sb->sb)
		memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);

	free_pages((unsigned long) sb->sb, sb->page_order);
	sb->sb = new_sb;

	sb->page_order = order;
	return 0;
}

static int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
{
	u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s);
	u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

	if (new_bytes > max_bytes) {
		char buf[BDEVNAME_SIZE];

		pr_err("%s: superblock too big: want %llu but have %llu",
		       bdevname(sb->bdev, buf), new_bytes, max_bytes);
		return -ENOSPC;
	}

	return __bch2_super_realloc(sb, get_order(new_bytes));
}

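/*
 * Worked example (added commentary, not in the original file): with
 * layout.sb_max_size_bits == 11, max_bytes is 512 << 11 = 1 MiB; a resize
 * that would push the superblock past that fails with -ENOSPC instead of
 * growing into the space reserved for the next superblock copy.
 */
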
static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
{
	u64 bytes = __vstruct_bytes(struct bch_sb, u64s);
	struct bch_sb *sb;
	unsigned order = get_order(bytes);

	if (c->disk_sb && order <= c->disk_sb_order)
		return 0;

	sb = (void *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!sb)
		return -ENOMEM;

	if (c->disk_sb)
		memcpy(sb, c->disk_sb, PAGE_SIZE << c->disk_sb_order);

	free_pages((unsigned long) c->disk_sb, c->disk_sb_order);

	c->disk_sb = sb;
	c->disk_sb_order = order;
	return 0;
}

struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
					  enum bch_sb_field_type type,
					  unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;

	if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
		return NULL;

	f = __bch2_sb_field_resize(sb->sb, f, u64s);
	f->type = cpu_to_le32(type);
	return f;
}

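/*
 * Usage sketch (hypothetical caller, not from this file): growing the crypt
 * section to its full size before filling it in might look like
 *
 *	struct bch_sb_field *f =
 *		bch2_sb_field_resize(&ca->disk_sb, BCH_SB_FIELD_crypt,
 *				     sizeof(struct bch_sb_field_crypt) /
 *				     sizeof(u64));
 *	if (!f)
 *		return -ENOSPC;
 */
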
struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
					     enum bch_sb_field_type type,
					     unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
		return NULL;

	/* XXX: we're not checking that offline devices have enough space */

	for_each_online_member(ca, c, i) {
		struct bch_sb_handle *sb = &ca->disk_sb;

		if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
			percpu_ref_put(&ca->ref);
			return NULL;
		}
	}

	f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
	f->type = cpu_to_le32(type);
	return f;
}

/* Superblock validate: */

static inline void __bch2_sb_layout_size_assert(void)
{
	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}

static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock layout";

	if (layout->layout_type != 0)
		return "Invalid superblock layout type";

	if (!layout->nr_superblocks)
		return "Invalid superblock layout: no superblocks";

	if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
		return "Invalid superblock layout: too many superblocks";

	max_sectors = 1 << layout->sb_max_size_bits;

	prev_offset = le64_to_cpu(layout->sb_offset[0]);

	for (i = 1; i < layout->nr_superblocks; i++) {
		offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset < prev_offset + max_sectors)
			return "Invalid superblock layout: superblocks overlap";
		prev_offset = offset;
	}

	return NULL;
}

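/*
 * Example (added commentary, not in the original file): with
 * sb_max_size_bits == 11, max_sectors is 2048 (1 MiB); offsets 8 and 2056
 * pass, while offsets 8 and 1024 are rejected as overlapping since
 * 1024 < 8 + 2048.
 */
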
const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members *mi;
	const char *err;
	u16 block_size;

	if (le64_to_cpu(sb->version) < BCH_SB_VERSION_MIN ||
	    le64_to_cpu(sb->version) > BCH_SB_VERSION_MAX)
		return "Unsupported superblock version";

	if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_MAX) {
		SET_BCH_SB_ENCODED_EXTENT_MAX_BITS(sb, 7);
		SET_BCH_SB_POSIX_ACL(sb, 1);
	}

	block_size = le16_to_cpu(sb->block_size);

	if (!is_power_of_2(block_size) ||
	    block_size > PAGE_SECTORS)
		return "Bad block size";

	if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
		return "Bad user UUID";

	if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
		return "Bad internal UUID";

	if (!sb->nr_devices ||
	    sb->nr_devices <= sb->dev_idx ||
	    sb->nr_devices > BCH_SB_MEMBERS_MAX)
		return "Bad number of member devices";

	if (!BCH_SB_META_REPLICAS_WANT(sb) ||
	    BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_META_REPLICAS_REQ(sb) ||
	    BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
	    BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
	    BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (BCH_SB_META_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid metadata checksum type";

	if (BCH_SB_DATA_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid data checksum type";

	if (BCH_SB_COMPRESSION_TYPE(sb) >= BCH_COMPRESSION_OPT_NR)
		return "Invalid compression type";

	if (!BCH_SB_BTREE_NODE_SIZE(sb))
		return "Btree node size not set";

	if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
		return "Btree node size not a power of two";

	if (BCH_SB_GC_RESERVE(sb) < 5)
		return "gc reserve percentage too small";

	if (!sb->time_precision ||
	    le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
		return "invalid time precision";

	/* validate layout */
	err = validate_sb_layout(&sb->layout);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (!f->u64s)
			return "Invalid superblock: invalid optional field";

		if (vstruct_next(f) > vstruct_last(sb))
			return "Invalid superblock: invalid optional field";
	}

	/* members must be validated first: */
	mi = bch2_sb_get_members(sb);
	if (!mi)
		return "Invalid superblock: member info area missing";

	err = bch2_sb_field_validate(sb, &mi->field);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members)
			continue;

		err = bch2_sb_field_validate(sb, f);
		if (err)
			return err;
	}

	if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_NONCE_V1 &&
	    bch2_sb_get_crypt(sb) &&
	    BCH_SB_INITIALIZED(sb))
		return "Incompatible extent nonces";

	sb->version = cpu_to_le64(BCH_SB_VERSION_MAX);

	return NULL;
}

static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(src);
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	c->sb.uuid		= src->uuid;
	c->sb.user_uuid		= src->user_uuid;
	c->sb.nr_devices	= src->nr_devices;
	c->sb.clean		= BCH_SB_CLEAN(src);
	c->sb.encryption_type	= BCH_SB_ENCRYPTION_TYPE(src);
	c->sb.encoded_extent_max = 1 << BCH_SB_ENCODED_EXTENT_MAX_BITS(src);
	c->sb.time_base_lo	= le64_to_cpu(src->time_base_lo);
	c->sb.time_base_hi	= le32_to_cpu(src->time_base_hi);
	c->sb.time_precision	= le32_to_cpu(src->time_precision);

	for_each_member_device(ca, c, i)
		ca->mi = bch2_mi_to_cpu(mi->members + i);
}

/* doesn't copy member info */
static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
{
	struct bch_sb_field *src_f, *dst_f;

	dst->version	= src->version;
	dst->seq	= src->seq;
	dst->uuid	= src->uuid;
	dst->user_uuid	= src->user_uuid;
	memcpy(dst->label, src->label, sizeof(dst->label));

	dst->block_size	= src->block_size;
	dst->nr_devices	= src->nr_devices;

	dst->time_base_lo	= src->time_base_lo;
	dst->time_base_hi	= src->time_base_hi;
	dst->time_precision	= src->time_precision;

	memcpy(dst->flags,	src->flags,	sizeof(dst->flags));
	memcpy(dst->features,	src->features,	sizeof(dst->features));
	memcpy(dst->compat,	src->compat,	sizeof(dst->compat));

	vstruct_for_each(src, src_f) {
		/* journal buckets are per device, not copied here: */
		if (src_f->type == BCH_SB_FIELD_journal)
			continue;

		dst_f = bch2_sb_field_get(dst, le32_to_cpu(src_f->type));
		dst_f = __bch2_sb_field_resize(dst, dst_f,
				le32_to_cpu(src_f->u64s));

		memcpy(dst_f, src_f, vstruct_bytes(src_f));
	}
}

int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(src);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	int ret;

	lockdep_assert_held(&c->sb_lock);

	ret = bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s);
	if (ret)
		return ret;

	__copy_super(c->disk_sb, src);

	ret = bch2_sb_replicas_to_cpu_replicas(c);
	if (ret)
		return ret;

	ret = bch2_sb_disk_groups_to_cpu(c);
	if (ret)
		return ret;

	bch2_sb_update(c);
	return 0;
}

int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(dst);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
	int ret;

	ret = bch2_sb_realloc(&ca->disk_sb, u64s);
	if (ret)
		return ret;

	__copy_super(dst, src);
	return 0;
}

/* read superblock: */

static const char *read_one_super(struct bch_sb_handle *sb, u64 offset)
{
	struct bch_csum csum;
	size_t bytes;
	unsigned order;
reread:
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = offset;
	sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	bch2_bio_map(sb->bio, sb->sb);

	if (submit_bio_wait(sb->bio))
		return "IO error";

	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock";

	if (le64_to_cpu(sb->sb->version) < BCH_SB_VERSION_MIN ||
	    le64_to_cpu(sb->sb->version) > BCH_SB_VERSION_MAX)
		return "Unsupported superblock version";

	bytes = vstruct_bytes(sb->sb);

	if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
		return "Bad superblock: too big";

	order = get_order(bytes);
	if (order > sb->page_order) {
		/* buffer too small for the optional fields: grow and reread */
		if (__bch2_super_realloc(sb, order))
			return "cannot allocate memory";
		goto reread;
	}

	if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
		return "unknown csum type";

	/* XXX: verify MACs */
	csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
			    null_nonce(), sb->sb);

	if (bch2_crc_cmp(csum, sb->sb->csum))
		return "bad checksum reading superblock";

	return NULL;
}

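/*
 * Note (added commentary, not in the original file): the first read only
 * covers PAGE_SIZE << page_order bytes; vstruct_bytes() on the header then
 * reports the true size including optional fields.  E.g. a 68 KiB superblock
 * read into a 4 KiB buffer triggers one realloc to order 5 (128 KiB with
 * 4 KiB pages) followed by a reread.
 */
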
int bch2_read_super(const char *path, struct bch_opts *opts,
		    struct bch_sb_handle *sb)
{
	u64 offset = opt_get(*opts, sb);
	struct bch_sb_layout layout;
	const char *err;
	__le64 *i;
	int ret;

	memset(sb, 0, sizeof(*sb));
	sb->mode = FMODE_READ;

	if (!opt_get(*opts, noexcl))
		sb->mode |= FMODE_EXCL;

	if (!opt_get(*opts, nochanges))
		sb->mode |= FMODE_WRITE;

	sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
	if (IS_ERR(sb->bdev) &&
	    PTR_ERR(sb->bdev) == -EACCES &&
	    opt_get(*opts, read_only)) {
		sb->mode &= ~FMODE_WRITE;

		sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
		if (!IS_ERR(sb->bdev))
			opt_set(*opts, nochanges, true);
	}

	if (IS_ERR(sb->bdev))
		return PTR_ERR(sb->bdev);

	err = "cannot allocate memory";
	ret = __bch2_super_realloc(sb, 0);
	if (ret)
		goto err;

	ret = -EFAULT;
	err = "dynamic fault";
	if (bch2_fs_init_fault("read_super"))
		goto err;

	ret = -EINVAL;
	err = read_one_super(sb, offset);
	if (!err)
		goto got_super;

	if (opt_defined(*opts, sb))
		goto err;

	pr_err("error reading default superblock: %s", err);

	/*
	 * Error reading primary superblock - read location of backup
	 * superblocks:
	 */
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
	sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	/*
	 * use sb buffer to read layout, since sb buffer is page aligned but
	 * layout won't be:
	 */
	bch2_bio_map(sb->bio, sb->sb);

	err = "IO error";
	if (submit_bio_wait(sb->bio))
		goto err;

	memcpy(&layout, sb->sb, sizeof(layout));
	err = validate_sb_layout(&layout);
	if (err)
		goto err;

	for (i = layout.sb_offset;
	     i < layout.sb_offset + layout.nr_superblocks; i++) {
		offset = le64_to_cpu(*i);

		if (offset == opt_get(*opts, sb))
			continue;

		err = read_one_super(sb, offset);
		if (!err)
			goto got_super;
	}

	ret = -EINVAL;
	goto err;

got_super:
	err = "Superblock block size smaller than device block size";
	ret = -EINVAL;
	if (le16_to_cpu(sb->sb->block_size) << 9 <
	    bdev_logical_block_size(sb->bdev))
		goto err;

	if (sb->mode & FMODE_WRITE)
		bdev_get_queue(sb->bdev)->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	return 0;
err:
	bch2_free_super(sb);
	pr_err("error reading superblock: %s", err);
	return ret;
}

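/*
 * Usage sketch (hypothetical caller, not from this file):
 *
 *	struct bch_sb_handle sb;
 *	struct bch_opts opts = bch2_opts_empty();
 *	int ret = bch2_read_super("/dev/sda1", &opts, &sb);
 *
 * On success the handle owns the opened block device and a validated
 * superblock buffer; the caller eventually releases both with
 * bch2_free_super().
 */
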
/* write superblock: */

static void write_super_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;

	/* XXX: return errors directly */

	if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write"))
		ca->sb_write_error = 1;

	closure_put(&ca->fs->sb_write);
	percpu_ref_put(&ca->io_ref);
}

static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	sb->offset = sb->layout.sb_offset[idx];

	SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
				null_nonce(), sb);

	bio_reset(bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
	bio->bi_iter.bi_size	=
		roundup(vstruct_bytes(sb),
			bdev_logical_block_size(ca->disk_sb.bdev));
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch2_bio_map(bio, sb);

	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}

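/*
 * Example (added commentary, not in the original file): the write size is
 * rounded up to the device's logical block size, so a 3000-byte superblock
 * on a 4096-byte-sector device goes out as one full 4096-byte block.
 */
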
void bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	unsigned i, sb = 0, nr_wrote;
	const char *err;
	struct bch_devs_mask sb_written;
	bool wrote, can_mount_without_written, can_mount_with_written;

	lockdep_assert_held(&c->sb_lock);

	closure_init_stack(cl);
	memset(&sb_written, 0, sizeof(sb_written));

	le64_add_cpu(&c->disk_sb->seq, 1);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
		err = bch2_sb_validate(&ca->disk_sb);
		if (err) {
			bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
			goto out;
		}
	}

	if (c->opts.nochanges ||
	    test_bit(BCH_FS_ERROR, &c->flags))
		goto out;

	for_each_online_member(ca, c, i) {
		__set_bit(ca->dev_idx, sb_written.d);
		ca->sb_write_error = 0;
	}

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
			if (sb < ca->disk_sb.sb->layout.nr_superblocks) {
				write_one_super(c, ca, sb);
				wrote = true;
			}
		closure_sync(cl);
		sb++;
	} while (wrote);

	for_each_online_member(ca, c, i)
		if (ca->sb_write_error)
			__clear_bit(ca->dev_idx, sb_written.d);

	nr_wrote = dev_mask_nr(&sb_written);

	can_mount_with_written =
		bch2_have_enough_devs(c,
			__bch2_replicas_status(c, sb_written),
			BCH_FORCE_IF_DEGRADED);

	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
		sb_written.d[i] = ~sb_written.d[i];

	can_mount_without_written =
		bch2_have_enough_devs(c,
			__bch2_replicas_status(c, sb_written),
			BCH_FORCE_IF_DEGRADED);

	/*
	 * If we would be able to mount _without_ the devices we successfully
	 * wrote superblocks to, we weren't able to write to enough devices:
	 *
	 * Exception: if we can mount without the successes because we haven't
	 * written anything (new filesystem), we continue if we'd be able to
	 * mount with the devices we did successfully write to:
	 */
	bch2_fs_fatal_err_on(!nr_wrote ||
			     (can_mount_without_written &&
			      !can_mount_with_written), c,
		"Unable to write superblock to sufficient devices");
out:
	/* Make new options visible after they're persistent: */
	bch2_sb_update(c);
}

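/*
 * Note (added commentary, not in the original file): sb_written starts as
 * the set of devices whose superblock writes succeeded; inverting every word
 * of the mask turns it into the set of devices that were NOT written, which
 * is exactly what the can_mount_without_written check needs.
 */
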
/* BCH_SB_FIELD_journal: */

static int u64_cmp(const void *_l, const void *_r)
{
	u64 l = *((const u64 *) _l), r = *((const u64 *) _r);

	return l < r ? -1 : l > r ? 1 : 0;
}

static const char *bch2_sb_validate_journal(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_journal *journal = field_to_type(f, journal);
	struct bch_member *m = bch2_sb_get_members(sb)->members + sb->dev_idx;
	const char *err;
	unsigned nr;
	unsigned i;
	u64 *b;

	journal = bch2_sb_get_journal(sb);
	if (!journal)
		return NULL;

	nr = bch2_nr_journal_buckets(journal);
	if (!nr)
		return NULL;

	b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
	if (!b)
		return "cannot allocate memory";

	for (i = 0; i < nr; i++)
		b[i] = le64_to_cpu(journal->buckets[i]);

	sort(b, nr, sizeof(u64), u64_cmp, NULL);

	err = "journal bucket at sector 0";
	if (!b[0])
		goto err;

	err = "journal bucket before first bucket";
	if (m && b[0] < le16_to_cpu(m->first_bucket))
		goto err;

	err = "journal bucket past end of device";
	if (m && b[nr - 1] >= le64_to_cpu(m->nbuckets))
		goto err;

	err = "duplicate journal buckets";
	for (i = 0; i + 1 < nr; i++)
		if (b[i] == b[i + 1])
			goto err;

	err = NULL;
err:
	kfree(b);
	return err;
}

/* BCH_SB_FIELD_members: */

static const char *bch2_sb_validate_members(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	struct bch_member *m;

	if ((void *) (mi->members + sb->nr_devices) >
	    vstruct_end(&mi->field))
		return "Invalid superblock: bad member info";

	for (m = mi->members;
	     m < mi->members + sb->nr_devices;
	     m++) {
		if (!bch2_member_exists(m))
			continue;

		if (le64_to_cpu(m->nbuckets) > LONG_MAX)
			return "Too many buckets";

		if (le64_to_cpu(m->nbuckets) -
		    le16_to_cpu(m->first_bucket) < 1 << 10)
			return "Not enough buckets";

		if (le16_to_cpu(m->bucket_size) <
		    le16_to_cpu(sb->block_size))
			return "bucket size smaller than block size";

		if (le16_to_cpu(m->bucket_size) <
		    BCH_SB_BTREE_NODE_SIZE(sb))
			return "bucket size smaller than btree node size";
	}

	if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_MAX)
		for (m = mi->members;
		     m < mi->members + sb->nr_devices;
		     m++)
			SET_BCH_MEMBER_DATA_ALLOWED(m, ~0);

	return NULL;
}

/* BCH_SB_FIELD_crypt: */

static const char *bch2_sb_validate_crypt(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) != sizeof(*crypt))
		return "invalid field crypt: wrong size";

	if (BCH_CRYPT_KDF_TYPE(crypt))
		return "invalid field crypt: bad kdf type";

	return NULL;
}

/* BCH_SB_FIELD_replicas: */

/* Replicas tracking - in memory: */

#define for_each_cpu_replicas_entry(_r, _i)				\
	for (_i = (_r)->entries;					\
	     (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
	     _i = (void *) (_i) + (_r)->entry_size)

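/*
 * Note (added commentary, not in the original file): in-memory replicas
 * entries have a fixed stride: one data_type byte followed by a device
 * bitmap, entry_size bytes in total.  With entry_size == 4 there are
 * (4 - 1) * 8 = 24 device slots per entry (see replicas_dev_slots()).
 */
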
static inline struct bch_replicas_cpu_entry *
cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
{
	return (void *) r->entries + r->entry_size * i;
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

static inline bool replicas_test_dev(struct bch_replicas_cpu_entry *e,
				     unsigned dev)
{
	return (e->devs[dev >> 3] & (1 << (dev & 7))) != 0;
}

static inline void replicas_set_dev(struct bch_replicas_cpu_entry *e,
				    unsigned dev)
{
	e->devs[dev >> 3] |= 1 << (dev & 7);
}

static inline unsigned replicas_dev_slots(struct bch_replicas_cpu *r)
{
	return (r->entry_size -
		offsetof(struct bch_replicas_cpu_entry, devs)) * 8;
}

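/*
 * Example (added commentary, not in the original file): device 10 lives in
 * devs[10 >> 3] == devs[1], at bit 10 & 7 == 2, so replicas_set_dev(e, 10)
 * does e->devs[1] |= 0x04.
 */
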
int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *r,
			      char *buf, size_t size)
{
	char *out = buf, *end = out + size;
	struct bch_replicas_cpu_entry *e;
	bool first = true;
	unsigned i;

	for_each_cpu_replicas_entry(r, e) {
		bool first_e = true;

		if (!first)
			out += scnprintf(out, end - out, " ");
		first = false;

		out += scnprintf(out, end - out, "%u: [", e->data_type);

		for (i = 0; i < replicas_dev_slots(r); i++)
			if (replicas_test_dev(e, i)) {
				if (!first_e)
					out += scnprintf(out, end - out, " ");
				first_e = false;
				out += scnprintf(out, end - out, "%u", i);
			}
		out += scnprintf(out, end - out, "]");
	}

	return out - buf;
}

static inline unsigned bkey_to_replicas(struct bkey_s_c_extent e,
					enum bch_data_type data_type,
					struct bch_replicas_cpu_entry *r,
					unsigned *max_dev)
{
	const struct bch_extent_ptr *ptr;
	unsigned nr = 0;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	memset(r, 0, sizeof(*r));
	r->data_type = data_type;

	*max_dev = 0;

	extent_for_each_ptr(e, ptr)
		if (!ptr->cached) {
			*max_dev = max_t(unsigned, *max_dev, ptr->dev);
			replicas_set_dev(r, ptr->dev);
			nr++;
		}
	return nr;
}

static inline void devlist_to_replicas(struct bch_devs_list devs,
				       enum bch_data_type data_type,
				       struct bch_replicas_cpu_entry *r,
				       unsigned *max_dev)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	memset(r, 0, sizeof(*r));
	r->data_type = data_type;

	*max_dev = 0;

	for (i = 0; i < devs.nr; i++) {
		*max_dev = max_t(unsigned, *max_dev, devs.devs[i]);
		replicas_set_dev(r, devs.devs[i]);
	}
}

static struct bch_replicas_cpu *
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_cpu_entry new_entry,
		       unsigned max_dev)
{
	struct bch_replicas_cpu *new;
	unsigned i, nr, entry_size;

	entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
		DIV_ROUND_UP(max_dev + 1, 8);
	entry_size = max(entry_size, old->entry_size);
	nr = old->nr + 1;

	new = kzalloc(sizeof(struct bch_replicas_cpu) +
		      nr * entry_size, GFP_NOIO);
	if (!new)
		return NULL;

	new->nr		= nr;
	new->entry_size	= entry_size;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(new, i),
		       cpu_replicas_entry(old, i),
		       min(new->entry_size, old->entry_size));

	memcpy(cpu_replicas_entry(new, old->nr),
	       &new_entry,
	       new->entry_size);

	bch2_cpu_replicas_sort(new);
	return new;
}

static bool replicas_has_entry(struct bch_replicas_cpu *r,
			       struct bch_replicas_cpu_entry search,
			       unsigned max_dev)
{
	/* entries are kept in eytzinger order by bch2_cpu_replicas_sort() */
	return max_dev < replicas_dev_slots(r) &&
		eytzinger0_find(r->entries, r->nr,
				r->entry_size,
				memcmp, &search) < r->nr;
}

noinline
static int bch2_check_mark_super_slowpath(struct bch_fs *c,
					  struct bch_replicas_cpu_entry new_entry,
					  unsigned max_dev)
{
	struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r = NULL;
	int ret = 0;

	mutex_lock(&c->sb_lock);

	old_gc = rcu_dereference_protected(c->replicas_gc,
					   lockdep_is_held(&c->sb_lock));
	if (old_gc && !replicas_has_entry(old_gc, new_entry, max_dev)) {
		new_gc = cpu_replicas_add_entry(old_gc, new_entry, max_dev);
		if (!new_gc)
			goto err;
	}

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));
	if (!replicas_has_entry(old_r, new_entry, max_dev)) {
		new_r = cpu_replicas_add_entry(old_r, new_entry, max_dev);
		if (!new_r)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
		if (ret)
			goto err;
	}

	/* allocations done, now commit: */

	if (new_r)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */

	if (new_gc) {
		rcu_assign_pointer(c->replicas_gc, new_gc);
		kfree_rcu(old_gc, rcu);
	}

	if (new_r) {
		rcu_assign_pointer(c->replicas, new_r);
		kfree_rcu(old_r, rcu);
	}

	mutex_unlock(&c->sb_lock);
	return 0;
err:
	mutex_unlock(&c->sb_lock);
	kfree(new_gc);
	kfree(new_r);
	return ret ?: -ENOMEM;
}

int bch2_check_mark_super(struct bch_fs *c,
			  enum bch_data_type data_type,
			  struct bch_devs_list devs)
{
	struct bch_replicas_cpu_entry search;
	struct bch_replicas_cpu *r, *gc_r;
	unsigned max_dev;
	bool marked;

	if (!devs.nr)
		return 0;

	devlist_to_replicas(devs, data_type, &search, &max_dev);

	rcu_read_lock();
	r = rcu_dereference(c->replicas);
	gc_r = rcu_dereference(c->replicas_gc);
	marked = replicas_has_entry(r, search, max_dev) &&
		(!likely(gc_r) || replicas_has_entry(gc_r, search, max_dev));
	rcu_read_unlock();

	return likely(marked) ? 0
		: bch2_check_mark_super_slowpath(c, search, max_dev);
}

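/*
 * Note (added commentary, not in the original file): marking is a classic
 * RCU fast path/slow path split.  The common case is a read-side lookup of
 * an entry that already exists; only a never-before-seen (data_type, device
 * set) combination takes sb_lock, rebuilds the tables and rewrites the
 * superblock before the new entry becomes visible.
 */
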
int bch2_replicas_gc_end(struct bch_fs *c, int err)
{
	struct bch_replicas_cpu *new_r, *old_r;
	int ret = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	new_r = rcu_dereference_protected(c->replicas_gc,
					  lockdep_is_held(&c->sb_lock));

	if (err) {
		rcu_assign_pointer(c->replicas_gc, NULL);
		kfree_rcu(new_r, rcu);
		ret = err;
		goto err;
	}

	if (bch2_cpu_replicas_to_sb_replicas(c, new_r)) {
		ret = -ENOSPC;
		goto err;
	}

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));

	rcu_assign_pointer(c->replicas, new_r);
	rcu_assign_pointer(c->replicas_gc, NULL);
	kfree_rcu(old_r, rcu);

	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_cpu *dst, *src;
	struct bch_replicas_cpu_entry *e;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc);

	src = rcu_dereference_protected(c->replicas,
					lockdep_is_held(&c->sb_lock));

	dst = kzalloc(sizeof(struct bch_replicas_cpu) +
		      src->nr * src->entry_size, GFP_NOIO);
	if (!dst) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	dst->nr		= 0;
	dst->entry_size	= src->entry_size;

	/* keep entries whose data type is NOT being gc'd: */
	for_each_cpu_replicas_entry(src, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(dst, dst->nr++),
			       e, dst->entry_size);

	bch2_cpu_replicas_sort(dst);

	rcu_assign_pointer(c->replicas_gc, dst);
	mutex_unlock(&c->sb_lock);

	return 0;
}

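/*
 * Usage sketch (hypothetical caller, not from this file): gc of user data
 * replicas entries would bracket the mark loop with
 *
 *	bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
 *	... re-mark everything still referenced ...
 *	bch2_replicas_gc_end(c, ret);
 *
 * Entries of other data types are carried over untouched; user-data entries
 * survive only if re-marked while gc runs.
 */
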
/* Replicas tracking - superblock: */

static void bch2_sb_replicas_nr_entries(struct bch_sb_field_replicas *r,
					unsigned *nr,
					unsigned *bytes,
					unsigned *max_dev)
{
	struct bch_replicas_entry *i;
	unsigned j;

	*nr	= 0;
	*bytes	= sizeof(*r);
	*max_dev = 0;

	if (!r)
		return;

	for_each_replicas_entry(r, i) {
		for (j = 0; j < i->nr; j++)
			*max_dev = max_t(unsigned, *max_dev, i->devs[j]);
		(*nr)++;
	}

	*bytes = (void *) i - (void *) r;
}

static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
	struct bch_replicas_cpu *cpu_r;
	unsigned i, nr, bytes, max_dev, entry_size;

	bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);

	entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
		DIV_ROUND_UP(max_dev + 1, 8);

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	if (nr) {
		struct bch_replicas_cpu_entry *dst =
			cpu_replicas_entry(cpu_r, 0);
		struct bch_replicas_entry *src = sb_r->entries;

		while (dst < cpu_replicas_entry(cpu_r, nr)) {
			dst->data_type = src->data_type;
			for (i = 0; i < src->nr; i++)
				replicas_set_dev(dst, src->devs[i]);

			src = replicas_entry_next(src);
			dst = (void *) dst + entry_size;
		}
	}

	bch2_cpu_replicas_sort(cpu_r);
	return cpu_r;
}

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_cpu *cpu_r, *old_r;

	sb_r	= bch2_sb_get_replicas(c->disk_sb);
	cpu_r	= __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		return -ENOMEM;

	old_r = rcu_dereference_check(c->replicas, lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas, cpu_r);
	if (old_r)
		kfree_rcu(old_r, rcu);

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *sb_e;
	struct bch_replicas_cpu_entry *e;
	size_t i, bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, e) {
		bytes += sizeof(struct bch_replicas_entry);
		for (i = 0; i < r->entry_size - 1; i++)
			bytes += hweight8(e->devs[i]);
	}

	sb_r = bch2_fs_sb_resize_replicas(c,
			DIV_ROUND_UP(sizeof(*sb_r) + bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	sb_e = sb_r->entries;
	for_each_cpu_replicas_entry(r, e) {
		sb_e->data_type = e->data_type;

		for (i = 0; i < replicas_dev_slots(r); i++)
			if (replicas_test_dev(e, i))
				sb_e->devs[sb_e->nr++] = i;

		sb_e = replicas_entry_next(sb_e);

		BUG_ON((void *) sb_e > vstruct_end(&sb_r->field));
	}

	return 0;
}

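/*
 * Example (added commentary, not in the original file): an in-memory entry
 * for user data on devices {0, 2, 5} serializes as one bch_replicas_entry
 * header plus three device bytes; summing hweight8() over the device bitmap
 * is exactly that per-device byte count.
 */
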
static const char *bch2_sb_validate_replicas(struct bch_sb *sb,
					     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr)
			goto err;

		err = "invalid replicas entry: too many devices";
		if (e->nr >= BCH_REPLICAS_MAX)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_cpu_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_cpu_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		err = "duplicate replicas entry";
		if (!memcmp(l, r, cpu_r->entry_size))
			goto err;
	}

	err = NULL;
err:
	kfree(cpu_r);
	return err;
}

int bch2_sb_replicas_to_text(struct bch_sb_field_replicas *r, char *buf, size_t size)
{
	char *out = buf, *end = out + size;
	struct bch_replicas_entry *e;
	bool first = true;
	unsigned i;

	if (!r) {
		out += scnprintf(out, end - out, "(no replicas section found)");
		goto out;
	}

	for_each_replicas_entry(r, e) {
		if (!first)
			out += scnprintf(out, end - out, " ");
		first = false;

		out += scnprintf(out, end - out, "%u: [", e->data_type);

		for (i = 0; i < e->nr; i++)
			out += scnprintf(out, end - out,
					 i ? " %u" : "%u", e->devs[i]);
		out += scnprintf(out, end - out, "]");
	}
out:
	return out - buf;
}

/* Query replicas: */

bool bch2_sb_has_replicas(struct bch_fs *c,
			  enum bch_data_type data_type,
			  struct bch_devs_list devs)
{
	struct bch_replicas_cpu_entry search;
	unsigned max_dev;
	bool ret;

	if (!devs.nr)
		return true;

	devlist_to_replicas(devs, data_type, &search, &max_dev);

	rcu_read_lock();
	ret = replicas_has_entry(rcu_dereference(c->replicas),
				 search, max_dev);
	rcu_read_unlock();

	return ret;
}

struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_cpu_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, dev, dev_slots, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].nr_online = UINT_MAX;

	mi = bch2_sb_get_members(c->disk_sb);
	rcu_read_lock();

	r = rcu_dereference(c->replicas);
	dev_slots = replicas_dev_slots(r);

	for_each_cpu_replicas_entry(r, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (dev = 0; dev < dev_slots; dev++) {
			if (!replicas_test_dev(e, dev))
				continue;

			BUG_ON(!bch2_dev_exists(c->disk_sb, mi, dev));

			if (test_bit(dev, online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].nr_online =
			min(ret.replicas[e->data_type].nr_online,
			    nr_online);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	rcu_read_unlock();

	return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}

bool bch2_have_enough_devs(struct bch_fs *c,
			   struct replicas_status s,
			   unsigned flags)
{
	if ((s.replicas[BCH_DATA_JOURNAL].nr_offline ||
	     s.replicas[BCH_DATA_BTREE].nr_offline) &&
	    !(flags & BCH_FORCE_IF_METADATA_DEGRADED))
		return false;

	if ((!s.replicas[BCH_DATA_JOURNAL].nr_online ||
	     !s.replicas[BCH_DATA_BTREE].nr_online) &&
	    !(flags & BCH_FORCE_IF_METADATA_LOST))
		return false;

	if (s.replicas[BCH_DATA_USER].nr_offline &&
	    !(flags & BCH_FORCE_IF_DATA_DEGRADED))
		return false;

	if (!s.replicas[BCH_DATA_USER].nr_online &&
	    !(flags & BCH_FORCE_IF_DATA_LOST))
		return false;

	return true;
}

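/*
 * Example (added commentary, not in the original file): with one device of
 * a two-way mirrored filesystem offline, user data has nr_online == 1 and
 * nr_offline == 1, so mounting requires BCH_FORCE_IF_DATA_DEGRADED (or the
 * umbrella BCH_FORCE_IF_DEGRADED) but not BCH_FORCE_IF_DATA_LOST.
 */
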
unsigned bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return meta
		? min(s.replicas[BCH_DATA_JOURNAL].nr_online,
		      s.replicas[BCH_DATA_BTREE].nr_online)
		: s.replicas[BCH_DATA_USER].nr_online;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_cpu_entry *e;
	struct bch_replicas_cpu *r;
	unsigned ret = 0;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);

	if (ca->dev_idx >= replicas_dev_slots(r))
		goto out;

	for_each_cpu_replicas_entry(r, e)
		if (replicas_test_dev(e, ca->dev_idx))
			ret |= 1 << e->data_type;
out:
	rcu_read_unlock();

	return ret;
}

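/*
 * Note (added commentary, not in the original file): the return value is a
 * bitmask of data types, e.g. (1 << BCH_DATA_BTREE) | (1 << BCH_DATA_USER)
 * for a device holding both btree nodes and user data, which lets callers
 * refuse to remove a device that still has anything on it.
 */
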
/* BCH_SB_FIELD_quota: */

static const char *bch2_sb_validate_quota(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	if (vstruct_bytes(&q->field) != sizeof(*q))
		return "invalid field quota: wrong size";

	return NULL;
}

/* BCH_SB_FIELD_disk_groups: */

static size_t trim_nulls(const char *str, size_t len)
{
	while (len && !str[len - 1])
		--len;

	return len;
}

static const char *bch2_sb_validate_disk_groups(struct bch_sb *sb,
						struct bch_sb_field *f)
{
	struct bch_sb_field_disk_groups *groups =
		field_to_type(f, disk_groups);
	struct bch_sb_field_members *mi;
	struct bch_member *m;
	struct bch_disk_group *g;
	unsigned nr_groups;
	char **labels;

	mi		= bch2_sb_get_members(sb);
	groups		= bch2_sb_get_disk_groups(sb);
	nr_groups	= disk_groups_nr(groups);

	for (m = mi->members;
	     m < mi->members + sb->nr_devices;
	     m++) {
		if (!BCH_MEMBER_GROUP(m))
			continue;

		if (BCH_MEMBER_GROUP(m) >= nr_groups)
			return "disk has invalid group";

		g = &groups->entries[BCH_MEMBER_GROUP(m)];
		if (BCH_GROUP_DELETED(g))
			return "disk has invalid group";
	}

	if (!nr_groups)
		return NULL;

	labels = kcalloc(nr_groups, sizeof(char *), GFP_KERNEL);
	if (!labels)
		return "cannot allocate memory";

	for (g = groups->entries;
	     g < groups->entries + nr_groups;
	     g++) {
		/* (label validation elided in this listing) */
	}

	kfree(labels);
	return NULL;
}

static int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
{
	struct bch_sb_field_members *mi;
	struct bch_sb_field_disk_groups *groups;
	struct bch_disk_groups_cpu *cpu_g, *old_g;
	unsigned i, nr_groups;

	lockdep_assert_held(&c->sb_lock);

	mi		= bch2_sb_get_members(c->disk_sb);
	groups		= bch2_sb_get_disk_groups(c->disk_sb);
	nr_groups	= disk_groups_nr(groups);

	if (!groups)
		return 0;

	cpu_g = kzalloc(sizeof(*cpu_g) +
			sizeof(cpu_g->entries[0]) * nr_groups, GFP_KERNEL);
	if (!cpu_g)
		return -ENOMEM;

	cpu_g->nr = nr_groups;

	for (i = 0; i < nr_groups; i++) {
		struct bch_disk_group *src	= &groups->entries[i];
		struct bch_disk_group_cpu *dst	= &cpu_g->entries[i];

		dst->deleted = BCH_GROUP_DELETED(src);
	}

	for (i = 0; i < c->disk_sb->nr_devices; i++) {
		struct bch_member *m = mi->members + i;
		struct bch_disk_group_cpu *dst =
			&cpu_g->entries[BCH_MEMBER_GROUP(m)];

		if (!bch2_member_exists(m))
			continue;

		__set_bit(i, dst->devs.d);
	}

	old_g = c->disk_groups;
	rcu_assign_pointer(c->disk_groups, cpu_g);
	if (old_g)
		kfree_rcu(old_g, rcu);

	return 0;
}

const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
{
	struct target t = target_decode(target);

	switch (t.type) {
	case TARGET_DEV:
		BUG_ON(t.dev >= c->sb.nr_devices && !c->devs[t.dev]);
		return &c->devs[t.dev]->self;
	case TARGET_GROUP: {
		struct bch_disk_groups_cpu *g =
			rcu_dereference(c->disk_groups);

		/* XXX: what to do here? */
		BUG_ON(t.group >= g->nr || g->entries[t.group].deleted);
		return &g->entries[t.group].devs;
	}
	default:
		BUG();
	}
}

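/*
 * Note (added commentary, not in the original file): a target is a packed
 * integer naming either a single device or a disk group; target_decode()
 * splits it back into (type, index), and the mask returned here is the set
 * of candidate devices the allocator may write to for that target.
 */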