// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "replicas.h"
#include "super-io.h"
#include "super.h"
#include "vstructs.h"

#include <linux/backing-dev.h>
#include <linux/sort.h>
const char * const bch2_sb_fields[] = {
#define x(name, nr)	#name,
	BCH_SB_FIELDS()
#undef x
	NULL
};
static const char *bch2_sb_field_validate(struct bch_sb *,
					  struct bch_sb_field *);
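/*
 * Look up an optional field in the superblock's vstruct-style field list.
 *
 * Usage sketch (assumes @sb points at a valid, validated superblock):
 *
 *	struct bch_sb_field *f =
 *		bch2_sb_field_get(sb, BCH_SB_FIELD_members);
 */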
struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
				       enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
		if (le32_to_cpu(f->type) == type)
			return f;

	return NULL;
}
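/*
 * Resize field @f to @u64s u64s, shifting any following fields and updating
 * the superblock's total size; @u64s == 0 deletes the field. The caller must
 * already have ensured the buffer is big enough (see bch2_sb_realloc()).
 */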
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
						   struct bch_sb_field *f,
						   unsigned u64s)
{
	unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;

	BUG_ON(get_order(__vstruct_bytes(struct bch_sb, sb_u64s)) >
	       sb->page_order);

	if (!f && !u64s) {
		/* nothing to do: */
	} else if (!f) {
		f = vstruct_last(sb->sb);
		memset(f, 0, sizeof(u64) * u64s);
		f->u64s = cpu_to_le32(u64s);
		f->type = 0;
	} else {
		void *src, *dst;

		src = vstruct_end(f);

		if (u64s) {
			f->u64s = cpu_to_le32(u64s);
			dst = vstruct_end(f);
		} else {
			dst = f;
		}

		memmove(dst, src, vstruct_end(sb->sb) - src);

		if (dst > src)
			memset(src, 0, dst - src);
	}

	sb->sb->u64s = cpu_to_le32(sb_u64s);

	return u64s ? f : NULL;
}
void bch2_sb_field_delete(struct bch_sb_handle *sb,
			  enum bch_sb_field_type type)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);

	if (f)
		__bch2_sb_field_resize(sb, f, 0);
}
/* Superblock realloc/free: */
void bch2_free_super(struct bch_sb_handle *sb)
{
	if (sb->bio)
		bio_put(sb->bio);
	if (!IS_ERR_OR_NULL(sb->bdev))
		blkdev_put(sb->bdev, sb->mode);

	free_pages((unsigned long) sb->sb, sb->page_order);
	memset(sb, 0, sizeof(*sb));
}
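/*
 * Grow the in-memory superblock buffer to hold @u64s worth of fields,
 * refusing to grow past the on-disk layout's maximum superblock size once
 * the layout is known. The buffer never shrinks.
 */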
int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
{
	size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
	unsigned order = get_order(new_bytes);
	struct bch_sb *new_sb;
	struct bio *bio;

	if (sb->sb && sb->page_order >= order)
		return 0;

	if (sb->have_layout) {
		u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

		if (new_bytes > max_bytes) {
			char buf[BDEVNAME_SIZE];

			pr_err("%s: superblock too big: want %zu but have %llu",
			       bdevname(sb->bdev, buf), new_bytes, max_bytes);
			return -ENOSPC;
		}
	}

	if (sb->page_order >= order && sb->sb)
		return 0;

	if (dynamic_fault("bcachefs:add:super_realloc"))
		return -ENOMEM;

	if (sb->have_bio) {
		bio = bio_kmalloc(GFP_KERNEL, 1 << order);
		if (!bio)
			return -ENOMEM;

		if (sb->bio)
			bio_put(sb->bio);
		sb->bio = bio;
	}

	new_sb = (void *) __get_free_pages(GFP_NOFS|__GFP_ZERO, order);
	if (!new_sb)
		return -ENOMEM;

	if (sb->sb)
		memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);

	free_pages((unsigned long) sb->sb, sb->page_order);
	sb->sb = new_sb;
	sb->page_order = order;

	return 0;
}
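/*
 * Resize (or create) field @type in this superblock. When called on the
 * filesystem superblock, every online member's buffer is grown first so the
 * result can later be copied out to each device.
 *
 * Usage sketch (assumes c->sb_lock is held):
 *
 *	struct bch_sb_field_crypt *crypt = (void *)
 *		bch2_sb_field_resize(&c->disk_sb, BCH_SB_FIELD_crypt,
 *				     sizeof(*crypt) / sizeof(u64));
 */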
struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
					  enum bch_sb_field_type type,
					  unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;

	if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
		return NULL;

	if (sb->fs_sb) {
		struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
		struct bch_dev *ca;
		unsigned i;

		lockdep_assert_held(&c->sb_lock);

		/* XXX: we're not checking that offline devices have enough space */

		for_each_online_member(ca, c, i) {
			struct bch_sb_handle *sb = &ca->disk_sb;

			if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
				percpu_ref_put(&ca->ref);
				return NULL;
			}
		}
	}

	f = bch2_sb_field_get(sb->sb, type);
	f = __bch2_sb_field_resize(sb, f, u64s);
	if (f)
		f->type = cpu_to_le32(type);
	return f;
}
/* Superblock validate: */
static inline void __bch2_sb_layout_size_assert(void)
{
	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}
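/*
 * Validate the layout sector: magic, layout type, superblock count, and that
 * successive superblock offsets don't overlap given the maximum superblock
 * size.
 */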
static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock layout";

	if (layout->layout_type != 0)
		return "Invalid superblock layout type";

	if (!layout->nr_superblocks)
		return "Invalid superblock layout: no superblocks";

	if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
		return "Invalid superblock layout: too many superblocks";

	max_sectors = 1 << layout->sb_max_size_bits;

	prev_offset = le64_to_cpu(layout->sb_offset[0]);

	for (i = 1; i < layout->nr_superblocks; i++) {
		offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset < prev_offset + max_sectors)
			return "Invalid superblock layout: superblocks overlap";
		prev_offset = offset;
	}

	return NULL;
}
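/*
 * Full superblock validation; returns an error string on failure, NULL on
 * success. Also run before every superblock write, so a corrupt in-memory
 * superblock is never written out.
 */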
const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members *mi;
	const char *err;
	u32 version, version_min;
	u16 block_size;

	version = le16_to_cpu(sb->version);
	version_min = version >= bcachefs_metadata_version_new_versioning
		? le16_to_cpu(sb->version_min)
		: version;

	if (version >= bcachefs_metadata_version_max ||
	    version_min < bcachefs_metadata_version_min)
		return "Unsupported superblock version";

	if (version_min > version)
		return "Bad minimum version";

	if (sb->features[1] ||
	    (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR)))
		return "Filesystem has incompatible features";

	block_size = le16_to_cpu(sb->block_size);

	if (!is_power_of_2(block_size) ||
	    block_size > PAGE_SECTORS)
		return "Bad block size";

	if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
		return "Bad user UUID";

	if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
		return "Bad internal UUID";

	if (!sb->nr_devices ||
	    sb->nr_devices <= sb->dev_idx ||
	    sb->nr_devices > BCH_SB_MEMBERS_MAX)
		return "Bad number of member devices";

	if (!BCH_SB_META_REPLICAS_WANT(sb) ||
	    BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_META_REPLICAS_REQ(sb) ||
	    BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
	    BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
	    BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (BCH_SB_META_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid metadata checksum type";

	if (BCH_SB_DATA_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid data checksum type";

	if (BCH_SB_COMPRESSION_TYPE(sb) >= BCH_COMPRESSION_OPT_NR)
		return "Invalid compression type";

	if (!BCH_SB_BTREE_NODE_SIZE(sb))
		return "Btree node size not set";

	if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
		return "Btree node size not a power of two";

	if (BCH_SB_GC_RESERVE(sb) < 5)
		return "gc reserve percentage too small";

	if (!sb->time_precision ||
	    le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
		return "invalid time precision";

	/* validate layout */
	err = validate_sb_layout(&sb->layout);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (!f->u64s)
			return "Invalid superblock: invalid optional field";

		if (vstruct_next(f) > vstruct_last(sb))
			return "Invalid superblock: invalid optional field";
	}

	/* members must be validated first: */
	mi = bch2_sb_get_members(sb);
	if (!mi)
		return "Invalid superblock: member info area missing";

	err = bch2_sb_field_validate(sb, &mi->field);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members)
			continue;

		err = bch2_sb_field_validate(sb, f);
		if (err)
			return err;
	}

	return NULL;
}
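/*
 * Refresh the native-endian copies of superblock fields cached in bch_fs
 * (and each member's cached bch_member_cpu) from the on-disk-format
 * superblock.
 */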
static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb.sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(src);
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	c->sb.uuid		= src->uuid;
	c->sb.user_uuid		= src->user_uuid;
	c->sb.version		= le16_to_cpu(src->version);
	c->sb.nr_devices	= src->nr_devices;
	c->sb.clean		= BCH_SB_CLEAN(src);
	c->sb.encryption_type	= BCH_SB_ENCRYPTION_TYPE(src);
	c->sb.encoded_extent_max = 1 << BCH_SB_ENCODED_EXTENT_MAX_BITS(src);
	c->sb.time_base_lo	= le64_to_cpu(src->time_base_lo);
	c->sb.time_base_hi	= le32_to_cpu(src->time_base_hi);
	c->sb.time_precision	= le32_to_cpu(src->time_precision);
	c->sb.features		= le64_to_cpu(src->features[0]);
	c->sb.compat		= le64_to_cpu(src->compat[0]);

	for_each_member_device(ca, c, i)
		ca->mi = bch2_mi_to_cpu(mi->members + i);
}
/* doesn't copy member info */
static void __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
{
	struct bch_sb_field *src_f, *dst_f;
	struct bch_sb *dst = dst_handle->sb;
	unsigned i;

	dst->version		= src->version;
	dst->version_min	= src->version_min;
	dst->seq		= src->seq;
	dst->uuid		= src->uuid;
	dst->user_uuid		= src->user_uuid;
	memcpy(dst->label, src->label, sizeof(dst->label));

	dst->block_size		= src->block_size;
	dst->nr_devices		= src->nr_devices;

	dst->time_base_lo	= src->time_base_lo;
	dst->time_base_hi	= src->time_base_hi;
	dst->time_precision	= src->time_precision;

	memcpy(dst->flags, src->flags, sizeof(dst->flags));
	memcpy(dst->features, src->features, sizeof(dst->features));
	memcpy(dst->compat, src->compat, sizeof(dst->compat));

	for (i = 0; i < BCH_SB_FIELD_NR; i++) {
		if (i == BCH_SB_FIELD_journal)
			continue;

		src_f = bch2_sb_field_get(src, i);
		dst_f = bch2_sb_field_get(dst, i);
		dst_f = __bch2_sb_field_resize(dst_handle, dst_f,
				src_f ? le32_to_cpu(src_f->u64s) : 0);

		if (src_f)
			memcpy(dst_f, src_f, vstruct_bytes(src_f));
	}
}
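/*
 * Install @src as the filesystem's superblock: everything except the journal
 * field (which is per-device) is copied, then the cached copies and CPU-side
 * replicas/disk-groups tables are rebuilt.
 */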
int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(src);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	int ret;

	lockdep_assert_held(&c->sb_lock);

	ret = bch2_sb_realloc(&c->disk_sb,
			      le32_to_cpu(src->u64s) - journal_u64s);
	if (ret)
		return ret;

	__copy_super(&c->disk_sb, src);

	ret = bch2_sb_replicas_to_cpu_replicas(c);
	if (ret)
		return ret;

	ret = bch2_sb_disk_groups_to_cpu(c);
	if (ret)
		return ret;

	bch2_sb_update(c);
	return 0;
}
int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *src = c->disk_sb.sb, *dst = ca->disk_sb.sb;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(dst);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
	int ret;

	ret = bch2_sb_realloc(&ca->disk_sb, u64s);
	if (ret)
		return ret;

	__copy_super(&ca->disk_sb, src);
	return 0;
}
/* read superblock: */
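/*
 * Read and minimally validate one superblock at @offset (in sectors):
 * checks magic, version and checksum, rereading with a bigger buffer if the
 * superblock turns out to be larger than what was read.
 */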
static const char *read_one_super(struct bch_sb_handle *sb, u64 offset)
{
	struct bch_csum csum;
	size_t bytes;
reread:
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = offset;
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order);

	if (submit_bio_wait(sb->bio))
		return "IO error";

	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock";

	if (le16_to_cpu(sb->sb->version) <  bcachefs_metadata_version_min ||
	    le16_to_cpu(sb->sb->version) >= bcachefs_metadata_version_max)
		return "Unsupported superblock version";

	bytes = vstruct_bytes(sb->sb);

	if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
		return "Bad superblock: too big";

	if (get_order(bytes) > sb->page_order) {
		if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
			return "cannot allocate memory";
		goto reread;
	}

	if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
		return "unknown csum type";

	/* XXX: verify MACs */
	csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
			    null_nonce(), sb->sb);

	if (bch2_crc_cmp(csum, sb->sb->csum))
		return "bad checksum reading superblock";

	sb->seq = le64_to_cpu(sb->sb->seq);

	return NULL;
}
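/*
 * Open @path and read its superblock: the default (or user-supplied) offset
 * is tried first, then the backup superblocks listed in the layout sector.
 */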
int bch2_read_super(const char *path, struct bch_opts *opts,
		    struct bch_sb_handle *sb)
{
	u64 offset = opt_get(*opts, sb);
	struct bch_sb_layout layout;
	const char *err;
	__le64 *i;
	int ret;

	pr_verbose_init(*opts, "");

	memset(sb, 0, sizeof(*sb));
	sb->mode	= FMODE_READ;
	sb->have_bio	= true;

	if (!opt_get(*opts, noexcl))
		sb->mode |= FMODE_EXCL;

	if (!opt_get(*opts, nochanges))
		sb->mode |= FMODE_WRITE;

	sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
	if (IS_ERR(sb->bdev) &&
	    PTR_ERR(sb->bdev) == -EACCES &&
	    opt_get(*opts, read_only)) {
		sb->mode &= ~FMODE_WRITE;

		sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
		if (!IS_ERR(sb->bdev))
			opt_set(*opts, nochanges, true);
	}

	if (IS_ERR(sb->bdev)) {
		ret = PTR_ERR(sb->bdev);
		goto out;
	}

	err = "cannot allocate memory";
	ret = bch2_sb_realloc(sb, 0);
	if (ret)
		goto err;

	ret = -EFAULT;
	err = "dynamic fault";
	if (bch2_fs_init_fault("read_super"))
		goto err;

	ret = -EINVAL;
	err = read_one_super(sb, offset);
	if (!err)
		goto got_super;

	if (opt_defined(*opts, sb))
		goto err;

	pr_err("error reading default superblock: %s", err);

	/*
	 * Error reading primary superblock - read location of backup
	 * superblocks:
	 */
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	/*
	 * use sb buffer to read layout, since sb buffer is page aligned but
	 * layout won't be:
	 */
	bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));

	err = "IO error";
	if (submit_bio_wait(sb->bio))
		goto err;

	memcpy(&layout, sb->sb, sizeof(layout));
	err = validate_sb_layout(&layout);
	if (err)
		goto err;

	for (i = layout.sb_offset;
	     i < layout.sb_offset + layout.nr_superblocks; i++) {
		offset = le64_to_cpu(*i);

		if (offset == opt_get(*opts, sb))
			continue;

		err = read_one_super(sb, offset);
		if (!err)
			goto got_super;
	}

	ret = -EINVAL;
	goto err;

got_super:
	err = "Superblock block size smaller than device block size";
	ret = -EINVAL;
	if (le16_to_cpu(sb->sb->block_size) << 9 <
	    bdev_logical_block_size(sb->bdev))
		goto err;

	if (sb->mode & FMODE_WRITE)
		bdev_get_queue(sb->bdev)->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	ret = 0;
	sb->have_layout = true;
out:
	pr_verbose_init(*opts, "ret %i", ret);
	return ret;
err:
	bch2_free_super(sb);
	pr_err("error reading superblock: %s", err);
	goto out;
}
/* write superblock: */
static void write_super_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;

	/* XXX: return errors directly */

	if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write"))
		ca->sb_write_error = 1;

	closure_put(&ca->fs->sb_write);
	percpu_ref_put(&ca->io_ref);
}
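/*
 * Read the first superblock back into a scratch buffer so that, before
 * overwriting it, bch2_write_super() can check the sequence number and
 * detect modification by another process.
 */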
static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	bio_reset(bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->layout.sb_offset[0]);
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);

	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}
static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	sb->offset = sb->layout.sb_offset[idx];

	SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
				null_nonce(), sb);

	bio_reset(bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch2_bio_map(bio, sb,
		     roundup((size_t) vstruct_bytes(sb),
			     bdev_logical_block_size(ca->disk_sb.bdev)));

	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}
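/*
 * Write the superblock to all online members, one superblock slot at a time
 * across all devices, and fail the filesystem if the set of devices we
 * actually managed to write to wouldn't be sufficient to mount from.
 */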
int bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	unsigned i, sb = 0, nr_wrote;
	const char *err;
	struct bch_devs_mask sb_written;
	bool wrote, can_mount_without_written, can_mount_with_written;
	int ret = 0;

	lockdep_assert_held(&c->sb_lock);

	closure_init_stack(cl);
	memset(&sb_written, 0, sizeof(sb_written));

	le64_add_cpu(&c->disk_sb.sb->seq, 1);

	if (test_bit(BCH_FS_ERROR, &c->flags))
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
		err = bch2_sb_validate(&ca->disk_sb);
		if (err) {
			bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
			ret = -1;
			goto out;
		}
	}

	if (c->opts.nochanges)
		goto out;

	for_each_online_member(ca, c, i) {
		__set_bit(ca->dev_idx, sb_written.d);
		ca->sb_write_error = 0;
	}

	for_each_online_member(ca, c, i)
		read_back_super(c, ca);
	closure_sync(cl);

	for_each_online_member(ca, c, i) {
		if (!ca->sb_write_error &&
		    ca->disk_sb.seq !=
		    le64_to_cpu(ca->sb_read_scratch->seq)) {
			bch2_fs_fatal_error(c,
				"Superblock modified by another process");
			percpu_ref_put(&ca->io_ref);
			ret = -EROFS;
			goto out;
		}
	}

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
			if (!ca->sb_write_error &&
			    sb < ca->disk_sb.sb->layout.nr_superblocks) {
				write_one_super(c, ca, sb);
				wrote = true;
			}
		closure_sync(cl);
		sb++;
	} while (wrote);

	for_each_online_member(ca, c, i) {
		if (ca->sb_write_error)
			__clear_bit(ca->dev_idx, sb_written.d);
		else
			ca->disk_sb.seq = le64_to_cpu(ca->disk_sb.sb->seq);
	}

	nr_wrote = dev_mask_nr(&sb_written);

	can_mount_with_written =
		bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
				      BCH_FORCE_IF_DEGRADED);

	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
		sb_written.d[i] = ~sb_written.d[i];

	can_mount_without_written =
		bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
				      BCH_FORCE_IF_DEGRADED);

	/*
	 * If we would be able to mount _without_ the devices we successfully
	 * wrote superblocks to, we weren't able to write to enough devices:
	 *
	 * Exception: if we can mount without the successes because we haven't
	 * written anything (new filesystem), we continue if we'd be able to
	 * mount with the devices we did successfully write to:
	 */
	if (bch2_fs_fatal_err_on(!nr_wrote ||
				 (can_mount_without_written &&
				  !can_mount_with_written), c,
		"Unable to write superblock to sufficient devices"))
		ret = -1;
out:
	/* Make new options visible after they're persistent: */
	bch2_sb_update(c);
	return ret;
}
void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
{
	mutex_lock(&c->sb_lock);
	if (!(c->sb.features & (1ULL << feat))) {
		c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);

		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}
/* BCH_SB_FIELD_journal: */
static int u64_cmp(const void *_l, const void *_r)
{
	u64 l = *((const u64 *) _l), r = *((const u64 *) _r);

	return l < r ? -1 : l > r ? 1 : 0;
}
static const char *bch2_sb_validate_journal(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_journal *journal = field_to_type(f, journal);
	struct bch_member *m = bch2_sb_get_members(sb)->members + sb->dev_idx;
	const char *err;
	unsigned nr;
	unsigned i;
	u64 *b;

	journal = bch2_sb_get_journal(sb);
	if (!journal)
		return NULL;

	nr = bch2_nr_journal_buckets(journal);
	if (!nr)
		return NULL;

	b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
	if (!b)
		return "cannot allocate memory";

	for (i = 0; i < nr; i++)
		b[i] = le64_to_cpu(journal->buckets[i]);

	sort(b, nr, sizeof(u64), u64_cmp, NULL);

	err = "journal bucket at sector 0";
	if (!b[0])
		goto err;

	err = "journal bucket before first bucket";
	if (m && b[0] < le16_to_cpu(m->first_bucket))
		goto err;

	err = "journal bucket past end of device";
	if (m && b[nr - 1] >= le64_to_cpu(m->nbuckets))
		goto err;

	err = "duplicate journal buckets";
	for (i = 0; i + 1 < nr; i++)
		if (b[i] == b[i + 1])
			goto err;

	err = NULL;
err:
	kfree(b);
	return err;
}
static const struct bch_sb_field_ops bch_sb_field_ops_journal = {
	.validate	= bch2_sb_validate_journal,
};
/* BCH_SB_FIELD_members: */
static const char *bch2_sb_validate_members(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	struct bch_member *m;

	if ((void *) (mi->members + sb->nr_devices) >
	    vstruct_end(&mi->field))
		return "Invalid superblock: bad member info";

	for (m = mi->members;
	     m < mi->members + sb->nr_devices;
	     m++) {
		if (!bch2_member_exists(m))
			continue;

		if (le64_to_cpu(m->nbuckets) > LONG_MAX)
			return "Too many buckets";

		if (le64_to_cpu(m->nbuckets) -
		    le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS)
			return "Not enough buckets";

		if (le16_to_cpu(m->bucket_size) <
		    le16_to_cpu(sb->block_size))
			return "bucket size smaller than block size";

		if (le16_to_cpu(m->bucket_size) <
		    BCH_SB_BTREE_NODE_SIZE(sb))
			return "bucket size smaller than btree node size";
	}

	return NULL;
}
static const struct bch_sb_field_ops bch_sb_field_ops_members = {
	.validate	= bch2_sb_validate_members,
};
/* BCH_SB_FIELD_crypt: */
static const char *bch2_sb_validate_crypt(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) != sizeof(*crypt))
		return "invalid field crypt: wrong size";

	if (BCH_CRYPT_KDF_TYPE(crypt))
		return "invalid field crypt: bad kdf type";

	return NULL;
}
static const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_validate_crypt,
};
/* BCH_SB_FIELD_clean: */
void bch2_sb_clean_renumber(struct bch_sb_field_clean *clean, int write)
{
	struct jset_entry *entry;

	for (entry = clean->start;
	     entry < (struct jset_entry *) vstruct_end(&clean->field);
	     entry = vstruct_next(entry))
		bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
}
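/*
 * Going read-write: clear the clean flag, and drop the compat bits that are
 * only valid on a clean shutdown, before writing the superblock.
 */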
int bch2_fs_mark_dirty(struct bch_fs *c)
{
	int ret;

	/*
	 * Unconditionally write superblock, to verify it hasn't changed before
	 * we go rw:
	 */
	mutex_lock(&c->sb_lock);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
	c->disk_sb.sb->compat[0] &= ~cpu_to_le64(1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA);
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_extents_above_btree_updates);
	ret = bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return ret;
}
static void
entry_init_u64s(struct jset_entry *entry, unsigned u64s)
{
	memset(entry, 0, u64s * sizeof(u64));

	/*
	 * The u64s field counts from the start of data, ignoring the shared
	 * fields.
	 */
	entry->u64s = u64s - 1;
}

static void
entry_init_size(struct jset_entry *entry, size_t size)
{
	unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
	entry_init_u64s(entry, u64s);
}
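/*
 * Emit the jset entries shared between journal writes and the superblock's
 * clean section: btree roots, plus filesystem usage counters; returns a
 * pointer just past the last entry written.
 */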
struct jset_entry *
bch2_journal_super_entries_add_common(struct bch_fs *c,
				      struct jset_entry *entry,
				      u64 journal_seq)
{
	struct btree_root *r;
	unsigned i;

	mutex_lock(&c->btree_root_lock);

	for (r = c->btree_roots;
	     r < c->btree_roots + BTREE_ID_NR;
	     r++)
		if (r->alive) {
			entry_init_u64s(entry, r->key.u64s + 1);
			entry->btree_id = r - c->btree_roots;
			entry->level	= r->level;
			entry->type	= BCH_JSET_ENTRY_btree_root;
			bkey_copy(&entry->start[0], &r->key);

			entry = vstruct_next(entry);
		}
	c->btree_roots_dirty = false;

	mutex_unlock(&c->btree_root_lock);

	percpu_down_write(&c->mark_lock);

	if (!journal_seq) {
		bch2_fs_usage_acc_to_base(c, 0);
		bch2_fs_usage_acc_to_base(c, 1);
	} else {
		bch2_fs_usage_acc_to_base(c, journal_seq & 1);
	}

	{
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		entry_init_size(entry, sizeof(*u));
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_INODES;
		u->v		= cpu_to_le64(c->usage_base->nr_inodes);

		entry = vstruct_next(entry);
	}

	{
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		entry_init_size(entry, sizeof(*u));
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_KEY_VERSION;
		u->v		= cpu_to_le64(atomic64_read(&c->key_version));

		entry = vstruct_next(entry);
	}

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		entry_init_size(entry, sizeof(*u));
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_RESERVED;
		u->entry.level	= i;
		u->v		= cpu_to_le64(c->usage_base->persistent_reserved[i]);

		entry = vstruct_next(entry);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		entry_init_size(entry, sizeof(*u) + e->nr_devs);
		u->entry.type	= BCH_JSET_ENTRY_data_usage;
		u->v		= cpu_to_le64(c->usage_base->replicas[i]);
		memcpy(&u->r, e, replicas_entry_bytes(e));

		entry = vstruct_next(entry);
	}

	percpu_up_write(&c->mark_lock);

	return entry;
}
void bch2_fs_mark_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *sb_clean;
	struct jset_entry *entry;
	unsigned u64s;

	mutex_lock(&c->sb_lock);
	if (BCH_SB_CLEAN(c->disk_sb.sb))
		goto out;

	SET_BCH_SB_CLEAN(c->disk_sb.sb, true);

	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA);
	c->disk_sb.sb->features[0] &= ~cpu_to_le64(1ULL << BCH_FEATURE_extents_above_btree_updates);

	u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;

	sb_clean = bch2_sb_resize_clean(&c->disk_sb, u64s);
	if (!sb_clean) {
		bch_err(c, "error resizing superblock while setting filesystem clean");
		goto out;
	}

	sb_clean->flags		= 0;
	sb_clean->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	sb_clean->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	sb_clean->journal_seq	= cpu_to_le64(journal_cur_seq(&c->journal) - 1);

	/* Trying to catch outstanding bug: */
	BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);

	entry = sb_clean->start;
	entry = bch2_journal_super_entries_add_common(c, entry, 0);
	BUG_ON((void *) entry > vstruct_end(&sb_clean->field));

	memset(entry, 0,
	       vstruct_end(&sb_clean->field) - (void *) entry);

	if (le16_to_cpu(c->disk_sb.sb->version) <
	    bcachefs_metadata_version_bkey_renumber)
		bch2_sb_clean_renumber(sb_clean, WRITE);

	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);
}
static const char *bch2_sb_validate_clean(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_clean *clean = field_to_type(f, clean);

	if (vstruct_bytes(&clean->field) < sizeof(*clean))
		return "invalid field clean: wrong size";

	return NULL;
}
static const struct bch_sb_field_ops bch_sb_field_ops_clean = {
	.validate	= bch2_sb_validate_clean,
};
static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
#define x(f, nr)					\
	[BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
	BCH_SB_FIELDS()
#undef x
};
static const char *bch2_sb_field_validate(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);

	return type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type]->validate(sb, f)
		: NULL;
}
void bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
			   struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);
	const struct bch_sb_field_ops *ops = type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type] : NULL;

	if (ops)
		pr_buf(out, "%s", bch2_sb_fields[type]);
	else
		pr_buf(out, "(unknown field %u)", type);

	pr_buf(out, " (size %llu):", vstruct_bytes(f));

	if (ops && ops->to_text)
		bch2_sb_field_ops[type]->to_text(out, sb, f);
}