#include "bcachefs.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"
#include "super.h"
#include "vstructs.h"

#include <linux/backing-dev.h>
#include <linux/sort.h>
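
/*
 * bcachefs superblock (struct bch_sb) handling: allocating and resizing the
 * in-memory copy and its optional variable-length fields, validating,
 * reading and writing superblocks, and converting between the on-disk
 * (little endian) and in-memory representations.
 */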
const char * const bch2_sb_fields[] = {
#define x(name, nr)	#name,
	BCH_SB_FIELDS()
#undef x
};
static const char *bch2_sb_field_validate(struct bch_sb *,
					  struct bch_sb_field *);
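
/*
 * Optional fields are stored as a contiguous sequence of variable-length
 * vstructs after the fixed part of the superblock; lookup is a linear scan
 * by field type.
 */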
struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
				       enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
		if (le32_to_cpu(f->type) == type)
			return f;
	return NULL;
}
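
/*
 * Resize field @f to @u64s (creating it if @f is NULL, deleting it if @u64s
 * is zero), shifting any following fields up or down and updating the
 * superblock's total u64s. The caller must already have ensured, via
 * bch2_sb_realloc(), that the buffer is big enough.
 */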
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
						   struct bch_sb_field *f,
						   unsigned u64s)
{
	unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;

	BUG_ON(get_order(__vstruct_bytes(struct bch_sb, sb_u64s)) >
	       sb->page_order);

	if (!f && !u64s) {
		/* nothing to do: */
	} else if (!f) {
		f = vstruct_last(sb->sb);
		memset(f, 0, sizeof(u64) * u64s);
		f->u64s = cpu_to_le32(u64s);
		f->type = 0;
	} else {
		void *src, *dst;

		src = vstruct_end(f);

		if (u64s) {
			f->u64s = cpu_to_le32(u64s);
			dst = vstruct_end(f);
		} else {
			dst = f;
		}

		memmove(dst, src, vstruct_end(sb->sb) - src);

		if (dst > src)
			memset(src, 0, dst - src);
	}

	sb->sb->u64s = cpu_to_le32(sb_u64s);

	return u64s ? f : NULL;
}
void bch2_sb_field_delete(struct bch_sb_handle *sb,
			  enum bch_sb_field_type type)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);

	if (f)
		__bch2_sb_field_resize(sb, f, 0);
}
/* Superblock realloc/free: */
void bch2_free_super(struct bch_sb_handle *sb)
{
	if (sb->bio)
		bio_put(sb->bio);
	if (!IS_ERR_OR_NULL(sb->bdev))
		blkdev_put(sb->bdev, sb->mode);

	free_pages((unsigned long) sb->sb, sb->page_order);
	memset(sb, 0, sizeof(*sb));
}
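
/*
 * Grow the in-memory superblock buffer to hold @u64s worth of field data.
 * The buffer is allocated in whole pages (sb->page_order); once the layout
 * is known, growth is capped at the on-disk maximum, sb_max_size_bits.
 */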
int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
{
	size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
	unsigned order = get_order(new_bytes);
	struct bch_sb *new_sb;
	struct bio *bio;

	if (sb->sb && sb->page_order >= order)
		return 0;

	if (sb->have_layout) {
		u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

		if (new_bytes > max_bytes) {
			char buf[BDEVNAME_SIZE];

			pr_err("%s: superblock too big: want %zu but have %llu",
			       bdevname(sb->bdev, buf), new_bytes, max_bytes);
			return -ENOSPC;
		}
	}

	if (sb->page_order >= order && sb->sb)
		return 0;

	if (dynamic_fault("bcachefs:add:super_realloc"))
		return -ENOMEM;

	if (sb->have_bio) {
		bio = bio_kmalloc(GFP_KERNEL, 1 << order);
		if (!bio)
			return -ENOMEM;

		if (sb->bio)
			bio_put(sb->bio);
		sb->bio = bio;
	}

	new_sb = (void *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!new_sb)
		return -ENOMEM;

	if (sb->sb)
		memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);

	free_pages((unsigned long) sb->sb, sb->page_order);

	sb->sb = new_sb;
	sb->page_order = order;

	return 0;
}
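
/*
 * Resize an optional field in the filesystem's primary superblock. When the
 * handle belongs to a struct bch_fs, the superblocks of all online member
 * devices are reallocated as well, so that the updated fields can later be
 * copied to every device by bch2_sb_from_fs().
 */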
struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
					  enum bch_sb_field_type type,
					  unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;

	if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
		return NULL;

	if (sb->fs_sb) {
		struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
		struct bch_dev *ca;
		unsigned i;

		lockdep_assert_held(&c->sb_lock);

		/* XXX: we're not checking that offline devices have enough space */

		for_each_online_member(ca, c, i) {
			struct bch_sb_handle *sb = &ca->disk_sb;

			if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
				percpu_ref_put(&ca->ref);
				return NULL;
			}
		}
	}

	f = bch2_sb_field_get(sb->sb, type);
	f = __bch2_sb_field_resize(sb, f, u64s);
	if (f)
		f->type = cpu_to_le32(type);
	return f;
}
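
/*
 * Typical usage, as a rough sketch (BCH_SB_FIELD_crypt is only an example
 * field type here; error handling elided):
 *
 *	mutex_lock(&c->sb_lock);
 *	f = bch2_sb_field_resize(&c->disk_sb, BCH_SB_FIELD_crypt,
 *				 sizeof(struct bch_sb_field_crypt) / sizeof(u64));
 *	if (f)
 *		bch2_write_super(c);
 *	mutex_unlock(&c->sb_lock);
 */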
/* Superblock validate: */
static inline void __bch2_sb_layout_size_assert(void)
{
	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}
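
/*
 * The superblock layout (512 bytes at BCH_SB_LAYOUT_SECTOR) records where
 * every copy of the superblock lives on the device, so backups can be found
 * when the primary superblock is damaged.
 */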
static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock layout";

	if (layout->layout_type != 0)
		return "Invalid superblock layout type";

	if (!layout->nr_superblocks)
		return "Invalid superblock layout: no superblocks";

	if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
		return "Invalid superblock layout: too many superblocks";

	max_sectors = 1 << layout->sb_max_size_bits;

	prev_offset = le64_to_cpu(layout->sb_offset[0]);

	for (i = 1; i < layout->nr_superblocks; i++) {
		offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset < prev_offset + max_sectors)
			return "Invalid superblock layout: superblocks overlap";
		prev_offset = offset;
	}

	return NULL;
}
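
/*
 * Validate everything that can be checked without reference to other
 * devices: versions, feature bits, UUIDs, option ranges, the layout, and
 * each optional field (members first, since other fields refer to them).
 */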
const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members *mi;
	const char *err;
	u32 version, version_min;
	u16 block_size;

	version		= le16_to_cpu(sb->version);
	version_min	= version >= bcachefs_metadata_version_new_versioning
		? le16_to_cpu(sb->version_min)
		: version;

	if (version >= bcachefs_metadata_version_max ||
	    version_min < bcachefs_metadata_version_min)
		return "Unsupported superblock version";

	if (version_min > version)
		return "Bad minimum version";

	if (sb->features[1] ||
	    (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR)))
		return "Filesystem has incompatible features";

	block_size = le16_to_cpu(sb->block_size);

	if (!is_power_of_2(block_size) ||
	    block_size > PAGE_SECTORS)
		return "Bad block size";

	if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
		return "Bad user UUID";

	if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
		return "Bad internal UUID";

	if (!sb->nr_devices ||
	    sb->nr_devices <= sb->dev_idx ||
	    sb->nr_devices > BCH_SB_MEMBERS_MAX)
		return "Bad number of member devices";

	if (!BCH_SB_META_REPLICAS_WANT(sb) ||
	    BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_META_REPLICAS_REQ(sb) ||
	    BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
	    BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
	    BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (BCH_SB_META_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid metadata checksum type";

	if (BCH_SB_DATA_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
		return "Invalid data checksum type";

	if (BCH_SB_COMPRESSION_TYPE(sb) >= BCH_COMPRESSION_OPT_NR)
		return "Invalid compression type";

	if (!BCH_SB_BTREE_NODE_SIZE(sb))
		return "Btree node size not set";

	if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
		return "Btree node size not a power of two";

	if (BCH_SB_GC_RESERVE(sb) < 5)
		return "gc reserve percentage too small";

	if (!sb->time_precision ||
	    le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
		return "invalid time precision";

	/* validate layout */
	err = validate_sb_layout(&sb->layout);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (!f->u64s)
			return "Invalid superblock: invalid optional field";

		if (vstruct_next(f) > vstruct_last(sb))
			return "Invalid superblock: invalid optional field";
	}

	/* members must be validated first: */
	mi = bch2_sb_get_members(sb);
	if (!mi)
		return "Invalid superblock: member info area missing";

	err = bch2_sb_field_validate(sb, &mi->field);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (le32_to_cpu(f->type) == BCH_SB_FIELD_members)
			continue;

		err = bch2_sb_field_validate(sb, f);
		if (err)
			return err;
	}

	return NULL;
}
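
/*
 * Cache commonly used superblock values in native endianness in c->sb and
 * ca->mi, so readers don't have to do endian conversion on every access.
 */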
static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb.sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(src);
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	c->sb.uuid		= src->uuid;
	c->sb.user_uuid		= src->user_uuid;
	c->sb.version		= le16_to_cpu(src->version);
	c->sb.nr_devices	= src->nr_devices;
	c->sb.clean		= BCH_SB_CLEAN(src);
	c->sb.encryption_type	= BCH_SB_ENCRYPTION_TYPE(src);
	c->sb.encoded_extent_max = 1 << BCH_SB_ENCODED_EXTENT_MAX_BITS(src);
	c->sb.time_base_lo	= le64_to_cpu(src->time_base_lo);
	c->sb.time_base_hi	= le32_to_cpu(src->time_base_hi);
	c->sb.time_precision	= le32_to_cpu(src->time_precision);
	c->sb.features		= le64_to_cpu(src->features[0]);
	c->sb.compat		= le64_to_cpu(src->compat[0]);

	for_each_member_device(ca, c, i)
		ca->mi = bch2_mi_to_cpu(mi->members + i);
}
/* doesn't copy member info */
static void __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
{
	struct bch_sb_field *src_f, *dst_f;
	struct bch_sb *dst = dst_handle->sb;
	unsigned i;

	dst->version		= src->version;
	dst->version_min	= src->version_min;
	dst->seq		= src->seq;
	dst->uuid		= src->uuid;
	dst->user_uuid		= src->user_uuid;
	memcpy(dst->label, src->label, sizeof(dst->label));

	dst->block_size		= src->block_size;
	dst->nr_devices		= src->nr_devices;

	dst->time_base_lo	= src->time_base_lo;
	dst->time_base_hi	= src->time_base_hi;
	dst->time_precision	= src->time_precision;

	memcpy(dst->flags, src->flags, sizeof(dst->flags));
	memcpy(dst->features, src->features, sizeof(dst->features));
	memcpy(dst->compat, src->compat, sizeof(dst->compat));

	for (i = 0; i < BCH_SB_FIELD_NR; i++) {
		if (i == BCH_SB_FIELD_journal)
			continue;

		src_f = bch2_sb_field_get(src, i);
		dst_f = bch2_sb_field_get(dst, i);
		dst_f = __bch2_sb_field_resize(dst_handle, dst_f,
				src_f ? le32_to_cpu(src_f->u64s) : 0);

		if (src_f)
			memcpy(dst_f, src_f, vstruct_bytes(src_f));
	}
}
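
/*
 * Copy a device superblock into the filesystem's primary copy. The journal
 * field is per device and therefore skipped; journal_u64s accounts for the
 * space it would otherwise have taken.
 */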
int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(src);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	int ret;

	lockdep_assert_held(&c->sb_lock);

	ret = bch2_sb_realloc(&c->disk_sb,
			      le32_to_cpu(src->u64s) - journal_u64s);
	if (ret)
		return ret;

	__copy_super(&c->disk_sb, src);

	ret = bch2_sb_replicas_to_cpu_replicas(c);
	if (ret)
		return ret;

	ret = bch2_sb_disk_groups_to_cpu(c);
	if (ret)
		return ret;

	bch2_sb_update(c);
	return 0;
}
int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *src = c->disk_sb.sb, *dst = ca->disk_sb.sb;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(dst);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
	int ret;

	ret = bch2_sb_realloc(&ca->disk_sb, u64s);
	if (ret)
		return ret;

	__copy_super(&ca->disk_sb, src);
	return 0;
}
/* read superblock: */
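
/*
 * Read and check a single superblock at @offset, growing the buffer and
 * rereading if the superblock turns out to be bigger than what was
 * initially read.
 */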
static const char *read_one_super(struct bch_sb_handle *sb, u64 offset)
{
	struct bch_csum csum;
	size_t bytes;
reread:
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = offset;
	sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	bch2_bio_map(sb->bio, sb->sb);

	if (submit_bio_wait(sb->bio))
		return "IO error";

	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock";

	if (le16_to_cpu(sb->sb->version) <  bcachefs_metadata_version_min ||
	    le16_to_cpu(sb->sb->version) >= bcachefs_metadata_version_max)
		return "Unsupported superblock version";

	bytes = vstruct_bytes(sb->sb);

	if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
		return "Bad superblock: too big";

	if (get_order(bytes) > sb->page_order) {
		if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
			return "cannot allocate memory";
		goto reread;
	}

	if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
		return "unknown csum type";

	/* XXX: verify MACs */
	csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
			    null_nonce(), sb->sb);

	if (bch2_crc_cmp(csum, sb->sb->csum))
		return "bad checksum reading superblock";

	return NULL;
}
int bch2_read_super(const char *path, struct bch_opts *opts,
		    struct bch_sb_handle *sb)
{
	u64 offset = opt_get(*opts, sb);
	struct bch_sb_layout layout;
	const char *err;
	__le64 *i;
	int ret;

	pr_verbose_init(*opts, "");

	memset(sb, 0, sizeof(*sb));
	sb->mode	= FMODE_READ;
	sb->have_bio	= true;

	if (!opt_get(*opts, noexcl))
		sb->mode |= FMODE_EXCL;

	if (!opt_get(*opts, nochanges))
		sb->mode |= FMODE_WRITE;

	sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
	if (IS_ERR(sb->bdev) &&
	    PTR_ERR(sb->bdev) == -EACCES &&
	    opt_get(*opts, read_only)) {
		sb->mode &= ~FMODE_WRITE;

		sb->bdev = blkdev_get_by_path(path, sb->mode, sb);
		if (!IS_ERR(sb->bdev))
			opt_set(*opts, nochanges, true);
	}

	if (IS_ERR(sb->bdev)) {
		ret = PTR_ERR(sb->bdev);
		goto out;
	}

	err = "cannot allocate memory";
	ret = bch2_sb_realloc(sb, 0);
	if (ret)
		goto err;

	ret = -EFAULT;
	err = "dynamic fault";
	if (bch2_fs_init_fault("read_super"))
		goto err;

	ret = -EINVAL;
	err = read_one_super(sb, offset);
	if (!err)
		goto got_super;

	if (opt_defined(*opts, sb))
		goto err;

	pr_err("error reading default superblock: %s", err);

	/*
	 * Error reading primary superblock - read location of backup
	 * superblocks:
	 */
	bio_reset(sb->bio);
	bio_set_dev(sb->bio, sb->bdev);
	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
	sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	/*
	 * use sb buffer to read layout, since sb buffer is page aligned but
	 * layout won't be:
	 */
	bch2_bio_map(sb->bio, sb->sb);

	err = "IO error";
	if (submit_bio_wait(sb->bio))
		goto err;

	memcpy(&layout, sb->sb, sizeof(layout));
	err = validate_sb_layout(&layout);
	if (err)
		goto err;

	for (i = layout.sb_offset;
	     i < layout.sb_offset + layout.nr_superblocks; i++) {
		offset = le64_to_cpu(*i);

		if (offset == opt_get(*opts, sb))
			continue;

		err = read_one_super(sb, offset);
		if (!err)
			goto got_super;
	}

	goto err;
got_super:
	err = "Superblock block size smaller than device block size";
	ret = -EINVAL;
	if (le16_to_cpu(sb->sb->block_size) << 9 <
	    bdev_logical_block_size(sb->bdev))
		goto err;

	if (sb->mode & FMODE_WRITE)
		bdev_get_queue(sb->bdev)->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	ret = 0;
	sb->have_layout = true;
out:
	pr_verbose_init(*opts, "ret %i", ret);
	return ret;
err:
	bch2_free_super(sb);
	pr_err("error reading superblock: %s", err);
	goto out;
}
/* write superblock: */
static void write_super_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;

	/* XXX: return errors directly */

	if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write"))
		ca->sb_write_error = 1;

	closure_put(&ca->fs->sb_write);
	percpu_ref_put(&ca->io_ref);
}
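
/*
 * Write one copy of @ca's superblock at layout slot @idx; completion is
 * signalled through write_super_endio() and the sb_write closure.
 */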
static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	sb->offset = sb->layout.sb_offset[idx];

	SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
				null_nonce(), sb);

	bio_reset(bio);
	bio_set_dev(bio, ca->disk_sb.bdev);
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
	bio->bi_iter.bi_size	=
		roundup((size_t) vstruct_bytes(sb),
			bdev_logical_block_size(ca->disk_sb.bdev));
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch2_bio_map(bio, sb);

	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
		     bio_sectors(bio));

	percpu_ref_get(&ca->io_ref);
	closure_bio_submit(bio, &c->sb_write);
}
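
/*
 * Write the superblock to all online devices, one layout slot at a time,
 * then verify that the set of devices actually written to is still
 * sufficient to mount the filesystem (see the comment above
 * bch2_fs_fatal_err_on() below).
 */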
void bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	unsigned i, sb = 0, nr_wrote;
	const char *err;
	struct bch_devs_mask sb_written;
	bool wrote, can_mount_without_written, can_mount_with_written;

	lockdep_assert_held(&c->sb_lock);

	closure_init_stack(cl);
	memset(&sb_written, 0, sizeof(sb_written));

	le64_add_cpu(&c->disk_sb.sb->seq, 1);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
		err = bch2_sb_validate(&ca->disk_sb);
		if (err) {
			bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
			goto out;
		}
	}

	if (c->opts.nochanges ||
	    test_bit(BCH_FS_ERROR, &c->flags))
		goto out;

	for_each_online_member(ca, c, i) {
		__set_bit(ca->dev_idx, sb_written.d);
		ca->sb_write_error = 0;
	}

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
			if (sb < ca->disk_sb.sb->layout.nr_superblocks) {
				write_one_super(c, ca, sb);
				wrote = true;
			}
		closure_sync(cl);
		sb++;
	} while (wrote);

	for_each_online_member(ca, c, i)
		if (ca->sb_write_error)
			__clear_bit(ca->dev_idx, sb_written.d);

	nr_wrote = dev_mask_nr(&sb_written);

	can_mount_with_written =
		bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
				      BCH_FORCE_IF_DEGRADED);

	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
		sb_written.d[i] = ~sb_written.d[i];

	can_mount_without_written =
		bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
				      BCH_FORCE_IF_DEGRADED);

	/*
	 * If we would be able to mount _without_ the devices we successfully
	 * wrote superblocks to, we weren't able to write to enough devices:
	 *
	 * Exception: if we can mount without the successes because we haven't
	 * written anything (new filesystem), we continue if we'd be able to
	 * mount with the devices we did successfully write to:
	 */
	bch2_fs_fatal_err_on(!nr_wrote ||
			     (can_mount_without_written &&
			      !can_mount_with_written), c,
		"Unable to write superblock to sufficient devices");
out:
	/* Make new options visible after they're persistent: */
	bch2_sb_update(c);
}
/* BCH_SB_FIELD_journal: */
static int u64_cmp(const void *_l, const void *_r)
{
	u64 l = *((const u64 *) _l), r = *((const u64 *) _r);

	return l < r ? -1 : l > r ? 1 : 0;
}
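
/*
 * Journal buckets are validated against this device's member info: a sorted
 * copy is checked for bucket 0, out-of-range buckets and duplicates.
 */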
static const char *bch2_sb_validate_journal(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_journal *journal = field_to_type(f, journal);
	struct bch_member *m = bch2_sb_get_members(sb)->members + sb->dev_idx;
	const char *err;
	unsigned nr;
	unsigned i;
	u64 *b;

	journal = bch2_sb_get_journal(sb);
	if (!journal)
		return NULL;

	nr = bch2_nr_journal_buckets(journal);
	if (!nr)
		return NULL;

	b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
	if (!b)
		return "cannot allocate memory";

	for (i = 0; i < nr; i++)
		b[i] = le64_to_cpu(journal->buckets[i]);

	sort(b, nr, sizeof(u64), u64_cmp, NULL);

	err = "journal bucket at sector 0";
	if (!b[0])
		goto err;

	err = "journal bucket before first bucket";
	if (m && b[0] < le16_to_cpu(m->first_bucket))
		goto err;

	err = "journal bucket past end of device";
	if (m && b[nr - 1] >= le64_to_cpu(m->nbuckets))
		goto err;

	err = "duplicate journal buckets";
	for (i = 0; i + 1 < nr; i++)
		if (b[i] == b[i + 1])
			goto err;

	err = NULL;
err:
	kfree(b);
	return err;
}
static const struct bch_sb_field_ops bch_sb_field_ops_journal = {
	.validate	= bch2_sb_validate_journal,
};
/* BCH_SB_FIELD_members: */
static const char *bch2_sb_validate_members(struct bch_sb *sb,
					    struct bch_sb_field *f)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	struct bch_member *m;

	if ((void *) (mi->members + sb->nr_devices) >
	    vstruct_end(&mi->field))
		return "Invalid superblock: bad member info";

	for (m = mi->members;
	     m < mi->members + sb->nr_devices;
	     m++) {
		if (!bch2_member_exists(m))
			continue;

		if (le64_to_cpu(m->nbuckets) > LONG_MAX)
			return "Too many buckets";

		if (le64_to_cpu(m->nbuckets) -
		    le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS)
			return "Not enough buckets";

		if (le16_to_cpu(m->bucket_size) <
		    le16_to_cpu(sb->block_size))
			return "bucket size smaller than block size";

		if (le16_to_cpu(m->bucket_size) <
		    BCH_SB_BTREE_NODE_SIZE(sb))
			return "bucket size smaller than btree node size";
	}

	return NULL;
}
static const struct bch_sb_field_ops bch_sb_field_ops_members = {
	.validate	= bch2_sb_validate_members,
};
/* BCH_SB_FIELD_crypt: */
static const char *bch2_sb_validate_crypt(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) != sizeof(*crypt))
		return "invalid field crypt: wrong size";

	if (BCH_CRYPT_KDF_TYPE(crypt))
		return "invalid field crypt: bad kdf type";

	return NULL;
}
static const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_validate_crypt,
};
/* BCH_SB_FIELD_clean: */
void bch2_sb_clean_renumber(struct bch_sb_field_clean *clean, int write)
{
	struct jset_entry *entry;

	for (entry = clean->start;
	     entry < (struct jset_entry *) vstruct_end(&clean->field);
	     entry = vstruct_next(entry))
		bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
}
static void bch2_fs_mark_dirty(struct bch_fs *c)
{
	mutex_lock(&c->sb_lock);
	if (BCH_SB_CLEAN(c->disk_sb.sb) ||
	    (c->disk_sb.sb->compat[0] & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO))) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->disk_sb.sb->compat[0] &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}
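
/*
 * Append the state normally carried by the journal - btree roots and
 * filesystem usage counters - as jset entries at @entry, for inclusion in
 * the clean shutdown section of the superblock. Returns a pointer just past
 * the last entry written.
 */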
struct jset_entry *
bch2_journal_super_entries_add_common(struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct btree_root *r;
	unsigned i;

	mutex_lock(&c->btree_root_lock);

	for (r = c->btree_roots;
	     r < c->btree_roots + BTREE_ID_NR;
	     r++)
		if (r->alive) {
			entry->u64s	= r->key.u64s;
			entry->btree_id	= r - c->btree_roots;
			entry->level	= r->level;
			entry->type	= BCH_JSET_ENTRY_btree_root;
			bkey_copy(&entry->start[0], &r->key);

			entry = vstruct_next(entry);
		}
	c->btree_roots_dirty = false;

	mutex_unlock(&c->btree_root_lock);

	percpu_down_read_preempt_disable(&c->mark_lock);

	{
		u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		memset(u, 0, sizeof(*u));
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_INODES;
		u->v		= cpu_to_le64(nr_inodes);

		entry = vstruct_next(entry);
	}

	{
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		memset(u, 0, sizeof(*u));
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_KEY_VERSION;
		u->v		= cpu_to_le64(atomic64_read(&c->key_version));

		entry = vstruct_next(entry);
	}

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);
		u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);

		if (!sectors)
			continue;

		memset(u, 0, sizeof(*u));
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_RESERVED;
		u->entry.level	= i;
		u->v		= cpu_to_le64(sectors);

		entry = vstruct_next(entry);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		memset(u, 0, sizeof(*u));
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
					       sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_data_usage;
		u->v		= cpu_to_le64(sectors);
		memcpy(&u->r, e, replicas_entry_bytes(e));

		entry = vstruct_next(entry);
	}

	percpu_up_read_preempt_enable(&c->mark_lock);

	return entry;
}
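
/*
 * Record a clean shutdown: mark the superblock clean, snapshot the state
 * the journal would otherwise be needed to recover, and write it out.
 */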
void bch2_fs_mark_clean(struct bch_fs *c, bool clean)
{
	struct bch_sb_field_clean *sb_clean;
	struct jset_entry *entry;
	unsigned u64s;

	if (!clean) {
		bch2_fs_mark_dirty(c);
		return;
	}

	mutex_lock(&c->sb_lock);
	if (BCH_SB_CLEAN(c->disk_sb.sb))
		goto out;

	SET_BCH_SB_CLEAN(c->disk_sb.sb, true);

	c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;

	u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;

	sb_clean = bch2_sb_resize_clean(&c->disk_sb, u64s);
	if (!sb_clean) {
		bch_err(c, "error resizing superblock while setting filesystem clean");
		goto out;
	}

	sb_clean->flags		= 0;
	sb_clean->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	sb_clean->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	sb_clean->journal_seq	= cpu_to_le64(journal_cur_seq(&c->journal) - 1);

	entry = sb_clean->start;
	entry = bch2_journal_super_entries_add_common(c, entry);
	BUG_ON((void *) entry > vstruct_end(&sb_clean->field));

	memset(entry, 0,
	       vstruct_end(&sb_clean->field) - (void *) entry);

	if (le16_to_cpu(c->disk_sb.sb->version) <
	    bcachefs_metadata_version_bkey_renumber)
		bch2_sb_clean_renumber(sb_clean, WRITE);

	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);
}
static const char *bch2_sb_validate_clean(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	struct bch_sb_field_clean *clean = field_to_type(f, clean);

	if (vstruct_bytes(&clean->field) < sizeof(*clean))
		return "invalid field clean: wrong size";

	return NULL;
}

static const struct bch_sb_field_ops bch_sb_field_ops_clean = {
	.validate	= bch2_sb_validate_clean,
};
static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
#define x(f, nr)					\
	[BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
	BCH_SB_FIELDS()
#undef x
};
static const char *bch2_sb_field_validate(struct bch_sb *sb,
					  struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);

	return type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type]->validate(sb, f)
		: NULL;
}
void bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
			   struct bch_sb_field *f)
{
	unsigned type = le32_to_cpu(f->type);
	const struct bch_sb_field_ops *ops = type < BCH_SB_FIELD_NR
		? bch2_sb_field_ops[type] : NULL;

	if (ops)
		pr_buf(out, "%s", bch2_sb_fields[type]);
	else
		pr_buf(out, "(unknown field %u)", type);

	pr_buf(out, " (size %llu):", vstruct_bytes(f));

	if (ops && ops->to_text)
		bch2_sb_field_ops[type]->to_text(out, sb, f);
}