11 #include <linux/backing-dev.h>
12 #include <linux/sort.h>
/* Forward declarations: replica-handling code defined later in this file. */
14 static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
15 static const char *bch2_sb_validate_replicas(struct bch_sb *);
/*
 * Compile-time assertion that struct bch_sb_layout is exactly 512 bytes
 * (one sector), as the on-disk format requires.
 */
17 static inline void __bch2_sb_layout_size_assert(void)
19 BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
/*
 * Look up an optional superblock field by type, walking the vstruct
 * field list and matching on the little-endian f->type.
 * NOTE(review): the return statements fall on elided source lines —
 * presumably returns the field or NULL; confirm against full source.
 */
22 struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
23 enum bch_sb_field_type type)
25 struct bch_sb_field *f;
27 /* XXX: need locking around superblock to access optional fields */
29 vstruct_for_each(sb, f)
30 if (le32_to_cpu(f->type) == type)
/*
 * Release everything held by a bcache_superblock: the block device (if
 * it was successfully opened) and the superblock buffer pages, then
 * zero the struct so it is safe to reuse.
 */
35 void bch2_free_super(struct bcache_superblock *sb)
39 if (!IS_ERR_OR_NULL(sb->bdev))
40 blkdev_put(sb->bdev, sb->mode);
42 free_pages((unsigned long) sb->sb, sb->page_order);
43 memset(sb, 0, sizeof(*sb));
/*
 * Grow the in-memory superblock buffer (and its bio) to 2^order pages;
 * no-op when the current allocation already suffices.  Copies the old
 * contents into the new pages and frees the old allocation.
 * NOTE(review): the bio declaration and the error returns (ENOMEM /
 * fault injection) are on elided source lines.
 */
46 static int __bch2_super_realloc(struct bcache_superblock *sb, unsigned order)
48 struct bch_sb *new_sb;
51 if (sb->page_order >= order && sb->sb)
54 if (dynamic_fault("bcachefs:add:super_realloc"))
57 bio = bio_kmalloc(GFP_KERNEL, 1 << order);
65 new_sb = (void *) __get_free_pages(GFP_KERNEL, order);
70 memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);
72 free_pages((unsigned long) sb->sb, sb->page_order);
75 sb->page_order = order;
/*
 * Resize a device's superblock buffer to hold 'u64s' u64s of field
 * data, enforcing the on-disk limit of (512 << sb_max_size_bits) bytes
 * from the layout.
 */
80 static int bch2_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
82 u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s);
83 u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;
85 if (new_bytes > max_bytes) {
86 char buf[BDEVNAME_SIZE];
88 pr_err("%s: superblock too big: want %llu but have %llu",
89 bdevname(sb->bdev, buf), new_bytes, max_bytes);
93 return __bch2_super_realloc(sb, get_order(new_bytes));
/*
 * Resize the filesystem-wide superblock copy (c->disk_sb) to hold
 * 'u64s' u64s of field data; no-op when the current page order already
 * suffices.  The new allocation is zeroed before the old contents are
 * copied in and the old pages freed.
 */
96 static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
98 u64 bytes = __vstruct_bytes(struct bch_sb, u64s);
100 unsigned order = get_order(bytes);
102 if (c->disk_sb && order <= c->disk_sb_order)
105 sb = (void *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
110 memcpy(sb, c->disk_sb, PAGE_SIZE << c->disk_sb_order);
112 free_pages((unsigned long) c->disk_sb, c->disk_sb_order);
115 c->disk_sb_order = order;
/*
 * Create or resize optional field f inside sb to 'u64s' u64s.  When f
 * is NULL a fresh zeroed field is appended at vstruct_last(); when it
 * exists, following fields are shifted with memmove and the vacated /
 * newly exposed space zeroed.  sb->u64s is adjusted by the size delta.
 * The caller must already have made the buffer large enough.
 */
119 static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
120 struct bch_sb_field *f,
123 unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
126 f = vstruct_last(sb);
127 memset(f, 0, sizeof(u64) * u64s);
128 f->u64s = cpu_to_le32(u64s);
/* move source computed from the old size, destination from the new size */
133 src = vstruct_end(f);
134 f->u64s = cpu_to_le32(u64s);
135 dst = vstruct_end(f);
137 memmove(dst, src, vstruct_end(sb) - src);
140 memset(src, 0, dst - src);
143 le32_add_cpu(&sb->u64s, u64s - old_u64s);
/*
 * Per-device field resize: look up the field, grow the superblock
 * buffer by the size delta, then resize the field in place.
 * NOTE(review): the NULL-on-failure return after the realloc check is
 * on an elided source line.
 */
148 struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *sb,
149 enum bch_sb_field_type type,
152 struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
153 ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
154 ssize_t d = -old_u64s + u64s;
156 if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
159 f = __bch2_sb_field_resize(sb->sb, f, u64s);
/*
 * Filesystem-wide field resize: grows the fs superblock copy and every
 * online member device's superblock by the same delta before resizing
 * the field in c->disk_sb.  Must be called with c->sb_lock held.
 */
164 struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
165 enum bch_sb_field_type type,
168 struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type);
169 ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
170 ssize_t d = -old_u64s + u64s;
174 lockdep_assert_held(&c->sb_lock);
176 if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
179 /* XXX: we're not checking that offline device have enough space */
181 for_each_online_member(ca, c, i) {
182 struct bcache_superblock *sb = &ca->disk_sb;
/* drop the ref taken by the iterator before bailing out of the loop */
184 if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
185 percpu_ref_put(&ca->ref);
190 f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
/*
 * Sanity-check an on-disk superblock layout: magic, layout type,
 * superblock count vs sb_offset[] capacity, and that successive
 * superblock offsets are spaced at least the maximum superblock size
 * apart (offsets appear to be assumed sorted).  Returns an error
 * string on failure; the success return is on an elided line.
 */
195 static const char *validate_sb_layout(struct bch_sb_layout *layout)
197 u64 offset, prev_offset, max_sectors;
200 if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
201 return "Not a bcachefs superblock layout";
203 if (layout->layout_type != 0)
204 return "Invalid superblock layout type";
206 if (!layout->nr_superblocks)
207 return "Invalid superblock layout: no superblocks";
209 if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
210 return "Invalid superblock layout: too many superblocks";
/* maximum superblock size in sectors; offsets must be this far apart */
212 max_sectors = 1 << layout->sb_max_size_bits;
214 prev_offset = le64_to_cpu(layout->sb_offset[0]);
216 for (i = 1; i < layout->nr_superblocks; i++) {
217 offset = le64_to_cpu(layout->sb_offset[i]);
219 if (offset < prev_offset + max_sectors)
220 return "Invalid superblock layout: superblocks overlap";
221 prev_offset = offset;
/* Three-way u64 comparator for sort(). */
227 static int u64_cmp(const void *_l, const void *_r)
229 u64 l = *((const u64 *) _l), r = *((const u64 *) _r);
231 return l < r ? -1 : l > r ? 1 : 0;
/*
 * Validate a device's journal buckets: copy them into a sorted scratch
 * array, then check for a bucket at sector 0, buckets outside
 * [first_bucket, nbuckets), and duplicates.
 * NOTE(review): the empty-journal early-out, the goto targets that
 * pair with the 'err' assignments, and the kfree(b) are all on elided
 * source lines.
 */
234 const char *bch2_sb_validate_journal(struct bch_sb *sb,
235 struct bch_member_cpu mi)
237 struct bch_sb_field_journal *journal;
243 journal = bch2_sb_get_journal(sb);
247 nr = bch2_nr_journal_buckets(journal);
/*
 * NOTE(review): kmalloc_array() takes (n, size); the arguments appear
 * swapped here — harmless (overflow check is symmetric) but confusing.
 */
251 b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
253 return "cannot allocate memory";
255 for (i = 0; i < nr; i++)
256 b[i] = le64_to_cpu(journal->buckets[i]);
258 sort(b, nr, sizeof(u64), u64_cmp, NULL);
260 err = "journal bucket at sector 0";
264 err = "journal bucket before first bucket";
265 if (b[0] < mi.first_bucket)
268 err = "journal bucket past end of device";
269 if (b[nr - 1] >= mi.nbuckets)
272 err = "duplicate journal buckets";
273 for (i = 0; i + 1 < nr; i++)
274 if (b[i] == b[i + 1])
/*
 * Validate the member-info field: it must be present, large enough for
 * nr_devices entries, and every existing member's bucket size must be
 * at least the btree node size.
 */
283 static const char *bch2_sb_validate_members(struct bch_sb *sb)
285 struct bch_sb_field_members *mi;
288 mi = bch2_sb_get_members(sb);
290 return "Invalid superblock: member info area missing";
292 if ((void *) (mi->members + sb->nr_devices) >
293 vstruct_end(&mi->field))
294 return "Invalid superblock: bad member info";
296 for (i = 0; i < sb->nr_devices; i++) {
297 if (!bch2_dev_exists(sb, mi, i))
300 if (le16_to_cpu(mi->members[i].bucket_size) <
301 BCH_SB_BTREE_NODE_SIZE(sb))
302 return "bucket size smaller than btree node size";
/*
 * Top-level superblock validation: version, block size, UUIDs, device
 * count/index, replica counts, btree node size, GC reserve, time
 * precision, layout, optional fields, member info, bucket geometry vs
 * device capacity, then journal and replicas fields.  Returns an error
 * string on failure.
 */
308 const char *bch2_sb_validate(struct bcache_superblock *disk_sb)
310 struct bch_sb *sb = disk_sb->sb;
311 struct bch_sb_field *f;
312 struct bch_sb_field_members *sb_mi;
313 struct bch_member_cpu mi;
317 switch (le64_to_cpu(sb->version)) {
318 case BCACHE_SB_VERSION_CDEV_V4:
/* NOTE(review): style defect — missing space after 'return' below */
321 return"Unsupported superblock version";
324 if (BCH_SB_INITIALIZED(sb) &&
325 le64_to_cpu(sb->version) != BCACHE_SB_VERSION_CDEV_V4)
326 return "Unsupported superblock version";
328 block_size = le16_to_cpu(sb->block_size);
330 if (!is_power_of_2(block_size) ||
331 block_size > PAGE_SECTORS)
332 return "Bad block size";
334 if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
335 return "Bad user UUID";
337 if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
338 return "Bad internal UUID";
340 if (!sb->nr_devices ||
341 sb->nr_devices <= sb->dev_idx ||
342 sb->nr_devices > BCH_SB_MEMBERS_MAX)
343 return "Bad cache device number in set";
345 if (!BCH_SB_META_REPLICAS_WANT(sb) ||
346 BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
347 return "Invalid number of metadata replicas";
349 if (!BCH_SB_META_REPLICAS_REQ(sb) ||
350 BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
351 return "Invalid number of metadata replicas";
353 if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
354 BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
355 return "Invalid number of data replicas";
/*
 * NOTE(review): this checks DATA_REPLICAS_REQ but the message below
 * says "metadata" — looks like a copy/paste error in the error string.
 */
357 if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
358 BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
359 return "Invalid number of metadata replicas";
361 if (!BCH_SB_BTREE_NODE_SIZE(sb))
362 return "Btree node size not set";
364 if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
365 return "Btree node size not a power of two";
367 if (BCH_SB_BTREE_NODE_SIZE(sb) > BTREE_NODE_SIZE_MAX)
368 return "Btree node size too large";
370 if (BCH_SB_GC_RESERVE(sb) < 5)
371 return "gc reserve percentage too small";
373 if (!sb->time_precision ||
374 le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
375 return "invalid time precision";
377 /* validate layout */
378 err = validate_sb_layout(&sb->layout);
382 vstruct_for_each(sb, f) {
384 return "Invalid superblock: invalid optional field";
386 if (vstruct_next(f) > vstruct_last(sb))
387 return "Invalid superblock: invalid optional field";
389 if (le32_to_cpu(f->type) >= BCH_SB_FIELD_NR)
390 return "Invalid superblock: unknown optional field type";
393 err = bch2_sb_validate_members(sb);
/* validate this device's own member entry */
397 sb_mi = bch2_sb_get_members(sb);
398 mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx);
400 if (mi.nbuckets > LONG_MAX)
401 return "Too many buckets";
403 if (mi.nbuckets - mi.first_bucket < 1 << 10)
404 return "Not enough buckets";
406 if (!is_power_of_2(mi.bucket_size) ||
407 mi.bucket_size < PAGE_SECTORS ||
408 mi.bucket_size < block_size)
409 return "Bad bucket size";
411 if (get_capacity(disk_sb->bdev->bd_disk) <
412 mi.bucket_size * mi.nbuckets)
413 return "Invalid superblock: device too small";
415 err = bch2_sb_validate_journal(sb, mi);
419 err = bch2_sb_validate_replicas(sb);
/*
 * Open a block device by path, translating failures into error
 * strings (EBUSY gets its own message).  For writable opens the
 * backing device is marked as requiring stable pages during writeback
 * (BDI_CAP_STABLE_WRITES) so checksummed data can't change in flight.
 */
428 static const char *bch2_blkdev_open(const char *path, fmode_t mode,
429 void *holder, struct block_device **ret)
431 struct block_device *bdev;
434 bdev = blkdev_get_by_path(path, mode, holder);
435 if (bdev == ERR_PTR(-EBUSY))
436 return "device busy";
439 return "failed to open device";
441 if (mode & FMODE_WRITE)
442 bdev_get_queue(bdev)->backing_dev_info->capabilities
443 |= BDI_CAP_STABLE_WRITES;
/*
 * Refresh the cached native-endian copy of superblock fields (c->sb)
 * and each member device's mi from the canonical c->disk_sb.  Caller
 * must hold c->sb_lock.
 */
449 static void bch2_sb_update(struct bch_fs *c)
451 struct bch_sb *src = c->disk_sb;
452 struct bch_sb_field_members *mi = bch2_sb_get_members(src);
456 lockdep_assert_held(&c->sb_lock);
458 c->sb.uuid = src->uuid;
459 c->sb.user_uuid = src->user_uuid;
460 c->sb.block_size = le16_to_cpu(src->block_size);
461 c->sb.btree_node_size = BCH_SB_BTREE_NODE_SIZE(src);
462 c->sb.nr_devices = src->nr_devices;
463 c->sb.clean = BCH_SB_CLEAN(src);
464 c->sb.str_hash_type = BCH_SB_STR_HASH_TYPE(src);
465 c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src);
466 c->sb.time_base_lo = le64_to_cpu(src->time_base_lo);
467 c->sb.time_base_hi = le32_to_cpu(src->time_base_hi);
468 c->sb.time_precision = le32_to_cpu(src->time_precision);
470 for_each_member_device(ca, c, i)
471 ca->mi = bch2_mi_to_cpu(mi->members + i);
/*
 * Copy superblock contents from src to dst: fixed header fields,
 * flags/features/compat, and all optional fields except the journal
 * field (per-device) and member info (see note below).
 */
474 /* doesn't copy member info */
475 static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
477 struct bch_sb_field *src_f, *dst_f;
479 dst->version = src->version;
481 dst->uuid = src->uuid;
482 dst->user_uuid = src->user_uuid;
483 memcpy(dst->label, src->label, sizeof(dst->label));
485 dst->block_size = src->block_size;
486 dst->nr_devices = src->nr_devices;
488 dst->time_base_lo = src->time_base_lo;
489 dst->time_base_hi = src->time_base_hi;
490 dst->time_precision = src->time_precision;
492 memcpy(dst->flags, src->flags, sizeof(dst->flags));
493 memcpy(dst->features, src->features, sizeof(dst->features));
494 memcpy(dst->compat, src->compat, sizeof(dst->compat));
/* skip the per-device journal field (the continue is on an elided line) */
496 vstruct_for_each(src, src_f) {
497 if (src_f->type == BCH_SB_FIELD_journal)
500 dst_f = bch2_sb_field_get(dst, src_f->type);
501 dst_f = __bch2_sb_field_resize(dst, dst_f,
502 le32_to_cpu(src_f->u64s));
504 memcpy(dst_f, src_f, vstruct_bytes(src_f));
/*
 * Import a device superblock into the filesystem copy: realloc
 * c->disk_sb sized without the journal field's u64s, copy everything
 * else over, then rebuild the CPU replicas table.  Caller must hold
 * c->sb_lock.
 */
508 int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
510 struct bch_sb_field_journal *journal_buckets =
511 bch2_sb_get_journal(src);
512 unsigned journal_u64s = journal_buckets
513 ? le32_to_cpu(journal_buckets->field.u64s)
517 lockdep_assert_held(&c->sb_lock);
519 if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
522 __copy_super(c->disk_sb, src);
524 ret = bch2_sb_replicas_to_cpu_replicas(c);
/*
 * Export the filesystem superblock back into a member device's buffer,
 * budgeting the realloc to preserve that device's existing journal
 * field size (which __copy_super deliberately does not overwrite).
 */
532 int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
534 struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb;
535 struct bch_sb_field_journal *journal_buckets =
536 bch2_sb_get_journal(dst);
537 unsigned journal_u64s = journal_buckets
538 ? le32_to_cpu(journal_buckets->field.u64s)
540 unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
543 ret = bch2_sb_realloc(&ca->disk_sb, u64s);
547 __copy_super(dst, src);
552 /* read superblock: */
/*
 * Read and verify one superblock at 'offset' (in sectors): submit a
 * synchronous read into the current buffer, check the magic and
 * version, bounds-check the vstruct size against the layout limit,
 * grow the buffer when the superblock is larger than one read, and
 * finally verify the checksum.  Returns an error string on failure.
 * NOTE(review): the re-read after growing the buffer appears to be on
 * elided lines — confirm against full source.
 */
554 static const char *read_one_super(struct bcache_superblock *sb, u64 offset)
556 struct bch_csum csum;
561 sb->bio->bi_bdev = sb->bdev;
562 sb->bio->bi_iter.bi_sector = offset;
563 sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
564 bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
565 bch2_bio_map(sb->bio, sb->sb);
567 if (submit_bio_wait(sb->bio))
570 if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
571 return "Not a bcachefs superblock";
573 if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4)
574 return "Unsupported superblock version";
576 bytes = vstruct_bytes(sb->sb);
578 if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
579 return "Bad superblock: too big";
581 order = get_order(bytes);
582 if (order > sb->page_order) {
583 if (__bch2_super_realloc(sb, order))
584 return "cannot allocate memory";
588 if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
589 return "unknown csum type";
591 /* XXX: verify MACs */
592 csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
593 (struct nonce) { 0 }, sb->sb);
595 if (bch2_crc_cmp(csum, sb->sb->csum))
596 return "bad checksum reading superblock";
/*
 * Top-level superblock read.  Opens the device (write access unless
 * opts.nochanges; exclusive unless opts.noexcl), allocates a minimal
 * buffer, and reads the superblock at opts.sb or the default sector.
 * On failure at the default location, reads the layout sector and
 * retries each backup superblock offset.  Finally checks that the
 * superblock's block size is not smaller than the device's logical
 * block size.  Returns an error string on failure.
 */
601 const char *bch2_read_super(struct bcache_superblock *sb,
602 struct bch_opts opts,
605 u64 offset = opt_defined(opts.sb) ? opts.sb : BCH_SB_SECTOR;
606 struct bch_sb_layout layout;
610 memset(sb, 0, sizeof(*sb));
611 sb->mode = FMODE_READ;
613 if (!(opt_defined(opts.noexcl) && opts.noexcl))
614 sb->mode |= FMODE_EXCL;
616 if (!(opt_defined(opts.nochanges) && opts.nochanges))
617 sb->mode |= FMODE_WRITE;
619 err = bch2_blkdev_open(path, sb->mode, sb, &sb->bdev);
623 err = "cannot allocate memory";
624 if (__bch2_super_realloc(sb, 0))
627 err = "dynamic fault";
628 if (bch2_fs_init_fault("read_super"))
631 err = read_one_super(sb, offset);
/* a non-default offset was explicitly requested: no fallback possible */
635 if (offset != BCH_SB_SECTOR) {
636 pr_err("error reading superblock: %s", err);
640 pr_err("error reading default superblock: %s", err);
643 * Error reading primary superblock - read location of backup
647 sb->bio->bi_bdev = sb->bdev;
648 sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
649 sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
650 bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
652 * use sb buffer to read layout, since sb buffer is page aligned but
655 bch2_bio_map(sb->bio, sb->sb);
658 if (submit_bio_wait(sb->bio))
661 memcpy(&layout, sb->sb, sizeof(layout));
662 err = validate_sb_layout(&layout);
666 for (i = 0; i < layout.nr_superblocks; i++) {
667 u64 offset = le64_to_cpu(layout.sb_offset[i]);
/* the primary already failed; only try the backups */
669 if (offset == BCH_SB_SECTOR)
672 err = read_one_super(sb, offset);
678 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
679 le64_to_cpu(sb->sb->version),
680 le64_to_cpu(sb->sb->flags),
681 le64_to_cpu(sb->sb->seq),
682 le16_to_cpu(sb->sb->u64s));
684 err = "Superblock block size smaller than device block size";
685 if (le16_to_cpu(sb->sb->block_size) << 9 <
686 bdev_logical_block_size(sb->bdev))
695 /* write superblock: */
/*
 * Completion handler for superblock writes: record any I/O error as a
 * fatal device error, then drop the fs-wide write closure ref and the
 * device's io ref taken in write_one_super().
 */
697 static void write_super_endio(struct bio *bio)
699 struct bch_dev *ca = bio->bi_private;
701 /* XXX: return errors directly */
703 bch2_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");
705 closure_put(&ca->fs->sb_write);
706 percpu_ref_put(&ca->io_ref);
/*
 * Submit an async write of device ca's superblock copy to layout slot
 * 'idx': stamp the on-disk offset, recompute the checksum with the
 * configured metadata checksum type, and queue the bio under the
 * c->sb_write closure.  The returns for the idx-out-of-range and
 * io-ref-unavailable cases are on elided lines — presumably bool
 * success/failure; confirm against full source.
 */
709 static bool write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
711 struct bch_sb *sb = ca->disk_sb.sb;
712 struct bio *bio = ca->disk_sb.bio;
714 if (idx >= sb->layout.nr_superblocks)
717 if (!percpu_ref_tryget(&ca->io_ref))
720 sb->offset = sb->layout.sb_offset[idx];
722 SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
723 sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
724 (struct nonce) { 0 }, sb);
727 bio->bi_bdev = ca->disk_sb.bdev;
728 bio->bi_iter.bi_sector = le64_to_cpu(sb->offset);
/* write whole logical blocks, rounding the vstruct size up */
729 bio->bi_iter.bi_size =
730 roundup(vstruct_bytes(sb),
731 bdev_logical_block_size(ca->disk_sb.bdev));
732 bio->bi_end_io = write_super_endio;
733 bio->bi_private = ca;
734 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
735 bch2_bio_map(bio, sb);
737 closure_bio_submit(bio, &c->sb_write);
/*
 * Write the superblock out to every online member device: bump the
 * sequence number, regenerate each device's copy from c->disk_sb,
 * re-validate each copy (failure is treated as fs inconsistency), then
 * submit the writes.  Skipped entirely under opts.nochanges or once
 * the filesystem has hit an error.  Caller must hold c->sb_lock.
 */
741 void bch2_write_super(struct bch_fs *c)
743 struct closure *cl = &c->sb_write;
745 unsigned i, super_idx = 0;
749 lockdep_assert_held(&c->sb_lock);
751 closure_init_stack(cl);
753 le64_add_cpu(&c->disk_sb->seq, 1);
755 for_each_online_member(ca, c, i)
756 bch2_sb_from_fs(c, ca);
758 for_each_online_member(ca, c, i) {
759 err = bch2_sb_validate(&ca->disk_sb);
761 bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
766 if (c->opts.nochanges ||
767 test_bit(BCH_FS_ERROR, &c->flags))
772 for_each_online_member(ca, c, i)
773 if (write_one_super(c, ca, super_idx))
780 /* Make new options visible after they're persistent: */
784 /* replica information: */
/* Index into the flat CPU replicas table; entries are entry_size bytes. */
786 static inline struct bch_replicas_cpu_entry *
787 cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
789 return (void *) r->entries + r->entry_size * i;
/* Advance past a variable-length on-disk replicas entry (header + nr devs). */
792 static inline struct bch_replicas_entry *
793 replicas_entry_next(struct bch_replicas_entry *i)
795 return (void *) i + offsetof(struct bch_replicas_entry, devs) + i->nr;
/*
 * Iterate the on-disk replicas entries, stopping at the end of the
 * field or at an entry with data_type 0 (terminator).
 */
798 #define for_each_replicas_entry(_r, _i) \
799 for (_i = (_r)->entries; \
800 (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
801 (_i) = replicas_entry_next(_i))
/* Test device 'dev' in the entry's per-device bitmap (one bit per device). */
803 static inline bool replicas_test_dev(struct bch_replicas_cpu_entry *e,
806 return (e->devs[dev >> 3] & (1 << (dev & 7))) != 0;
/* Set device 'dev' in the entry's per-device bitmap. */
809 static inline void replicas_set_dev(struct bch_replicas_cpu_entry *e,
812 e->devs[dev >> 3] |= 1 << (dev & 7);
/* Number of device bits each CPU replicas entry's bitmap can hold. */
815 static inline unsigned replicas_dev_slots(struct bch_replicas_cpu *r)
817 return (r->entry_size -
818 offsetof(struct bch_replicas_cpu_entry, devs)) * 8;
/*
 * Scan the on-disk replicas field, reporting (via out-params) the byte
 * size consumed and the highest device index referenced by any entry.
 * NOTE(review): the entry counting (*nr) and the NULL-field early-out
 * are on elided source lines.
 */
821 static void bch2_sb_replicas_nr_entries(struct bch_sb_field_replicas *r,
826 struct bch_replicas_entry *i;
836 for_each_replicas_entry(r, i) {
837 for (j = 0; j < i->nr; j++)
838 *max_dev = max_t(unsigned, *max_dev, i->devs[j]);
842 *bytes = (void *) i - (void *) r;
/*
 * Convert the on-disk replicas field into the flat, fixed-entry-size
 * CPU representation: entries are sized so the bitmap covers the
 * highest device index seen, each device list is expanded into the
 * bitmap, and the table is eytzinger-sorted for lookup.  Returns NULL
 * on allocation failure (return on an elided line).
 */
845 static struct bch_replicas_cpu *
846 __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
848 struct bch_replicas_cpu *cpu_r;
849 unsigned i, nr, bytes, max_dev, entry_size;
851 bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);
853 entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
854 DIV_ROUND_UP(max_dev + 1, 8);
856 cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
857 nr * entry_size, GFP_NOIO);
862 cpu_r->entry_size = entry_size;
865 struct bch_replicas_cpu_entry *dst =
866 cpu_replicas_entry(cpu_r, 0);
867 struct bch_replicas_entry *src = sb_r->entries;
869 while (dst < cpu_replicas_entry(cpu_r, nr)) {
870 dst->data_type = src->data_type;
871 for (i = 0; i < src->nr; i++)
872 replicas_set_dev(dst, src->devs[i]);
874 src = replicas_entry_next(src);
875 dst = (void *) dst + entry_size;
/* sort for eytzinger0_find() lookups */
879 eytzinger0_sort(cpu_r->entries,
/*
 * Rebuild c->replicas from the superblock's replicas field and publish
 * the new table via RCU, freeing the old one after a grace period.
 * Caller must hold c->sb_lock.
 */
886 static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
888 struct bch_sb_field_replicas *sb_r;
889 struct bch_replicas_cpu *cpu_r, *old_r;
891 lockdep_assert_held(&c->sb_lock);
893 sb_r = bch2_sb_get_replicas(c->disk_sb);
894 cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
899 rcu_assign_pointer(c->replicas, cpu_r);
901 kfree_rcu(old_r, rcu);
/*
 * Build a CPU replicas entry describing an extent's pointers, and
 * report the highest device index touched via *max_dev.  The BUG_ON
 * condition (spanning elided lines) rejects superblock data and
 * out-of-range data types here.
 */
906 static void bkey_to_replicas(struct bkey_s_c_extent e,
907 enum bch_data_types data_type,
908 struct bch_replicas_cpu_entry *r,
911 const struct bch_extent_ptr *ptr;
914 data_type == BCH_DATA_SB ||
915 data_type >= BCH_DATA_NR);
917 memset(r, 0, sizeof(*r));
918 r->data_type = data_type;
922 extent_for_each_ptr(e, ptr)
924 *max_dev = max_t(unsigned, *max_dev, ptr->dev);
925 replicas_set_dev(r, ptr->dev);
930 * for when gc of replica information is in progress:
/*
 * Add the replicas entry for extent e to the in-progress GC table:
 * allocate a larger table (entry size grown if needed to cover
 * max_dev), copy over the existing entries, append the new one,
 * re-sort, then publish via RCU, freeing the old GC table.
 */
932 static int bch2_update_gc_replicas(struct bch_fs *c,
933 struct bch_replicas_cpu *gc_r,
934 struct bkey_s_c_extent e,
935 enum bch_data_types data_type)
937 struct bch_replicas_cpu_entry new_e;
938 struct bch_replicas_cpu *new;
939 unsigned i, nr, entry_size, max_dev;
941 bkey_to_replicas(e, data_type, &new_e, &max_dev);
943 entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
944 DIV_ROUND_UP(max_dev + 1, 8);
/* never shrink below the existing table's entry size */
945 entry_size = max(entry_size, gc_r->entry_size);
948 new = kzalloc(sizeof(struct bch_replicas_cpu) +
949 nr * entry_size, GFP_NOIO);
954 new->entry_size = entry_size;
956 for (i = 0; i < gc_r->nr; i++)
957 memcpy(cpu_replicas_entry(new, i),
958 cpu_replicas_entry(gc_r, i),
/* append the new entry in the last slot */
961 memcpy(cpu_replicas_entry(new, nr - 1),
965 eytzinger0_sort(new->entries,
970 rcu_assign_pointer(c->replicas_gc, new);
971 kfree_rcu(gc_r, rcu);
/*
 * Does table r already contain an entry covering extent e at this data
 * type?  Builds the search key and binary-searches the eytzinger-
 * ordered table; a device index beyond the table's bitmap width can't
 * possibly be present.
 */
975 static bool replicas_has_extent(struct bch_replicas_cpu *r,
976 struct bkey_s_c_extent e,
977 enum bch_data_types data_type)
979 struct bch_replicas_cpu_entry search;
982 bkey_to_replicas(e, data_type, &search, &max_dev);
984 return max_dev < replicas_dev_slots(r) &&
985 eytzinger0_find(r->entries, r->nr,
987 memcmp, &search) < r->nr;
/*
 * RCU-protected check of the live c->replicas table for extent e.
 * NOTE(review): rcu_read_lock()/unlock() and the return are on elided
 * source lines.
 */
990 bool bch2_sb_has_replicas(struct bch_fs *c, struct bkey_s_c_extent e,
991 enum bch_data_types data_type)
996 ret = replicas_has_extent(rcu_dereference(c->replicas),
/*
 * Slow path for marking an extent's replicas in the superblock.  Under
 * c->sb_lock: update the GC table first if a GC pass is running, then
 * re-check for a race with another marker, grow the on-disk replicas
 * field, append a new entry listing the extent's devices, rebuild the
 * CPU table, and write the superblock.  If rebuilding the CPU table
 * fails, the partially written entry is zeroed back out.
 */
1004 static int bch2_check_mark_super_slowpath(struct bch_fs *c,
1005 struct bkey_s_c_extent e,
1006 enum bch_data_types data_type)
1008 struct bch_replicas_cpu *gc_r;
1009 const struct bch_extent_ptr *ptr;
1010 struct bch_sb_field_replicas *sb_r;
1011 struct bch_replicas_entry *new_entry;
1012 unsigned new_entry_bytes, new_u64s, nr, bytes, max_dev;
1015 mutex_lock(&c->sb_lock);
1017 gc_r = rcu_dereference_protected(c->replicas_gc,
1018 lockdep_is_held(&c->sb_lock));
1020 !replicas_has_extent(gc_r, e, data_type)) {
1021 ret = bch2_update_gc_replicas(c, gc_r, e, data_type);
1026 /* recheck, might have raced */
1027 if (bch2_sb_has_replicas(c, e, data_type)) {
1028 mutex_unlock(&c->sb_lock);
/* new on-disk entry: header + one byte per dirty pointer */
1032 new_entry_bytes = sizeof(struct bch_replicas_entry) +
1033 bch2_extent_nr_dirty_ptrs(e.s_c);
1035 sb_r = bch2_sb_get_replicas(c->disk_sb);
1037 bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);
1039 new_u64s = DIV_ROUND_UP(bytes + new_entry_bytes, sizeof(u64));
1041 sb_r = bch2_fs_sb_resize_replicas(c,
1042 DIV_ROUND_UP(sizeof(*sb_r) + bytes + new_entry_bytes,
1049 new_entry = (void *) sb_r + bytes;
1050 new_entry->data_type = data_type;
1053 extent_for_each_ptr(e, ptr)
1055 new_entry->devs[new_entry->nr++] = ptr->dev;
1057 ret = bch2_sb_replicas_to_cpu_replicas(c);
/* undo the on-disk append if the CPU table couldn't be rebuilt */
1059 memset(new_entry, 0,
1060 vstruct_end(&sb_r->field) - (void *) new_entry);
1064 bch2_write_super(c);
1066 mutex_unlock(&c->sb_lock);
/*
 * Fast path: succeed immediately when the extent is already marked in
 * the live table and (if a GC pass is running) the GC table; otherwise
 * fall through to the slow path that updates the superblock.
 * NOTE(review): the rcu read-side lock and the early return for
 * 'marked' are on elided source lines.
 */
1070 int bch2_check_mark_super(struct bch_fs *c, struct bkey_s_c_extent e,
1071 enum bch_data_types data_type)
1073 struct bch_replicas_cpu *gc_r;
1077 marked = replicas_has_extent(rcu_dereference(c->replicas),
1079 (!(gc_r = rcu_dereference(c->replicas_gc)) ||
1080 replicas_has_extent(gc_r, e, data_type));
1086 return bch2_check_mark_super_slowpath(c, e, data_type);
/*
 * Compute, per data type, the minimum number of online devices and the
 * maximum number of offline devices across all replicas entries —
 * optionally treating dev_to_offline as if it were offline.
 * nr_online starts at UINT_MAX (sentinel) so min() accumulates
 * correctly; data types with no entries retain that sentinel unless
 * fixed up on elided lines — confirm against full source.
 */
1089 struct replicas_status __bch2_replicas_status(struct bch_fs *c,
1090 struct bch_dev *dev_to_offline)
1092 struct bch_replicas_cpu_entry *e;
1093 struct bch_replicas_cpu *r;
1094 unsigned i, dev, dev_slots, nr_online, nr_offline;
1095 struct replicas_status ret;
1097 memset(&ret, 0, sizeof(ret));
1099 for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
1100 ret.replicas[i].nr_online = UINT_MAX;
1103 r = rcu_dereference(c->replicas);
1104 dev_slots = min_t(unsigned, replicas_dev_slots(r), c->sb.nr_devices);
1106 for (i = 0; i < r->nr; i++) {
1107 e = cpu_replicas_entry(r, i);
1109 BUG_ON(e->data_type >= ARRAY_SIZE(ret.replicas));
1111 nr_online = nr_offline = 0;
1113 for (dev = 0; dev < dev_slots; dev++) {
1114 if (!replicas_test_dev(e, dev))
1117 if (bch2_dev_is_online(c->devs[dev]) &&
1118 c->devs[dev] != dev_to_offline)
1124 ret.replicas[e->data_type].nr_online =
1125 min(ret.replicas[e->data_type].nr_online,
1128 ret.replicas[e->data_type].nr_offline =
1129 max(ret.replicas[e->data_type].nr_offline,
/* Replica status for the current device set (no device being offlined). */
1138 struct replicas_status bch2_replicas_status(struct bch_fs *c)
1140 return __bch2_replicas_status(c, NULL);
/*
 * Minimum number of online replicas: for metadata this is the lesser
 * of the journal and btree counts; otherwise it is the user-data count.
 */
1143 unsigned bch2_replicas_online(struct bch_fs *c, bool meta)
1145 struct replicas_status s = bch2_replicas_status(c);
1148 ? min(s.replicas[BCH_DATA_JOURNAL].nr_online,
1149 s.replicas[BCH_DATA_BTREE].nr_online)
1150 : s.replicas[BCH_DATA_USER].nr_online;
/*
 * Bitmask of data types (1 << type) the CPU replicas table records as
 * present on device ca.  A device index beyond the table's bitmap
 * width trivially has nothing.
 * NOTE(review): rcu read-side locking is on elided source lines.
 */
1153 unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
1155 struct bch_replicas_cpu_entry *e;
1156 struct bch_replicas_cpu *r;
1157 unsigned i, ret = 0;
1160 r = rcu_dereference(c->replicas);
1162 if (ca->dev_idx >= replicas_dev_slots(r))
1165 for (i = 0; i < r->nr; i++) {
1166 e = cpu_replicas_entry(r, i);
1168 if (replicas_test_dev(e, ca->dev_idx)) {
1169 ret |= 1 << e->data_type;
/*
 * Validate the superblock's replicas field: each entry must carry a
 * known data type, fewer than BCH_REPLICAS_MAX devices, and reference
 * only devices that exist in member info.  The field is then converted
 * to the CPU table, sorted, and scanned for duplicate entries.
 * NOTE(review): the success return, error labels, the sort comparator
 * argument, and the kfree(cpu_r) cleanup are on elided source lines.
 */
1179 static const char *bch2_sb_validate_replicas(struct bch_sb *sb)
1181 struct bch_sb_field_members *mi;
1182 struct bch_sb_field_replicas *sb_r;
1183 struct bch_replicas_cpu *cpu_r = NULL;
1184 struct bch_replicas_entry *e;
1188 mi = bch2_sb_get_members(sb);
1189 sb_r = bch2_sb_get_replicas(sb);
1193 for_each_replicas_entry(sb_r, e) {
1194 err = "invalid replicas entry: invalid data type";
1195 if (e->data_type >= BCH_DATA_NR)
1198 err = "invalid replicas entry: too many devices";
1199 if (e->nr >= BCH_REPLICAS_MAX)
1202 err = "invalid replicas entry: invalid device";
1203 for (i = 0; i < e->nr; i++)
1204 if (!bch2_dev_exists(sb, mi, e->devs[i]))
1208 err = "cannot allocate memory";
1209 cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
1213 sort_cmp_size(cpu_r->entries,
/* sorted order makes duplicates adjacent */
1218 for (i = 0; i + 1 < cpu_r->nr; i++) {
1219 struct bch_replicas_cpu_entry *l =
1220 cpu_replicas_entry(cpu_r, i);
1221 struct bch_replicas_cpu_entry *r =
1222 cpu_replicas_entry(cpu_r, i + 1);
1224 BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);
1226 err = "duplicate replicas entry";
1227 if (!memcmp(l, r, cpu_r->entry_size))
/*
 * Finish a replicas GC pass.  On error, just drop c->replicas_gc.
 * Otherwise serialize the GC table back into the superblock's replicas
 * field (sized via popcount of each entry's device bitmap), publish
 * the GC table as the live c->replicas, and write the superblock.
 * Caller must hold c->replicas_gc_lock.
 */
1237 int bch2_replicas_gc_end(struct bch_fs *c, int err)
1239 struct bch_sb_field_replicas *sb_r;
1240 struct bch_replicas_cpu *r, *old_r;
1241 struct bch_replicas_entry *dst_e;
1242 size_t i, j, bytes, dev_slots;
1245 lockdep_assert_held(&c->replicas_gc_lock);
1247 mutex_lock(&c->sb_lock);
1249 r = rcu_dereference_protected(c->replicas_gc,
1250 lockdep_is_held(&c->sb_lock));
1253 rcu_assign_pointer(c->replicas_gc, NULL);
1258 dev_slots = replicas_dev_slots(r);
/* compute the serialized size: header + one byte per set device bit */
1260 bytes = sizeof(struct bch_sb_field_replicas);
1262 for (i = 0; i < r->nr; i++) {
1263 struct bch_replicas_cpu_entry *e =
1264 cpu_replicas_entry(r, i);
1266 bytes += sizeof(struct bch_replicas_entry);
1267 for (j = 0; j < r->entry_size - 1; j++)
1268 bytes += hweight8(e->devs[j]);
1271 sb_r = bch2_fs_sb_resize_replicas(c,
1272 DIV_ROUND_UP(sizeof(*sb_r) + bytes, sizeof(u64)));
1278 memset(&sb_r->entries, 0,
1279 vstruct_end(&sb_r->field) -
1280 (void *) &sb_r->entries);
/* expand each bitmap back into an explicit device list */
1282 dst_e = sb_r->entries;
1283 for (i = 0; i < r->nr; i++) {
1284 struct bch_replicas_cpu_entry *src_e =
1285 cpu_replicas_entry(r, i);
1287 dst_e->data_type = src_e->data_type;
1289 for (j = 0; j < dev_slots; j++)
1290 if (replicas_test_dev(src_e, j))
1291 dst_e->devs[dst_e->nr++] = j;
1293 dst_e = replicas_entry_next(dst_e);
1296 old_r = rcu_dereference_protected(c->replicas,
1297 lockdep_is_held(&c->sb_lock));
1298 rcu_assign_pointer(c->replicas, r);
1299 rcu_assign_pointer(c->replicas_gc, NULL);
1300 kfree_rcu(old_r, rcu);
1302 bch2_write_super(c);
1304 mutex_unlock(&c->sb_lock);
/*
 * Begin a replicas GC pass: snapshot c->replicas into a new table that
 * keeps only entries whose data_type has no bits in common with
 * typemask, sort it, and publish it as c->replicas_gc.  Caller must
 * hold c->replicas_gc_lock.
 * NOTE(review): this function continues past the visible source.
 */
1308 int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
1310 struct bch_replicas_cpu *r, *src;
1313 lockdep_assert_held(&c->replicas_gc_lock);
1315 mutex_lock(&c->sb_lock);
1316 BUG_ON(c->replicas_gc);
1318 src = rcu_dereference_protected(c->replicas,
1319 lockdep_is_held(&c->sb_lock));
1321 r = kzalloc(sizeof(struct bch_replicas_cpu) +
1322 src->nr * src->entry_size, GFP_NOIO);
1324 mutex_unlock(&c->sb_lock);
1328 r->entry_size = src->entry_size;
1331 for (i = 0; i < src->nr; i++) {
1332 struct bch_replicas_cpu_entry *dst_e =
1333 cpu_replicas_entry(r, r->nr);
1334 struct bch_replicas_cpu_entry *src_e =
1335 cpu_replicas_entry(src, i);
/*
 * NOTE(review): this tests data_type directly against the mask,
 * whereas bch2_dev_has_data builds masks as (1 << e->data_type) —
 * possibly inconsistent; confirm intended typemask encoding.
 */
1337 if (!(src_e->data_type & typemask)) {
1338 memcpy(dst_e, src_e, r->entry_size);
1343 eytzinger0_sort(r->entries,
1348 rcu_assign_pointer(c->replicas_gc, r);
1349 mutex_unlock(&c->sb_lock);