#include "bcachefs.h"
#include "checksum.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "super-io.h"
#include "super.h"
#include "vstructs.h"

#include <linux/backing-dev.h>
#include <linux/sort.h>

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
static const char *bch2_sb_validate_replicas(struct bch_sb *);

static inline void __bch2_sb_layout_size_assert(void)
{
	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}

struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
				       enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
		if (le32_to_cpu(f->type) == type)
			return f;
	return NULL;
}

void bch2_free_super(struct bcache_superblock *sb)
{
	if (sb->bio)
		bio_put(sb->bio);
	if (!IS_ERR_OR_NULL(sb->bdev))
		blkdev_put(sb->bdev, sb->mode);

	free_pages((unsigned long) sb->sb, sb->page_order);
	memset(sb, 0, sizeof(*sb));
}

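/*
 * Reallocate the in-memory superblock buffer (and the bio used to read and
 * write it) so it can hold at least 2^order pages, preserving the current
 * contents. Returns 0 on success, -ENOMEM on allocation failure.
 */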
static int __bch2_super_realloc(struct bcache_superblock *sb, unsigned order)
{
	struct bch_sb *new_sb;
	struct bio *bio;

	if (sb->page_order >= order && sb->sb)
		return 0;

	if (dynamic_fault("bcachefs:add:super_realloc"))
		return -ENOMEM;

	bio = bio_kmalloc(GFP_KERNEL, 1 << order);
	if (!bio)
		return -ENOMEM;

	if (sb->bio)
		bio_put(sb->bio);
	sb->bio = bio;

	new_sb = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!new_sb)
		return -ENOMEM;

	if (sb->sb)
		memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);

	free_pages((unsigned long) sb->sb, sb->page_order);
	sb->sb = new_sb;
	sb->page_order = order;

	return 0;
}

static int bch2_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
{
	u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s);
	u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;

	if (new_bytes > max_bytes) {
		char buf[BDEVNAME_SIZE];

		pr_err("%s: superblock too big: want %llu but have %llu",
		       bdevname(sb->bdev, buf), new_bytes, max_bytes);
		return -ENOSPC;
	}

	return __bch2_super_realloc(sb, get_order(new_bytes));
}

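/*
 * Resize the filesystem's own copy of the superblock to hold @u64s, copying
 * the old contents into the (zeroed) new allocation.
 */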
static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
{
	u64 bytes = __vstruct_bytes(struct bch_sb, u64s);
	struct bch_sb *sb;
	unsigned order = get_order(bytes);

	if (c->disk_sb && order <= c->disk_sb_order)
		return 0;

	sb = (void *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!sb)
		return -ENOMEM;

	if (c->disk_sb)
		memcpy(sb, c->disk_sb, PAGE_SIZE << c->disk_sb_order);

	free_pages((unsigned long) c->disk_sb, c->disk_sb_order);

	c->disk_sb = sb;
	c->disk_sb_order = order;
	return 0;
}

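/*
 * Resize an optional superblock field in place: a new field is appended at
 * the end of the superblock; an existing field has the fields after it moved
 * up or down and any newly exposed space zeroed. The caller must have made
 * sure the superblock buffer is big enough.
 */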
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
						   struct bch_sb_field *f,
						   unsigned u64s)
{
	unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;

	if (!f) {
		f = vstruct_last(sb);
		memset(f, 0, sizeof(u64) * u64s);
		f->u64s = cpu_to_le32(u64s);
	} else {
		void *src, *dst;

		src = vstruct_end(f);
		f->u64s = cpu_to_le32(u64s);
		dst = vstruct_end(f);

		memmove(dst, src, vstruct_end(sb) - src);

		if (dst > src)
			memset(src, 0, dst - src);
	}

	le32_add_cpu(&sb->u64s, u64s - old_u64s);

	return f;
}

struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *sb,
					  enum bch_sb_field_type type,
					  unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;

	if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
		return NULL;

	f = __bch2_sb_field_resize(sb->sb, f, u64s);
	f->type = type;
	return f;
}

struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
					     enum bch_sb_field_type type,
					     unsigned u64s)
{
	struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type);
	ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
	ssize_t d = -old_u64s + u64s;
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
		return NULL;

	/* XXX: we're not checking that offline devices have enough space */

	for_each_online_member(ca, c, i) {
		struct bcache_superblock *sb = &ca->disk_sb;

		if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
			percpu_ref_put(&ca->ref);
			return NULL;
		}
	}

	f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
	f->type = type;
	return f;
}

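/*
 * Check a superblock layout for sanity: magic, layout type, superblock
 * count, and that no two superblocks can overlap given the maximum
 * superblock size. Returns an error string, or NULL if the layout is valid.
 */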
static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock layout";

	if (layout->layout_type != 0)
		return "Invalid superblock layout type";

	if (!layout->nr_superblocks)
		return "Invalid superblock layout: no superblocks";

	if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset))
		return "Invalid superblock layout: too many superblocks";

	max_sectors = 1 << layout->sb_max_size_bits;

	prev_offset = le64_to_cpu(layout->sb_offset[0]);

	for (i = 1; i < layout->nr_superblocks; i++) {
		offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset < prev_offset + max_sectors)
			return "Invalid superblock layout: superblocks overlap";
		prev_offset = offset;
	}

	return NULL;
}

static int u64_cmp(const void *_l, const void *_r)
{
	u64 l = *((const u64 *) _l), r = *((const u64 *) _r);

	return l < r ? -1 : l > r ? 1 : 0;
}

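/*
 * Validate the journal bucket list against this device's member info: sort a
 * copy of the bucket list, then check that every bucket lies within the
 * device's usable bucket range and that no bucket appears twice. Returns an
 * error string, or NULL if the journal is valid.
 */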
const char *bch2_sb_validate_journal(struct bch_sb *sb,
				     struct bch_member_cpu mi)
{
	struct bch_sb_field_journal *journal;
	const char *err;
	unsigned nr;
	unsigned i;
	u64 *b;

	journal = bch2_sb_get_journal(sb);
	if (!journal)
		return NULL;

	nr = bch2_nr_journal_buckets(journal);
	if (!nr)
		return NULL;

	b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
	if (!b)
		return "cannot allocate memory";

	for (i = 0; i < nr; i++)
		b[i] = le64_to_cpu(journal->buckets[i]);

	sort(b, nr, sizeof(u64), u64_cmp, NULL);

	err = "journal bucket at sector 0";
	if (!b[0])
		goto err;

	err = "journal bucket before first bucket";
	if (b[0] < mi.first_bucket)
		goto err;

	err = "journal bucket past end of device";
	if (b[nr - 1] >= mi.nbuckets)
		goto err;

	err = "duplicate journal buckets";
	for (i = 0; i + 1 < nr; i++)
		if (b[i] == b[i + 1])
			goto err;

	err = NULL;
err:
	kfree(b);
	return err;
}

static const char *bch2_sb_validate_members(struct bch_sb *sb)
{
	struct bch_sb_field_members *mi;
	unsigned i;

	mi = bch2_sb_get_members(sb);
	if (!mi)
		return "Invalid superblock: member info area missing";

	if ((void *) (mi->members + sb->nr_devices) >
	    vstruct_end(&mi->field))
		return "Invalid superblock: bad member info";

	for (i = 0; i < sb->nr_devices; i++) {
		if (!bch2_dev_exists(sb, mi, i))
			continue;

		if (le16_to_cpu(mi->members[i].bucket_size) <
		    BCH_SB_BTREE_NODE_SIZE(sb))
			return "bucket size smaller than btree node size";
	}

	return NULL;
}

const char *bch2_sb_validate(struct bcache_superblock *disk_sb)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members *sb_mi;
	struct bch_member_cpu mi;
	const char *err;
	u16 block_size;

	switch (le64_to_cpu(sb->version)) {
	case BCACHE_SB_VERSION_CDEV_V4:
		break;
	default:
		return "Unsupported superblock version";
	}

	if (BCH_SB_INITIALIZED(sb) &&
	    le64_to_cpu(sb->version) != BCACHE_SB_VERSION_CDEV_V4)
		return "Unsupported superblock version";

	block_size = le16_to_cpu(sb->block_size);

	if (!is_power_of_2(block_size) ||
	    block_size > PAGE_SECTORS)
		return "Bad block size";

	if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
		return "Bad user UUID";

	if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
		return "Bad internal UUID";

	if (!sb->nr_devices ||
	    sb->nr_devices <= sb->dev_idx ||
	    sb->nr_devices > BCH_SB_MEMBERS_MAX)
		return "Bad cache device number in set";

	if (!BCH_SB_META_REPLICAS_WANT(sb) ||
	    BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_META_REPLICAS_REQ(sb) ||
	    BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of metadata replicas";

	if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
	    BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
	    BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
		return "Invalid number of data replicas";

	if (!BCH_SB_BTREE_NODE_SIZE(sb))
		return "Btree node size not set";

	if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
		return "Btree node size not a power of two";

	if (BCH_SB_BTREE_NODE_SIZE(sb) > BTREE_NODE_SIZE_MAX)
		return "Btree node size too large";

	if (BCH_SB_GC_RESERVE(sb) < 5)
		return "gc reserve percentage too small";

	if (!sb->time_precision ||
	    le32_to_cpu(sb->time_precision) > NSEC_PER_SEC)
		return "invalid time precision";

	/* validate layout */
	err = validate_sb_layout(&sb->layout);
	if (err)
		return err;

	vstruct_for_each(sb, f) {
		if (!f->u64s)
			return "Invalid superblock: invalid optional field";

		if (vstruct_next(f) > vstruct_last(sb))
			return "Invalid superblock: invalid optional field";

		if (le32_to_cpu(f->type) >= BCH_SB_FIELD_NR)
			return "Invalid superblock: unknown optional field type";
	}

	err = bch2_sb_validate_members(sb);
	if (err)
		return err;

	sb_mi = bch2_sb_get_members(sb);
	mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx);

	if (mi.nbuckets > LONG_MAX)
		return "Too many buckets";

	if (mi.nbuckets - mi.first_bucket < 1 << 10)
		return "Not enough buckets";

	if (!is_power_of_2(mi.bucket_size) ||
	    mi.bucket_size < PAGE_SECTORS ||
	    mi.bucket_size < block_size)
		return "Bad bucket size";

	if (get_capacity(disk_sb->bdev->bd_disk) <
	    mi.bucket_size * mi.nbuckets)
		return "Invalid superblock: device too small";

	err = bch2_sb_validate_journal(sb, mi);
	if (err)
		return err;

	err = bch2_sb_validate_replicas(sb);
	if (err)
		return err;

	return NULL;
}

static const char *bch2_blkdev_open(const char *path, fmode_t mode,
				    void *holder, struct block_device **ret)
{
	struct block_device *bdev;

	*ret = NULL;
	bdev = blkdev_get_by_path(path, mode, holder);
	if (bdev == ERR_PTR(-EBUSY))
		return "device busy";

	if (IS_ERR(bdev))
		return "failed to open device";

	if (mode & FMODE_WRITE)
		bdev_get_queue(bdev)->backing_dev_info->capabilities
			|= BDI_CAP_STABLE_WRITES;

	*ret = bdev;
	return NULL;
}

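/*
 * Refresh the cached, native-endian copy of superblock fields in c->sb, and
 * each member device's cached member info, from the on-disk superblock.
 */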
static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(src);
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

	c->sb.uuid		= src->uuid;
	c->sb.user_uuid		= src->user_uuid;
	c->sb.block_size	= le16_to_cpu(src->block_size);
	c->sb.btree_node_size	= BCH_SB_BTREE_NODE_SIZE(src);
	c->sb.nr_devices	= src->nr_devices;
	c->sb.clean		= BCH_SB_CLEAN(src);
	c->sb.str_hash_type	= BCH_SB_STR_HASH_TYPE(src);
	c->sb.encryption_type	= BCH_SB_ENCRYPTION_TYPE(src);
	c->sb.time_base_lo	= le64_to_cpu(src->time_base_lo);
	c->sb.time_base_hi	= le32_to_cpu(src->time_base_hi);
	c->sb.time_precision	= le32_to_cpu(src->time_precision);

	for_each_member_device(ca, c, i)
		ca->mi = bch2_mi_to_cpu(mi->members + i);
}

/* doesn't copy member info */
static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
{
	struct bch_sb_field *src_f, *dst_f;

	dst->version		= src->version;
	dst->seq		= src->seq;
	dst->uuid		= src->uuid;
	dst->user_uuid		= src->user_uuid;
	memcpy(dst->label, src->label, sizeof(dst->label));

	dst->block_size		= src->block_size;
	dst->nr_devices		= src->nr_devices;

	dst->time_base_lo	= src->time_base_lo;
	dst->time_base_hi	= src->time_base_hi;
	dst->time_precision	= src->time_precision;

	memcpy(dst->flags,	src->flags,	sizeof(dst->flags));
	memcpy(dst->features,	src->features,	sizeof(dst->features));
	memcpy(dst->compat,	src->compat,	sizeof(dst->compat));

	vstruct_for_each(src, src_f) {
		if (src_f->type == BCH_SB_FIELD_journal)
			continue;

		dst_f = bch2_sb_field_get(dst, src_f->type);
		dst_f = __bch2_sb_field_resize(dst, dst_f,
				le32_to_cpu(src_f->u64s));

		memcpy(dst_f, src_f, vstruct_bytes(src_f));
	}
}

int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(src);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	int ret;

	lockdep_assert_held(&c->sb_lock);

	if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
		return -ENOMEM;

	__copy_super(c->disk_sb, src);

	ret = bch2_sb_replicas_to_cpu_replicas(c);
	if (ret)
		return ret;

	bch2_sb_update(c);
	return 0;
}

int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(dst);
	unsigned journal_u64s = journal_buckets
		? le32_to_cpu(journal_buckets->field.u64s)
		: 0;
	unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
	int ret;

	ret = bch2_sb_realloc(&ca->disk_sb, u64s);
	if (ret)
		return ret;

	__copy_super(dst, src);
	return 0;
}

/* read superblock: */

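/*
 * Read and verify a single superblock at the given sector offset: check the
 * magic, version and checksum, growing the buffer and rereading if the
 * superblock is bigger than what has been read so far. Returns an error
 * string, or NULL on success.
 */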
static const char *read_one_super(struct bcache_superblock *sb, u64 offset)
{
	struct bch_csum csum;
	size_t bytes;
	unsigned order;
reread:
	bio_reset(sb->bio);
	sb->bio->bi_bdev = sb->bdev;
	sb->bio->bi_iter.bi_sector = offset;
	sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	bch2_bio_map(sb->bio, sb->sb);

	if (submit_bio_wait(sb->bio))
		return "IO error";

	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock";

	if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4)
		return "Unsupported superblock version";

	bytes = vstruct_bytes(sb->sb);

	if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
		return "Bad superblock: too big";

	order = get_order(bytes);
	if (order > sb->page_order) {
		if (__bch2_super_realloc(sb, order))
			return "cannot allocate memory";
		goto reread;
	}

	if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR)
		return "unknown csum type";

	/* XXX: verify MACs */
	csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
			    (struct nonce) { 0 }, sb->sb);

	if (bch2_crc_cmp(csum, sb->sb->csum))
		return "bad checksum reading superblock";

	return NULL;
}

const char *bch2_read_super(struct bcache_superblock *sb,
			    struct bch_opts opts,
			    const char *path)
{
	u64 offset = opt_defined(opts.sb) ? opts.sb : BCH_SB_SECTOR;
	struct bch_sb_layout layout;
	const char *err;
	unsigned i;

	memset(sb, 0, sizeof(*sb));
	sb->mode = FMODE_READ;

	if (!(opt_defined(opts.noexcl) && opts.noexcl))
		sb->mode |= FMODE_EXCL;

	if (!(opt_defined(opts.nochanges) && opts.nochanges))
		sb->mode |= FMODE_WRITE;

	err = bch2_blkdev_open(path, sb->mode, sb, &sb->bdev);
	if (err)
		return err;

	err = "cannot allocate memory";
	if (__bch2_super_realloc(sb, 0))
		goto err;

	err = "dynamic fault";
	if (bch2_fs_init_fault("read_super"))
		goto err;

	err = read_one_super(sb, offset);
	if (!err)
		goto got_super;

	if (offset != BCH_SB_SECTOR) {
		pr_err("error reading superblock: %s", err);
		goto err;
	}

	pr_err("error reading default superblock: %s", err);

	/*
	 * Error reading primary superblock - read location of backup
	 * superblocks:
	 */
	bio_reset(sb->bio);
	sb->bio->bi_bdev = sb->bdev;
	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
	sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
	/*
	 * use sb buffer to read layout, since sb buffer is page aligned but
	 * layout won't be:
	 */
	bch2_bio_map(sb->bio, sb->sb);

	err = "IO error";
	if (submit_bio_wait(sb->bio))
		goto err;

	memcpy(&layout, sb->sb, sizeof(layout));
	err = validate_sb_layout(&layout);
	if (err)
		goto err;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR)
			continue;

		err = read_one_super(sb, offset);
		if (!err)
			goto got_super;
	}
	goto err;
got_super:
	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 le64_to_cpu(sb->sb->version),
		 le64_to_cpu(sb->sb->flags),
		 le64_to_cpu(sb->sb->seq),
		 le16_to_cpu(sb->sb->u64s));

	err = "Superblock block size smaller than device block size";
	if (le16_to_cpu(sb->sb->block_size) << 9 <
	    bdev_logical_block_size(sb->bdev))
		goto err;

	return NULL;
err:
	bch2_free_super(sb);
	return err;
}

/* write superblock: */

static void write_super_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;

	/* XXX: return errors directly */

	bch2_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");

	closure_put(&ca->fs->sb_write);
	percpu_ref_put(&ca->io_ref);
}

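/*
 * Queue a write of one copy of the superblock, at the location given by @idx
 * in the layout. Returns false when @idx is past the last superblock in the
 * layout or the device isn't usable for IO, so the caller knows when to
 * stop.
 */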
static bool write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
{
	struct bch_sb *sb = ca->disk_sb.sb;
	struct bio *bio = ca->disk_sb.bio;

	if (idx >= sb->layout.nr_superblocks)
		return false;

	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	sb->offset = sb->layout.sb_offset[idx];

	SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
				(struct nonce) { 0 }, sb);

	bio_reset(bio);
	bio->bi_bdev		= ca->disk_sb.bdev;
	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
	bio->bi_iter.bi_size	=
		roundup(vstruct_bytes(sb),
			bdev_logical_block_size(ca->disk_sb.bdev));
	bio->bi_end_io		= write_super_endio;
	bio->bi_private		= ca;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch2_bio_map(bio, sb);

	closure_bio_submit(bio, &c->sb_write);
	return true;
}

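/*
 * Write the superblock to all online devices: bump the sequence number,
 * propagate the filesystem superblock to each device and validate it, then
 * write one copy per device at a time until every copy in every device's
 * layout has been written.
 */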
void bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	unsigned i, super_idx = 0;
	const char *err;
	bool wrote;

	lockdep_assert_held(&c->sb_lock);

	closure_init_stack(cl);

	le64_add_cpu(&c->disk_sb->seq, 1);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
		err = bch2_sb_validate(&ca->disk_sb);
		if (err) {
			bch2_fs_inconsistent(c, "sb invalid before write: %s", err);
			goto out;
		}
	}

	if (c->opts.nochanges ||
	    test_bit(BCH_FS_ERROR, &c->flags))
		goto out;

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
			if (write_one_super(c, ca, super_idx))
				wrote = true;

		closure_sync(cl);
		super_idx++;
	} while (wrote);
out:
	/* Make new options visible after they're persistent: */
	bch2_sb_update(c);
}

/* replica information: */

static inline struct bch_replicas_entry *
replicas_entry_next(struct bch_replicas_entry *i)
{
	return (void *) i + offsetof(struct bch_replicas_entry, devs) + i->nr;
}

#define for_each_replicas_entry(_r, _i)					\
	for (_i = (_r)->entries;					\
	     (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
	     (_i) = replicas_entry_next(_i))

static void bch2_sb_replicas_nr_entries(struct bch_sb_field_replicas *r,
					unsigned *nr,
					unsigned *bytes,
					unsigned *max_dev)
{
	struct bch_replicas_entry *i;
	unsigned j;

	*nr	= 0;
	*bytes	= sizeof(*r);
	*max_dev = 0;

	if (!r)
		return;

	for_each_replicas_entry(r, i) {
		for (j = 0; j < i->nr; j++)
			*max_dev = max_t(unsigned, *max_dev, i->devs[j]);
		(*nr)++;
	}

	*bytes = (void *) i - (void *) r;
}

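/*
 * Convert the on-disk replicas field (variable length device lists) into the
 * in-memory representation (fixed size entries with a device bitmap), sorted
 * so it can be binary searched. Returns NULL on allocation failure.
 */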
static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
	struct bch_replicas_cpu *cpu_r;
	unsigned i, nr, bytes, max_dev, entry_size;

	bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);

	entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
		DIV_ROUND_UP(max_dev + 1, 8);

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	if (nr) {
		struct bch_replicas_cpu_entry *dst =
			cpu_replicas_entry(cpu_r, 0);
		struct bch_replicas_entry *src = sb_r->entries;

		while (dst < cpu_replicas_entry(cpu_r, nr)) {
			dst->data_type = src->data_type;
			for (i = 0; i < src->nr; i++)
				replicas_set_dev(dst, src->devs[i]);

			src = replicas_entry_next(src);
			dst = (void *) dst + entry_size;
		}
	}

	eytzinger0_sort(cpu_r->entries,
			cpu_r->nr,
			cpu_r->entry_size,
			memcmp, NULL);
	return cpu_r;
}

static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_cpu *cpu_r, *old_r;

	lockdep_assert_held(&c->sb_lock);

	sb_r	= bch2_sb_get_replicas(c->disk_sb);
	cpu_r	= __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		return -ENOMEM;

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas, cpu_r);
	if (old_r)
		kfree_rcu(old_r, rcu);

	return 0;
}

/*
 * for when gc of replica information is in progress:
 */
static int bch2_update_gc_replicas(struct bch_fs *c,
				   struct bch_replicas_cpu *gc_r,
				   struct bkey_s_c_extent e,
				   enum bch_data_types data_type)
{
	const struct bch_extent_ptr *ptr;
	struct bch_replicas_cpu_entry *new_e;
	struct bch_replicas_cpu *new;
	unsigned i, nr, entry_size, max_dev = 0;

	extent_for_each_ptr(e, ptr)
		if (!ptr->cached)
			max_dev = max_t(unsigned, max_dev, ptr->dev);

	entry_size = offsetof(struct bch_replicas_cpu_entry, devs) +
		DIV_ROUND_UP(max_dev + 1, 8);
	entry_size = max(entry_size, gc_r->entry_size);
	nr = gc_r->nr + 1;

	new = kzalloc(sizeof(struct bch_replicas_cpu) +
		      nr * entry_size, GFP_NOIO);
	if (!new)
		return -ENOMEM;

	new->nr		= nr;
	new->entry_size	= entry_size;

	for (i = 0; i < gc_r->nr; i++)
		memcpy(cpu_replicas_entry(new, i),
		       cpu_replicas_entry(gc_r, i),
		       gc_r->entry_size);

	new_e = cpu_replicas_entry(new, nr - 1);
	new_e->data_type = data_type;

	extent_for_each_ptr(e, ptr)
		if (!ptr->cached)
			replicas_set_dev(new_e, ptr->dev);

	eytzinger0_sort(new->entries,
			new->nr,
			new->entry_size,
			memcmp, NULL);

	rcu_assign_pointer(c->replicas_gc, new);
	kfree_rcu(gc_r, rcu);
	return 0;
}

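/*
 * Slowpath for marking an extent's replicas in the superblock: called when
 * the extent's device list isn't already covered by an existing replicas
 * entry. Appends a new entry (and updates the in-progress gc copy, if any),
 * rebuilds the cpu replicas table, and writes out the superblock.
 */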
int bch2_check_mark_super_slowpath(struct bch_fs *c, struct bkey_s_c_extent e,
				   enum bch_data_types data_type)
{
	struct bch_replicas_cpu *gc_r;
	const struct bch_extent_ptr *ptr;
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *new_entry;
	unsigned new_entry_bytes, new_u64s, nr, bytes, max_dev;
	int ret = 0;

	mutex_lock(&c->sb_lock);

	gc_r = rcu_dereference_protected(c->replicas_gc,
					 lockdep_is_held(&c->sb_lock));
	if (gc_r &&
	    !replicas_has_extent(gc_r, e, data_type)) {
		ret = bch2_update_gc_replicas(c, gc_r, e, data_type);
		if (ret)
			goto err;
	}

	/* recheck, might have raced */
	if (bch2_sb_has_replicas(c, e, data_type)) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	new_entry_bytes = sizeof(struct bch_replicas_entry) +
		bch2_extent_nr_dirty_ptrs(e.s_c);

	sb_r = bch2_sb_get_replicas(c->disk_sb);

	bch2_sb_replicas_nr_entries(sb_r, &nr, &bytes, &max_dev);

	new_u64s = DIV_ROUND_UP(bytes + new_entry_bytes, sizeof(u64));

	sb_r = bch2_fs_sb_resize_replicas(c,
			DIV_ROUND_UP(sizeof(*sb_r) + bytes + new_entry_bytes,
				     sizeof(u64)));
	if (!sb_r) {
		ret = -ENOMEM;
		goto err;
	}

	new_entry = (void *) sb_r + bytes;
	new_entry->data_type = data_type;
	new_entry->nr = 0;

	extent_for_each_ptr(e, ptr)
		if (!ptr->cached)
			new_entry->devs[new_entry->nr++] = ptr->dev;

	ret = bch2_sb_replicas_to_cpu_replicas(c);
	if (ret) {
		memset(new_entry, 0,
		       vstruct_end(&sb_r->field) - (void *) new_entry);
		goto err;
	}

	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	return ret;
}

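/*
 * Compute, for each data type, the minimum number of online replicas and the
 * maximum number of offline replicas across all replicas entries; if
 * @dev_to_offline is non-NULL it is counted as offline, so callers can ask
 * what redundancy would remain after taking that device down.
 */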
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_dev *dev_to_offline)
{
	struct bch_replicas_cpu_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, dev, dev_slots, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].nr_online = UINT_MAX;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);
	dev_slots = min_t(unsigned, replicas_dev_slots(r), c->sb.nr_devices);

	for (i = 0; i < r->nr; i++) {
		e = cpu_replicas_entry(r, i);

		BUG_ON(e->data_type >= ARRAY_SIZE(ret.replicas));

		nr_online = nr_offline = 0;

		for (dev = 0; dev < dev_slots; dev++) {
			if (!replicas_test_dev(e, dev))
				continue;

			if (bch2_dev_is_online(c->devs[dev]) &&
			    c->devs[dev] != dev_to_offline)
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].nr_online =
			min(ret.replicas[e->data_type].nr_online,
			    nr_online);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	rcu_read_unlock();

	return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, NULL);
}

unsigned bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return meta
		? min(s.replicas[BCH_DATA_JOURNAL].nr_online,
		      s.replicas[BCH_DATA_BTREE].nr_online)
		: s.replicas[BCH_DATA_USER].nr_online;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_cpu_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, ret = 0;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);

	if (ca->dev_idx >= replicas_dev_slots(r))
		goto out;

	for (i = 0; i < r->nr; i++) {
		e = cpu_replicas_entry(r, i);

		if (replicas_test_dev(e, ca->dev_idx))
			ret |= 1 << e->data_type;
	}
out:
	rcu_read_unlock();

	return ret;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb)
{
	struct bch_sb_field_members *mi;
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	mi	= bch2_sb_get_members(sb);
	sb_r	= bch2_sb_get_replicas(sb);
	if (!sb_r)
		return NULL;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: too many devices";
		if (e->nr >= BCH_REPLICAS_MAX)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_cpu_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_cpu_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		err = "duplicate replicas entry";
		if (!memcmp(l, r, cpu_r->entry_size))
			goto err;
	}

	err = NULL;
err:
	kfree(cpu_r);
	return err;
}

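/*
 * Finish garbage collection of the replicas section: serialize the gc copy
 * back into the superblock's replicas field, install it as the live cpu
 * table, and write the superblock. On error the gc copy is simply dropped.
 */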
int bch2_replicas_gc_end(struct bch_fs *c, int err)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_cpu *r, *old_r;
	struct bch_replicas_entry *dst_e;
	size_t i, j, bytes, dev_slots;
	int ret = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	r = rcu_dereference_protected(c->replicas_gc,
				      lockdep_is_held(&c->sb_lock));

	if (err) {
		rcu_assign_pointer(c->replicas_gc, NULL);
		kfree_rcu(r, rcu);
		goto err;
	}

	dev_slots = replicas_dev_slots(r);

	bytes = sizeof(struct bch_sb_field_replicas);

	for (i = 0; i < r->nr; i++) {
		struct bch_replicas_cpu_entry *e =
			cpu_replicas_entry(r, i);

		bytes += sizeof(struct bch_replicas_entry);
		for (j = 0; j < r->entry_size - 1; j++)
			bytes += hweight8(e->devs[j]);
	}

	sb_r = bch2_fs_sb_resize_replicas(c,
			DIV_ROUND_UP(sizeof(*sb_r) + bytes, sizeof(u64)));
	if (!sb_r) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst_e = sb_r->entries;
	for (i = 0; i < r->nr; i++) {
		struct bch_replicas_cpu_entry *src_e =
			cpu_replicas_entry(r, i);

		dst_e->data_type = src_e->data_type;

		for (j = 0; j < dev_slots; j++)
			if (replicas_test_dev(src_e, j))
				dst_e->devs[dst_e->nr++] = j;

		dst_e = replicas_entry_next(dst_e);
	}

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas, r);
	rcu_assign_pointer(c->replicas_gc, NULL);
	kfree_rcu(old_r, rcu);

	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	return ret;
}

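/*
 * Begin garbage collection of the replicas section: build a copy of the cpu
 * replicas table containing only the entries whose data types are not being
 * gc'd (entries for types in @typemask get re-added as extents are
 * re-marked), and install it as c->replicas_gc.
 */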
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_cpu *r, *src;
	unsigned i;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc);

	src = rcu_dereference_protected(c->replicas,
					lockdep_is_held(&c->sb_lock));

	r = kzalloc(sizeof(struct bch_replicas_cpu) +
		    src->nr * src->entry_size, GFP_NOIO);
	if (!r) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	r->entry_size = src->entry_size;

	for (i = 0; i < src->nr; i++) {
		struct bch_replicas_cpu_entry *dst_e =
			cpu_replicas_entry(r, r->nr);
		struct bch_replicas_cpu_entry *src_e =
			cpu_replicas_entry(src, i);

		/* keep only entries whose data type isn't being gc'd: */
		if (!((1 << src_e->data_type) & typemask)) {
			memcpy(dst_e, src_e, r->entry_size);
			r->nr++;
		}
	}

	eytzinger0_sort(r->entries,
			r->nr,
			r->entry_size,
			memcmp, NULL);

	rcu_assign_pointer(c->replicas_gc, r);
	mutex_unlock(&c->sb_lock);

	return 0;
}