10 #include <sys/sysmacros.h>
11 #include <sys/types.h>
15 #include <uuid/uuid.h>
17 #include "libbcachefs.h"
19 #include "libbcachefs/bcachefs_format.h"
20 #include "libbcachefs/btree_cache.h"
21 #include "libbcachefs/checksum.h"
22 #include "libbcachefs/disk_groups.h"
23 #include "libbcachefs/opts.h"
24 #include "libbcachefs/replicas.h"
25 #include "libbcachefs/super-io.h"
27 #define NSEC_PER_SEC 1000000000L
29 #define BCH_MIN_NR_NBUCKETS (1 << 10)
31 /* minimum size filesystem we can create, given a bucket size: */
32 static u64 min_size(unsigned bucket_size)
34 return BCH_MIN_NR_NBUCKETS * bucket_size;
37 static void init_layout(struct bch_sb_layout *l, unsigned block_size,
41 u64 backup; /* offset of 2nd sb */
43 memset(l, 0, sizeof(*l));
45 if (start != BCH_SB_SECTOR)
46 start = round_up(start, block_size);
47 end = round_down(end, block_size);
50 die("insufficient space for superblocks");
53 * Create two superblocks in the allowed range: reserve a maximum of 64k
55 sb_size = min_t(u64, 128, end - start / 2);
57 backup = start + sb_size;
58 backup = round_up(backup, block_size);
60 backup = min(backup, end);
62 sb_size = min(end - backup, backup- start);
63 sb_size = rounddown_pow_of_two(sb_size);
66 die("insufficient space for superblocks");
68 l->magic = BCACHE_MAGIC;
70 l->nr_superblocks = 2;
71 l->sb_max_size_bits = ilog2(sb_size);
72 l->sb_offset[0] = cpu_to_le64(start);
73 l->sb_offset[1] = cpu_to_le64(backup);
76 void bch2_pick_bucket_size(struct format_opts opts, struct dev_opts *dev)
78 if (!dev->sb_offset) {
79 dev->sb_offset = BCH_SB_SECTOR;
80 dev->sb_end = BCH_SB_SECTOR + 256;
84 dev->size = get_size(dev->path, dev->fd) >> 9;
86 if (!dev->bucket_size) {
87 if (dev->size < min_size(opts.block_size))
88 die("cannot format %s, too small (%llu sectors, min %llu)",
89 dev->path, dev->size, min_size(opts.block_size));
91 /* Bucket size must be >= block size: */
92 dev->bucket_size = opts.block_size;
94 /* Bucket size must be >= btree node size: */
95 dev->bucket_size = max(dev->bucket_size, opts.btree_node_size);
97 /* Want a bucket size of at least 128k, if possible: */
98 dev->bucket_size = max(dev->bucket_size, 256U);
100 if (dev->size >= min_size(dev->bucket_size)) {
101 unsigned scale = max(1,
102 ilog2(dev->size / min_size(dev->bucket_size)) / 4);
104 scale = rounddown_pow_of_two(scale);
106 /* max bucket size 1 mb */
107 dev->bucket_size = min(dev->bucket_size * scale, 1U << 11);
110 dev->bucket_size /= 2;
111 } while (dev->size < min_size(dev->bucket_size));
115 dev->nbuckets = dev->size / dev->bucket_size;
117 if (dev->bucket_size < opts.block_size)
118 die("Bucket size cannot be smaller than block size");
120 if (dev->bucket_size < opts.btree_node_size)
121 die("Bucket size cannot be smaller than btree node size");
123 if (dev->nbuckets < BCH_MIN_NR_NBUCKETS)
124 die("Not enough buckets: %llu, need %u (bucket size %u)",
125 dev->nbuckets, BCH_MIN_NR_NBUCKETS, dev->bucket_size);
129 static unsigned parse_target(struct bch_sb_handle *sb,
130 struct dev_opts *devs, size_t nr_devs,
139 for (i = devs; i < devs + nr_devs; i++)
140 if (!strcmp(s, i->path))
141 return dev_to_target(i - devs);
143 idx = bch2_disk_path_find(sb, s);
145 return group_to_target(idx);
147 die("Invalid target %s", s);
/*
 * bch2_format(): build a new bcachefs superblock for @nr_devs devices
 * described by @devs, using the format options in @opts, then write it out
 * to every device.
 *
 * NOTE(review): several lines of this function appear to be elided in this
 * chunk (closing braces, a `struct timespec now;` declaration, the loop
 * variable `i`, error checks) — comments below describe only what is visible.
 */
151 struct bch_sb *bch2_format(struct format_opts opts,
152 struct dev_opts *devs, size_t nr_devs)
154 struct bch_sb_handle sb = { NULL };
156 struct bch_sb_field_members *mi;
/* Block size defaults to the largest device block size: */
158 /* calculate block size: */
159 if (!opts.block_size)
160 for (i = devs; i < devs + nr_devs; i++)
161 opts.block_size = max(opts.block_size,
162 get_blocksize(i->path, i->fd));
164 /* calculate bucket sizes: */
165 for (i = devs; i < devs + nr_devs; i++)
166 bch2_pick_bucket_size(opts, i);
/* Btree node size defaults to 256k (512 sectors), capped at the
 * smallest bucket size so a node always fits in a bucket: */
168 /* calculate btree node size: */
169 if (!opts.btree_node_size) {
170 /* 256k default btree node size */
171 opts.btree_node_size = 512;
173 for (i = devs; i < devs + nr_devs; i++)
174 opts.btree_node_size =
175 min(opts.btree_node_size, i->bucket_size);
178 if (!is_power_of_2(opts.block_size))
179 die("block size must be power of 2");
181 if (!is_power_of_2(opts.btree_node_size))
182 die("btree node size must be power of 2");
/* User-visible UUID: generate one if the caller didn't supply it: */
184 if (uuid_is_null(opts.uuid.b))
185 uuid_generate(opts.uuid.b);
187 if (bch2_sb_realloc(&sb, 0))
188 die("insufficient memory");
/* Fill in the fixed superblock fields: */
190 sb.sb->version = cpu_to_le64(BCH_SB_VERSION_MAX);
191 sb.sb->magic = BCACHE_MAGIC;
192 sb.sb->block_size = cpu_to_le16(opts.block_size);
193 sb.sb->user_uuid = opts.uuid;
194 sb.sb->nr_devices = nr_devs;
/* Internal UUID is always freshly generated, distinct from user_uuid: */
196 uuid_generate(sb.sb->uuid.b);
/* NOTE(review): strncpy may leave label without a NUL terminator if
 * opts.label fills the field — presumably the on-disk label is
 * fixed-width and readers handle this; confirm. */
199 strncpy((char *) sb.sb->label, opts.label, sizeof(sb.sb->label));
201 SET_BCH_SB_CSUM_TYPE(sb.sb, opts.meta_csum_type);
202 SET_BCH_SB_META_CSUM_TYPE(sb.sb, opts.meta_csum_type);
203 SET_BCH_SB_DATA_CSUM_TYPE(sb.sb, opts.data_csum_type);
204 SET_BCH_SB_COMPRESSION_TYPE(sb.sb, opts.compression_type);
205 SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(sb.sb,
206 opts.background_compression_type);
208 SET_BCH_SB_BTREE_NODE_SIZE(sb.sb, opts.btree_node_size);
209 SET_BCH_SB_GC_RESERVE(sb.sb, 8);
210 SET_BCH_SB_META_REPLICAS_WANT(sb.sb, opts.meta_replicas);
211 SET_BCH_SB_META_REPLICAS_REQ(sb.sb, opts.meta_replicas_required);
212 SET_BCH_SB_DATA_REPLICAS_WANT(sb.sb, opts.data_replicas);
213 SET_BCH_SB_DATA_REPLICAS_REQ(sb.sb, opts.data_replicas_required);
214 SET_BCH_SB_ERROR_ACTION(sb.sb, opts.on_error_action);
215 SET_BCH_SB_STR_HASH_TYPE(sb.sb, BCH_STR_HASH_SIPHASH);
216 SET_BCH_SB_ENCODED_EXTENT_MAX_BITS(sb.sb,ilog2(opts.encoded_extent_max));
218 SET_BCH_SB_POSIX_ACL(sb.sb, 1);
/* Record the format time as the filesystem's time base: */
221 if (clock_gettime(CLOCK_REALTIME, &now))
222 die("error getting current time: %m");
224 sb.sb->time_base_lo = cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
225 sb.sb->time_precision = cpu_to_le32(1);
/* Member info: one bch_member per device (size given in u64 words): */
228 mi = bch2_sb_resize_members(&sb,
229 (sizeof(*mi) + sizeof(struct bch_member) *
230 nr_devs) / sizeof(u64));
232 for (i = devs; i < devs + nr_devs; i++) {
233 struct bch_member *m = mi->members + (i - devs);
235 uuid_generate(m->uuid.b);
236 m->nbuckets = cpu_to_le64(i->nbuckets);
238 m->bucket_size = cpu_to_le16(i->bucket_size);
240 SET_BCH_MEMBER_REPLACEMENT(m, CACHE_REPLACEMENT_LRU);
241 SET_BCH_MEMBER_DISCARD(m, i->discard);
242 SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
/* Stored durability is biased by 1 (0 means "unset" on disk): */
243 SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
/* Second pass: disk groups (must exist before targets can reference them): */
247 for (i = devs; i < devs + nr_devs; i++) {
248 struct bch_member *m = mi->members + (i - devs);
254 idx = bch2_disk_path_find_or_create(&sb, i->group);
/* NOTE(review): %s with int idx is a format-specifier mismatch (UB);
 * likely should be strerror(-idx) or %i — confirm against upstream. */
256 die("error creating disk path: %s", idx);
/* Group index is biased by 1 (0 means "no group"): */
258 SET_BCH_MEMBER_GROUP(m, idx + 1);
261 SET_BCH_SB_FOREGROUND_TARGET(sb.sb,
262 parse_target(&sb, devs, nr_devs, opts.foreground_target));
263 SET_BCH_SB_BACKGROUND_TARGET(sb.sb,
264 parse_target(&sb, devs, nr_devs, opts.background_target));
265 SET_BCH_SB_PROMOTE_TARGET(sb.sb,
266 parse_target(&sb, devs, nr_devs, opts.promote_target));
/* Optional encryption: add the crypt field and derive keys from the
 * passphrase: */
269 if (opts.encrypted) {
270 struct bch_sb_field_crypt *crypt =
271 bch2_sb_resize_crypt(&sb, sizeof(*crypt) / sizeof(u64));
273 bch_sb_crypt_init(sb.sb, crypt, opts.passphrase);
274 SET_BCH_SB_ENCRYPTION_TYPE(sb.sb, 1);
/* Finally, write the superblock to every device (dev_idx is per-device): */
277 for (i = devs; i < devs + nr_devs; i++) {
278 sb.sb->dev_idx = i - devs;
280 init_layout(&sb.sb->layout, opts.block_size,
281 i->sb_offset, i->sb_end);
283 if (i->sb_offset == BCH_SB_SECTOR) {
284 /* Zero start of disk */
285 static const char zeroes[BCH_SB_SECTOR << 9];
287 xpwrite(i->fd, zeroes, BCH_SB_SECTOR << 9, 0);
290 bch2_super_write(i->fd, sb.sb);
297 void bch2_super_write(int fd, struct bch_sb *sb)
299 struct nonce nonce = { 0 };
302 for (i = 0; i < sb->layout.nr_superblocks; i++) {
303 sb->offset = sb->layout.sb_offset[i];
305 if (sb->offset == BCH_SB_SECTOR) {
306 /* Write backup layout */
307 xpwrite(fd, &sb->layout, sizeof(sb->layout),
308 BCH_SB_LAYOUT_SECTOR << 9);
311 sb->csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb), nonce, sb);
312 xpwrite(fd, sb, vstruct_bytes(sb),
313 le64_to_cpu(sb->offset) << 9);
319 struct bch_sb *__bch2_super_read(int fd, u64 sector)
321 struct bch_sb sb, *ret;
323 xpread(fd, &sb, sizeof(sb), sector << 9);
325 if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic)))
326 die("not a bcachefs superblock");
328 size_t bytes = vstruct_bytes(&sb);
332 xpread(fd, ret, bytes, sector << 9);
337 static unsigned get_dev_has_data(struct bch_sb *sb, unsigned dev)
339 struct bch_sb_field_replicas *replicas;
340 struct bch_replicas_entry *r;
341 unsigned i, data_has = 0;
343 replicas = bch2_sb_get_replicas(sb);
346 for_each_replicas_entry(replicas, r)
347 for (i = 0; i < r->nr; i++)
348 if (r->devs[i] == dev)
349 data_has |= 1 << r->data_type;
354 /* superblock printing: */
/*
 * Print the superblock layout: max sb size, number of superblocks, and the
 * sector offset of each copy.
 * NOTE(review): parts of the printf format string and loop body are elided
 * in this chunk.
 */
356 static void bch2_sb_print_layout(struct bch_sb *sb, enum units units)
358 struct bch_sb_layout *l = &sb->layout;
362 " superblock max size: %s\n"
363 " nr superblocks: %u\n"
/* sb_max_size_bits is log2 of the max size in sectors: */
366 pr_units(1 << l->sb_max_size_bits, units),
369 for (i = 0; i < l->nr_superblocks; i++) {
372 printf("%llu", le64_to_cpu(l->sb_offset[i]));
/*
 * Print the journal field: the list of bucket numbers allocated to the
 * journal on this device.
 */
377 static void bch2_sb_print_journal(struct bch_sb *sb, struct bch_sb_field *f,
380 struct bch_sb_field_journal *journal = field_to_type(f, journal);
381 unsigned i, nr = bch2_nr_journal_buckets(journal);
383 printf(" Buckets: ");
384 for (i = 0; i < nr; i++) {
387 printf("%llu", le64_to_cpu(journal->buckets[i]));
/*
 * Print one stanza per existing member device: UUID, size, bucket geometry,
 * last mount time, state, group, allowed/present data types, replacement
 * policy and discard flag.
 */
392 static void bch2_sb_print_members(struct bch_sb *sb, struct bch_sb_field *f,
395 struct bch_sb_field_members *mi = field_to_type(f, members);
396 struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
399 for (i = 0; i < sb->nr_devices; i++) {
400 struct bch_member *m = mi->members + i;
401 time_t last_mount = le64_to_cpu(m->last_mount);
402 char member_uuid_str[40];
403 char data_allowed_str[100];
404 char data_has_str[100];
/* Skip slots for deleted/never-present devices: */
407 if (!bch2_member_exists(m))
410 uuid_unparse(m->uuid.b, member_uuid_str);
/* Group index is stored biased by 1; 0 means "no group": */
412 if (BCH_MEMBER_GROUP(m)) {
413 unsigned idx = BCH_MEMBER_GROUP(m) - 1;
415 if (idx < disk_groups_nr(gi)) {
416 memcpy(group, gi->entries[idx].label,
418 group[BCH_SB_LABEL_SIZE] = '\0';
/* NOTE(review): string is missing its closing ')' — should read
 * "(bad disk groups section)". */
420 strcpy(group, "(bad disk groups section");
424 bch2_scnprint_flag_list(data_allowed_str,
425 sizeof(data_allowed_str),
427 BCH_MEMBER_DATA_ALLOWED(m));
428 if (!data_allowed_str[0])
429 strcpy(data_allowed_str, "(none)");
/* Data actually present on this device, from the replicas section: */
431 bch2_scnprint_flag_list(data_has_str,
432 sizeof(data_has_str),
434 get_dev_has_data(sb, i));
435 if (!data_has_str[0])
436 strcpy(data_has_str, "(none)");
438 printf(" Device %u:\n"
442 " First bucket: %u\n"
447 " Data allowed: %s\n"
451 " Replacement policy: %s\n"
/* Device size = bucket_size * nbuckets, pretty-printed per @units: */
454 pr_units(le16_to_cpu(m->bucket_size) *
455 le64_to_cpu(m->nbuckets), units),
456 pr_units(le16_to_cpu(m->bucket_size), units),
457 le16_to_cpu(m->first_bucket),
458 le64_to_cpu(m->nbuckets),
459 last_mount ? ctime(&last_mount) : "(never)",
/* Bounds-check enum values before indexing the name tables: */
461 BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
462 ? bch2_dev_state[BCH_MEMBER_STATE(m)]
469 BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
470 ? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
473 BCH_MEMBER_DISCARD(m));
477 static void bch2_sb_print_crypt(struct bch_sb *sb, struct bch_sb_field *f,
480 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
482 printf(" KFD: %llu\n"
486 BCH_CRYPT_KDF_TYPE(crypt),
487 BCH_KDF_SCRYPT_N(crypt),
488 BCH_KDF_SCRYPT_R(crypt),
489 BCH_KDF_SCRYPT_P(crypt));
/*
 * Print the replicas field: for each replicas entry, the data type and the
 * list of device indices holding that data.
 */
492 static void bch2_sb_print_replicas(struct bch_sb *sb, struct bch_sb_field *f,
495 struct bch_sb_field_replicas *replicas = field_to_type(f, replicas);
496 struct bch_replicas_entry *e;
/* printf_pad aligns the data-type label column: */
499 for_each_replicas_entry(replicas, e) {
500 printf_pad(32, " %s:", bch2_data_types[e->data_type]);
503 for (i = 0; i < e->nr; i++) {
506 printf("%u", e->devs[i]);
/*
 * Stub printers for fields with no tool-side pretty-printing yet; they exist
 * so the dispatch table below has an entry for every field type.
 * (Bodies appear elided in this chunk — presumably empty; confirm.)
 */
512 static void bch2_sb_print_quota(struct bch_sb *sb, struct bch_sb_field *f,
517 static void bch2_sb_print_disk_groups(struct bch_sb *sb, struct bch_sb_field *f,
522 static void bch2_sb_print_clean(struct bch_sb *sb, struct bch_sb_field *f,
/* Signature shared by all per-field print functions: */
527 typedef void (*sb_field_print_fn)(struct bch_sb *, struct bch_sb_field *, enum units);
529 struct bch_sb_field_toolops {
530 sb_field_print_fn print;
/*
 * Dispatch table indexed by field type; the macro body (expanded per field
 * via an X-macro list elided from this chunk) maps BCH_SB_FIELD_x to
 * bch2_sb_print_x.
 */
533 static const struct bch_sb_field_toolops bch2_sb_field_ops[] = {
535 [BCH_SB_FIELD_##f] = { \
536 .print = bch2_sb_print_##f, \
542 static inline void bch2_sb_field_print(struct bch_sb *sb,
543 struct bch_sb_field *f,
546 unsigned type = le32_to_cpu(f->type);
548 if (type < BCH_SB_FIELD_NR)
549 bch2_sb_field_ops[type].print(sb, f, units);
551 printf("(unknown field %u)\n", type);
/*
 * Top-level superblock pretty-printer: a summary header, optionally the
 * layout (@print_layout), then each field whose type bit is set in @fields.
 * NOTE(review): several lines (variable decls such as `fields_have`, parts
 * of the printf format, closing braces) are elided in this chunk.
 */
554 void bch2_sb_print(struct bch_sb *sb, bool print_layout,
555 unsigned fields, enum units units)
557 struct bch_sb_field_members *mi;
558 char user_uuid_str[40], internal_uuid_str[40];
559 char fields_have_str[200];
560 char label[BCH_SB_LABEL_SIZE + 1];
561 struct bch_sb_field *f;
563 unsigned nr_devices = 0;
/* The on-disk label is fixed-width and may not be NUL-terminated, so copy
 * into a buffer with one spare zeroed byte: */
565 memset(label, 0, sizeof(label));
566 memcpy(label, sb->label, sizeof(sb->label));
567 uuid_unparse(sb->user_uuid.b, user_uuid_str);
568 uuid_unparse(sb->uuid.b, internal_uuid_str);
/* Count member slots that are actually in use: */
570 mi = bch2_sb_get_members(sb);
572 struct bch_member *m;
574 for (m = mi->members;
575 m < mi->members + sb->nr_devices;
577 nr_devices += bch2_member_exists(m);
/* Build a human-readable list of which optional fields are present: */
580 vstruct_for_each(sb, f)
581 fields_have |= 1 << le32_to_cpu(f->type);
582 bch2_scnprint_flag_list(fields_have_str, sizeof(fields_have_str),
583 bch2_sb_fields, fields_have);
585 printf("External UUID: %s\n"
586 "Internal UUID: %s\n"
590 "Btree node size: %s\n"
594 "Metadata replicas: %llu\n"
595 "Data replicas: %llu\n"
597 "Metadata checksum type: %s (%llu)\n"
598 "Data checksum type: %s (%llu)\n"
599 "Compression type: %s (%llu)\n"
601 "Foreground write target: %llu\n"
602 "Background write target: %llu\n"
603 "Promote target: %llu\n"
605 "String hash type: %s (%llu)\n"
606 "32 bit inodes: %llu\n"
607 "GC reserve percentage: %llu%%\n"
608 "Root reserve percentage: %llu%%\n"
610 "Devices: %u live, %u total\n"
612 "Superblock size: %llu\n",
616 le64_to_cpu(sb->version),
617 pr_units(le16_to_cpu(sb->block_size), units),
618 pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
/* Each enum is bounds-checked before indexing its name table: */
620 BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
621 ? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
626 BCH_SB_META_REPLICAS_WANT(sb),
627 BCH_SB_DATA_REPLICAS_WANT(sb),
629 BCH_SB_META_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
630 ? bch2_csum_types[BCH_SB_META_CSUM_TYPE(sb)]
632 BCH_SB_META_CSUM_TYPE(sb),
634 BCH_SB_DATA_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
635 ? bch2_csum_types[BCH_SB_DATA_CSUM_TYPE(sb)]
637 BCH_SB_DATA_CSUM_TYPE(sb),
639 BCH_SB_COMPRESSION_TYPE(sb) < BCH_COMPRESSION_OPT_NR
640 ? bch2_compression_types[BCH_SB_COMPRESSION_TYPE(sb)]
642 BCH_SB_COMPRESSION_TYPE(sb),
644 BCH_SB_FOREGROUND_TARGET(sb),
645 BCH_SB_BACKGROUND_TARGET(sb),
646 BCH_SB_PROMOTE_TARGET(sb),
648 BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR
649 ? bch2_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)]
651 BCH_SB_STR_HASH_TYPE(sb),
653 BCH_SB_INODE_32BIT(sb),
654 BCH_SB_GC_RESERVE(sb),
655 BCH_SB_ROOT_RESERVE(sb),
657 nr_devices, sb->nr_devices,
664 bch2_sb_print_layout(sb, units);
/* Print each requested field via the dispatch table: */
667 vstruct_for_each(sb, f) {
668 unsigned type = le32_to_cpu(f->type);
671 if (!(fields & (1 << type)))
674 if (type < BCH_SB_FIELD_NR) {
675 scnprintf(name, sizeof(name), "%s", bch2_sb_fields[type]);
676 name[0] = toupper(name[0]);
678 scnprintf(name, sizeof(name), "(unknown field %u)", type);
681 printf("\n%s (size %llu):\n", name, vstruct_bytes(f));
682 if (type < BCH_SB_FIELD_NR)
683 bch2_sb_field_print(sb, f, units);
687 /* ioctl interface: */
689 /* Global control device: */
/*
 * Open the global bcachefs control device; dies (via xopen) on failure.
 * Returns the open file descriptor.
 */
int bcachectl_open(void)
{
	int ctl_fd = xopen("/dev/bcachefs-ctl", O_RDWR);

	return ctl_fd;
}
695 /* Filesystem handles (ioctl, sysfs dir): */
697 #define SYSFS_BASE "/sys/fs/bcachefs/"
699 void bcache_fs_close(struct bchfs_handle fs)
/*
 * Open a filesystem handle by UUID string or by path: resolves both the
 * per-fs control device (ioctl_fd) and its sysfs directory (sysfs_fd).
 * NOTE(review): closing braces, char buffers and free() calls appear elided
 * in this chunk; the mprintf()/read_file_str() results are heap-allocated
 * and presumably freed in the elided lines — confirm.
 */
705 struct bchfs_handle bcache_fs_open(const char *path)
707 struct bchfs_handle ret;
/* uuid_parse returns 0 on success, so this branch handles a UUID string: */
709 if (!uuid_parse(path, ret.uuid.b)) {
710 /* It's a UUID, look it up in sysfs: */
711 char *sysfs = mprintf(SYSFS_BASE "%s", path);
712 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
/* The char device minor number names the per-fs control node: */
714 char *minor = read_file_str(ret.sysfs_fd, "minor");
715 char *ctl = mprintf("/dev/bcachefs%s-ctl", minor);
716 ret.ioctl_fd = xopen(ctl, O_RDWR);
/* Otherwise treat @path as a mount point / device path: */
723 ret.ioctl_fd = xopen(path, O_RDONLY);
/* Verify it really is bcachefs by querying its UUID via ioctl: */
725 struct bch_ioctl_query_uuid uuid;
726 if (ioctl(ret.ioctl_fd, BCH_IOCTL_QUERY_UUID, &uuid) < 0)
727 die("error opening %s: not a bcachefs filesystem", path);
729 ret.uuid = uuid.uuid;
732 uuid_unparse(uuid.uuid.b, uuid_str);
734 char *sysfs = mprintf(SYSFS_BASE "%s", uuid_str);
735 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
743 * Given a path to a block device, open the filesystem it belongs to; also
744 * return the device's idx:
746 struct bchfs_handle bchu_fs_open_by_dev(const char *path, unsigned *idx)
748 char buf[1024], *uuid_str;
750 struct stat stat = xstat(path);
752 if (!S_ISBLK(stat.st_mode))
753 die("%s is not a block device", path);
/* If the device is part of a mounted fs, its sysfs node links back into
 * the filesystem's directory; the major:minor args appear elided here: */
755 char *sysfs = mprintf("/sys/dev/block/%u:%u/bcachefs",
/* NOTE(review): readlink() does not NUL-terminate buf, and the check on
 * `len` is elided in this chunk — strrchr(buf, ...) below relies on
 * termination done in the missing lines; confirm. */
758 ssize_t len = readlink(sysfs, buf, sizeof(buf));
/* The link target ends in .../dev-<idx>: parse the device index: */
762 char *p = strrchr(buf, '/');
763 if (!p || sscanf(p + 1, "dev-%u", idx) != 1)
764 die("error parsing sysfs");
767 p = strrchr(buf, '/');
/* Not mounted: read the superblock directly (read-only, non-exclusive)
 * to learn the fs UUID and this device's index: */
770 struct bch_opts opts = bch2_opts_empty();
772 opt_set(opts, noexcl, true);
773 opt_set(opts, nochanges, true);
775 struct bch_sb_handle sb;
776 int ret = bch2_read_super(path, &opts, &sb);
778 die("Error opening %s: %s", path, strerror(-ret));
780 *idx = sb.sb->dev_idx;
782 uuid_unparse(sb.sb->user_uuid.b, uuid_str);
784 bch2_free_super(&sb);
/* Either way, finish by opening the fs via its UUID: */
787 return bcache_fs_open(uuid_str);
790 int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd)
792 int progress_fd = xioctl(fs.ioctl_fd, BCH_IOCTL_DATA, &cmd);
795 struct bch_ioctl_data_event e;
797 if (read(progress_fd, &e, sizeof(e)) != sizeof(e))
798 die("error reading from progress fd %m");
803 if (e.p.data_type == U8_MAX)
808 printf("%llu%% complete: current position %s",
809 e.p.sectors_done * 100 / e.p.sectors_total,
810 bch2_data_types[e.p.data_type]);
812 switch (e.p.data_type) {
815 printf(" %s:%llu:%llu",
816 bch2_btree_ids[e.p.btree_id],