11 #include <sys/sysmacros.h>
12 #include <sys/types.h>
16 #include <uuid/uuid.h>
18 #include "libbcachefs.h"
20 #include "libbcachefs/bcachefs_format.h"
21 #include "libbcachefs/btree_cache.h"
22 #include "libbcachefs/checksum.h"
23 #include "libbcachefs/disk_groups.h"
24 #include "libbcachefs/journal_seq_blacklist.h"
25 #include "libbcachefs/opts.h"
26 #include "libbcachefs/replicas.h"
27 #include "libbcachefs/super-io.h"
28 #include "tools-util.h"
30 #define NSEC_PER_SEC 1000000000L
32 /* minimum size filesystem we can create, given a bucket size: */
33 static u64 min_size(unsigned bucket_size)
35 return BCH_MIN_NR_NBUCKETS * bucket_size;
/*
 * Initialize the superblock layout: place l->nr_superblocks copies in the
 * sector range [sb_start, sb_end), each aligned to block_size, and die if
 * they don't fit.  NOTE(review): several interior lines (loop advance,
 * range check, closing braces) are elided in this view.
 */
38 static void init_layout(struct bch_sb_layout *l,
41 u64 sb_start, u64 sb_end)
43 u64 sb_pos = sb_start;
46 memset(l, 0, sizeof(*l));
48 l->magic = BCACHE_MAGIC;
50 l->nr_superblocks = 2;
/* sb_max_size_bits: log2 of the maximum superblock size, in sectors */
51 l->sb_max_size_bits = ilog2(sb_size);
53 /* Create two superblocks in the allowed range: */
54 for (i = 0; i < l->nr_superblocks; i++) {
/* The very first superblock stays at the fixed BCH_SB_SECTOR offset: */
55 if (sb_pos != BCH_SB_SECTOR)
56 sb_pos = round_up(sb_pos, block_size);
58 l->sb_offset[i] = cpu_to_le64(sb_pos);
/* Reached only when the copies spill past sb_end: */
63 die("insufficient space for superblocks: start %llu end %llu > %llu size %u",
64 sb_start, sb_pos, sb_end, sb_size);
/*
 * Pick (or validate) a bucket size for one device, and compute dev->nbuckets.
 * All sizes here are in 512-byte sectors (dev->size = byte size >> 9).
 */
67 void bch2_pick_bucket_size(struct bch_opts opts, struct dev_opts *dev)
70 dev->size = get_size(dev->path, dev->fd) >> 9;
/* Only auto-pick when the user didn't request an explicit bucket size: */
72 if (!dev->bucket_size) {
73 if (dev->size < min_size(opts.block_size))
74 die("cannot format %s, too small (%llu sectors, min %llu)",
75 dev->path, dev->size, min_size(opts.block_size));
77 /* Bucket size must be >= block size: */
78 dev->bucket_size = opts.block_size;
80 /* Bucket size must be >= btree node size: */
81 if (opt_defined(opts, btree_node_size))
82 dev->bucket_size = max_t(unsigned, dev->bucket_size,
83 opts.btree_node_size);
85 /* Want a bucket size of at least 128k, if possible: */
86 dev->bucket_size = max(dev->bucket_size, 256U);
/* Scale the bucket size up with device size, as a power of two: */
88 if (dev->size >= min_size(dev->bucket_size)) {
89 unsigned scale = max(1,
90 ilog2(dev->size / min_size(dev->bucket_size)) / 4);
92 scale = rounddown_pow_of_two(scale);
94 /* max bucket size 1 mb */
95 dev->bucket_size = min(dev->bucket_size * scale, 1U << 11);
/* Device too small for the chosen size: halve until it fits: */
98 dev->bucket_size /= 2;
99 } while (dev->size < min_size(dev->bucket_size));
103 dev->nbuckets = dev->size / dev->bucket_size;
/* Validate a user-supplied (or shrunken) bucket size: */
105 if (dev->bucket_size < opts.block_size)
106 die("Bucket size cannot be smaller than block size");
108 if (opt_defined(opts, btree_node_size) &&
109 dev->bucket_size < opts.btree_node_size)
110 die("Bucket size cannot be smaller than btree node size");
112 if (dev->nbuckets < BCH_MIN_NR_NBUCKETS)
113 die("Not enough buckets: %llu, need %u (bucket size %u)",
114 dev->nbuckets, BCH_MIN_NR_NBUCKETS, dev->bucket_size);
/*
 * Resolve a target string to an encoded target id: a device path maps to a
 * device target, a known disk-group label maps to a group target, anything
 * else is fatal.  NOTE(review): the NULL/empty-string fast path and the idx
 * validity check are elided in this view.
 */
118 static unsigned parse_target(struct bch_sb_handle *sb,
119 struct dev_opts *devs, size_t nr_devs,
/* First, try to match s against one of the member device paths: */
128 for (i = devs; i < devs + nr_devs; i++)
129 if (!strcmp(s, i->path))
130 return dev_to_target(i - devs);
/* Otherwise, look it up as a disk-group (label) path: */
132 idx = bch2_disk_path_find(sb, s);
134 return group_to_target(idx);
136 die("Invalid target %s", s);
/*
 * Create a new bcachefs filesystem across nr_devs devices: compute
 * block/bucket/btree-node sizes, build the superblock (members, targets,
 * optional encryption), then write a superblock to every device.
 * Returns the in-memory superblock (caller owns it).
 */
140 struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
141 struct bch_opts fs_opts,
142 struct format_opts opts,
143 struct dev_opts *devs,
146 struct bch_sb_handle sb = { NULL };
148 struct bch_sb_field_members *mi;
149 unsigned max_dev_block_size = 0;
/* The fs block size can't be smaller than any device's hardware block size: */
152 for (i = devs; i < devs + nr_devs; i++)
153 max_dev_block_size = max(max_dev_block_size,
154 get_blocksize(i->path, i->fd));
156 /* calculate block size: */
157 if (!opt_defined(fs_opts, block_size)) {
158 opt_set(fs_opts, block_size, max_dev_block_size);
159 } else if (fs_opts.block_size < max_dev_block_size)
160 die("blocksize too small: %u, must be greater than device blocksize %u",
161 fs_opts.block_size, max_dev_block_size);
163 /* calculate bucket sizes: */
164 for (i = devs; i < devs + nr_devs; i++)
165 bch2_pick_bucket_size(fs_opts, i);
167 /* calculate btree node size: */
168 if (!opt_defined(fs_opts, btree_node_size)) {
169 /* 256k default btree node size */
170 opt_set(fs_opts, btree_node_size, 512);
/* ...but clamp it down to the smallest device bucket size: */
172 for (i = devs; i < devs + nr_devs; i++)
173 fs_opts.btree_node_size =
174 min_t(unsigned, fs_opts.btree_node_size,
178 if (!is_power_of_2(fs_opts.block_size))
179 die("block size must be power of 2");
181 if (!is_power_of_2(fs_opts.btree_node_size))
182 die("btree node size must be power of 2");
/* Generate an external UUID if the user didn't supply one: */
184 if (uuid_is_null(opts.uuid.b))
185 uuid_generate(opts.uuid.b);
187 if (bch2_sb_realloc(&sb, 0))
188 die("insufficient memory");
/*
 * NOTE(review): le16_to_cpu() here should semantically be cpu_to_le16()
 * (we're storing, not loading); the byte-swap is identical, so behavior
 * is unchanged, but it reads backwards.
 */
190 sb.sb->version = le16_to_cpu(opts.version);
191 sb.sb->version_min = le16_to_cpu(opts.version);
192 sb.sb->magic = BCACHE_MAGIC;
193 sb.sb->block_size = cpu_to_le16(fs_opts.block_size);
194 sb.sb->user_uuid = opts.uuid;
195 sb.sb->nr_devices = nr_devs;
197 if (opts.version == bcachefs_metadata_version_current)
198 sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
/* Internal UUID is always freshly generated: */
200 uuid_generate(sb.sb->uuid.b);
205 min(strlen(opts.label), sizeof(sb.sb->label)));
/* Persist every settable option (explicit or default) into the superblock: */
208 opt_id < bch2_opts_nr;
210 const struct bch_option *opt = &bch2_opt_table[opt_id];
213 if (opt->set_sb == SET_NO_SB_OPT)
216 v = bch2_opt_defined_by_id(&fs_opts, opt_id)
217 ? bch2_opt_get_by_id(&fs_opts, opt_id)
218 : bch2_opt_get_by_id(&bch2_opts_default, opt_id);
220 opt->set_sb(sb.sb, v);
223 SET_BCH_SB_ENCODED_EXTENT_MAX_BITS(sb.sb,
224 ilog2(opts.encoded_extent_max));
/* Record the time base so on-disk timestamps can be decoded later: */
227 if (clock_gettime(CLOCK_REALTIME, &now))
228 die("error getting current time: %m");
230 sb.sb->time_base_lo = cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
231 sb.sb->time_precision = cpu_to_le32(1);
/* Member info: one bch_member entry per device: */
234 mi = bch2_sb_resize_members(&sb,
235 (sizeof(*mi) + sizeof(struct bch_member) *
236 nr_devs) / sizeof(u64));
238 for (i = devs; i < devs + nr_devs; i++) {
239 struct bch_member *m = mi->members + (i - devs);
241 uuid_generate(m->uuid.b);
242 m->nbuckets = cpu_to_le64(i->nbuckets);
244 m->bucket_size = cpu_to_le16(i->bucket_size);
246 SET_BCH_MEMBER_REPLACEMENT(m, BCH_CACHE_REPLACEMENT_lru);
247 SET_BCH_MEMBER_DISCARD(m, i->discard);
248 SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
/* Stored durability is biased by one so 0 can mean "unset": */
249 SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
/* Disk labels: done in a second pass, after all members exist: */
253 for (i = devs; i < devs + nr_devs; i++) {
254 struct bch_member *m = mi->members + (i - devs);
260 idx = bch2_disk_path_find_or_create(&sb, i->label);
262 die("error creating disk path: %s", idx);
/* Stored group is biased by one so 0 can mean "no group": */
264 SET_BCH_MEMBER_GROUP(m, idx + 1);
/* Targets may name devices or groups, so parse them after both exist: */
267 SET_BCH_SB_FOREGROUND_TARGET(sb.sb,
268 parse_target(&sb, devs, nr_devs, fs_opt_strs.foreground_target));
269 SET_BCH_SB_BACKGROUND_TARGET(sb.sb,
270 parse_target(&sb, devs, nr_devs, fs_opt_strs.background_target));
271 SET_BCH_SB_PROMOTE_TARGET(sb.sb,
272 parse_target(&sb, devs, nr_devs, fs_opt_strs.promote_target));
273 SET_BCH_SB_METADATA_TARGET(sb.sb,
274 parse_target(&sb, devs, nr_devs, fs_opt_strs.metadata_target));
277 if (opts.encrypted) {
278 struct bch_sb_field_crypt *crypt =
279 bch2_sb_resize_crypt(&sb, sizeof(*crypt) / sizeof(u64));
281 bch_sb_crypt_init(sb.sb, crypt, opts.passphrase);
282 SET_BCH_SB_ENCRYPTION_TYPE(sb.sb, 1);
/* Finally, lay out and write a superblock on every device: */
285 for (i = devs; i < devs + nr_devs; i++) {
286 sb.sb->dev_idx = i - devs;
289 i->sb_offset = BCH_SB_SECTOR;
293 init_layout(&sb.sb->layout, fs_opts.block_size,
294 opts.superblock_size,
295 i->sb_offset, i->sb_end);
298 * Also create a backup superblock at the end of the disk:
300 * If we're not creating a superblock at the default offset, it
301 * means we're being run from the migrate tool and we could be
302 * overwriting existing data if we write to the end of the disk:
304 if (i->sb_offset == BCH_SB_SECTOR) {
305 struct bch_sb_layout *l = &sb.sb->layout;
306 u64 backup_sb = i->size - (1 << l->sb_max_size_bits);
308 backup_sb = rounddown(backup_sb, i->bucket_size);
309 l->sb_offset[l->nr_superblocks++] = cpu_to_le64(backup_sb);
312 if (i->sb_offset == BCH_SB_SECTOR) {
313 /* Zero start of disk */
314 static const char zeroes[BCH_SB_SECTOR << 9];
316 xpwrite(i->fd, zeroes, BCH_SB_SECTOR << 9, 0);
319 bch2_super_write(i->fd, sb.sb);
/*
 * Write every superblock copy listed in sb->layout to fd, checksumming each
 * copy before the write.  The copy at the default sector also gets the
 * layout itself written to its fixed sector.
 */
326 void bch2_super_write(int fd, struct bch_sb *sb)
328 struct nonce nonce = { 0 };
331 for (i = 0; i < sb->layout.nr_superblocks; i++) {
/* sb->offset is part of the checksummed data, so set it per-copy: */
332 sb->offset = sb->layout.sb_offset[i];
334 if (sb->offset == BCH_SB_SECTOR) {
335 /* Write backup layout */
336 xpwrite(fd, &sb->layout, sizeof(sb->layout),
337 BCH_SB_LAYOUT_SECTOR << 9);
/* Checksum covers the whole variable-length superblock: */
340 sb->csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb), nonce, sb);
341 xpwrite(fd, sb, vstruct_bytes(sb),
342 le64_to_cpu(sb->offset) << 9);
348 struct bch_sb *__bch2_super_read(int fd, u64 sector)
350 struct bch_sb sb, *ret;
352 xpread(fd, &sb, sizeof(sb), sector << 9);
354 if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic)))
355 die("not a bcachefs superblock");
357 size_t bytes = vstruct_bytes(&sb);
361 xpread(fd, ret, bytes, sector << 9);
366 static unsigned get_dev_has_data(struct bch_sb *sb, unsigned dev)
368 struct bch_sb_field_replicas *replicas;
369 struct bch_replicas_entry *r;
370 unsigned i, data_has = 0;
372 replicas = bch2_sb_get_replicas(sb);
375 for_each_replicas_entry(replicas, r)
376 for (i = 0; i < r->nr_devs; i++)
377 if (r->devs[i] == dev)
378 data_has |= 1 << r->data_type;
/*
 * Format an encoded target value (device or disk group) into buf as a
 * human-readable string; returns the scnprintf() length.
 * NOTE(review): the switch over t.type and the uuid_str declaration are
 * elided in this view.
 */
383 static int bch2_sb_get_target(struct bch_sb *sb, char *buf, size_t len, u64 v)
385 struct target t = target_decode(v);
/* v == 0 / TARGET_NULL case: */
390 return scnprintf(buf, len, "none");
392 struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
393 struct bch_member *m = mi->members + t.dev;
395 if (bch2_dev_exists(sb, mi, t.dev)) {
398 uuid_unparse(m->uuid.b, uuid_str);
400 ret = scnprintf(buf, len, "Device %u (%s)", t.dev,
403 ret = scnprintf(buf, len, "Bad device %u", t.dev);
/* Group target: look the group up in the disk_groups field: */
409 struct bch_sb_field_disk_groups *gi;
410 gi = bch2_sb_get_disk_groups(sb);
412 struct bch_disk_group *g = gi->entries + t.group;
414 if (t.group < disk_groups_nr(gi) && !BCH_GROUP_DELETED(g)) {
415 ret = scnprintf(buf, len, "Label %u (%.*s)", t.group,
416 BCH_SB_LABEL_SIZE, g->label);
418 ret = scnprintf(buf, len, "Bad label %u", t.group);
429 /* superblock printing: */
/* Print the superblock layout: max copy size, copy count, and each offset. */
431 static void bch2_sb_print_layout(struct bch_sb *sb, enum units units)
433 struct bch_sb_layout *l = &sb->layout;
437 " superblock max size: %s\n"
438 " nr superblocks: %u\n"
441 pr_units(1 << l->sb_max_size_bits, units),
444 for (i = 0; i < l->nr_superblocks; i++) {
447 printf("%llu", le64_to_cpu(l->sb_offset[i]));
/* Print the journal field: the list of journal bucket numbers. */
452 static void bch2_sb_print_journal(struct bch_sb *sb, struct bch_sb_field *f,
455 struct bch_sb_field_journal *journal = field_to_type(f, journal);
456 unsigned i, nr = bch2_nr_journal_buckets(journal);
458 printf(" Buckets: ");
459 for (i = 0; i < nr; i++) {
462 printf("%llu", le64_to_cpu(journal->buckets[i]));
/*
 * Print one stanza per existing member device: UUID, sizes, state, label,
 * allowed/present data types, mount time, etc.
 */
467 static void bch2_sb_print_members(struct bch_sb *sb, struct bch_sb_field *f,
470 struct bch_sb_field_members *mi = field_to_type(f, members);
471 struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
474 for (i = 0; i < sb->nr_devices; i++) {
475 struct bch_member *m = mi->members + i;
476 time_t last_mount = le64_to_cpu(m->last_mount);
477 char member_uuid_str[40];
478 char data_allowed_str[100];
479 char data_has_str[100];
/* +10 leaves room for the " (%u)" index suffix below: */
480 char label [BCH_SB_LABEL_SIZE+10];
/* Skip slots for deleted/never-created devices: */
483 if (!bch2_member_exists(m))
486 uuid_unparse(m->uuid.b, member_uuid_str);
/* Resolve the member's disk-group label (group id is biased by one): */
488 if (BCH_MEMBER_GROUP(m)) {
489 unsigned idx = BCH_MEMBER_GROUP(m) - 1;
491 if (idx < disk_groups_nr(gi)) {
492 scnprintf(label, sizeof(label), "%.*s (%u)",
494 gi->entries[idx].label, idx);
496 strcpy(label, "(bad disk labels section)");
499 strcpy(label, "(none)");
502 bch2_flags_to_text(&PBUF(data_allowed_str),
504 BCH_MEMBER_DATA_ALLOWED(m));
505 if (!data_allowed_str[0])
506 strcpy(data_allowed_str, "(none)");
508 bch2_flags_to_text(&PBUF(data_has_str),
510 get_dev_has_data(sb, i));
511 if (!data_has_str[0])
512 strcpy(data_has_str, "(none)");
/* last_mount == 0 means never mounted (elided branch): */
515 struct tm *tm = localtime(&last_mount);
516 size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
518 strcpy(time_str, "(formatting error)");
520 strcpy(time_str, "(never)");
523 printf(" Device %u:\n"
527 " First bucket: %u\n"
532 " Data allowed: %s\n"
536 " Replacement policy: %s\n"
539 pr_units(le16_to_cpu(m->bucket_size) *
540 le64_to_cpu(m->nbuckets), units),
541 pr_units(le16_to_cpu(m->bucket_size), units),
542 le16_to_cpu(m->first_bucket),
543 le64_to_cpu(m->nbuckets),
/* Guard table lookups against out-of-range on-disk values: */
546 BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
547 ? bch2_member_states[BCH_MEMBER_STATE(m)]
554 BCH_MEMBER_REPLACEMENT(m) < BCH_CACHE_REPLACEMENT_NR
555 ? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
558 BCH_MEMBER_DISCARD(m));
562 static void bch2_sb_print_crypt(struct bch_sb *sb, struct bch_sb_field *f,
565 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
567 printf(" KFD: %llu\n"
571 BCH_CRYPT_KDF_TYPE(crypt),
572 BCH_KDF_SCRYPT_N(crypt),
573 BCH_KDF_SCRYPT_R(crypt),
574 BCH_KDF_SCRYPT_P(crypt));
/* Print the legacy (v0) replicas field: data type and device list per entry. */
577 static void bch2_sb_print_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f,
580 struct bch_sb_field_replicas_v0 *replicas = field_to_type(f, replicas_v0);
581 struct bch_replicas_entry_v0 *e;
584 for_each_replicas_entry(replicas, e) {
585 printf_pad(32, " %s:", bch2_data_types[e->data_type]);
588 for (i = 0; i < e->nr_devs; i++) {
591 printf("%u", e->devs[i]);
/* Print the replicas field: data type, required/total counts, device list. */
597 static void bch2_sb_print_replicas(struct bch_sb *sb, struct bch_sb_field *f,
600 struct bch_sb_field_replicas *replicas = field_to_type(f, replicas);
601 struct bch_replicas_entry *e;
604 for_each_replicas_entry(replicas, e) {
605 printf_pad(32, " %s: %u/%u",
606 bch2_data_types[e->data_type],
611 for (i = 0; i < e->nr_devs; i++) {
614 printf("%u", e->devs[i]);
/*
 * Printers for the quota and disk_groups fields; bodies are not visible in
 * this view — presumably empty stubs, TODO confirm.
 */
620 static void bch2_sb_print_quota(struct bch_sb *sb, struct bch_sb_field *f,
625 static void bch2_sb_print_disk_groups(struct bch_sb *sb, struct bch_sb_field *f,
/* Print the clean-shutdown field: flags and the journal sequence number. */
630 static void bch2_sb_print_clean(struct bch_sb *sb, struct bch_sb_field *f,
633 struct bch_sb_field_clean *clean = field_to_type(f, clean);
636 printf(" flags: %x", le32_to_cpu(clean->flags));
637 printf(" journal seq: %llx", le64_to_cpu(clean->journal_seq));
/* Print the journal sequence blacklist: one start-end range per entry. */
640 static void bch2_sb_print_journal_seq_blacklist(struct bch_sb *sb, struct bch_sb_field *f,
643 struct bch_sb_field_journal_seq_blacklist *bl = field_to_type(f, journal_seq_blacklist);
644 unsigned i, nr = blacklist_nr_entries(bl);
646 for (i = 0; i < nr; i++) {
647 struct journal_seq_blacklist_entry *e =
650 printf(" %llu-%llu\n",
651 le64_to_cpu(e->start),
652 le64_to_cpu(e->end));
/* Per-field-type print dispatch: one entry per BCH_SB_FIELD_* type. */
656 typedef void (*sb_field_print_fn)(struct bch_sb *, struct bch_sb_field *, enum units);
658 struct bch_sb_field_toolops {
659 sb_field_print_fn print;
/* Table generated by an x-macro over the field list (macro body elided): */
662 static const struct bch_sb_field_toolops bch2_sb_field_ops[] = {
664 [BCH_SB_FIELD_##f] = { \
665 .print = bch2_sb_print_##f, \
/* Dispatch to the per-type printer, or report an unknown field type. */
671 static inline void bch2_sb_field_print(struct bch_sb *sb,
672 struct bch_sb_field *f,
675 unsigned type = le32_to_cpu(f->type);
677 if (type < BCH_SB_FIELD_NR)
678 bch2_sb_field_ops[type].print(sb, f, units);
/* else branch (elided in this view): */
680 printf("(unknown field %u)\n", type);
/*
 * Print a full human-readable dump of a superblock: the fixed header
 * fields, optionally the layout, then each variable-length field selected
 * by the `fields` bitmask.
 */
683 void bch2_sb_print(struct bch_sb *sb, bool print_layout,
684 unsigned fields, enum units units)
686 struct bch_sb_field_members *mi;
687 char user_uuid_str[40], internal_uuid_str[40];
688 char features_str[500];
689 char compat_features_str[500];
690 char fields_have_str[200];
691 char label[BCH_SB_LABEL_SIZE + 1];
693 char foreground_str[64];
694 char background_str[64];
695 char promote_str[64];
696 char metadata_str[64];
697 struct bch_sb_field *f;
699 unsigned nr_devices = 0;
700 time_t time_base = le64_to_cpu(sb->time_base_lo) / NSEC_PER_SEC;
/* label is fixed-width on disk; NUL-terminate a local copy: */
702 memcpy(label, sb->label, BCH_SB_LABEL_SIZE);
703 label[BCH_SB_LABEL_SIZE] = '\0';
705 uuid_unparse(sb->user_uuid.b, user_uuid_str);
706 uuid_unparse(sb->uuid.b, internal_uuid_str);
/* time_base == 0 means "not set" (elided branch below): */
709 struct tm *tm = localtime(&time_base);
710 size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
712 strcpy(time_str, "(formatting error)");
714 strcpy(time_str, "(not set)");
717 mi = bch2_sb_get_members(sb);
719 struct bch_member *m;
/* Count live devices (existing member slots): */
721 for (m = mi->members;
722 m < mi->members + sb->nr_devices;
724 nr_devices += bch2_member_exists(m);
727 bch2_sb_get_target(sb, foreground_str, sizeof(foreground_str),
728 BCH_SB_FOREGROUND_TARGET(sb));
730 bch2_sb_get_target(sb, background_str, sizeof(background_str),
731 BCH_SB_BACKGROUND_TARGET(sb));
733 bch2_sb_get_target(sb, promote_str, sizeof(promote_str),
734 BCH_SB_PROMOTE_TARGET(sb));
736 bch2_sb_get_target(sb, metadata_str, sizeof(metadata_str),
737 BCH_SB_METADATA_TARGET(sb));
739 bch2_flags_to_text(&PBUF(features_str),
741 le64_to_cpu(sb->features[0]));
743 bch2_flags_to_text(&PBUF(compat_features_str),
745 le64_to_cpu(sb->compat[0]));
/* Collect the set of field types actually present: */
747 vstruct_for_each(sb, f)
748 fields_have |= 1 << le32_to_cpu(f->type);
749 bch2_flags_to_text(&PBUF(fields_have_str),
750 bch2_sb_fields, fields_have);
752 printf("External UUID: %s\n"
753 "Internal UUID: %s\n"
757 "Oldest version on disk: %u\n"
/* NOTE(review): typo in user-facing string — should be "Sequence number" */
759 "Squence number: %llu\n"
761 "Btree node size: %s\n"
765 "Compat features: %s\n"
767 "Metadata replicas: %llu\n"
768 "Data replicas: %llu\n"
770 "Metadata checksum type: %s (%llu)\n"
771 "Data checksum type: %s (%llu)\n"
772 "Compression type: %s (%llu)\n"
774 "Foreground write target: %s\n"
775 "Background write target: %s\n"
776 "Promote target: %s\n"
777 "Metadata target: %s\n"
779 "String hash type: %s (%llu)\n"
780 "32 bit inodes: %llu\n"
781 "GC reserve percentage: %llu%%\n"
782 "Root reserve percentage: %llu%%\n"
784 "Devices: %u live, %u total\n"
786 "Superblock size: %llu\n",
791 le16_to_cpu(sb->version),
792 le16_to_cpu(sb->version_min),
794 le64_to_cpu(sb->seq),
795 pr_units(le16_to_cpu(sb->block_size), units),
796 pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
/* Range-check every enum before indexing its name table: */
798 BCH_SB_ERROR_ACTION(sb) < BCH_ON_ERROR_NR
799 ? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
806 BCH_SB_META_REPLICAS_WANT(sb),
807 BCH_SB_DATA_REPLICAS_WANT(sb),
809 BCH_SB_META_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
810 ? bch2_csum_opts[BCH_SB_META_CSUM_TYPE(sb)]
812 BCH_SB_META_CSUM_TYPE(sb),
814 BCH_SB_DATA_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
815 ? bch2_csum_opts[BCH_SB_DATA_CSUM_TYPE(sb)]
817 BCH_SB_DATA_CSUM_TYPE(sb),
819 BCH_SB_COMPRESSION_TYPE(sb) < BCH_COMPRESSION_OPT_NR
820 ? bch2_compression_opts[BCH_SB_COMPRESSION_TYPE(sb)]
822 BCH_SB_COMPRESSION_TYPE(sb),
829 BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR
830 ? bch2_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)]
832 BCH_SB_STR_HASH_TYPE(sb),
834 BCH_SB_INODE_32BIT(sb),
835 BCH_SB_GC_RESERVE(sb),
836 BCH_SB_ROOT_RESERVE(sb),
838 nr_devices, sb->nr_devices,
845 bch2_sb_print_layout(sb, units);
/* Print each selected field, title-cased by name: */
848 vstruct_for_each(sb, f) {
849 unsigned type = le32_to_cpu(f->type);
852 if (!(fields & (1 << type)))
855 if (type < BCH_SB_FIELD_NR) {
856 scnprintf(name, sizeof(name), "%s", bch2_sb_fields[type]);
857 name[0] = toupper(name[0]);
859 scnprintf(name, sizeof(name), "(unknown field %u)", type);
862 printf("\n%s (size %llu):\n", name, vstruct_bytes(f));
863 if (type < BCH_SB_FIELD_NR)
864 bch2_sb_field_print(sb, f, units);
868 /* ioctl interface: */
870 /* Global control device: */
/* Open the global bcachefs control device; dies (via xopen) on failure. */
871 int bcachectl_open(void)
873 return xopen("/dev/bcachefs-ctl", O_RDWR);
876 /* Filesystem handles (ioctl, sysfs dir): */
878 #define SYSFS_BASE "/sys/fs/bcachefs/"
/* Close both fds held by a filesystem handle (body elided in this view). */
880 void bcache_fs_close(struct bchfs_handle fs)
/*
 * Open a filesystem handle from either a UUID string (looked up via sysfs)
 * or a mounted path (resolved via BCH_IOCTL_QUERY_UUID).  Dies on failure.
 */
886 struct bchfs_handle bcache_fs_open(const char *path)
888 struct bchfs_handle ret;
890 if (!uuid_parse(path, ret.uuid.b)) {
891 /* It's a UUID, look it up in sysfs: */
892 char *sysfs = mprintf(SYSFS_BASE "%s", path);
893 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
/* Control device name comes from the sysfs "minor" attribute: */
895 char *minor = read_file_str(ret.sysfs_fd, "minor");
896 char *ctl = mprintf("/dev/bcachefs%s-ctl", minor);
897 ret.ioctl_fd = xopen(ctl, O_RDWR);
/* Else: treat path as a mounted filesystem and query its UUID: */
904 ret.ioctl_fd = open(path, O_RDONLY);
905 if (ret.ioctl_fd < 0)
906 die("Error opening filesystem at %s: %m", path);
908 struct bch_ioctl_query_uuid uuid;
909 if (ioctl(ret.ioctl_fd, BCH_IOCTL_QUERY_UUID, &uuid) < 0)
910 die("error opening %s: not a bcachefs filesystem", path);
912 ret.uuid = uuid.uuid;
915 uuid_unparse(uuid.uuid.b, uuid_str);
917 char *sysfs = mprintf(SYSFS_BASE "%s", uuid_str);
918 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
926 * Given a path to a block device, open the filesystem it belongs to; also
927 * return the device's idx:
929 struct bchfs_handle bchu_fs_open_by_dev(const char *path, int *idx)
931 char buf[1024], *uuid_str;
933 struct stat stat = xstat(path);
935 if (!S_ISBLK(stat.st_mode))
936 die("%s is not a block device", path);
/* First try: resolve the device's fs UUID via its sysfs bcachefs symlink: */
938 char *sysfs = mprintf("/sys/dev/block/%u:%u/bcachefs",
941 ssize_t len = readlink(sysfs, buf, sizeof(buf));
/* Symlink target ends in .../<uuid>/dev-<idx>; parse both components: */
945 char *p = strrchr(buf, '/');
946 if (!p || sscanf(p + 1, "dev-%u", idx) != 1)
947 die("error parsing sysfs");
950 p = strrchr(buf, '/');
/* Fallback: fs not registered in sysfs — read the superblock directly: */
953 struct bch_opts opts = bch2_opts_empty();
955 opt_set(opts, noexcl, true);
956 opt_set(opts, nochanges, true);
958 struct bch_sb_handle sb;
959 int ret = bch2_read_super(path, &opts, &sb);
961 die("Error opening %s: %s", path, strerror(-ret));
963 *idx = sb.sb->dev_idx;
965 uuid_unparse(sb.sb->user_uuid.b, uuid_str);
967 bch2_free_super(&sb);
970 return bcache_fs_open(uuid_str);
/*
 * Map a device path to its index within filesystem fs.  Opens the device's
 * own filesystem and compares UUIDs; presumably returns -1 on mismatch and
 * idx on match (return statements elided in this view — confirm).
 */
973 int bchu_dev_path_to_idx(struct bchfs_handle fs, const char *dev_path)
976 struct bchfs_handle fs2 = bchu_fs_open_by_dev(dev_path, &idx);
978 if (memcmp(&fs.uuid, &fs2.uuid, sizeof(fs.uuid)))
980 bcache_fs_close(fs2);
/*
 * Kick off a BCH_IOCTL_DATA job and poll its progress fd, printing percent
 * complete and current position until the job finishes.
 */
984 int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd)
986 int progress_fd = xioctl(fs.ioctl_fd, BCH_IOCTL_DATA, &cmd);
989 struct bch_ioctl_data_event e;
991 if (read(progress_fd, &e, sizeof(e)) != sizeof(e))
992 die("error reading from progress fd %m");
/* data_type == U8_MAX is the "done" sentinel: */
997 if (e.p.data_type == U8_MAX)
1002 printf("%llu%% complete: current position %s",
/* Guard against division by zero when sectors_total is 0 (elided branch): */
1004 ? e.p.sectors_done * 100 / e.p.sectors_total
1006 bch2_data_types[e.p.data_type]);
1008 switch (e.p.data_type) {
1009 case BCH_DATA_btree:
1011 printf(" %s:%llu:%llu",
1012 bch2_btree_ids[e.p.btree_id],
1026 /* option parsing */
1028 void bch2_opt_strs_free(struct bch_opt_strs *opts)
1032 for (i = 0; i < bch2_opts_nr; i++) {
1033 free(opts->by_id[i]);
1034 opts->by_id[i] = NULL;
/*
 * Scan argv for --option[=value] arguments matching known bcachefs options
 * (filtered by opt_types), duplicate their values into a bch_opt_strs, and
 * splice the consumed arguments out of argv/argc.  Unrecognized arguments
 * are left in place for the caller.
 */
1038 struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[],
1041 struct bch_opt_strs opts;
1044 memset(&opts, 0, sizeof(opts));
/* Only arguments starting with "--" are candidates: */
1047 char *optstr = strcmp_prefix(argv[i], "--");
1048 char *valstr = NULL, *p;
1049 int optid, nr_args = 1;
1056 optstr = strdup(optstr);
/* Option names are alphanumeric/underscore; '=' or end terminates: */
1059 while (isalpha(*p) || *p == '_')
1067 optid = bch2_opt_lookup(optstr);
1069 !(bch2_opt_table[optid].mode & opt_types)) {
/* Non-bool options may take their value as the next argument: */
1075 bch2_opt_table[optid].type != BCH_OPT_BOOL) {
1077 valstr = argv[i + 1];
1083 opts.by_id[optid] = strdup(valstr);
/* Remove the consumed argument(s) from argv: */
1088 sizeof(char *) * (*argc - i));
/*
 * Parse the collected option strings into typed option values; dies on any
 * string that fails to parse.  BCH_OPT_FN options are skipped (they need a
 * filesystem context to parse).
 */
1097 struct bch_opts bch2_parse_opts(struct bch_opt_strs strs)
1099 struct bch_opts opts = bch2_opts_empty();
1104 for (i = 0; i < bch2_opts_nr; i++) {
1105 if (!strs.by_id[i] ||
1106 bch2_opt_table[i].type == BCH_OPT_FN)
1109 ret = bch2_opt_parse(NULL, &bch2_opt_table[i],
1112 die("Invalid %s: %s",
1113 bch2_opt_table[i].attr.name,
1116 bch2_opt_set_by_id(&opts, i, v);
/* Line-wrap helper used by bch2_opts_usage() (continuation lines elided). */
1122 #define newline(c) \
/*
 * Print usage text for every option whose mode matches opt_types: the
 * --name, its choices or value hint, and its help text wrapped into a
 * column starting at helpcol.
 */
1127 void bch2_opts_usage(unsigned opt_types)
1129 const struct bch_option *opt;
/* c tracks the current output column for wrapping: */
1130 unsigned i, c = 0, helpcol = 30;
1134 for (opt = bch2_opt_table;
1135 opt < bch2_opt_table + bch2_opts_nr;
1137 if (!(opt->mode & opt_types))
1140 c += printf(" --%s", opt->attr.name);
1142 switch (opt->type) {
/* Enumerated options list their valid choices: */
1147 for (i = 0; opt->choices[i]; i++) {
1150 c += printf("%s", opt->choices[i]);
/* Other value-taking options print a "=hint" suffix: */
1155 c += printf("=%s", opt->hint);
/* Help text: pad out to helpcol, then emit line by line: */
1160 const char *l = opt->help;
1166 const char *n = strchrnul(l, '\n');
1168 while (c < helpcol) {
1172 printf("%.*s", (int) (n - l), l);
1185 dev_names bchu_fs_get_devices(struct bchfs_handle fs)
1187 DIR *dir = fdopendir(fs.sysfs_fd);
1193 while ((errno = 0), (d = readdir(dir))) {
1194 struct dev_name n = { 0, NULL, NULL };
1196 if (sscanf(d->d_name, "dev-%u", &n.idx) != 1)
1199 char *block_attr = mprintf("dev-%u/block", n.idx);
1201 char sysfs_block_buf[4096];
1202 ssize_t r = readlinkat(fs.sysfs_fd, block_attr,
1203 sysfs_block_buf, sizeof(sysfs_block_buf));
1205 sysfs_block_buf[r] = '\0';
1206 n.dev = strdup(basename(sysfs_block_buf));
1211 char *label_attr = mprintf("dev-%u/label", n.idx);
1212 n.label = read_file_str(fs.sysfs_fd, label_attr);
1215 darray_append(devs, n);