10 #include <sys/sysmacros.h>
11 #include <sys/types.h>
15 #include <uuid/uuid.h>
17 #include "libbcachefs.h"
19 #include "libbcachefs/bcachefs_format.h"
20 #include "libbcachefs/btree_cache.h"
21 #include "libbcachefs/checksum.h"
22 #include "libbcachefs/disk_groups.h"
23 #include "libbcachefs/opts.h"
24 #include "libbcachefs/replicas.h"
25 #include "libbcachefs/super-io.h"
26 #include "tools-util.h"
28 #define NSEC_PER_SEC 1000000000L
30 /* minimum size filesystem we can create, given a bucket size: */
31 static u64 min_size(unsigned bucket_size)
33 return BCH_MIN_NR_NBUCKETS * bucket_size;
36 static void init_layout(struct bch_sb_layout *l, unsigned block_size,
40 u64 backup; /* offset of 2nd sb */
42 memset(l, 0, sizeof(*l));
44 if (start != BCH_SB_SECTOR)
45 start = round_up(start, block_size);
46 end = round_down(end, block_size);
49 die("insufficient space for superblocks");
52 * Create two superblocks in the allowed range: reserve a maximum of 64k
54 sb_size = min_t(u64, 128, end - start / 2);
56 backup = start + sb_size;
57 backup = round_up(backup, block_size);
59 backup = min(backup, end);
61 sb_size = min(end - backup, backup- start);
62 sb_size = rounddown_pow_of_two(sb_size);
65 die("insufficient space for superblocks");
67 l->magic = BCACHE_MAGIC;
69 l->nr_superblocks = 2;
70 l->sb_max_size_bits = ilog2(sb_size);
71 l->sb_offset[0] = cpu_to_le64(start);
72 l->sb_offset[1] = cpu_to_le64(backup);
/*
 * Pick a bucket size for @dev given the filesystem options, and compute
 * dev->nbuckets.  All sizes are in 512-byte sectors (see the >> 9 below).
 * NOTE: lines are elided in this listing; the do/while loop header and
 * some braces are not visible here.
 */
75 void bch2_pick_bucket_size(struct bch_opts opts, struct dev_opts *dev)
	/* Default superblock location/reservation if the caller didn't set one: */
77 	if (!dev->sb_offset) {
78 		dev->sb_offset	= BCH_SB_SECTOR;
79 		dev->sb_end	= BCH_SB_SECTOR + 256;
	/* Device size in sectors: */
83 	dev->size = get_size(dev->path, dev->fd) >> 9;
85 	if (!dev->bucket_size) {
86 		if (dev->size < min_size(opts.block_size))
87 			die("cannot format %s, too small (%llu sectors, min %llu)",
88 			    dev->path, dev->size, min_size(opts.block_size));
90 		/* Bucket size must be >= block size: */
91 		dev->bucket_size = opts.block_size;
93 		/* Bucket size must be >= btree node size: */
94 		if (opt_defined(opts, btree_node_size))
95 			dev->bucket_size = max_t(unsigned, dev->bucket_size,
96 						 opts.btree_node_size);
98 		/* Want a bucket size of at least 128k, if possible: */
99 		dev->bucket_size = max(dev->bucket_size, 256U);
		/* Scale the bucket size up with device size, in power-of-two steps: */
101 		if (dev->size >= min_size(dev->bucket_size)) {
102 			unsigned scale = max(1,
103 					     ilog2(dev->size / min_size(dev->bucket_size)) / 4);
105 			scale = rounddown_pow_of_two(scale);
107 			/* max bucket size 1 mb */
108 			dev->bucket_size = min(dev->bucket_size * scale, 1U << 11);
			/* (elided) otherwise shrink until the device holds enough buckets: */
111 				dev->bucket_size /= 2;
112 			} while (dev->size < min_size(dev->bucket_size));
116 	dev->nbuckets	= dev->size / dev->bucket_size;
	/* Sanity checks — also cover the case where the user set bucket_size: */
118 	if (dev->bucket_size < opts.block_size)
119 		die("Bucket size cannot be smaller than block size");
121 	if (opt_defined(opts, btree_node_size) &&
122 	    dev->bucket_size < opts.btree_node_size)
123 		die("Bucket size cannot be smaller than btree node size");
125 	if (dev->nbuckets < BCH_MIN_NR_NBUCKETS)
126 		die("Not enough buckets: %llu, need %u (bucket size %u)",
127 		    dev->nbuckets, BCH_MIN_NR_NBUCKETS, dev->bucket_size);
131 static unsigned parse_target(struct bch_sb_handle *sb,
132 struct dev_opts *devs, size_t nr_devs,
141 for (i = devs; i < devs + nr_devs; i++)
142 if (!strcmp(s, i->path))
143 return dev_to_target(i - devs);
145 idx = bch2_disk_path_find(sb, s);
147 return group_to_target(idx);
149 die("Invalid target %s", s);
/*
 * Format a new bcachefs filesystem across @devs and return the superblock.
 * Ownership of the returned sb follows the sb handle (caller frees).
 * NOTE: this listing elides many lines (loop headers, braces, printf rows).
 */
153 struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
154 			   struct bch_opts	fs_opts,
155 			   struct format_opts	opts,
156 			   struct dev_opts	*devs,
159 	struct bch_sb_handle sb = { NULL };
161 	struct bch_sb_field_members *mi;
162 	unsigned max_dev_block_size = 0;
165 	for (i = devs; i < devs + nr_devs; i++)
166 		max_dev_block_size = max(max_dev_block_size,
167 					 get_blocksize(i->path, i->fd));
169 	/* calculate block size: */
170 	if (!opt_defined(fs_opts, block_size)) {
171 		opt_set(fs_opts, block_size, max_dev_block_size);
172 	} else if (fs_opts.block_size < max_dev_block_size)
173 		die("blocksize too small: %u, must be greater than device blocksize %u",
174 		    fs_opts.block_size, max_dev_block_size);
176 	/* calculate bucket sizes: */
177 	for (i = devs; i < devs + nr_devs; i++)
178 		bch2_pick_bucket_size(fs_opts, i);
180 	/* calculate btree node size: */
181 	if (!opt_defined(fs_opts, btree_node_size)) {
182 		/* 256k default btree node size */
183 		opt_set(fs_opts, btree_node_size, 512);
		/* Cap the btree node size at the smallest bucket size: */
185 		for (i = devs; i < devs + nr_devs; i++)
186 			fs_opts.btree_node_size =
187 				min_t(unsigned, fs_opts.btree_node_size,
191 	if (!is_power_of_2(fs_opts.block_size))
192 		die("block size must be power of 2");
194 	if (!is_power_of_2(fs_opts.btree_node_size))
195 		die("btree node size must be power of 2");
197 	if (uuid_is_null(opts.uuid.b))
198 		uuid_generate(opts.uuid.b);
200 	if (bch2_sb_realloc(&sb, 0))
201 		die("insufficient memory");
	/*
	 * NOTE(review): le16_to_cpu here looks like it should be cpu_to_le16 —
	 * we are writing an on-disk field, not reading one.  For a 16-bit
	 * byteswap the two are the same operation, so behavior is unchanged,
	 * but the direction is semantically reversed — confirm and fix.
	 */
203 	sb.sb->version		= le16_to_cpu(bcachefs_metadata_version_current);
204 	sb.sb->version_min	= le16_to_cpu(bcachefs_metadata_version_current);
205 	sb.sb->magic		= BCACHE_MAGIC;
206 	sb.sb->block_size	= cpu_to_le16(fs_opts.block_size);
207 	sb.sb->user_uuid	= opts.uuid;
208 	sb.sb->nr_devices	= nr_devs;
	/* Internal uuid is always freshly generated; user_uuid may be supplied: */
210 	uuid_generate(sb.sb->uuid.b);
	/* (elided) copy the label, bounded by the label field size: */
215 		       min(strlen(opts.label), sizeof(sb.sb->label)));
	/* Persist every option that has a superblock representation: */
218 	     opt_id < bch2_opts_nr;
220 		const struct bch_option *opt = &bch2_opt_table[opt_id];
223 		if (opt->set_sb == SET_NO_SB_OPT)
		/* Use the explicitly-set value, else the compiled-in default: */
226 		v = bch2_opt_defined_by_id(&fs_opts, opt_id)
227 			? bch2_opt_get_by_id(&fs_opts, opt_id)
228 			: bch2_opt_get_by_id(&bch2_opts_default, opt_id);
230 		opt->set_sb(sb.sb, v);
233 	SET_BCH_SB_ENCODED_EXTENT_MAX_BITS(sb.sb,
234 					   ilog2(opts.encoded_extent_max));
	/* Record the format time as the filesystem's time base: */
237 	if (clock_gettime(CLOCK_REALTIME, &now))
238 		die("error getting current time: %m");
240 	sb.sb->time_base_lo	= cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
241 	sb.sb->time_precision	= cpu_to_le32(1);
243 	sb.sb->features[0] |= 1ULL << BCH_FEATURE_NEW_SIPHASH;
	/* Member info: one bch_member per device, sized in u64 units: */
246 	mi = bch2_sb_resize_members(&sb,
247 			(sizeof(*mi) + sizeof(struct bch_member) *
248 			 nr_devs) / sizeof(u64));
250 	for (i = devs; i < devs + nr_devs; i++) {
251 		struct bch_member *m = mi->members + (i - devs);
253 		uuid_generate(m->uuid.b);
254 		m->nbuckets	= cpu_to_le64(i->nbuckets);
256 		m->bucket_size	= cpu_to_le16(i->bucket_size);
258 		SET_BCH_MEMBER_REPLACEMENT(m,	CACHE_REPLACEMENT_LRU);
259 		SET_BCH_MEMBER_DISCARD(m,	i->discard);
260 		SET_BCH_MEMBER_DATA_ALLOWED(m,	i->data_allowed);
261 		SET_BCH_MEMBER_DURABILITY(m,	i->durability + 1);
	/* Disk groups (labels) — second pass so member pointers stay valid: */
265 	for (i = devs; i < devs + nr_devs; i++) {
266 		struct bch_member *m = mi->members + (i - devs);
272 		idx = bch2_disk_path_find_or_create(&sb, i->group);
		/*
		 * NOTE(review): idx is an int error code here but is printed
		 * with %s — format/argument mismatch (UB).  Presumably this
		 * should be %d or strerror(-idx); confirm against upstream.
		 */
274 			die("error creating disk path: %s", idx);
		/* Stored group is 1-based; 0 means "no group": */
276 		SET_BCH_MEMBER_GROUP(m,	idx + 1);
279 	SET_BCH_SB_FOREGROUND_TARGET(sb.sb,
280 		parse_target(&sb, devs, nr_devs, fs_opt_strs.foreground_target));
281 	SET_BCH_SB_BACKGROUND_TARGET(sb.sb,
282 		parse_target(&sb, devs, nr_devs, fs_opt_strs.background_target));
283 	SET_BCH_SB_PROMOTE_TARGET(sb.sb,
284 		parse_target(&sb, devs, nr_devs, fs_opt_strs.promote_target));
287 	if (opts.encrypted) {
288 		struct bch_sb_field_crypt *crypt =
289 			bch2_sb_resize_crypt(&sb, sizeof(*crypt) / sizeof(u64));
291 		bch_sb_crypt_init(sb.sb, crypt, opts.passphrase);
292 		SET_BCH_SB_ENCRYPTION_TYPE(sb.sb, 1);
	/* Write the superblock out to every device: */
295 	for (i = devs; i < devs + nr_devs; i++) {
296 		sb.sb->dev_idx = i - devs;
298 		init_layout(&sb.sb->layout, fs_opts.block_size,
299 			    i->sb_offset, i->sb_end);
301 		if (i->sb_offset == BCH_SB_SECTOR) {
302 			/* Zero start of disk */
303 			static const char zeroes[BCH_SB_SECTOR << 9];
305 			xpwrite(i->fd, zeroes, BCH_SB_SECTOR << 9, 0);
308 		bch2_super_write(i->fd, sb.sb);
315 void bch2_super_write(int fd, struct bch_sb *sb)
317 struct nonce nonce = { 0 };
320 for (i = 0; i < sb->layout.nr_superblocks; i++) {
321 sb->offset = sb->layout.sb_offset[i];
323 if (sb->offset == BCH_SB_SECTOR) {
324 /* Write backup layout */
325 xpwrite(fd, &sb->layout, sizeof(sb->layout),
326 BCH_SB_LAYOUT_SECTOR << 9);
329 sb->csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb), nonce, sb);
330 xpwrite(fd, sb, vstruct_bytes(sb),
331 le64_to_cpu(sb->offset) << 9);
337 struct bch_sb *__bch2_super_read(int fd, u64 sector)
339 struct bch_sb sb, *ret;
341 xpread(fd, &sb, sizeof(sb), sector << 9);
343 if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic)))
344 die("not a bcachefs superblock");
346 size_t bytes = vstruct_bytes(&sb);
350 xpread(fd, ret, bytes, sector << 9);
355 static unsigned get_dev_has_data(struct bch_sb *sb, unsigned dev)
357 struct bch_sb_field_replicas *replicas;
358 struct bch_replicas_entry *r;
359 unsigned i, data_has = 0;
361 replicas = bch2_sb_get_replicas(sb);
364 for_each_replicas_entry(replicas, r)
365 for (i = 0; i < r->nr_devs; i++)
366 if (r->devs[i] == dev)
367 data_has |= 1 << r->data_type;
/*
 * Render a foreground/background/promote target @v as human-readable text
 * into @buf.  Returns the scnprintf() length.  NOTE: several lines (the
 * target_decode switch arms, buffers, braces) are elided in this listing.
 */
372 static int bch2_sb_get_target(struct bch_sb *sb, char *buf, size_t len, u64 v)
374 	struct target t = target_decode(v);
	/* (elided) TARGET_NULL case: */
379 		return scnprintf(buf, len, "none");
	/* Device target: show index plus the member's uuid if it exists: */
381 		struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
382 		struct bch_member *m = mi->members + t.dev;
384 		if (bch2_dev_exists(sb, mi, t.dev)) {
387 			uuid_unparse(m->uuid.b, uuid_str);
389 			ret = scnprintf(buf, len, "Device %u (%s)", t.dev,
392 			ret = scnprintf(buf, len, "Bad device %u", t.dev);
	/* Group target: show index plus the group label if still valid: */
398 		struct bch_sb_field_disk_groups *gi;
399 		gi = bch2_sb_get_disk_groups(sb);
401 		struct bch_disk_group *g = gi->entries + t.group;
403 		if (t.group < disk_groups_nr(gi) && !BCH_GROUP_DELETED(g)) {
404 			ret = scnprintf(buf, len, "Group %u (%.*s)", t.group,
405 					BCH_SB_LABEL_SIZE, g->label);
407 			ret = scnprintf(buf, len, "Bad group %u", t.group);
418 /* superblock printing: */
/* Print the superblock layout: max sb size, count, and each sb offset.
 * (printf header rows and loop braces are elided in this listing.) */
420 static void bch2_sb_print_layout(struct bch_sb *sb, enum units units)
422 	struct bch_sb_layout *l = &sb->layout;
426 	       "  superblock max size:	%s\n"
427 	       "  nr superblocks:	%u\n"
430 	       pr_units(1 << l->sb_max_size_bits, units),
433 	for (i = 0; i < l->nr_superblocks; i++) {
436 		printf("%llu", le64_to_cpu(l->sb_offset[i]));
/* Print the journal field: the list of journal bucket numbers. */
441 static void bch2_sb_print_journal(struct bch_sb *sb, struct bch_sb_field *f,
444 	struct bch_sb_field_journal *journal = field_to_type(f, journal);
445 	unsigned i, nr = bch2_nr_journal_buckets(journal);
447 	printf("  Buckets:	");
448 	for (i = 0; i < nr; i++) {
451 		printf("%llu", le64_to_cpu(journal->buckets[i]));
/*
 * Print one stanza per existing member device: uuid, sizes, state,
 * group, allowed/held data types, last mount time.
 * NOTE: many printf format rows and braces are elided in this listing.
 */
456 static void bch2_sb_print_members(struct bch_sb *sb, struct bch_sb_field *f,
459 	struct bch_sb_field_members *mi = field_to_type(f, members);
460 	struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
463 	for (i = 0; i < sb->nr_devices; i++) {
464 		struct bch_member *m = mi->members + i;
465 		time_t last_mount = le64_to_cpu(m->last_mount);
466 		char member_uuid_str[40];
467 		char data_allowed_str[100];
468 		char data_has_str[100];
469 		char group[BCH_SB_LABEL_SIZE+10];
		/* Skip slots for devices that were removed: */
472 		if (!bch2_member_exists(m))
475 		uuid_unparse(m->uuid.b, member_uuid_str);
		/* Resolve the member's disk group (1-based; 0 = none): */
477 		if (BCH_MEMBER_GROUP(m)) {
478 			unsigned idx = BCH_MEMBER_GROUP(m) - 1;
480 			if (idx < disk_groups_nr(gi)) {
481 				snprintf(group, sizeof(group), "%.*s (%u)",
483 					 gi->entries[idx].label, idx);
485 				strcpy(group, "(bad disk groups section)");
488 			strcpy(group, "(none)");
491 		bch2_flags_to_text(&PBUF(data_allowed_str),
493 				   BCH_MEMBER_DATA_ALLOWED(m));
494 		if (!data_allowed_str[0])
495 			strcpy(data_allowed_str, "(none)");
497 		bch2_flags_to_text(&PBUF(data_has_str),
499 				   get_dev_has_data(sb, i));
500 		if (!data_has_str[0])
501 			strcpy(data_has_str, "(none)");
		/* Format last mount time; 0 means never mounted: */
504 			struct tm *tm = localtime(&last_mount);
505 			size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
507 				strcpy(time_str, "(formatting error)");
509 			strcpy(time_str, "(never)");
512 		printf("  Device %u:\n"
516 		       "    First bucket:	%u\n"
521 		       "    Data allowed:	%s\n"
525 		       "    Replacement policy: %s\n"
528 		       pr_units(le16_to_cpu(m->bucket_size) *
529 				le64_to_cpu(m->nbuckets), units),
530 		       pr_units(le16_to_cpu(m->bucket_size), units),
531 		       le16_to_cpu(m->first_bucket),
532 		       le64_to_cpu(m->nbuckets),
		/* Guard table lookups against out-of-range on-disk values: */
535 		       BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
536 		       ? bch2_dev_state[BCH_MEMBER_STATE(m)]
543 		       BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
544 		       ? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
547 		       BCH_MEMBER_DISCARD(m));
551 static void bch2_sb_print_crypt(struct bch_sb *sb, struct bch_sb_field *f,
554 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
556 printf(" KFD: %llu\n"
560 BCH_CRYPT_KDF_TYPE(crypt),
561 BCH_KDF_SCRYPT_N(crypt),
562 BCH_KDF_SCRYPT_R(crypt),
563 BCH_KDF_SCRYPT_P(crypt));
/* Print the legacy (v0) replicas field: one line per entry, listing the
 * data type and the member devices holding it. */
566 static void bch2_sb_print_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f,
569 	struct bch_sb_field_replicas_v0 *replicas = field_to_type(f, replicas_v0);
570 	struct bch_replicas_entry_v0 *e;
573 	for_each_replicas_entry(replicas, e) {
574 		printf_pad(32, "  %s:", bch2_data_types[e->data_type]);
577 		for (i = 0; i < e->nr_devs; i++) {
580 			printf("%u", e->devs[i]);
/* Print the current replicas field: data type, required/total counts,
 * and the member devices holding each entry. */
586 static void bch2_sb_print_replicas(struct bch_sb *sb, struct bch_sb_field *f,
589 	struct bch_sb_field_replicas *replicas = field_to_type(f, replicas);
590 	struct bch_replicas_entry *e;
593 	for_each_replicas_entry(replicas, e) {
594 		printf_pad(32, "  %s: %u/%u",
595 			   bch2_data_types[e->data_type],
600 		for (i = 0; i < e->nr_devs; i++) {
603 			printf("%u", e->devs[i]);
/* Printers for fields with no interesting human-readable content —
 * bodies are empty stubs (elided in this listing) so the dispatch table
 * below has an entry for every field type. */
609 static void bch2_sb_print_quota(struct bch_sb *sb, struct bch_sb_field *f,
614 static void bch2_sb_print_disk_groups(struct bch_sb *sb, struct bch_sb_field *f,
619 static void bch2_sb_print_clean(struct bch_sb *sb, struct bch_sb_field *f,
624 static void bch2_sb_print_journal_seq_blacklist(struct bch_sb *sb, struct bch_sb_field *f,
/* Per-field-type print dispatch: the table is generated from the field
 * list macro, one bch2_sb_print_##f entry per BCH_SB_FIELD_##f.
 * (The macro invocation lines are elided in this listing.) */
629 typedef void (*sb_field_print_fn)(struct bch_sb *, struct bch_sb_field *, enum units);
631 struct bch_sb_field_toolops {
632 	sb_field_print_fn	print;
635 static const struct bch_sb_field_toolops bch2_sb_field_ops[] = {
637 	[BCH_SB_FIELD_##f] = {			\
638 		.print	= bch2_sb_print_##f,	\
644 static inline void bch2_sb_field_print(struct bch_sb *sb,
645 struct bch_sb_field *f,
648 unsigned type = le32_to_cpu(f->type);
650 if (type < BCH_SB_FIELD_NR)
651 bch2_sb_field_ops[type].print(sb, f, units);
653 printf("(unknown field %u)\n", type);
/*
 * Print a full human-readable dump of the superblock: header summary,
 * optionally the layout, then each field selected by the @fields bitmask.
 * NOTE: many printf format rows and braces are elided in this listing.
 */
656 void bch2_sb_print(struct bch_sb *sb, bool print_layout,
657 		   unsigned fields, enum units units)
659 	struct bch_sb_field_members *mi;
660 	char user_uuid_str[40], internal_uuid_str[40];
661 	char fields_have_str[200];
662 	char label[BCH_SB_LABEL_SIZE + 1];
664 	char foreground_str[64];
665 	char background_str[64];
666 	char promote_str[64];
667 	struct bch_sb_field *f;
669 	unsigned nr_devices = 0;
670 	time_t time_base = le64_to_cpu(sb->time_base_lo) / NSEC_PER_SEC;
	/* Label is not guaranteed NUL-terminated on disk — terminate a copy: */
672 	memcpy(label, sb->label, BCH_SB_LABEL_SIZE);
673 	label[BCH_SB_LABEL_SIZE] = '\0';
675 	uuid_unparse(sb->user_uuid.b, user_uuid_str);
676 	uuid_unparse(sb->uuid.b, internal_uuid_str);
	/* Format the filesystem's creation time; 0 means not set: */
679 		struct tm *tm = localtime(&time_base);
680 		size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
682 			strcpy(time_str, "(formatting error)");
684 		strcpy(time_str, "(not set)");
687 	mi = bch2_sb_get_members(sb);
689 		struct bch_member *m;
	/* Count live (non-removed) member devices: */
691 		for (m = mi->members;
692 		     m < mi->members + sb->nr_devices;
694 			nr_devices += bch2_member_exists(m);
697 	bch2_sb_get_target(sb, foreground_str, sizeof(foreground_str),
698 		BCH_SB_FOREGROUND_TARGET(sb));
700 	bch2_sb_get_target(sb, background_str, sizeof(background_str),
701 		BCH_SB_BACKGROUND_TARGET(sb));
703 	bch2_sb_get_target(sb, promote_str, sizeof(promote_str),
704 		BCH_SB_PROMOTE_TARGET(sb));
	/* Bitmask of field types present in this superblock: */
706 	vstruct_for_each(sb, f)
707 		fields_have |= 1 << le32_to_cpu(f->type);
708 	bch2_flags_to_text(&PBUF(fields_have_str),
709 			   bch2_sb_fields, fields_have);
711 	printf("External UUID:			%s\n"
712 	       "Internal UUID:			%s\n"
717 	       "Btree node size:		%s\n"
721 	       "Metadata replicas:		%llu\n"
722 	       "Data replicas:			%llu\n"
724 	       "Metadata checksum type:		%s (%llu)\n"
725 	       "Data checksum type:		%s (%llu)\n"
726 	       "Compression type:		%s (%llu)\n"
728 	       "Foreground write target:	%s\n"
729 	       "Background write target:	%s\n"
730 	       "Promote target:			%s\n"
732 	       "String hash type:		%s (%llu)\n"
733 	       "32 bit inodes:			%llu\n"
734 	       "GC reserve percentage:		%llu%%\n"
735 	       "Root reserve percentage:	%llu%%\n"
737 	       "Devices:			%u live, %u total\n"
739 	       "Superblock size:		%llu\n",
743 	       le64_to_cpu(sb->version),
745 	       pr_units(le16_to_cpu(sb->block_size), units),
746 	       pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
	/* Guard every table lookup against out-of-range on-disk values: */
748 	       BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
749 	       ? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
754 	       BCH_SB_META_REPLICAS_WANT(sb),
755 	       BCH_SB_DATA_REPLICAS_WANT(sb),
757 	       BCH_SB_META_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
758 	       ? bch2_csum_types[BCH_SB_META_CSUM_TYPE(sb)]
760 	       BCH_SB_META_CSUM_TYPE(sb),
762 	       BCH_SB_DATA_CSUM_TYPE(sb) < BCH_CSUM_OPT_NR
763 	       ? bch2_csum_types[BCH_SB_DATA_CSUM_TYPE(sb)]
765 	       BCH_SB_DATA_CSUM_TYPE(sb),
767 	       BCH_SB_COMPRESSION_TYPE(sb) < BCH_COMPRESSION_OPT_NR
768 	       ? bch2_compression_types[BCH_SB_COMPRESSION_TYPE(sb)]
770 	       BCH_SB_COMPRESSION_TYPE(sb),
776 	       BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR
777 	       ? bch2_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)]
779 	       BCH_SB_STR_HASH_TYPE(sb),
781 	       BCH_SB_INODE_32BIT(sb),
782 	       BCH_SB_GC_RESERVE(sb),
783 	       BCH_SB_ROOT_RESERVE(sb),
785 	       nr_devices, sb->nr_devices,
792 		bch2_sb_print_layout(sb, units);
	/* Print each field the caller asked for: */
795 	vstruct_for_each(sb, f) {
796 		unsigned type = le32_to_cpu(f->type);
799 		if (!(fields & (1 << type)))
802 		if (type < BCH_SB_FIELD_NR) {
803 			scnprintf(name, sizeof(name), "%s", bch2_sb_fields[type]);
804 			name[0] = toupper(name[0]);
806 			scnprintf(name, sizeof(name), "(unknown field %u)", type);
809 		printf("\n%s (size %llu):\n", name, vstruct_bytes(f));
810 		if (type < BCH_SB_FIELD_NR)
811 			bch2_sb_field_print(sb, f, units);
815 /* ioctl interface: */
817 /* Global control device: */
/* Open the global bcachefs control device; dies (via xopen) on failure. */
int bcachectl_open(void)
{
	return xopen("/dev/bcachefs-ctl", O_RDWR);
}
823 /* Filesystem handles (ioctl, sysfs dir): */
825 #define SYSFS_BASE "/sys/fs/bcachefs/"
827 void bcache_fs_close(struct bchfs_handle fs)
/*
 * Open a filesystem handle (ioctl fd + sysfs fd) given either a UUID
 * string or a path to a mounted filesystem / device node.
 * NOTE: braces and the else arm's declarations are elided in this listing.
 */
833 struct bchfs_handle bcache_fs_open(const char *path)
835 	struct bchfs_handle ret;
837 	if (!uuid_parse(path, ret.uuid.b)) {
838 		/* It's a UUID, look it up in sysfs: */
839 		char *sysfs = mprintf(SYSFS_BASE "%s", path);
840 		ret.sysfs_fd = xopen(sysfs, O_RDONLY);
		/* The control node's minor number lives in sysfs: */
842 		char *minor = read_file_str(ret.sysfs_fd, "minor");
843 		char *ctl = mprintf("/dev/bcachefs%s-ctl", minor);
844 		ret.ioctl_fd = xopen(ctl, O_RDWR);
	/* (elided) else: treat @path as a file on the filesystem itself: */
851 		ret.ioctl_fd = xopen(path, O_RDONLY);
	/* Ask the filesystem for its UUID to locate its sysfs directory: */
853 		struct bch_ioctl_query_uuid uuid;
854 		if (ioctl(ret.ioctl_fd, BCH_IOCTL_QUERY_UUID, &uuid) < 0)
855 			die("error opening %s: not a bcachefs filesystem", path);
857 		ret.uuid = uuid.uuid;
860 		uuid_unparse(uuid.uuid.b, uuid_str);
862 		char *sysfs = mprintf(SYSFS_BASE "%s", uuid_str);
863 		ret.sysfs_fd = xopen(sysfs, O_RDONLY);
871  * Given a path to a block device, open the filesystem it belongs to; also
872  * return the device's idx:
/*
 * Two paths: if the device is online, follow its sysfs "bcachefs" symlink
 * to learn the dev index and fs UUID; otherwise read the superblock
 * directly.  NOTE: many lines (braces, else arms, readlink handling) are
 * elided in this listing.
 */
874 struct bchfs_handle bchu_fs_open_by_dev(const char *path, unsigned *idx)
	/*
	 * NOTE(review): uuid_str is declared uninitialized here but is later
	 * passed to uuid_unparse() as the output buffer — presumably an
	 * elided line points it at buf first; confirm against upstream.
	 */
876 	char buf[1024], *uuid_str;
878 	struct stat stat = xstat(path);
880 	if (!S_ISBLK(stat.st_mode))
881 		die("%s is not a block device", path);
	/* Online path: resolve /sys/dev/block/<maj>:<min>/bcachefs symlink: */
883 	char *sysfs = mprintf("/sys/dev/block/%u:%u/bcachefs",
	/* NOTE(review): readlink() does not NUL-terminate buf — presumably an
	 * elided line terminates it before the strrchr() calls; verify. */
886 	ssize_t len = readlink(sysfs, buf, sizeof(buf));
	/* Symlink target ends in .../<uuid>/dev-<idx>: */
890 		char *p = strrchr(buf, '/');
891 		if (!p || sscanf(p + 1, "dev-%u", idx) != 1)
892 			die("error parsing sysfs");
895 		p = strrchr(buf, '/');
	/* Offline path: read the superblock without exclusive open or writes: */
898 		struct bch_opts opts = bch2_opts_empty();
900 		opt_set(opts, noexcl,		true);
901 		opt_set(opts, nochanges,	true);
903 		struct bch_sb_handle sb;
904 		int ret = bch2_read_super(path, &opts, &sb);
906 			die("Error opening %s: %s", path, strerror(-ret));
908 		*idx = sb.sb->dev_idx;
910 		uuid_unparse(sb.sb->user_uuid.b, uuid_str);
912 		bch2_free_super(&sb);
915 	return bcache_fs_open(uuid_str);
/*
 * Kick off a data job (rereplicate/migrate/...) via BCH_IOCTL_DATA and
 * poll the returned progress fd, printing percent complete until done.
 * NOTE: the loop header, switch arms, and function tail are elided in
 * this listing.
 */
918 int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd)
920 	int progress_fd = xioctl(fs.ioctl_fd, BCH_IOCTL_DATA, &cmd);
923 		struct bch_ioctl_data_event e;
925 		if (read(progress_fd, &e, sizeof(e)) != sizeof(e))
926 			die("error reading from progress fd %m");
		/* U8_MAX data_type marks end of progress stream: */
931 		if (e.p.data_type == U8_MAX)
936 		printf("%llu%% complete: current position %s",
		/* Guard against division by zero when total is unknown: */
938 		       ? e.p.sectors_done * 100 / e.p.sectors_total
940 		       bch2_data_types[e.p.data_type]);
942 		switch (e.p.data_type) {
945 			printf(" %s:%llu:%llu",
946 			       bch2_btree_ids[e.p.btree_id],
/*
 * Scan argv for --<opt>[=<value>] arguments matching bch2 options of the
 * given types; record their string values and splice the consumed
 * arguments out of argv (adjusting *argc).  NOTE: the argv loop header,
 * several conditions, and the memmove call are elided in this listing.
 */
961 struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[],
964 	struct bch_opt_strs opts;
967 	memset(&opts, 0, sizeof(opts));
	/* Only long options ("--...") are considered: */
970 		char *optstr = strcmp_prefix(argv[i], "--");
971 		char *valstr = NULL, *p;
972 		int optid, nr_args = 1;
	/* Copy so we can split "opt=val" in place: */
979 		optstr = strdup(optstr);
982 		while (isalpha(*p) || *p == '_')
990 		optid = bch2_opt_lookup(optstr);
		/* Skip unknown options or ones of the wrong type: */
992 		    !(bch2_opt_table[optid].mode & opt_types)) {
		/* Non-boolean options without "=val" consume the next argument: */
999 		    bch2_opt_table[optid].type != BCH_OPT_BOOL) {
1001 			valstr = argv[i + 1];
1007 		opts.by_id[optid] = valstr;
		/* (elided) memmove: remove the consumed args from argv: */
1012 			sizeof(char *) * (*argc - i));
/*
 * Parse the option strings collected by bch2_cmdline_opts_get() into a
 * typed bch_opts struct; dies on an invalid value.  (Loop braces and the
 * parse-call arguments are elided in this listing.)
 */
1019 struct bch_opts bch2_parse_opts(struct bch_opt_strs strs)
1021 	struct bch_opts opts = bch2_opts_empty();
1026 	for (i = 0; i < bch2_opts_nr; i++) {
		/* BCH_OPT_FN options can't be parsed without a filesystem: */
1027 		if (!strs.by_id[i] ||
1028 		    bch2_opt_table[i].type == BCH_OPT_FN)
1031 		ret = bch2_opt_parse(NULL, &bch2_opt_table[i],
1034 			die("Invalid %s: %s", strs.by_id[i], strerror(-ret));
1036 		bch2_opt_set_by_id(&opts, i, v);
1042 void bch2_opts_usage(unsigned opt_types)
1044 const struct bch_option *opt;
1045 unsigned i, c = 0, helpcol = 30;
1048 while (c < helpcol) {
1059 for (opt = bch2_opt_table;
1060 opt < bch2_opt_table + bch2_opts_nr;
1062 if (!(opt->mode & opt_types))
1065 c += printf(" --%s", opt->attr.name);
1067 switch (opt->type) {
1072 for (i = 0; opt->choices[i]; i++) {
1075 c += printf("%s", opt->choices[i]);
1080 c += printf("=%s", opt->hint);
1085 const char *l = opt->help;
1091 const char *n = strchrnul(l, '\n');
1094 printf("%.*s", (int) (n - l), l);