12 #include <sys/sysmacros.h>
13 #include <sys/types.h>
17 #include <uuid/uuid.h>
19 #include "libbcachefs.h"
21 #include "libbcachefs/bcachefs_format.h"
22 #include "libbcachefs/btree_cache.h"
23 #include "libbcachefs/checksum.h"
24 #include "libbcachefs/disk_groups.h"
25 #include "libbcachefs/journal_seq_blacklist.h"
26 #include "libbcachefs/opts.h"
27 #include "libbcachefs/replicas.h"
28 #include "libbcachefs/super-io.h"
29 #include "tools-util.h"
31 #define NSEC_PER_SEC 1000000000L
/*
 * Compute the on-disk superblock layout for a device: two superblocks
 * placed within [sb_start, sb_end), block-size aligned except when at
 * the default BCH_SB_SECTOR offset.
 *
 * NOTE(review): this excerpt is missing lines of the original (the
 * block_size/sb_size parameters, the loop increment, and the range
 * check guarding die()) -- comments reflect only the visible code.
 */
33 static void init_layout(struct bch_sb_layout *l,
36 u64 sb_start, u64 sb_end)
38 u64 sb_pos = sb_start;
/* Start from a zeroed layout so unused slots are well-defined. */
41 memset(l, 0, sizeof(*l));
43 l->magic = BCHFS_MAGIC;
45 l->nr_superblocks = 2;
/* Max size stored as log2; presumably sb_size is a power of two -- TODO confirm at caller. */
46 l->sb_max_size_bits = ilog2(sb_size);
48 /* Create two superblocks in the allowed range: */
49 for (i = 0; i < l->nr_superblocks; i++) {
/* The default superblock sector is exempt from block-size alignment. */
50 if (sb_pos != BCH_SB_SECTOR)
51 sb_pos = round_up(sb_pos, block_size);
53 l->sb_offset[i] = cpu_to_le64(sb_pos);
/* Fatal: the requested range cannot hold both superblocks. */
58 die("insufficient space for superblocks: start %llu end %llu > %llu size %u",
59 sb_start, sb_pos, sb_end, sb_size);
62 /* minimum size filesystem we can create, given a bucket size: */
/*
 * Returns BCH_MIN_NR_NBUCKETS buckets' worth of space (presumably bytes,
 * since callers compare it against dev->size -- see bch2_pick_bucket_size).
 */
63 static u64 min_size(unsigned bucket_size)
65 return BCH_MIN_NR_NBUCKETS * bucket_size;
/*
 * Pick a default bucket size for a device given the filesystem options.
 *
 * Floors: block size, btree node size (if set), and 128k when the device
 * is large enough. Then scales up with device size in power-of-two steps,
 * capped at 1MB. Dies if the device cannot hold BCH_MIN_NR_NBUCKETS
 * buckets even at the block size.
 *
 * NOTE(review): the do-loop head and some declarations are not visible
 * in this excerpt; the visible loop tail shrinks bucket_size until the
 * minimum bucket count fits.
 */
68 u64 bch2_pick_bucket_size(struct bch_opts opts, struct dev_opts *dev)
/* Refuse devices too small to hold the minimum bucket count at block size. */
72 if (dev->size < min_size(opts.block_size))
73 die("cannot format %s, too small (%llu bytes, min %llu)",
74 dev->path, dev->size, min_size(opts.block_size));
76 /* Bucket size must be >= block size: */
77 bucket_size = opts.block_size;
79 /* Bucket size must be >= btree node size: */
80 if (opt_defined(opts, btree_node_size))
81 bucket_size = max_t(unsigned, bucket_size,
82 opts.btree_node_size);
84 /* Want a bucket size of at least 128k, if possible: */
85 bucket_size = max(bucket_size, 128ULL << 10);
87 if (dev->size >= min_size(bucket_size)) {
/* Grow with device size: one doubling per ~16x of headroom, rounded to a power of two. */
88 unsigned scale = max(1,
89 ilog2(dev->size / min_size(bucket_size)) / 4);
91 scale = rounddown_pow_of_two(scale);
93 /* max bucket size 1 mb */
94 bucket_size = min(bucket_size * scale, 1ULL << 20);
98 } while (dev->size < min_size(bucket_size));
/*
 * Validate a device's bucket size / bucket count against filesystem
 * options and on-disk format limits; die()s with a descriptive message
 * on any violation.
 */
104 void bch2_check_bucket_size(struct bch_opts opts, struct dev_opts *dev)
106 if (dev->bucket_size < opts.block_size)
107 die("Bucket size (%llu) cannot be smaller than block size (%u)",
108 dev->bucket_size, opts.block_size);
110 if (opt_defined(opts, btree_node_size) &&
111 dev->bucket_size < opts.btree_node_size)
112 die("Bucket size (%llu) cannot be smaller than btree node size (%u)",
113 dev->bucket_size, opts.btree_node_size);
115 if (dev->nbuckets < BCH_MIN_NR_NBUCKETS)
116 die("Not enough buckets: %llu, need %u (bucket size %llu)",
117 dev->nbuckets, BCH_MIN_NR_NBUCKETS, dev->bucket_size);
/*
 * The member format stores bucket size in 512-byte sectors as a u16
 * (see the cpu_to_le16(bucket_size >> 9) in bch2_format), hence the
 * U16_MAX << 9 cap.
 */
119 if (dev->bucket_size > (u32) U16_MAX << 9)
120 die("Bucket size (%llu) too big (max %u)",
121 dev->bucket_size, (u32) U16_MAX << 9);
/*
 * Resolve a target string to a numeric target id: an exact match against
 * a member device path wins (device target); otherwise the string is
 * looked up as a disk group/label in the superblock. Dies if neither
 * matches.
 *
 * NOTE(review): the trailing parameter (the string `s`), locals, and the
 * check on bch2_disk_path_find()'s result are not visible in this excerpt.
 */
124 static unsigned parse_target(struct bch_sb_handle *sb,
125 struct dev_opts *devs, size_t nr_devs,
134 for (i = devs; i < devs + nr_devs; i++)
135 if (!strcmp(s, i->path))
136 return dev_to_target(i - devs);
138 idx = bch2_disk_path_find(sb, s);
140 return group_to_target(idx);
142 die("Invalid target %s", s);
/*
 * Build and write a new bcachefs superblock across all member devices.
 *
 * Pipeline (as visible in this excerpt): derive block size from the
 * devices, size/validate buckets, pick a btree node size, populate the
 * superblock fields and per-member entries, resolve targets, optionally
 * set up encryption, then per device compute the layout and write the
 * superblock(s) to disk.
 *
 * NOTE(review): many lines of the original are missing from this excerpt
 * (several loop/if closers, label copy, option-loop head, returns);
 * comments below are grounded only in the visible code.
 */
146 struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
147 struct bch_opts fs_opts,
148 struct format_opts opts,
149 struct dev_opts *devs,
152 struct bch_sb_handle sb = { NULL };
154 unsigned max_dev_block_size = 0;
156 u64 min_bucket_size = U64_MAX;
/* Filesystem block size must cover the largest device sector size. */
158 for (i = devs; i < devs + nr_devs; i++)
159 max_dev_block_size = max(max_dev_block_size, get_blocksize(i->bdev->bd_buffered_fd));
161 /* calculate block size: */
162 if (!opt_defined(fs_opts, block_size)) {
163 opt_set(fs_opts, block_size, max_dev_block_size);
164 } else if (fs_opts.block_size < max_dev_block_size)
165 die("blocksize too small: %u, must be greater than device blocksize %u",
166 fs_opts.block_size, max_dev_block_size);
168 /* get device size, if it wasn't specified: */
169 for (i = devs; i < devs + nr_devs; i++)
171 i->size = get_size(i->bdev->bd_buffered_fd);
173 /* calculate bucket sizes: */
/* All members share the smallest chosen (or user-supplied) bucket size. */
174 for (i = devs; i < devs + nr_devs; i++)
175 min_bucket_size = min(min_bucket_size,
176 i->bucket_size ?: bch2_pick_bucket_size(fs_opts, i));
178 for (i = devs; i < devs + nr_devs; i++)
180 i->bucket_size = min_bucket_size;
182 for (i = devs; i < devs + nr_devs; i++) {
183 i->nbuckets = i->size / i->bucket_size;
184 bch2_check_bucket_size(fs_opts, i);
187 /* calculate btree node size: */
188 if (!opt_defined(fs_opts, btree_node_size)) {
189 /* 256k default btree node size */
190 opt_set(fs_opts, btree_node_size, 256 << 10);
/* ...but never larger than (presumably) a member's bucket size -- trailing operand not visible here. */
192 for (i = devs; i < devs + nr_devs; i++)
193 fs_opts.btree_node_size =
194 min_t(unsigned, fs_opts.btree_node_size,
/* Generate the external (user-visible) UUID if the caller didn't supply one. */
198 if (uuid_is_null(opts.uuid.b))
199 uuid_generate(opts.uuid.b);
201 if (bch2_sb_realloc(&sb, 0))
202 die("insufficient memory");
/*
 * NOTE(review): le16_to_cpu here looks like it should be cpu_to_le16
 * (we're storing to the on-disk struct); both are the same byte-swap so
 * behavior is identical -- consider renaming for clarity.
 */
204 sb.sb->version = le16_to_cpu(opts.version);
205 sb.sb->version_min = le16_to_cpu(opts.version);
206 sb.sb->magic = BCHFS_MAGIC;
207 sb.sb->user_uuid = opts.uuid;
208 sb.sb->nr_devices = nr_devs;
/* Only advertise the full feature set when formatting at the current version. */
210 if (opts.version == bcachefs_metadata_version_current)
211 sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
/* Internal UUID is always freshly generated, distinct from user_uuid. */
213 uuid_generate(sb.sb->uuid.b);
/* Label copy is bounded by the on-disk field size (copy call itself not visible here). */
218 min(strlen(opts.label), sizeof(sb.sb->label)));
/* Persist every option into the superblock, falling back to defaults. */
221 opt_id < bch2_opts_nr;
225 v = bch2_opt_defined_by_id(&fs_opts, opt_id)
226 ? bch2_opt_get_by_id(&fs_opts, opt_id)
227 : bch2_opt_get_by_id(&bch2_opts_default, opt_id);
229 __bch2_opt_set_sb(sb.sb, &bch2_opt_table[opt_id], v);
/* Record the time base so on-disk timestamps can be decoded. */
233 if (clock_gettime(CLOCK_REALTIME, &now))
234 die("error getting current time: %m");
236 sb.sb->time_base_lo = cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
237 sb.sb->time_precision = cpu_to_le32(1);
/* Member info (v2 format): one bch_member per device. */
240 struct bch_sb_field_members_v2 *mi =
241 bch2_sb_field_resize(&sb, members_v2,
242 (sizeof(*mi) + sizeof(struct bch_member) *
243 nr_devs) / sizeof(u64));
244 mi->member_bytes = cpu_to_le16(sizeof(struct bch_member));
245 for (i = devs; i < devs + nr_devs; i++) {
246 struct bch_member *m = bch2_members_v2_get_mut(sb.sb, (i - devs));
248 uuid_generate(m->uuid.b);
249 m->nbuckets = cpu_to_le64(i->nbuckets);
/* Bucket size stored in 512-byte sectors, u16 on disk. */
251 m->bucket_size = cpu_to_le16(i->bucket_size >> 9);
253 SET_BCH_MEMBER_DISCARD(m, i->discard);
254 SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
/* Durability field is stored off-by-one (0 means "unset"). */
255 SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
/* Second pass: assign disk-group labels (may realloc the superblock). */
259 for (i = devs; i < devs + nr_devs; i++) {
260 struct bch_member *m;
266 idx = bch2_disk_path_find_or_create(&sb, i->label);
268 die("error creating disk path: %s", strerror(-idx));
271 * Recompute mi and m after each sb modification: its location
272 * in memory may have changed due to reallocation.
274 m = bch2_members_v2_get_mut(sb.sb, (i - devs));
275 SET_BCH_MEMBER_GROUP(m, idx + 1);
/* Resolve target option strings now that members and groups exist. */
278 SET_BCH_SB_FOREGROUND_TARGET(sb.sb,
279 parse_target(&sb, devs, nr_devs, fs_opt_strs.foreground_target));
280 SET_BCH_SB_BACKGROUND_TARGET(sb.sb,
281 parse_target(&sb, devs, nr_devs, fs_opt_strs.background_target));
282 SET_BCH_SB_PROMOTE_TARGET(sb.sb,
283 parse_target(&sb, devs, nr_devs, fs_opt_strs.promote_target));
284 SET_BCH_SB_METADATA_TARGET(sb.sb,
285 parse_target(&sb, devs, nr_devs, fs_opt_strs.metadata_target));
288 if (opts.encrypted) {
289 struct bch_sb_field_crypt *crypt =
290 bch2_sb_field_resize(&sb, crypt, sizeof(*crypt) / sizeof(u64));
292 bch_sb_crypt_init(sb.sb, crypt, opts.passphrase);
293 SET_BCH_SB_ENCRYPTION_TYPE(sb.sb, 1);
/* Mirror v2 member info into the legacy v1 field for older readers. */
296 bch2_sb_members_cpy_v2_v1(&sb);
/* Final pass: per-device layout + write-out. */
298 for (i = devs; i < devs + nr_devs; i++) {
299 u64 size_sectors = i->size >> 9;
301 sb.sb->dev_idx = i - devs;
/* Default placement when the caller didn't specify an offset (guard not visible here). */
304 i->sb_offset = BCH_SB_SECTOR;
305 i->sb_end = size_sectors;
308 init_layout(&sb.sb->layout, fs_opts.block_size,
309 opts.superblock_size,
310 i->sb_offset, i->sb_end);
313 * Also create a backup superblock at the end of the disk:
315 * If we're not creating a superblock at the default offset, it
316 * means we're being run from the migrate tool and we could be
317 * overwriting existing data if we write to the end of the disk:
319 if (i->sb_offset == BCH_SB_SECTOR) {
320 struct bch_sb_layout *l = &sb.sb->layout;
321 u64 backup_sb = size_sectors - (1 << l->sb_max_size_bits);
/* Align the backup superblock down to a bucket boundary (sectors). */
323 backup_sb = rounddown(backup_sb, i->bucket_size >> 9);
324 l->sb_offset[l->nr_superblocks++] = cpu_to_le64(backup_sb);
327 if (i->sb_offset == BCH_SB_SECTOR) {
328 /* Zero start of disk */
329 static const char zeroes[BCH_SB_SECTOR << 9];
331 xpwrite(i->bdev->bd_buffered_fd, zeroes, BCH_SB_SECTOR << 9, 0,
332 "zeroing start of disk");
335 bch2_super_write(i->bdev->bd_buffered_fd, sb.sb);
336 close(i->bdev->bd_buffered_fd);
/*
 * Write the superblock to every offset listed in its layout.
 *
 * For the copy at the default sector, a backup of the layout itself is
 * also written at BCH_SB_LAYOUT_SECTOR. The checksum is recomputed
 * before each copy since sb->offset changes per copy.
 *
 * NOTE(review): loop-closing braces and any fsync are not visible in
 * this excerpt.
 */
342 void bch2_super_write(int fd, struct bch_sb *sb)
344 struct nonce nonce = { 0 };
347 for (i = 0; i < sb->layout.nr_superblocks; i++) {
/* Each on-disk copy records its own offset. */
348 sb->offset = sb->layout.sb_offset[i];
350 if (sb->offset == BCH_SB_SECTOR) {
351 /* Write backup layout */
352 xpwrite(fd, &sb->layout, sizeof(sb->layout),
353 BCH_SB_LAYOUT_SECTOR << 9,
/* Checksum covers the offset field, so compute it per copy. */
357 sb->csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb), nonce, sb);
358 xpwrite(fd, sb, vstruct_bytes(sb),
359 le64_to_cpu(sb->offset) << 9,
/*
 * Read a superblock from `fd` at `sector` (512-byte units).
 *
 * Reads the fixed-size header first to validate the magic (both the
 * legacy BCACHE_MAGIC and current BCHFS_MAGIC are accepted) and learn
 * the full on-disk size, then re-reads the whole vstruct into a heap
 * buffer. Caller owns the returned allocation.
 *
 * NOTE(review): the allocation of `ret` and the return statement are
 * not visible in this excerpt.
 */
366 struct bch_sb *__bch2_super_read(int fd, u64 sector)
368 struct bch_sb sb, *ret;
370 xpread(fd, &sb, sizeof(sb), sector << 9);
372 if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic)) &&
373 memcmp(&sb.magic, &BCHFS_MAGIC, sizeof(sb.magic)))
374 die("not a bcachefs superblock");
376 size_t bytes = vstruct_bytes(&sb);
380 xpread(fd, ret, bytes, sector << 9);
/* ioctl interface: */
387 /* Global control device: */
/* Open the global bcachefs control device; dies on failure (xopen). */
388 int bcachectl_open(void)
390 return xopen("/dev/bcachefs-ctl", O_RDWR);
/* Filesystem handles (ioctl, sysfs dir): */
395 #define SYSFS_BASE "/sys/fs/bcachefs/"
/*
 * Release a filesystem handle.
 * NOTE(review): body not visible in this excerpt; presumably closes the
 * ioctl and sysfs fds opened by bcache_fs_open() -- confirm against full file.
 */
397 void bcache_fs_close(struct bchfs_handle fs)
/*
 * Open a filesystem handle (ioctl fd + sysfs dir fd) from either a UUID
 * string or a path to a mounted filesystem.
 *
 * UUID: resolve the sysfs directory directly, then derive the per-fs
 * control device from its "minor" attribute. Path: open it, issue
 * BCH_IOCTL_QUERY_UUID, and use the returned UUID to find sysfs.
 *
 * NOTE(review): returns/frees and some intervening lines are not
 * visible in this excerpt.
 */
403 struct bchfs_handle bcache_fs_open(const char *path)
405 struct bchfs_handle ret;
407 if (!uuid_parse(path, ret.uuid.b)) {
408 /* It's a UUID, look it up in sysfs: */
409 char *sysfs = mprintf(SYSFS_BASE "%s", path);
410 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
412 char *minor = read_file_str(ret.sysfs_fd, "minor");
413 char *ctl = mprintf("/dev/bcachefs%s-ctl", minor);
414 ret.ioctl_fd = xopen(ctl, O_RDWR);
/* Not a UUID: treat as a filesystem path and query its UUID via ioctl. */
421 ret.ioctl_fd = open(path, O_RDONLY);
422 if (ret.ioctl_fd < 0)
423 die("Error opening filesystem at %s: %m", path);
425 struct bch_ioctl_query_uuid uuid;
426 if (ioctl(ret.ioctl_fd, BCH_IOCTL_QUERY_UUID, &uuid) < 0)
427 die("error opening %s: not a bcachefs filesystem", path);
429 ret.uuid = uuid.uuid;
432 uuid_unparse(uuid.uuid.b, uuid_str);
434 char *sysfs = mprintf(SYSFS_BASE "%s", uuid_str);
435 ret.sysfs_fd = xopen(sysfs, O_RDONLY);
 * Given a path to a block device, open the filesystem it belongs to; also
 * return the device's idx:
/*
 * Two strategies are visible below: for a block device, follow the
 * /sys/dev/block/<maj>:<min>/bcachefs symlink to learn the member index
 * and fs UUID from sysfs; otherwise read the superblock directly
 * (read-only, no-exclusive) to get dev_idx and user_uuid. Either way the
 * fs is then opened by UUID string via bcache_fs_open().
 *
 * NOTE(review): several lines (stat fields passed to mprintf, readlink
 * error handling, buffer termination) are not visible in this excerpt.
 */
446 struct bchfs_handle bchu_fs_open_by_dev(const char *path, int *idx)
448 struct bch_opts opts = bch2_opts_empty();
449 char buf[1024], *uuid_str;
451 struct stat stat = xstat(path);
453 if (S_ISBLK(stat.st_mode)) {
454 char *sysfs = mprintf("/sys/dev/block/%u:%u/bcachefs",
/* The symlink target ends in ".../dev-<idx>"; parse the member index from it. */
458 ssize_t len = readlink(sysfs, buf, sizeof(buf));
464 char *p = strrchr(buf, '/');
465 if (!p || sscanf(p + 1, "dev-%u", idx) != 1)
466 die("error parsing sysfs");
469 p = strrchr(buf, '/');
/* Fallback: read the superblock without exclusive open or modification. */
473 opt_set(opts, noexcl, true);
474 opt_set(opts, nochanges, true);
476 struct bch_sb_handle sb;
477 int ret = bch2_read_super(path, &opts, &sb);
479 die("Error opening %s: %s", path, strerror(-ret));
481 *idx = sb.sb->dev_idx;
483 uuid_unparse(sb.sb->user_uuid.b, uuid_str);
485 bch2_free_super(&sb);
488 return bcache_fs_open(uuid_str);
/*
 * Map a device path to its member index within filesystem `fs`.
 *
 * Opens the device's filesystem and verifies it is the same fs (by
 * UUID); the second handle is closed before returning.
 *
 * NOTE(review): the return statements (idx on match, presumably -1 on
 * UUID mismatch) are not visible in this excerpt.
 */
491 int bchu_dev_path_to_idx(struct bchfs_handle fs, const char *dev_path)
494 struct bchfs_handle fs2 = bchu_fs_open_by_dev(dev_path, &idx);
496 if (memcmp(&fs.uuid, &fs2.uuid, sizeof(fs.uuid)))
498 bcache_fs_close(fs2);
/*
 * Kick off a data job (rereplicate, migrate, ...) via BCH_IOCTL_DATA
 * and stream progress events from the fd the ioctl returns, printing a
 * percent-complete line per event.
 *
 * NOTE(review): the event loop head, the exit condition around the
 * U8_MAX sentinel, and the function's return are not visible in this
 * excerpt.
 */
502 int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd)
504 int progress_fd = xioctl(fs.ioctl_fd, BCH_IOCTL_DATA, &cmd);
507 struct bch_ioctl_data_event e;
509 if (read(progress_fd, &e, sizeof(e)) != sizeof(e))
510 die("error reading from progress fd %m");
/* data_type == U8_MAX appears to be a sentinel event -- confirm against full file. */
515 if (e.p.data_type == U8_MAX)
520 printf("%llu%% complete: current position %s",
522 ? e.p.sectors_done * 100 / e.p.sectors_total
524 bch2_data_types[e.p.data_type]);
526 switch (e.p.data_type) {
/* For btree data, also show btree id and position. */
529 printf(" %s:%llu:%llu",
530 bch2_btree_id_str(e.p.btree_id),
/*
 * Free every option string in `opts` and null the pointers so the
 * struct is safe to free again or reuse.
 */
546 void bch2_opt_strs_free(struct bch_opt_strs *opts)
550 for (i = 0; i < bch2_opts_nr; i++) {
551 free(opts->by_id[i]);
552 opts->by_id[i] = NULL;
/*
 * Scan argv for --option[=value] arguments matching known bcachefs
 * options (filtered by opt_types), collect their values as strdup'd
 * strings indexed by option id, and compact the consumed arguments out
 * of argv/argc.
 *
 * NOTE(review): this excerpt is heavily gapped (loop head, '=' split,
 * bool handling, the memmove consuming args); comments are limited to
 * what the visible lines show.
 */
556 struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[],
559 struct bch_opt_strs opts;
562 memset(&opts, 0, sizeof(opts));
/* Only arguments starting with "--" are candidates. */
565 char *optstr = strcmp_prefix(argv[i], "--");
566 char *valstr = NULL, *p;
567 int optid, nr_args = 1;
/* Own copy so the name can be split/modified without touching argv. */
574 optstr = strdup(optstr);
577 while (isalpha(*p) || *p == '_')
585 optid = bch2_opt_lookup(optstr);
/* Skip unknown options and options not in the requested type mask. */
587 !(bch2_opt_table[optid].flags & opt_types)) {
/* Non-bool options without "=value" take the next argv entry as value. */
593 bch2_opt_table[optid].type != BCH_OPT_BOOL) {
595 valstr = argv[i + 1];
601 opts.by_id[optid] = strdup(valstr);
/* Shift remaining argv entries down over the consumed one(s). */
606 sizeof(char *) * (*argc - i));
/*
 * Parse the collected option strings into a typed bch_opts struct,
 * dying with the parser's error message on any invalid value.
 *
 * NOTE(review): some lines (the skip for unset strings, printbuf
 * cleanup, return) are not visible in this excerpt.
 */
615 struct bch_opts bch2_parse_opts(struct bch_opt_strs strs)
617 struct bch_opts opts = bch2_opts_empty();
618 struct printbuf err = PRINTBUF;
623 for (i = 0; i < bch2_opts_nr; i++) {
627 ret = bch2_opt_parse(NULL,
629 strs.by_id[i], &v, &err);
631 die("Invalid option %s", err.buf);
633 bch2_opt_set_by_id(&opts, i, v);
/*
 * Print a usage/help table for all options matching `opt_types`:
 * "--name[=choices|hint]" in the left column, word-wrapped help text
 * starting at column `helpcol`.
 *
 * NOTE(review): loop bodies are gapped in this excerpt (separator
 * printing, column tracking resets); comments follow the visible lines.
 */
645 void bch2_opts_usage(unsigned opt_types)
647 const struct bch_option *opt;
/* c tracks the current output column for alignment of the help text. */
648 unsigned i, c = 0, helpcol = 30;
652 for (opt = bch2_opt_table;
653 opt < bch2_opt_table + bch2_opts_nr;
655 if (!(opt->flags & opt_types))
658 c += printf(" --%s", opt->attr.name);
/* Enumerated options list their valid choices inline. */
665 for (i = 0; opt->choices[i]; i++) {
668 c += printf("%s", opt->choices[i]);
673 c += printf("=%s", opt->hint);
/* Emit the help text, one '\n'-delimited segment at a time, padded to helpcol. */
678 const char *l = opt->help;
684 const char *n = strchrnul(l, '\n');
686 while (c < helpcol) {
690 printf("%.*s", (int) (n - l), l);
/*
 * Enumerate the member devices of an open filesystem by walking its
 * sysfs directory: each "dev-<idx>" entry yields the device's index,
 * block-device name (from the dev-N/block symlink), label, and
 * durability, pushed into a darray of dev_name.
 *
 * NOTE(review): this function continues past the end of the visible
 * excerpt (error handling for readdir, frees, return); comments cover
 * only the lines shown.
 */
703 dev_names bchu_fs_get_devices(struct bchfs_handle fs)
705 DIR *dir = fdopendir(fs.sysfs_fd);
/* errno reset per iteration so a NULL readdir can distinguish EOF from error. */
711 while ((errno = 0), (d = readdir(dir))) {
712 struct dev_name n = { 0, NULL, NULL };
/* Only "dev-<idx>" entries are members; skip everything else. */
714 if (sscanf(d->d_name, "dev-%u", &n.idx) != 1)
717 char *block_attr = mprintf("dev-%u/block", n.idx);
719 char sysfs_block_buf[4096];
720 ssize_t r = readlinkat(fs.sysfs_fd, block_attr,
721 sysfs_block_buf, sizeof(sysfs_block_buf));
/* readlinkat does not NUL-terminate; do it by hand before basename(). */
723 sysfs_block_buf[r] = '\0';
724 n.dev = strdup(basename(sysfs_block_buf));
729 char *label_attr = mprintf("dev-%u/label", n.idx);
730 n.label = read_file_str(fs.sysfs_fd, label_attr);
733 char *durability_attr = mprintf("dev-%u/durability", n.idx);
734 n.durability = read_file_u64(fs.sysfs_fd, durability_attr);
735 free(durability_attr);
737 darray_push(&devs, n);