#include "libbcachefs/btree_cache.h"
#include "libbcachefs/checksum.h"
#include "libbcachefs/disk_groups.h"
+#include "libbcachefs/journal_seq_blacklist.h"
#include "libbcachefs/opts.h"
#include "libbcachefs/replicas.h"
#include "libbcachefs/super-io.h"
return BCH_MIN_NR_NBUCKETS * bucket_size;
}
-static void init_layout(struct bch_sb_layout *l, unsigned block_size,
- u64 start, u64 end)
+static void init_layout(struct bch_sb_layout *l,
+ unsigned block_size,
+ unsigned sb_size,
+ u64 sb_start, u64 sb_end)
{
- unsigned sb_size;
- u64 backup; /* offset of 2nd sb */
+ unsigned i;
memset(l, 0, sizeof(*l));
- if (start != BCH_SB_SECTOR)
- start = round_up(start, block_size);
- end = round_down(end, block_size);
-
- if (start >= end)
- die("insufficient space for superblocks");
-
- /*
- * Create two superblocks in the allowed range: reserve a maximum of 64k
- */
- sb_size = min_t(u64, 128, end - start / 2);
-
- backup = start + sb_size;
- backup = round_up(backup, block_size);
-
- backup = min(backup, end);
-
- sb_size = min(end - backup, backup- start);
- sb_size = rounddown_pow_of_two(sb_size);
-
- if (sb_size < 8)
- die("insufficient space for superblocks");
-
l->magic = BCACHE_MAGIC;
l->layout_type = 0;
l->nr_superblocks = 2;
l->sb_max_size_bits = ilog2(sb_size);
- l->sb_offset[0] = cpu_to_le64(start);
- l->sb_offset[1] = cpu_to_le64(backup);
+
+ /* Create two superblocks in the allowed range: */
+ for (i = 0; i < l->nr_superblocks; i++) {
+ if (sb_start != BCH_SB_SECTOR)
+ sb_start = round_up(sb_start, block_size);
+
+ l->sb_offset[i] = cpu_to_le64(sb_start);
+ sb_start += sb_size;
+ }
+
+ if (sb_start >= sb_end)
+ die("insufficient space for superblocks");
}
void bch2_pick_bucket_size(struct bch_opts opts, struct dev_opts *dev)
{
- if (!dev->sb_offset) {
- dev->sb_offset = BCH_SB_SECTOR;
- dev->sb_end = BCH_SB_SECTOR + 256;
- }
-
if (!dev->size)
dev->size = get_size(dev->path, dev->fd) >> 9;
if (bch2_sb_realloc(&sb, 0))
die("insufficient memory");
- sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
- sb.sb->version_min = le16_to_cpu(bcachefs_metadata_version_current);
+ sb.sb->version = le16_to_cpu(opts.version);
+ sb.sb->version_min = le16_to_cpu(opts.version);
sb.sb->magic = BCACHE_MAGIC;
sb.sb->block_size = cpu_to_le16(fs_opts.block_size);
sb.sb->user_uuid = opts.uuid;
sb.sb->nr_devices = nr_devs;
+ if (opts.version == bcachefs_metadata_version_current)
+ sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+
uuid_generate(sb.sb->uuid.b);
if (opts.label)
/* Member info: */
mi = bch2_sb_resize_members(&sb,
(sizeof(*mi) + sizeof(struct bch_member) *
- nr_devs) / sizeof(u64));
+ nr_devs) / sizeof(u64));
for (i = devs; i < devs + nr_devs; i++) {
struct bch_member *m = mi->members + (i - devs);
m->first_bucket = 0;
m->bucket_size = cpu_to_le16(i->bucket_size);
- SET_BCH_MEMBER_REPLACEMENT(m, CACHE_REPLACEMENT_LRU);
+ SET_BCH_MEMBER_REPLACEMENT(m, BCH_CACHE_REPLACEMENT_lru);
SET_BCH_MEMBER_DISCARD(m, i->discard);
SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
}
- /* Disk groups */
+	/* Disk labels: */
for (i = devs; i < devs + nr_devs; i++) {
struct bch_member *m = mi->members + (i - devs);
int idx;
- if (!i->group)
+ if (!i->label)
continue;
- idx = bch2_disk_path_find_or_create(&sb, i->group);
+ idx = bch2_disk_path_find_or_create(&sb, i->label);
if (idx < 0)
die("error creating disk path: %s", idx);
parse_target(&sb, devs, nr_devs, fs_opt_strs.background_target));
SET_BCH_SB_PROMOTE_TARGET(sb.sb,
parse_target(&sb, devs, nr_devs, fs_opt_strs.promote_target));
+ SET_BCH_SB_METADATA_TARGET(sb.sb,
+ parse_target(&sb, devs, nr_devs, fs_opt_strs.metadata_target));
/* Crypt: */
if (opts.encrypted) {
for (i = devs; i < devs + nr_devs; i++) {
sb.sb->dev_idx = i - devs;
+ if (!i->sb_offset) {
+ i->sb_offset = BCH_SB_SECTOR;
+ i->sb_end = i->size;
+ }
+
init_layout(&sb.sb->layout, fs_opts.block_size,
+ opts.superblock_size,
i->sb_offset, i->sb_end);
+ /*
+ * Also create a backup superblock at the end of the disk:
+ *
+ * If we're not creating a superblock at the default offset, it
+ * means we're being run from the migrate tool and we could be
+ * overwriting existing data if we write to the end of the disk:
+ */
+ if (i->sb_offset == BCH_SB_SECTOR) {
+ struct bch_sb_layout *l = &sb.sb->layout;
+ u64 backup_sb = i->size - (1 << l->sb_max_size_bits);
+
+ backup_sb = rounddown(backup_sb, i->bucket_size);
+ l->sb_offset[l->nr_superblocks++] = cpu_to_le64(backup_sb);
+ }
+
if (i->sb_offset == BCH_SB_SECTOR) {
/* Zero start of disk */
static const char zeroes[BCH_SB_SECTOR << 9];
struct bch_disk_group *g = gi->entries + t.group;
if (t.group < disk_groups_nr(gi) && !BCH_GROUP_DELETED(g)) {
- ret = scnprintf(buf, len, "Group %u (%.*s)", t.group,
+ ret = scnprintf(buf, len, "Label %u (%.*s)", t.group,
BCH_SB_LABEL_SIZE, g->label);
} else {
- ret = scnprintf(buf, len, "Bad group %u", t.group);
+ ret = scnprintf(buf, len, "Bad label %u", t.group);
}
break;
}
char member_uuid_str[40];
char data_allowed_str[100];
char data_has_str[100];
- char group[BCH_SB_LABEL_SIZE+10];
+	char label[BCH_SB_LABEL_SIZE+10];
char time_str[64];
if (!bch2_member_exists(m))
unsigned idx = BCH_MEMBER_GROUP(m) - 1;
if (idx < disk_groups_nr(gi)) {
- snprintf(group, sizeof(group), "%.*s (%u)",
+ scnprintf(label, sizeof(label), "%.*s (%u)",
BCH_SB_LABEL_SIZE,
gi->entries[idx].label, idx);
} else {
- strcpy(group, "(bad disk groups section)");
+ strcpy(label, "(bad disk labels section)");
}
} else {
- strcpy(group, "(none)");
+ strcpy(label, "(none)");
}
bch2_flags_to_text(&PBUF(data_allowed_str),
time_str,
BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
- ? bch2_dev_state[BCH_MEMBER_STATE(m)]
+ ? bch2_member_states[BCH_MEMBER_STATE(m)]
: "unknown",
- group,
+ label,
data_allowed_str,
data_has_str,
- BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
+ BCH_MEMBER_REPLACEMENT(m) < BCH_CACHE_REPLACEMENT_NR
? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
: "unknown",
}
static void bch2_sb_print_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f,
- enum units units)
+ enum units units)
{
struct bch_sb_field_replicas_v0 *replicas = field_to_type(f, replicas_v0);
struct bch_replicas_entry_v0 *e;
static void bch2_sb_print_clean(struct bch_sb *sb, struct bch_sb_field *f,
enum units units)
{
+ struct bch_sb_field_clean *clean = field_to_type(f, clean);
+
+
+ printf(" flags: %x", le32_to_cpu(clean->flags));
+ printf(" journal seq: %llx", le64_to_cpu(clean->journal_seq));
}
static void bch2_sb_print_journal_seq_blacklist(struct bch_sb *sb, struct bch_sb_field *f,
- enum units units)
+ enum units units)
{
+ struct bch_sb_field_journal_seq_blacklist *bl = field_to_type(f, journal_seq_blacklist);
+ unsigned i, nr = blacklist_nr_entries(bl);
+
+ for (i = 0; i < nr; i++) {
+ struct journal_seq_blacklist_entry *e =
+ bl->start + i;
+
+ printf(" %llu-%llu\n",
+ le64_to_cpu(e->start),
+ le64_to_cpu(e->end));
+ }
}
typedef void (*sb_field_print_fn)(struct bch_sb *, struct bch_sb_field *, enum units);
{
struct bch_sb_field_members *mi;
char user_uuid_str[40], internal_uuid_str[40];
- char features_str[200];
+ char features_str[500];
+ char compat_features_str[500];
char fields_have_str[200];
char label[BCH_SB_LABEL_SIZE + 1];
char time_str[64];
char foreground_str[64];
char background_str[64];
char promote_str[64];
+ char metadata_str[64];
struct bch_sb_field *f;
u64 fields_have = 0;
unsigned nr_devices = 0;
bch2_sb_get_target(sb, promote_str, sizeof(promote_str),
BCH_SB_PROMOTE_TARGET(sb));
+ bch2_sb_get_target(sb, metadata_str, sizeof(metadata_str),
+ BCH_SB_METADATA_TARGET(sb));
+
bch2_flags_to_text(&PBUF(features_str),
bch2_sb_features,
le64_to_cpu(sb->features[0]));
+ bch2_flags_to_text(&PBUF(compat_features_str),
+ bch2_sb_compat,
+ le64_to_cpu(sb->compat[0]));
+
vstruct_for_each(sb, f)
fields_have |= 1 << le32_to_cpu(f->type);
bch2_flags_to_text(&PBUF(fields_have_str),
printf("External UUID: %s\n"
"Internal UUID: %s\n"
+ "Device index: %u\n"
"Label: %s\n"
- "Version: %llu\n"
+ "Version: %u\n"
+ "Oldest version on disk: %u\n"
"Created: %s\n"
+	       "Sequence number:               %llu\n"
"Block_size: %s\n"
"Btree node size: %s\n"
"Error action: %s\n"
"Clean: %llu\n"
"Features: %s\n"
+ "Compat features: %s\n"
"Metadata replicas: %llu\n"
"Data replicas: %llu\n"
"Foreground write target: %s\n"
"Background write target: %s\n"
"Promote target: %s\n"
+ "Metadata target: %s\n"
"String hash type: %s (%llu)\n"
"32 bit inodes: %llu\n"
"Superblock size: %llu\n",
user_uuid_str,
internal_uuid_str,
+ sb->dev_idx,
label,
- le64_to_cpu(sb->version),
+ le16_to_cpu(sb->version),
+ le16_to_cpu(sb->version_min),
time_str,
+ le64_to_cpu(sb->seq),
pr_units(le16_to_cpu(sb->block_size), units),
pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
- BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
+ BCH_SB_ERROR_ACTION(sb) < BCH_ON_ERROR_NR
? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
: "unknown",
BCH_SB_CLEAN(sb),
features_str,
+ compat_features_str,
BCH_SB_META_REPLICAS_WANT(sb),
BCH_SB_DATA_REPLICAS_WANT(sb),
foreground_str,
background_str,
promote_str,
+ metadata_str,
BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR
? bch2_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)]
free(ctl);
} else {
/* It's a path: */
- ret.ioctl_fd = xopen(path, O_RDONLY);
+ ret.ioctl_fd = open(path, O_RDONLY);
+ if (ret.ioctl_fd < 0)
+ die("Error opening filesystem at %s: %m", path);
struct bch_ioctl_query_uuid uuid;
if (ioctl(ret.ioctl_fd, BCH_IOCTL_QUERY_UUID, &uuid) < 0)
* Given a path to a block device, open the filesystem it belongs to; also
* return the device's idx:
*/
-struct bchfs_handle bchu_fs_open_by_dev(const char *path, unsigned *idx)
+struct bchfs_handle bchu_fs_open_by_dev(const char *path, int *idx)
{
char buf[1024], *uuid_str;
return bcache_fs_open(uuid_str);
}
+int bchu_dev_path_to_idx(struct bchfs_handle fs, const char *dev_path)
+{
+ int idx;
+ struct bchfs_handle fs2 = bchu_fs_open_by_dev(dev_path, &idx);
+
+ if (memcmp(&fs.uuid, &fs2.uuid, sizeof(fs.uuid)))
+ idx = -1;
+ bcache_fs_close(fs2);
+ return idx;
+}
+
int bchu_data(struct bchfs_handle fs, struct bch_ioctl_data cmd)
{
int progress_fd = xioctl(fs.ioctl_fd, BCH_IOCTL_DATA, &cmd);
bch2_data_types[e.p.data_type]);
switch (e.p.data_type) {
- case BCH_DATA_BTREE:
- case BCH_DATA_USER:
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
printf(" %s:%llu:%llu",
bch2_btree_ids[e.p.btree_id],
e.p.pos.inode,
/* option parsing */
+void bch2_opt_strs_free(struct bch_opt_strs *opts)
+{
+ unsigned i;
+
+ for (i = 0; i < bch2_opts_nr; i++) {
+ free(opts->by_id[i]);
+ opts->by_id[i] = NULL;
+ }
+}
+
struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[],
unsigned opt_types)
{
optid = bch2_opt_lookup(optstr);
if (optid < 0 ||
!(bch2_opt_table[optid].mode & opt_types)) {
- free(optstr);
i++;
- continue;
+ goto next;
}
if (!valstr &&
if (!valstr)
valstr = "1";
- opts.by_id[optid] = valstr;
+ opts.by_id[optid] = strdup(valstr);
*argc -= nr_args;
memmove(&argv[i],
&argv[i + nr_args],
sizeof(char *) * (*argc - i));
argv[*argc] = NULL;
+next:
+ free(optstr);
}
return opts;
ret = bch2_opt_parse(NULL, &bch2_opt_table[i],
strs.by_id[i], &v);
if (ret < 0)
- die("Invalid %s: %s", strs.by_id[i], strerror(-ret));
+ die("Invalid %s: %s",
+ bch2_opt_table[i].attr.name,
+ strerror(-ret));
bch2_opt_set_by_id(&opts, i, v);
}
return opts;
}
+#define newline(c) \
+ do { \
+ printf("\n"); \
+ c = 0; \
+ } while(0)
void bch2_opts_usage(unsigned opt_types)
{
const struct bch_option *opt;
unsigned i, c = 0, helpcol = 30;
- void tabalign() {
- while (c < helpcol) {
- putchar(' ');
- c++;
- }
- }
- void newline() {
- printf("\n");
- c = 0;
- }
for (opt = bch2_opt_table;
opt < bch2_opt_table + bch2_opts_nr;
const char *l = opt->help;
if (c >= helpcol)
- newline();
+ newline(c);
while (1) {
const char *n = strchrnul(l, '\n');
- tabalign();
+ while (c < helpcol) {
+ putchar(' ');
+ c++;
+ }
printf("%.*s", (int) (n - l), l);
- newline();
+ newline(c);
if (!*n)
break;
l = n + 1;
}
} else {
- newline();
+ newline(c);
}
}
}