// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
+#include "journal_io.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "quota.h"
unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;
- BUG_ON(get_order(__vstruct_bytes(struct bch_sb, sb_u64s)) >
- sb->page_order);
+ BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size);
if (!f && !u64s) {
/* nothing to do: */
if (!IS_ERR_OR_NULL(sb->bdev))
blkdev_put(sb->bdev, sb->mode);
- free_pages((unsigned long) sb->sb, sb->page_order);
+ kfree(sb->sb);
memset(sb, 0, sizeof(*sb));
}
int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
{
size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
- unsigned order = get_order(new_bytes);
+ size_t new_buffer_size;
struct bch_sb *new_sb;
struct bio *bio;
- if (sb->sb && sb->page_order >= order)
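+ /*
+  * the buffer must cover at least one logical block of the device,
+  * rounded up to a power of two:
+  */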
+ if (sb->bdev)
+ new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev));
+
+ new_buffer_size = roundup_pow_of_two(new_bytes);
+
+ if (sb->sb && sb->buffer_size >= new_buffer_size)
return 0;
if (sb->have_layout) {
}
}
- if (sb->page_order >= order && sb->sb)
+ if (sb->buffer_size >= new_buffer_size && sb->sb)
return 0;
if (dynamic_fault("bcachefs:add:super_realloc"))
return -ENOMEM;
if (sb->have_bio) {
- bio = bio_kmalloc(GFP_KERNEL, 1 << order);
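+ /* enough bio_vecs to map the buffer one page at a time: */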
+ bio = bio_kmalloc(GFP_KERNEL,
+ DIV_ROUND_UP(new_buffer_size, PAGE_SIZE));
if (!bio)
return -ENOMEM;
sb->bio = bio;
}
- new_sb = (void *) __get_free_pages(GFP_NOFS|__GFP_ZERO, order);
+ new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
if (!new_sb)
return -ENOMEM;
- if (sb->sb)
- memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);
-
- free_pages((unsigned long) sb->sb, sb->page_order);
sb->sb = new_sb;
-
- sb->page_order = order;
+ sb->buffer_size = new_buffer_size;
return 0;
}
return "Bad number of member devices";
if (!BCH_SB_META_REPLICAS_WANT(sb) ||
- BCH_SB_META_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
+ BCH_SB_META_REPLICAS_WANT(sb) > BCH_REPLICAS_MAX)
return "Invalid number of metadata replicas";
if (!BCH_SB_META_REPLICAS_REQ(sb) ||
- BCH_SB_META_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
+ BCH_SB_META_REPLICAS_REQ(sb) > BCH_REPLICAS_MAX)
return "Invalid number of metadata replicas";
if (!BCH_SB_DATA_REPLICAS_WANT(sb) ||
- BCH_SB_DATA_REPLICAS_WANT(sb) >= BCH_REPLICAS_MAX)
+ BCH_SB_DATA_REPLICAS_WANT(sb) > BCH_REPLICAS_MAX)
return "Invalid number of data replicas";
if (!BCH_SB_DATA_REPLICAS_REQ(sb) ||
- BCH_SB_DATA_REPLICAS_REQ(sb) >= BCH_REPLICAS_MAX)
+ BCH_SB_DATA_REPLICAS_REQ(sb) > BCH_REPLICAS_MAX)
return "Invalid number of data replicas";
if (BCH_SB_META_CSUM_TYPE(sb) >= BCH_CSUM_OPT_NR)
c->sb.uuid = src->uuid;
c->sb.user_uuid = src->user_uuid;
c->sb.version = le16_to_cpu(src->version);
+ c->sb.version_min = le16_to_cpu(src->version_min);
c->sb.nr_devices = src->nr_devices;
c->sb.clean = BCH_SB_CLEAN(src);
c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src);
c->sb.encoded_extent_max= 1 << BCH_SB_ENCODED_EXTENT_MAX_BITS(src);
- c->sb.time_base_lo = le64_to_cpu(src->time_base_lo);
+
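+ /* on-disk times are in units of time_precision nanoseconds: */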
+ c->sb.nsec_per_time_unit = le32_to_cpu(src->time_precision);
+ c->sb.time_units_per_sec = NSEC_PER_SEC / c->sb.nsec_per_time_unit;
+
+ /* XXX this is wrong, we need a 96 or 128 bit integer type */
+ c->sb.time_base_lo = div_u64(le64_to_cpu(src->time_base_lo),
+ c->sb.nsec_per_time_unit);
c->sb.time_base_hi = le32_to_cpu(src->time_base_hi);
- c->sb.time_precision = le32_to_cpu(src->time_precision);
+
c->sb.features = le64_to_cpu(src->features[0]);
c->sb.compat = le64_to_cpu(src->compat[0]);
ca->mi = bch2_mi_to_cpu(mi->members + i);
}
-/* doesn't copy member info */
static void __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
{
struct bch_sb_field *src_f, *dst_f;
__copy_super(&c->disk_sb, src);
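+ /* carry error bits recorded in the superblock into the in-memory fs state: */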
+ if (BCH_SB_HAS_ERRORS(c->disk_sb.sb))
+ set_bit(BCH_FS_ERROR, &c->flags);
+ if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb))
+ set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags);
+
ret = bch2_sb_replicas_to_cpu_replicas(c);
if (ret)
return ret;
bio_set_dev(sb->bio, sb->bdev);
sb->bio->bi_iter.bi_sector = offset;
bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
- bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order);
+ bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
if (submit_bio_wait(sb->bio))
return "IO error";
if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
return "Bad superblock: too big";
- if (get_order(bytes) > sb->page_order) {
+ if (bytes > sb->buffer_size) {
if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
return "cannot allocate memory";
goto reread;
bdev_logical_block_size(sb->bdev))
goto err;
- if (sb->mode & FMODE_WRITE)
- bdev_get_queue(sb->bdev)->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
ret = 0;
sb->have_layout = true;
out:
/* XXX: return errors directly */
- if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write"))
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write error: %s",
+ bch2_blk_status_to_str(bio->bi_status)))
ca->sb_write_error = 1;
closure_put(&ca->fs->sb_write);
bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
bio_sectors(bio));
percpu_ref_get(&ca->io_ref);
sb->offset = sb->layout.sb_offset[idx];
- SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
+ SET_BCH_SB_CSUM_TYPE(sb, bch2_csum_opt_to_type(c->opts.metadata_checksum, false));
sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
null_nonce(), sb);
roundup((size_t) vstruct_bytes(sb),
bdev_logical_block_size(ca->disk_sb.bdev)));
- this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
+ this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
bio_sectors(bio));
percpu_ref_get(&ca->io_ref);
const char *err;
struct bch_devs_mask sb_written;
bool wrote, can_mount_without_written, can_mount_with_written;
+ unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
int ret = 0;
+ if (c->opts.very_degraded)
+ degraded_flags |= BCH_FORCE_IF_LOST;
+
lockdep_assert_held(&c->sb_lock);
closure_init_stack(cl);
if (test_bit(BCH_FS_ERROR, &c->flags))
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
+ if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags))
+ SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1);
+
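+ /* record the byte order this superblock is written in: */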
+ SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
for_each_online_member(ca, c, i)
bch2_sb_from_fs(c, ca);
nr_wrote = dev_mask_nr(&sb_written);
can_mount_with_written =
- bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
- BCH_FORCE_IF_DEGRADED);
+ bch2_have_enough_devs(c, sb_written, degraded_flags, false);
for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
sb_written.d[i] = ~sb_written.d[i];
can_mount_without_written =
- bch2_have_enough_devs(__bch2_replicas_status(c, sb_written),
- BCH_FORCE_IF_DEGRADED);
+ bch2_have_enough_devs(c, sb_written, degraded_flags, false);
/*
 * If we would be able to mount _without_ the devices we successfully
 * wrote to, we weren't able to write to enough devices:
 *
 * Exception: if we can mount without the successes because we haven't
 * written anything (new filesystem), we continue if we'd be able to
 * mount with the devices we did successfully write to:
 */
if (bch2_fs_fatal_err_on(!nr_wrote ||
+ !can_mount_with_written ||
(can_mount_without_written &&
!can_mount_with_written), c,
"Unable to write superblock to sufficient devices"))
/* BCH_SB_FIELD_clean: */
-void bch2_sb_clean_renumber(struct bch_sb_field_clean *clean, int write)
+int bch2_sb_clean_validate(struct bch_fs *c, struct bch_sb_field_clean *clean, int write)
{
struct jset_entry *entry;
+ int ret;
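+
+ /* validate each journal entry embedded in the clean section: */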
for (entry = clean->start;
entry < (struct jset_entry *) vstruct_end(&clean->field);
- entry = vstruct_next(entry))
- bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
+ entry = vstruct_next(entry)) {
+ ret = bch2_journal_entry_validate(c, "superblock", entry,
+ le16_to_cpu(c->disk_sb.sb->version),
+ BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
+ write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int bch2_fs_mark_dirty(struct bch_fs *c)
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->disk_sb.sb->compat[0] &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA);
- c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_extents_above_btree_updates;
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
ret = bch2_write_super(c);
mutex_unlock(&c->sb_lock);
return ret;
}
-static void
-entry_init_u64s(struct jset_entry *entry, unsigned u64s)
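+/* zero and initialize a @size byte entry at *end, advancing *end past it: */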
+static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
{
- memset(entry, 0, u64s * sizeof(u64));
+ struct jset_entry *entry = *end;
+ unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
+
+ memset(entry, 0, u64s * sizeof(u64));
/*
* The u64s field counts from the start of data, ignoring the shared
* fields.
*/
- entry->u64s = u64s - 1;
-}
+ entry->u64s = cpu_to_le16(u64s - 1);
-static void
-entry_init_size(struct jset_entry *entry, size_t size)
-{
- unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
- entry_init_u64s(entry, u64s);
+ *end = vstruct_next(*end);
+ return entry;
}
-struct jset_entry *
-bch2_journal_super_entries_add_common(struct bch_fs *c,
- struct jset_entry *entry,
- u64 journal_seq)
+void bch2_journal_super_entries_add_common(struct bch_fs *c,
+ struct jset_entry **end,
+ u64 journal_seq)
{
- struct btree_root *r;
- unsigned i;
-
- mutex_lock(&c->btree_root_lock);
-
- for (r = c->btree_roots;
- r < c->btree_roots + BTREE_ID_NR;
- r++)
- if (r->alive) {
- entry_init_u64s(entry, r->key.u64s + 1);
- entry->btree_id = r - c->btree_roots;
- entry->level = r->level;
- entry->type = BCH_JSET_ENTRY_btree_root;
- bkey_copy(&entry->start[0], &r->key);
-
- entry = vstruct_next(entry);
- }
- c->btree_roots_dirty = false;
-
- mutex_unlock(&c->btree_root_lock);
+ struct bch_dev *ca;
+ unsigned i, dev;
- percpu_down_write(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
if (!journal_seq) {
- bch2_fs_usage_acc_to_base(c, 0);
- bch2_fs_usage_acc_to_base(c, 1);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
} else {
- bch2_fs_usage_acc_to_base(c, journal_seq & 1);
+ bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
}
{
struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
- entry_init_size(entry, sizeof(*u));
u->entry.type = BCH_JSET_ENTRY_usage;
u->entry.btree_id = FS_USAGE_INODES;
u->v = cpu_to_le64(c->usage_base->nr_inodes);
-
- entry = vstruct_next(entry);
}
{
struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
- entry_init_size(entry, sizeof(*u));
u->entry.type = BCH_JSET_ENTRY_usage;
u->entry.btree_id = FS_USAGE_KEY_VERSION;
u->v = cpu_to_le64(atomic64_read(&c->key_version));
-
- entry = vstruct_next(entry);
}
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
struct jset_entry_usage *u =
- container_of(entry, struct jset_entry_usage, entry);
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
- entry_init_size(entry, sizeof(*u));
u->entry.type = BCH_JSET_ENTRY_usage;
u->entry.btree_id = FS_USAGE_RESERVED;
u->entry.level = i;
u->v = cpu_to_le64(c->usage_base->persistent_reserved[i]);
-
- entry = vstruct_next(entry);
}
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
struct jset_entry_data_usage *u =
- container_of(entry, struct jset_entry_data_usage, entry);
+ container_of(jset_entry_init(end, sizeof(*u) + e->nr_devs),
+ struct jset_entry_data_usage, entry);
- entry_init_size(entry, sizeof(*u) + e->nr_devs);
u->entry.type = BCH_JSET_ENTRY_data_usage;
u->v = cpu_to_le64(c->usage_base->replicas[i]);
memcpy(&u->r, e, replicas_entry_bytes(e));
+ }
- entry = vstruct_next(entry);
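+ /* per-device usage counters: */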
+ for_each_member_device(ca, c, dev) {
+ unsigned b = sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
+ struct jset_entry_dev_usage *u =
+ container_of(jset_entry_init(end, b),
+ struct jset_entry_dev_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_dev_usage;
+ u->dev = cpu_to_le32(dev);
+ u->buckets_ec = cpu_to_le64(ca->usage_base->buckets_ec);
+ u->buckets_unavailable = cpu_to_le64(ca->usage_base->buckets_unavailable);
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
+ u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
+ u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
+ }
}
- percpu_up_write(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
- return entry;
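+ /* current read and write IO clock positions: */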
+ for (i = 0; i < 2; i++) {
+ struct jset_entry_clock *clock =
+ container_of(jset_entry_init(end, sizeof(*clock)),
+ struct jset_entry_clock, entry);
+
+ clock->entry.type = BCH_JSET_ENTRY_clock;
+ clock->rw = i;
+ clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
+ }
}
void bch2_fs_mark_clean(struct bch_fs *c)
struct bch_sb_field_clean *sb_clean;
struct jset_entry *entry;
unsigned u64s;
+ int ret;
mutex_lock(&c->sb_lock);
if (BCH_SB_CLEAN(c->disk_sb.sb))
SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
- c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA;
- c->disk_sb.sb->features[0] &= ~(1ULL << BCH_FEATURE_extents_above_btree_updates);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));
u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
}
sb_clean->flags = 0;
- sb_clean->read_clock = cpu_to_le16(c->bucket_clock[READ].hand);
- sb_clean->write_clock = cpu_to_le16(c->bucket_clock[WRITE].hand);
sb_clean->journal_seq = cpu_to_le64(journal_cur_seq(&c->journal) - 1);
/* Trying to catch outstanding bug: */
BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);
entry = sb_clean->start;
- entry = bch2_journal_super_entries_add_common(c, entry, 0);
+ bch2_journal_super_entries_add_common(c, &entry, 0);
+ entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
memset(entry, 0,
vstruct_end(&sb_clean->field) - (void *) entry);
- if (le16_to_cpu(c->disk_sb.sb->version) <
- bcachefs_metadata_version_bkey_renumber)
- bch2_sb_clean_renumber(sb_clean, WRITE);
+ /*
+ * this should be in the write path, and we should be validating every
+ * superblock section:
+ */
+ ret = bch2_sb_clean_validate(c, sb_clean, WRITE);
+ if (ret) {
+ bch_err(c, "error writing marking filesystem clean: validate error");
+ goto out;
+ }
bch2_write_super(c);
out: