#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
+#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
#include "subvolume.h"
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
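+/* names for the BCH_FS_* flag bits, generated from the BCH_FS_FLAGS() x-macro */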
+const char * const bch2_fs_flag_strs[] = {
+#define x(n) #n,
+ BCH_FS_FLAGS()
+#undef x
+ NULL
+};
+
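+/*
+ * Print to the kernel log by default; if c->output is set, append to that
+ * redirect buffer instead and wake any readers waiting on it.
+ */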
+void __bch2_print(struct bch_fs *c, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ if (likely(!c->output)) {
+ vprintk(fmt, args);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->output->lock, flags);
+ prt_vprintf(&c->output->buf, fmt, args);
+ spin_unlock_irqrestore(&c->output->lock, flags);
+
+ wake_up(&c->output->wait);
+ }
+ va_end(args);
+}
+
#define KTYPE(type) \
static const struct attribute_group type ## _group = { \
.attrs = type ## _files \
journal_cur_seq(&c->journal));
if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
- !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+ !test_bit(BCH_FS_emergency_ro, &c->flags))
+ set_bit(BCH_FS_clean_shutdown, &c->flags);
bch2_fs_journal_stop(&c->journal);
/*
{
struct bch_fs *c = container_of(writes, struct bch_fs, writes);
- set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ set_bit(BCH_FS_write_disable_complete, &c->flags);
wake_up(&bch2_read_only_wait);
}
#endif
void bch2_fs_read_only(struct bch_fs *c)
{
- if (!test_bit(BCH_FS_RW, &c->flags)) {
+ if (!test_bit(BCH_FS_rw, &c->flags)) {
bch2_journal_reclaim_stop(&c->journal);
return;
}
- BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+ BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));
+
+ bch_verbose(c, "going read-only");
/*
* Block new foreground-end write operations from starting - any new
* writes will return -EROFS:
*/
- set_bit(BCH_FS_GOING_RO, &c->flags);
+ set_bit(BCH_FS_going_ro, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
percpu_ref_kill(&c->writes);
#else
* that going RO is complete:
*/
wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
- test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
+ test_bit(BCH_FS_write_disable_complete, &c->flags) ||
+ test_bit(BCH_FS_emergency_ro, &c->flags));
+
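+ /* the wait above can also end on emergency RO, before writes have stopped */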
+ bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
+ if (writes_disabled)
+ bch_verbose(c, "finished waiting for writes to stop");
__bch2_fs_read_only(c);
wait_event(bch2_read_only_wait,
- test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+ test_bit(BCH_FS_write_disable_complete, &c->flags));
- clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
- clear_bit(BCH_FS_GOING_RO, &c->flags);
+ if (!writes_disabled)
+ bch_verbose(c, "finished waiting for writes to stop");
+
+ clear_bit(BCH_FS_write_disable_complete, &c->flags);
+ clear_bit(BCH_FS_going_ro, &c->flags);
+ clear_bit(BCH_FS_rw, &c->flags);
if (!bch2_journal_error(&c->journal) &&
- !test_bit(BCH_FS_ERROR, &c->flags) &&
- !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
- test_bit(BCH_FS_STARTED, &c->flags) &&
- test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
+ !test_bit(BCH_FS_error, &c->flags) &&
+ !test_bit(BCH_FS_emergency_ro, &c->flags) &&
+ test_bit(BCH_FS_started, &c->flags) &&
+ test_bit(BCH_FS_clean_shutdown, &c->flags) &&
!c->opts.norecovery) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
BUG_ON(atomic_read(&c->btree_cache.dirty));
bch_verbose(c, "marking filesystem clean");
bch2_fs_mark_clean(c);
+ } else {
+ bch_verbose(c, "done going read-only, filesystem not clean");
}
-
- clear_bit(BCH_FS_RW, &c->flags);
}
static void bch2_fs_read_only_work(struct work_struct *work)
bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
- bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+ bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);
bch2_journal_halt(&c->journal);
bch2_fs_read_only_async(c);
unsigned i;
int ret;
- if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
+ if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
bch_err(c, "cannot go rw, unfixed btree errors");
return -BCH_ERR_erofs_unfixed_errors;
}
- if (test_bit(BCH_FS_RW, &c->flags))
+ if (test_bit(BCH_FS_rw, &c->flags))
return 0;
if (c->opts.norecovery)
bch_info(c, "going read-write");
- ret = bch2_members_v2_init(c);
+ ret = bch2_sb_members_v2_init(c);
if (ret)
goto err;
if (ret)
goto err;
- clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+ clear_bit(BCH_FS_clean_shutdown, &c->flags);
/*
* First journal write must be a flush write: after a clean shutdown we
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
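+ /* mark the fs rw and rearm c->writes before starting threads that take write refs */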
+ set_bit(BCH_FS_rw, &c->flags);
+ set_bit(BCH_FS_was_rw, &c->flags);
+
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_reinit(&c->writes);
+#else
+ for (i = 0; i < BCH_WRITE_REF_NR; i++) {
+ BUG_ON(atomic_long_read(&c->writes[i]));
+ atomic_long_inc(&c->writes[i]);
+ }
+#endif
+
ret = bch2_gc_thread_start(c);
if (ret) {
bch_err(c, "error starting gc thread");
goto err;
}
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_reinit(&c->writes);
-#else
- for (i = 0; i < BCH_WRITE_REF_NR; i++) {
- BUG_ON(atomic_long_read(&c->writes[i]));
- atomic_long_inc(&c->writes[i]);
- }
-#endif
- set_bit(BCH_FS_RW, &c->flags);
- set_bit(BCH_FS_WAS_RW, &c->flags);
-
bch2_do_discards(c);
bch2_do_invalidates(c);
bch2_do_stripe_deletes(c);
bch2_do_pending_node_rewrites(c);
return 0;
err:
- __bch2_fs_read_only(c);
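+ /* if we already went rw, unwind through the full read-only path */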
+ if (test_bit(BCH_FS_rw, &c->flags))
+ bch2_fs_read_only(c);
+ else
+ __bch2_fs_read_only(c);
return ret;
}
bch2_time_stats_exit(&c->times[i]);
bch2_free_pending_node_rewrites(c);
+ bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
bch2_fs_quota_exit(c);
bch2_io_clock_exit(&c->io_clock[WRITE]);
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
- bch2_journal_keys_free(&c->journal_keys);
- bch2_journal_entries_free(c);
+ bch2_journal_keys_put_initial(c);
+ BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->online_reserved);
bch_verbose(c, "shutting down");
- set_bit(BCH_FS_STOPPING, &c->flags);
+ set_bit(BCH_FS_stopping, &c->flags);
cancel_work_sync(&c->journal_seq_blacklist_gc_work);
ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
kobject_add(&c->internal, &c->kobj, "internal") ?:
kobject_add(&c->opts_dir, &c->kobj, "options") ?:
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
+#endif
kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
bch2_opts_create_sysfs_files(&c->opts_dir);
if (ret) {
goto out;
}
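+ /* opts.log_output, if set, carries the address of the redirect buffer used by __bch2_print() */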
+ c->output = (void *)(unsigned long) opts.log_output;
+
__module_get(THIS_MODULE);
closure_init(&c->cl, NULL);
init_rwsem(&c->gc_lock);
mutex_init(&c->gc_gens_lock);
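+ /* initial journal_keys ref, dropped by bch2_journal_keys_put_initial() */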
+ atomic_set(&c->journal_keys.ref, 1);
+ c->journal_keys.initial_ref_held = true;
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
bch2_fs_quota_init(c);
bch2_fs_ec_init_early(c);
bch2_fs_move_init(c);
+ bch2_fs_sb_errors_init_early(c);
INIT_LIST_HEAD(&c->list);
INIT_LIST_HEAD(&c->journal_iters);
- INIT_LIST_HEAD(&c->fsck_errors);
- mutex_init(&c->fsck_error_lock);
+ INIT_LIST_HEAD(&c->fsck_error_msgs);
+ mutex_init(&c->fsck_error_msgs_lock);
seqcount_init(&c->gc_pos_lock);
c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
- c->journal.blocked_time = &c->times[BCH_TIME_blocked_journal];
c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];
bch2_fs_btree_cache_init_early(&c->btree_cache);
}
ret = bch2_fs_counters_init(c) ?:
+ bch2_fs_sb_errors_init(c) ?:
bch2_io_clock_init(&c->io_clock[READ]) ?:
bch2_io_clock_init(&c->io_clock[WRITE]) ?:
bch2_fs_journal_init(&c->journal) ?:
down_write(&c->state_lock);
- BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
+ BUG_ON(test_bit(BCH_FS_started, &c->flags));
mutex_lock(&c->sb_lock);
- ret = bch2_members_v2_init(c);
+ ret = bch2_sb_members_v2_init(c);
if (ret) {
mutex_unlock(&c->sb_lock);
goto err;
goto err;
}
- set_bit(BCH_FS_STARTED, &c->flags);
+ set_bit(BCH_FS_started, &c->flags);
if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c);
} else {
- ret = !test_bit(BCH_FS_RW, &c->flags)
+ ret = !test_bit(BCH_FS_rw, &c->flags)
? bch2_fs_read_write(c)
: bch2_fs_read_write_late(c);
if (ret)
struct bch_member *member)
{
struct bch_dev *ca;
+ unsigned i;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
bch2_time_stats_init(&ca->io_latency[WRITE]);
ca->mi = bch2_mi_to_cpu(member);
+
+ for (i = 0; i < ARRAY_SIZE(member->errors); i++)
+ atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));
+
ca->uuid = member->uuid;
ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
goto err_unlock;
}
- mi = bch2_sb_field_get(ca->disk_sb.sb, members_v2);
-
- if (!bch2_sb_field_resize(&ca->disk_sb, members_v2,
- le32_to_cpu(mi->field.u64s) +
- sizeof(dev_mi) / sizeof(u64))) {
- ret = -BCH_ERR_ENOSPC_sb_members;
- bch_err_msg(c, ret, "setting up new superblock");
- goto err_unlock;
- }
-
if (dynamic_fault("bcachefs:add:no_slot"))
goto no_slot;
have_slot:
nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+
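+ /* read members_v2 from the filesystem superblock, not the new device's sb */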
+ mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
struct bch_opts opts)
{
- struct bch_sb_handle *sb = NULL;
+ DARRAY(struct bch_sb_handle) sbs = { 0 };
struct bch_fs *c = NULL;
- unsigned i, best_sb = 0;
+ struct bch_sb_handle *sb, *best = NULL;
struct printbuf errbuf = PRINTBUF;
int ret = 0;
goto err;
}
- sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
- if (!sb) {
- ret = -ENOMEM;
+ ret = darray_make_room(&sbs, nr_devices);
+ if (ret)
goto err;
- }
- for (i = 0; i < nr_devices; i++) {
- ret = bch2_read_super(devices[i], &opts, &sb[i]);
+ for (unsigned i = 0; i < nr_devices; i++) {
+ struct bch_sb_handle sb = { NULL };
+
+ ret = bch2_read_super(devices[i], &opts, &sb);
if (ret)
goto err;
+ BUG_ON(darray_push(&sbs, sb));
}
- for (i = 1; i < nr_devices; i++)
- if (le64_to_cpu(sb[i].sb->seq) >
- le64_to_cpu(sb[best_sb].sb->seq))
- best_sb = i;
-
- i = 0;
- while (i < nr_devices) {
- if (i != best_sb &&
- !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
- pr_info("%pg has been removed, skipping", sb[i].bdev);
- bch2_free_super(&sb[i]);
- array_remove_item(sb, nr_devices, i);
+ darray_for_each(sbs, sb)
+ if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
+ best = sb;
+
+ darray_for_each_reverse(sbs, sb) {
+ if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
+ pr_info("%pg has been removed, skipping", sb->bdev);
+ bch2_free_super(sb);
+ darray_remove_item(&sbs, sb);
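+ /* removing sb shifted later entries down; keep best pointing at the same entry */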
+ best -= best > sb;
continue;
}
- ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+ ret = bch2_dev_in_fs(best->sb, sb->sb);
if (ret)
goto err_print;
- i++;
}
- c = bch2_fs_alloc(sb[best_sb].sb, opts);
- if (IS_ERR(c)) {
- ret = PTR_ERR(c);
+ c = bch2_fs_alloc(best->sb, opts);
+ ret = PTR_ERR_OR_ZERO(c);
+ if (ret)
goto err;
- }
down_write(&c->state_lock);
- for (i = 0; i < nr_devices; i++) {
- ret = bch2_dev_attach_bdev(c, &sb[i]);
+ darray_for_each(sbs, sb) {
+ ret = bch2_dev_attach_bdev(c, sb);
if (ret) {
up_write(&c->state_lock);
goto err;
goto err;
}
out:
- kfree(sb);
+ darray_for_each(sbs, sb)
+ bch2_free_super(sb);
+ darray_exit(&sbs);
printbuf_exit(&errbuf);
module_put(THIS_MODULE);
return c;
err:
if (!IS_ERR_OR_NULL(c))
bch2_fs_stop(c);
- if (sb)
- for (i = 0; i < nr_devices; i++)
- bch2_free_super(&sb[i]);
c = ERR_PTR(ret);
goto out;
}