#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/idr.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
return c;
}
+static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i, nr = 0, u64s =
+ (sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR) /
+ sizeof(u64);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ nr++;
+ rcu_read_unlock();
+
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->dev_usage_journal_res, u64s * nr);
+}
+
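The helper above sizes the reservation as one jset_entry_dev_usage header plus one jset_entry_dev_usage_type per data type, for each member device, all expressed in u64s. A standalone sketch of that arithmetic, with made-up stand-in sizes for the on-disk structs (the real definitions live in the bcachefs format headers):

#include <stdio.h>

/* stand-in sizes only; not the real on-disk layout */
struct jset_entry_dev_usage      { unsigned long long pad[3]; };
struct jset_entry_dev_usage_type { unsigned long long pad[2]; };
#define BCH_DATA_NR 8 /* assumed count of data types */

int main(void)
{
	unsigned nr_devs = 4; /* example member device count */
	unsigned u64s = (sizeof(struct jset_entry_dev_usage) +
			 sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR) /
			sizeof(unsigned long long);

	/* the journal reservation grows linearly with the device count */
	printf("%u u64s per device, %u total\n", u64s, u64s * nr_devs);
	return 0;
}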
/* Filesystem RO/RW: */
/*
bch2_copygc_stop(c);
bch2_gc_thread_stop(c);
- bch2_io_timer_del(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
- bch2_io_timer_del(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
-
/*
* Flush journal before stopping allocators, because flushing journal
* blacklist entries involves allocating new btree nodes; the journal
* kicks off btree writes via reclaim - wait for in-flight writes
* after stopping the journal:
*/
- if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- bch2_btree_flush_all_writes(c);
- else
- bch2_btree_verify_flushed(c);
+ bch2_btree_flush_all_writes(c);
/*
* After stopping journal:
void bch2_fs_read_only(struct bch_fs *c)
{
if (!test_bit(BCH_FS_RW, &c->flags)) {
- cancel_delayed_work_sync(&c->journal.reclaim_work);
+ BUG_ON(c->journal.reclaim_thread);
return;
}
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- bch2_io_timer_add(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
- bch2_io_timer_add(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
-
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {
set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
+ ret = bch2_journal_reclaim_start(&c->journal);
+ if (ret) {
+ bch_err(c, "error starting journal reclaim: %i", ret);
+ return ret;
+ }
+
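Journal reclaim now runs as a dedicated kthread rather than as delayed work on the journal_reclaim_wq workqueue that is removed further down (hence the BUG_ON(c->journal.reclaim_thread) in the read-only path above). A minimal sketch of the start/stop pair this implies; the struct, field, and function names below are stand-ins, not the actual bch2_journal_reclaim_start() implementation:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* stand-in for struct journal; only the field this sketch needs */
struct journal_stub {
	struct task_struct	*reclaim_thread;
};

static int reclaim_thread_fn(void *arg)
{
	/* assumed worker loop: run reclaim until kthread_stop() is called */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int example_reclaim_start(struct journal_stub *j)
{
	struct task_struct *p =
		kthread_create(reclaim_thread_fn, j, "bch-reclaim");

	if (IS_ERR(p))
		return PTR_ERR(p);

	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

static void example_reclaim_stop(struct journal_stub *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;
	if (p)
		kthread_stop(p);
}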
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
percpu_ref_reinit(&c->writes);
set_bit(BCH_FS_RW, &c->flags);
-
- queue_delayed_work(c->journal_reclaim_wq,
- &c->journal.reclaim_work, 0);
return 0;
err:
__bch2_fs_read_only(c);
static void __bch2_fs_free(struct bch_fs *c)
{
unsigned i;
+ int cpu;
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
bch2_journal_entries_free(&c->journal_entries);
percpu_free_rwsem(&c->mark_lock);
kfree(c->usage_scratch);
- free_percpu(c->usage[1]);
- free_percpu(c->usage[0]);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ free_percpu(c->usage[i]);
kfree(c->usage_base);
+
+ if (c->btree_iters_bufs)
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
+
+ free_percpu(c->btree_iters_bufs);
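The new btree_iters_bufs percpu buffer is a one-slot per-CPU cache of btree iterator allocations, which is why teardown must kfree() whatever each CPU's slot still holds before free_percpu() releases the array itself. A hedged sketch of how such a one-slot cache is typically used; the type and function names here are illustrative, not the actual btree iterator code:

#include <linux/percpu.h>
#include <linux/slab.h>

struct btree_iter;
struct btree_iter_buf_stub { struct btree_iter *iter; };

/* grab the CPU-local cached allocation if present, else allocate fresh */
static struct btree_iter *
example_iters_get(struct btree_iter_buf_stub __percpu *bufs, size_t bytes)
{
	struct btree_iter *iter = this_cpu_xchg(bufs->iter, NULL);

	return iter ?: kmalloc(bytes, GFP_NOFS);
}

/* stash the allocation back into the CPU-local slot, freeing whatever
 * it displaces (kfree(NULL) is a no-op) */
static void example_iters_put(struct btree_iter_buf_stub __percpu *bufs,
			      struct btree_iter *iter)
{
	kfree(this_cpu_xchg(bufs->iter, iter));
}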
free_percpu(c->pcpu);
mempool_exit(&c->large_bkey_pool);
mempool_exit(&c->btree_bounce_pool);
kfree(c->replicas_gc.entries);
kfree(rcu_dereference_protected(c->disk_groups, 1));
kfree(c->journal_seq_blacklist_table);
+ kfree(c->unused_inode_hints);
free_heap(&c->copygc_heap);
- if (c->journal_reclaim_wq)
- destroy_workqueue(c->journal_reclaim_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
if (c->wq)
bch2_blacklist_entries_gc);
INIT_LIST_HEAD(&c->journal_entries);
+ INIT_LIST_HEAD(&c->journal_iters);
INIT_LIST_HEAD(&c->fsck_errors);
mutex_init(&c->fsck_error_lock);
bch2_fs_btree_cache_init_early(&c->btree_cache);
+ mutex_init(&c->sectors_available_lock);
+
if (percpu_init_rwsem(&c->mark_lock))
goto err;
(btree_blocks(c) + 1) * 2 *
sizeof(struct sort_iter_set);
+ c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
+
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->copygc_wq = alloc_workqueue("bcache_copygc",
+ !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->journal_reclaim_wq = alloc_workqueue("bcache_journal",
- WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+ !(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
+ !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
+ sizeof(u64), GFP_KERNEL)) ||
bch2_io_clock_init(&c->io_clock[READ]) ||
bch2_io_clock_init(&c->io_clock[WRITE]) ||
bch2_fs_journal_init(&c->journal) ||
bch2_dev_alloc(c, i))
goto err;
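c->inode_shard_bits above is the log2 of num_possible_cpus() rounded up to a power of two, and the unused_inode_hints allocation in the same chain sizes one u64 hint slot per shard (1U << c->inode_shard_bits). A quick userspace check of that arithmetic, using stand-ins for the kernel's roundup_pow_of_two() and ilog2():

#include <stdio.h>

/* userspace stand-ins for the kernel helpers from linux/log2.h */
static unsigned roundup_pow_of_two(unsigned n)
{
	unsigned r = 1;
	while (r < n)
		r <<= 1;
	return r;
}

static unsigned ilog2(unsigned n)
{
	unsigned r = 0;
	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned cpus[] = { 1, 2, 6, 32 };

	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		unsigned bits = ilog2(roundup_pow_of_two(cpus[i]));
		printf("%2u cpus -> %u shard bits -> %u hint slots\n",
		       cpus[i], bits, 1U << bits);
	}
	return 0;
}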
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->btree_root_journal_res,
+ BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
+ bch2_dev_usage_journal_reserve(c);
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->clock_journal_res,
+ (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
+
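Three fixed journal-entry reservations are established here: worst-case btree root entries for every btree, per-device usage entries via bch2_dev_usage_journal_reserve() above, and two clock entries, presumably one per io clock (READ and WRITE). A conceptual sketch of what a reservation resize does, assuming the journal keeps a running total of reserved u64s; the real bch2_journal_entry_res_resize() also has to cope with a journal buffer that is already open or full:

#include <linux/spinlock.h>

/* stand-ins; the real fields live in struct journal */
struct journal_entry_res_stub { unsigned u64s; };
struct journal_res_stub {
	spinlock_t	lock;
	unsigned	entry_u64s_reserved;
};

/* apply only the delta between old and new size, so a reservation can
 * be resized repeatedly (e.g. as member devices come and go) */
static void example_entry_res_resize(struct journal_res_stub *j,
				     struct journal_entry_res_stub *res,
				     unsigned new_u64s)
{
	spin_lock(&j->lock);
	j->entry_u64s_reserved += new_u64s - res->u64s;
	res->u64s = new_u64s;
	spin_unlock(&j->lock);
}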
mutex_lock(&bch_fs_list_lock);
err = bch2_fs_online(c);
mutex_unlock(&bch_fs_list_lock);
if (ret)
return ret;
- if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
- !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_sb])) {
- mutex_lock(&c->sb_lock);
- bch2_mark_dev_superblock(ca->fs, ca, 0);
- mutex_unlock(&c->sb_lock);
- }
-
bch2_dev_sysfs_online(c, ca);
if (c->sb.nr_devices == 1)
mutex_unlock(&c->sb_lock);
up_write(&c->state_lock);
+
+ bch2_dev_usage_journal_reserve(c);
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_RW &&
return ret;
}
-static void dev_usage_clear(struct bch_dev *ca)
-{
- struct bucket_array *buckets;
-
- percpu_memset(ca->usage[0], 0, sizeof(*ca->usage[0]));
-
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- memset(buckets->b, 0, sizeof(buckets->b[0]) * buckets->nbuckets);
- up_read(&ca->bucket_lock);
-}
-
/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
* allocate the journal, reset all the marks, then remark after we
* attach...
*/
- bch2_mark_dev_superblock(ca->fs, ca, 0);
+ bch2_mark_dev_superblock(NULL, ca, 0);
err = "journal alloc failed";
ret = bch2_dev_journal_alloc(ca);
if (ret)
goto err;
- dev_usage_clear(ca);
-
down_write(&c->state_lock);
mutex_lock(&c->sb_lock);
ca->disk_sb.sb->dev_idx = dev_idx;
bch2_dev_attach(c, ca, dev_idx);
- bch2_mark_dev_superblock(c, ca, 0);
-
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- err = "alloc write failed";
- ret = bch2_dev_alloc_write(c, ca, 0);
+ bch2_dev_usage_journal_reserve(c);
+
+ err = "error marking superblock";
+ ret = bch2_trans_mark_dev_sb(c, NULL, ca);
if (ret)
- goto err;
+ goto err_late;
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = __bch2_dev_read_write(c, ca);
bch_err(c, "Unable to add device: %s", err);
return ret;
err_late:
+ up_write(&c->state_lock);
bch_err(c, "Error going rw after adding device: %s", err);
return -EINVAL;
}
}
ca = bch_dev_locked(c, dev_idx);
+
+ if (bch2_trans_mark_dev_sb(c, NULL, ca)) {
+ err = "bch2_trans_mark_dev_sb() error";
+ goto err;
+ }
+
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = __bch2_dev_read_write(c, ca);
if (err)
bch2_debug_exit();
bch2_vfs_exit();
bch2_chardev_exit();
+ bch2_btree_key_cache_exit();
if (bcachefs_kset)
kset_unregister(bcachefs_kset);
}
static int __init bcachefs_init(void)
{
bch2_bkey_pack_test();
- bch2_inode_pack_test();
if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
+ bch2_btree_key_cache_init() ||
bch2_chardev_init() ||
bch2_vfs_init() ||
bch2_debug_init())