+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
+#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
+#include "ec.h"
#include "error.h"
-#include "journal_io.h"
+#include "recovery.h"
+#include "varint.h"
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
-static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
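+/* Bytes per field for v1 alloc keys, indexed by BCH_ALLOC_FIELD_V1_*: */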
+static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
+#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+};
/* Ratelimiting/PD controllers */
struct bch_fs,
pd_controllers_update);
struct bch_dev *ca;
+ s64 free = 0, fragmented = 0;
unsigned i;
for_each_member_device(ca, c, i) {
- struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
- u64 free = bucket_to_sector(ca,
+ free += bucket_to_sector(ca,
__dev_buckets_free(ca, stats)) << 9;
/*
* Bytes of internal fragmentation, which can be
* reclaimed by copy GC
*/
- s64 fragmented = (bucket_to_sector(ca,
- stats.buckets[BCH_DATA_USER] +
- stats.buckets[BCH_DATA_CACHED]) -
- (stats.sectors[BCH_DATA_USER] +
- stats.sectors[BCH_DATA_CACHED])) << 9;
-
- fragmented = max(0LL, fragmented);
-
- bch2_pd_controller_update(&ca->copygc_pd,
- free, fragmented, -1);
+ fragmented += max_t(s64, 0, (bucket_to_sector(ca,
+ stats.d[BCH_DATA_user].buckets +
+ stats.d[BCH_DATA_cached].buckets) -
+ (stats.d[BCH_DATA_user].sectors +
+ stats.d[BCH_DATA_cached].sectors)) << 9);
}
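+	/*
+	 * The copygc pd controller is now filesystem wide - feed it the
+	 * totals summed across all member devices:
+	 */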
+ bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
schedule_delayed_work(&c->pd_controllers_update,
c->pd_controllers_update_seconds * HZ);
}
/* Persistent alloc info: */
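+/*
+ * v1 alloc keys store each present field as a fixed width little endian
+ * integer; a->fields is a bitmask of which fields are present:
+ */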
-static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
+static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
+ const void **p, unsigned field)
{
- unsigned bytes = offsetof(struct bch_alloc, data);
-
- if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
- bytes += 2;
- if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
- bytes += 2;
-
- return DIV_ROUND_UP(bytes, sizeof(u64));
-}
-
-const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- if (k.k->p.inode >= c->sb.nr_devices ||
- !c->devs[k.k->p.inode])
- return "invalid device";
-
- switch (k.k->type) {
- case BCH_ALLOC: {
- struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
-
- if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
- return "incorrect value size";
- break;
- }
- default:
- return "invalid type";
- }
-
- return NULL;
-}
-
-int bch2_alloc_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
-{
- buf[0] = '\0';
-
- switch (k.k->type) {
- case BCH_ALLOC:
- break;
- }
-
- return 0;
-}
+ unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
+ u64 v;
-static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
-{
- unsigned v;
+ if (!(a->fields & (1 << field)))
+ return 0;
switch (bytes) {
case 1:
- v = **p;
+ v = *((const u8 *) *p);
break;
case 2:
- v = le16_to_cpup((void *) *p);
+ v = le16_to_cpup(*p);
break;
case 4:
- v = le32_to_cpup((void *) *p);
+ v = le32_to_cpup(*p);
+ break;
+ case 8:
+ v = le64_to_cpup(*p);
break;
	default:
		BUG();
	}
	*p += bytes;
	return v;
}
-static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
+static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
+ unsigned field, u64 v)
{
+ unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
+
+ if (!v)
+ return;
+
+ a->v.fields |= 1 << field;
+
switch (bytes) {
case 1:
- **p = v;
+ *((u8 *) *p) = v;
break;
case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
case 4:
*((__le32 *) *p) = cpu_to_le32(v);
break;
+ case 8:
+ *((__le64 *) *p) = cpu_to_le64(v);
+ break;
default:
BUG();
}
*p += bytes;
}
-static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
+static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
{
- struct bch_dev *ca;
- struct bkey_s_c_alloc a;
- struct bucket_mark new;
- struct bucket *g;
- const u8 *d;
+ const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
+ const void *d = in->data;
+ unsigned idx = 0;
- if (k.k->type != BCH_ALLOC)
- return;
+ out->gen = in->gen;
- a = bkey_s_c_to_alloc(k);
- ca = bch_dev_bkey_exists(c, a.k->p.inode);
+#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+}
- if (a.k->p.offset >= ca->mi.nbuckets)
- return;
+static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
+
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+
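+	/*
+	 * v2 alloc keys store fields as varints, in a fixed order; fields
+	 * past nr_fields are implicitly zero, and a value that doesn't
+	 * round trip through the narrower unpacked field is an error:
+	 */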
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ return 0;
+}
- percpu_down_read_preempt_disable(&c->usage_lock);
+static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
+ const struct bkey_alloc_unpacked src)
+{
+ struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
+ unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
+ u8 *out = a->v.data;
+ u8 *end = (void *) &dst[1];
+ u8 *last_nonzero_field = out;
+ unsigned bytes;
+
+ a->k.p = POS(src.dev, src.bucket);
+ a->v.gen = src.gen;
+ a->v.oldest_gen = src.oldest_gen;
+ a->v.data_type = src.data_type;
+
+#define x(_name, _bits) \
+ nr_fields++; \
+ \
+ if (src._name) { \
+ out += bch2_varint_encode(out, src._name); \
+ \
+ last_nonzero_field = out; \
+ last_nonzero_fieldnr = nr_fields; \
+ } else { \
+ *out++ = 0; \
+ }
- g = bucket(ca, a.k->p.offset);
- bucket_cmpxchg(g, new, ({
- new.gen = a.v->gen;
- new.gen_valid = 1;
- }));
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ BUG_ON(out > end);
- d = a.v->data;
- if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
- g->io_time[READ] = get_alloc_field(&d, 2);
- if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
- g->io_time[WRITE] = get_alloc_field(&d, 2);
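+	/*
+	 * Trailing zero fields aren't stored: rewind to just past the last
+	 * nonzero field, and only count the fields up to it:
+	 */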
+ out = last_nonzero_field;
+ a->v.nr_fields = last_nonzero_fieldnr;
- percpu_up_read_preempt_enable(&c->usage_lock);
+ bytes = (u8 *) out - (u8 *) &a->v;
+ set_bkey_val_bytes(&a->k, bytes);
+ memset_u64s_tail(&a->v, 0, bytes);
}
-int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
+struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
- struct journal_replay *r;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_dev *ca;
- unsigned i;
- int ret;
+ struct bkey_alloc_unpacked ret = {
+ .dev = k.k->p.inode,
+ .bucket = k.k->p.offset,
+ .gen = 0,
+ };
- for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
- bch2_alloc_read_key(c, k);
- bch2_btree_iter_cond_resched(&iter);
- }
+ if (k.k->type == KEY_TYPE_alloc_v2)
+ bch2_alloc_unpack_v2(&ret, k);
+ else if (k.k->type == KEY_TYPE_alloc)
+ bch2_alloc_unpack_v1(&ret, k);
- ret = bch2_btree_iter_unlock(&iter);
- if (ret)
- return ret;
-
- list_for_each_entry(r, journal_replay_list, list) {
- struct bkey_i *k, *n;
- struct jset_entry *entry;
+ return ret;
+}
- for_each_jset_key(k, n, entry, &r->j)
- if (entry->btree_id == BTREE_ID_ALLOC)
- bch2_alloc_read_key(c, bkey_i_to_s_c(k));
- }
+void bch2_alloc_pack(struct bch_fs *c,
+ struct bkey_alloc_buf *dst,
+ const struct bkey_alloc_unpacked src)
+{
+ bch2_alloc_pack_v2(dst, src);
+}
- mutex_lock(&c->bucket_clock[READ].lock);
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- bch2_recalc_oldest_io(c, ca, READ);
- up_read(&ca->bucket_lock);
- }
- mutex_unlock(&c->bucket_clock[READ].lock);
+static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
+{
+ unsigned i, bytes = offsetof(struct bch_alloc, data);
- mutex_lock(&c->bucket_clock[WRITE].lock);
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- bch2_recalc_oldest_io(c, ca, WRITE);
- up_read(&ca->bucket_lock);
- }
- mutex_unlock(&c->bucket_clock[WRITE].lock);
+ for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
+ if (a->fields & (1 << i))
+ bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
- return 0;
+ return DIV_ROUND_UP(bytes, sizeof(u64));
}
-static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct btree_iter *iter,
- u64 *journal_seq, unsigned flags)
+const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bucket_mark m;
- __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
- struct bucket *g;
- struct bkey_i_alloc *a;
- u8 *d;
+ struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
- percpu_down_read_preempt_disable(&c->usage_lock);
- g = bucket(ca, b);
+ if (k.k->p.inode >= c->sb.nr_devices ||
+ !c->devs[k.k->p.inode])
+ return "invalid device";
- m = READ_ONCE(g->mark);
- a = bkey_alloc_init(&alloc_key.k);
- a->k.p = POS(ca->dev_idx, b);
- a->v.fields = 0;
- a->v.gen = m.gen;
- set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));
-
- d = a->v.data;
- if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
- put_alloc_field(&d, 2, g->io_time[READ]);
- if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
- put_alloc_field(&d, 2, g->io_time[WRITE]);
- percpu_up_read_preempt_enable(&c->usage_lock);
-
- bch2_btree_iter_cond_resched(iter);
-
- bch2_btree_iter_set_pos(iter, a->k.p);
-
- return bch2_btree_insert_at(c, NULL, journal_seq,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_USE_ALLOC_RESERVE|
- flags,
- BTREE_INSERT_ENTRY(iter, &a->k_i));
+ /* allow for unknown fields */
+ if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
+ return "incorrect value size";
+
+ return NULL;
}
-int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
+const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bch_dev *ca;
- struct btree_iter iter;
- int ret;
+ struct bkey_alloc_unpacked u;
- if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
- return 0;
+ if (k.k->p.inode >= c->sb.nr_devices ||
+ !c->devs[k.k->p.inode])
+ return "invalid device";
- ca = bch_dev_bkey_exists(c, pos.inode);
+ if (bch2_alloc_unpack_v2(&u, k))
+ return "unpack error";
- if (pos.offset >= ca->mi.nbuckets)
- return 0;
+ return NULL;
+}
- bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
- ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL, 0);
- bch2_btree_iter_unlock(&iter);
- return ret;
+ pr_buf(out, "gen %u oldest_gen %u data_type %u",
+ u.gen, u.oldest_gen, u.data_type);
+#define x(_name, ...)	pr_buf(out, " " #_name " %llu", (u64) u._name);
+ BCH_ALLOC_FIELDS_V2()
+#undef x
}
-int bch2_alloc_write(struct bch_fs *c)
+static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_s_c k)
{
struct bch_dev *ca;
- unsigned i;
- int ret = 0;
+ struct bucket *g;
+ struct bkey_alloc_unpacked u;
- for_each_rw_member(ca, c, i) {
- struct btree_iter iter;
- unsigned long bucket;
+ if (level ||
+ (k.k->type != KEY_TYPE_alloc &&
+ k.k->type != KEY_TYPE_alloc_v2))
+ return 0;
- bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ g = bucket(ca, k.k->p.offset);
+ u = bch2_alloc_unpack(k);
- down_read(&ca->bucket_lock);
- for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
- ret = __bch2_alloc_write_key(c, ca, bucket,
- &iter, NULL, 0);
- if (ret)
- break;
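+	/* Seed the in-memory bucket from the unpacked alloc key: */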
+ g->_mark.gen = u.gen;
+ g->_mark.data_type = u.data_type;
+ g->_mark.dirty_sectors = u.dirty_sectors;
+ g->_mark.cached_sectors = u.cached_sectors;
+ g->io_time[READ] = u.read_time;
+ g->io_time[WRITE] = u.write_time;
+ g->oldest_gen = u.oldest_gen;
+ g->gen_valid = 1;
- clear_bit(bucket, ca->buckets_dirty);
- }
- up_read(&ca->bucket_lock);
- bch2_btree_iter_unlock(&iter);
+ return 0;
+}
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- break;
- }
+int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
+{
+ int ret;
+
+ down_read(&c->gc_lock);
+ ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
+ NULL, bch2_alloc_read_fn);
+ up_read(&c->gc_lock);
+
+ if (ret) {
+ bch_err(c, "error reading alloc info: %i", ret);
+ return ret;
}
- return ret;
+ return 0;
}
-/* Bucket IO clocks: */
-
-static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
+static int bch2_alloc_write_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned flags)
{
- struct bucket_clock *clock = &c->bucket_clock[rw];
- struct bucket_array *buckets = bucket_array(ca);
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct bch_dev *ca;
struct bucket *g;
- u16 max_last_io = 0;
- unsigned i;
+ struct bucket_mark m;
+ struct bkey_alloc_unpacked old_u, new_u;
+ struct bkey_alloc_buf a;
+ int ret;
+retry:
+ bch2_trans_begin(trans);
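+	/*
+	 * The btree key cache may hold a newer version of this key; flush
+	 * it back to the btree so the read below sees the current value:
+	 */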
- lockdep_assert_held(&c->bucket_clock[rw].lock);
+ ret = bch2_btree_key_cache_flush(trans,
+ BTREE_ID_alloc, iter->pos);
+ if (ret)
+ goto err;
- /* Recalculate max_last_io for this device: */
- for_each_bucket(g, buckets)
- max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
- ca->max_last_bucket_io[rw] = max_last_io;
+ old_u = bch2_alloc_unpack(k);
- /* Recalculate global max_last_io: */
- max_last_io = 0;
+ percpu_down_read(&c->mark_lock);
+ ca = bch_dev_bkey_exists(c, iter->pos.inode);
+ g = bucket(ca, iter->pos.offset);
+ m = READ_ONCE(g->mark);
+ new_u = alloc_mem_to_key(iter, g, m);
+ percpu_up_read(&c->mark_lock);
- for_each_member_device(ca, c, i)
- max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
+ if (!bkey_alloc_unpacked_cmp(old_u, new_u))
+ return 0;
- clock->max_last_io = max_last_io;
+ bch2_alloc_pack(c, &a, new_u);
+ bch2_trans_update(trans, iter, &a.k,
+ BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|flags);
+err:
+ if (ret == -EINTR)
+ goto retry;
+ return ret;
}
-static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
+int bch2_alloc_write(struct bch_fs *c, unsigned flags)
{
- struct bucket_clock *clock = &c->bucket_clock[rw];
- struct bucket_array *buckets;
+ struct btree_trans trans;
+ struct btree_iter *iter;
struct bch_dev *ca;
- struct bucket *g;
unsigned i;
+ int ret = 0;
- trace_rescale_prios(c);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
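+	/*
+	 * Walk every bucket on every member device, only rewriting keys
+	 * that no longer match the in-memory bucket state:
+	 */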
for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for_each_bucket(g, buckets)
- g->io_time[rw] = clock->hand -
- bucket_last_io(c, g, rw) / 2;
+ bch2_btree_iter_set_pos(iter,
+ POS(ca->dev_idx, ca->mi.first_bucket));
- bch2_recalc_oldest_io(c, ca, rw);
+ while (iter->pos.offset < ca->mi.nbuckets) {
+ bch2_trans_cond_resched(&trans);
- up_read(&ca->bucket_lock);
+ ret = bch2_alloc_write_key(&trans, iter, flags);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ goto err;
+ }
+ bch2_btree_iter_next_slot(iter);
+ }
}
+err:
+ bch2_trans_iter_put(&trans, iter);
+ bch2_trans_exit(&trans);
+ return ret;
}
-static void bch2_inc_clock_hand(struct io_timer *timer)
-{
- struct bucket_clock *clock = container_of(timer,
- struct bucket_clock, rescale);
- struct bch_fs *c = container_of(clock,
- struct bch_fs, bucket_clock[clock->rw]);
- struct bch_dev *ca;
- u64 capacity;
- unsigned i;
-
- mutex_lock(&clock->lock);
-
- /* if clock cannot be advanced more, rescale prio */
- if (clock->max_last_io >= U16_MAX - 2)
- bch2_rescale_bucket_io_times(c, clock->rw);
-
- BUG_ON(clock->max_last_io >= U16_MAX - 2);
-
- for_each_member_device(ca, c, i)
- ca->max_last_bucket_io[clock->rw]++;
- clock->max_last_io++;
- clock->hand++;
+/* Bucket IO clocks: */
- mutex_unlock(&clock->lock);
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+ size_t bucket_nr, int rw)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+ struct btree_iter *iter;
+ struct bucket *g;
+ struct bkey_alloc_buf *a;
+ struct bkey_alloc_unpacked u;
+ u64 *time, now;
+ int ret = 0;
- capacity = READ_ONCE(c->capacity);
+ iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ goto out;
- if (!capacity)
- return;
+ a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
- /*
- * we only increment when 0.1% of the filesystem capacity has been read
- * or written too, this determines if it's time
- *
- * XXX: we shouldn't really be going off of the capacity of devices in
- * RW mode (that will be 0 when we're RO, yet we can still service
- * reads)
- */
- timer->expire += capacity >> 10;
+ percpu_down_read(&c->mark_lock);
+ g = bucket(ca, bucket_nr);
+ u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
+ percpu_up_read(&c->mark_lock);
- bch2_io_timer_add(&c->io_clock[clock->rw], timer);
-}
+ time = rw == READ ? &u.read_time : &u.write_time;
+ now = atomic64_read(&c->io_clock[rw].now);
+ if (*time == now)
+ goto out;
-static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
-{
- struct bucket_clock *clock = &c->bucket_clock[rw];
+ *time = now;
- clock->hand = 1;
- clock->rw = rw;
- clock->rescale.fn = bch2_inc_clock_hand;
- clock->rescale.expire = c->capacity >> 10;
- mutex_init(&clock->lock);
+ bch2_alloc_pack(c, a, u);
+ ret = bch2_trans_update(trans, iter, &a->k, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+ bch2_trans_iter_put(trans, iter);
+ return ret;
}
/* Background allocator thread: */
* commands to the newly free buckets, then puts them on the various freelists.
*/
-#define BUCKET_GC_GEN_MAX 96U
-
/**
* wait_buckets_available - wait on reclaimable buckets
*
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
unsigned long gc_count = c->gc_count;
+ s64 available;
+ unsigned i;
int ret = 0;
+ ca->allocator_state = ALLOCATOR_BLOCKED;
+ closure_wake_up(&c->freelist_wait);
+
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
if (gc_count != c->gc_count)
ca->inc_gen_really_needs_gc = 0;
- if ((ssize_t) (dev_buckets_available(c, ca) -
- ca->inc_gen_really_needs_gc) >=
- (ssize_t) fifo_free(&ca->free_inc))
+ available = dev_buckets_available(ca);
+ available -= ca->inc_gen_really_needs_gc;
+
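+		/* Don't count buckets already sitting on a freelist: */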
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++)
+ available -= fifo_used(&ca->free[i]);
+ spin_unlock(&c->freelist_lock);
+
+ available = max(available, 0LL);
+
+ if (available > fifo_free(&ca->free_inc) ||
+ (available &&
+ !fifo_full(&ca->free[RESERVE_MOVINGGC])))
break;
up_read(&c->gc_lock);
}
__set_current_state(TASK_RUNNING);
+ ca->allocator_state = ALLOCATOR_RUNNING;
+ closure_wake_up(&c->freelist_wait);
+
return ret;
}
-static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
- size_t bucket,
- struct bucket_mark mark)
+static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
+ struct bucket_mark m)
{
u8 gc_gen;
- if (!is_available_bucket(mark))
+ if (!is_available_bucket(m))
+ return false;
+
+ if (m.owned_by_allocator)
+ return false;
+
+ if (ca->buckets_nouse &&
+ test_bit(b, ca->buckets_nouse))
return false;
- gc_gen = bucket_gc_gen(ca, bucket);
+ gc_gen = bucket_gc_gen(bucket(ca, b));
if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
ca->inc_gen_needs_gc++;
/*
 * Determines what order we're going to reuse buckets, smallest bucket_sort_key()
* first.
- *
- *
- * - We take into account the read prio of the bucket, which gives us an
- * indication of how hot the data is -- we scale the prio so that the prio
- * farthest from the clock is worth 1/8th of the closest.
- *
- * - The number of sectors of cached data in the bucket, which gives us an
- * indication of the cost in cache misses this eviction will cause.
- *
- * - If hotness * sectors used compares equal, we pick the bucket with the
- * smallest bucket_gc_gen() - since incrementing the same bucket's generation
- * number repeatedly forces us to run mark and sweep gc to avoid generation
- * number wraparound.
*/
-static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct bucket_mark m)
+static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
+ u64 now, u64 last_seq_ondisk)
{
- unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
- unsigned max_last_io = ca->max_last_bucket_io[READ];
+ unsigned used = bucket_sectors_used(m);
- /*
- * Time since last read, scaled to [0, 8) where larger value indicates
- * more recently read data:
- */
- unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
-
- /* How much we want to keep the data in this bucket: */
- unsigned long data_wantness =
- (hotness + 1) * bucket_sectors_used(m);
-
- unsigned long needs_journal_commit =
- bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
+ if (used) {
+ /*
+ * Prefer to keep buckets that have been read more recently, and
+ * buckets that have more data in them:
+ */
+ u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
+		u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
- return (data_wantness << 9) |
- (needs_journal_commit << 8) |
- (bucket_gc_gen(ca, b) / 16);
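+		/* Negate so buckets read longest ago get the smallest keys: */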
+ return -last_read_scaled;
+ } else {
+ /*
+ * Prefer to use buckets with smaller gc_gen so that we don't
+ * have to walk the btree and recalculate oldest_gen - but shift
+ * off the low bits so that buckets will still have equal sort
+ * keys when there's only a small difference, so that we can
+ * keep sequential buckets together:
+ */
+ return (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
+ (bucket_gc_gen(g) >> 4);
+ }
}
static inline int bucket_alloc_cmp(alloc_heap *h,
struct alloc_heap_entry l,
struct alloc_heap_entry r)
{
- return (l.key > r.key) - (l.key < r.key) ?:
- (l.nr < r.nr) - (l.nr > r.nr) ?:
- (l.bucket > r.bucket) - (l.bucket < r.bucket);
+ return cmp_int(l.key, r.key) ?:
+ cmp_int(r.nr, l.nr) ?:
+ cmp_int(l.bucket, r.bucket);
}
static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
const struct alloc_heap_entry *l = _l, *r = _r;
- return (l->bucket > r->bucket) - (l->bucket < r->bucket);
+ return cmp_int(l->bucket, r->bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
struct bucket_array *buckets;
struct alloc_heap_entry e = { 0 };
+ u64 now, last_seq_ondisk;
size_t b, i, nr = 0;
- ca->alloc_heap.used = 0;
-
- mutex_lock(&c->bucket_clock[READ].lock);
down_read(&ca->bucket_lock);
buckets = bucket_array(ca);
-
- bch2_recalc_oldest_io(c, ca, READ);
+ ca->alloc_heap.used = 0;
+ now = atomic64_read(&c->io_clock[READ].now);
+ last_seq_ondisk = c->journal.last_seq_ondisk;
/*
* Find buckets with lowest read priority, by building a maxheap sorted
 * by read priority and repeatedly replacing the maximum element until
 * all buckets have been visited.
*/
for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
- struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
- unsigned long key = bucket_sort_key(c, ca, b, m);
+ struct bucket *g = &buckets->b[b];
+ struct bucket_mark m = READ_ONCE(g->mark);
+ unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
if (!bch2_can_invalidate_bucket(ca, b, m))
continue;
}
up_read(&ca->bucket_lock);
- mutex_unlock(&c->bucket_clock[READ].lock);
}
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
ca->inc_gen_needs_gc = 0;
switch (ca->mi.replacement) {
- case CACHE_REPLACEMENT_LRU:
+ case BCH_CACHE_REPLACEMENT_lru:
find_reclaimable_buckets_lru(c, ca);
break;
- case CACHE_REPLACEMENT_FIFO:
+ case BCH_CACHE_REPLACEMENT_fifo:
find_reclaimable_buckets_fifo(c, ca);
break;
- case CACHE_REPLACEMENT_RANDOM:
+ case BCH_CACHE_REPLACEMENT_random:
find_reclaimable_buckets_random(c, ca);
break;
}
return -1;
}
-static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t bucket, u64 *flush_seq)
+/*
+ * returns sequence number of most recent journal entry that updated this
+ * bucket:
+ */
+static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
+ if (m.journal_seq_valid) {
+ u64 journal_seq = atomic64_read(&c->journal.seq);
+ u64 bucket_seq = journal_seq;
+
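+		/*
+		 * The bucket mark only holds the low 16 bits of the journal
+		 * seq: splice those into the current sequence number, then
+		 * correct for wraparound if the result is in the future:
+		 */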
+ bucket_seq &= ~((u64) U16_MAX);
+ bucket_seq |= m.journal_seq;
+
+ if (bucket_seq > journal_seq)
+ bucket_seq -= 1 << 16;
+
+ return bucket_seq;
+ } else {
+ return 0;
+ }
+}
+
+static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
+ struct bch_dev *ca,
+ struct btree_iter *iter,
+ u64 *journal_seq, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_alloc_buf a;
+ struct bkey_alloc_unpacked u;
+ struct bucket *g;
struct bucket_mark m;
+ bool invalidating_cached_data;
+ size_t b;
+ int ret = 0;
- percpu_down_read_preempt_disable(&c->usage_lock);
- spin_lock(&c->freelist_lock);
+ BUG_ON(!ca->alloc_heap.used ||
+ !ca->alloc_heap.data[0].nr);
+ b = ca->alloc_heap.data[0].bucket;
+
+ /* first, put on free_inc and mark as owned by allocator: */
+ percpu_down_read(&c->mark_lock);
+ g = bucket(ca, b);
+ m = READ_ONCE(g->mark);
- bch2_invalidate_bucket(c, ca, bucket, &m);
+ BUG_ON(m.dirty_sectors);
- verify_not_on_freelist(c, ca, bucket);
- BUG_ON(!fifo_push(&ca->free_inc, bucket));
+ bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+ spin_lock(&c->freelist_lock);
+ verify_not_on_freelist(c, ca, b);
+ BUG_ON(!fifo_push(&ca->free_inc, b));
spin_unlock(&c->freelist_lock);
- bucket_io_clock_reset(c, ca, bucket, READ);
- bucket_io_clock_reset(c, ca, bucket, WRITE);
+ /*
+ * If we're not invalidating cached data, we only increment the bucket
+	 * gen in memory here; the incremented gen will be updated in the btree
+ * by bch2_trans_mark_pointer():
+ */
+ if (!m.cached_sectors &&
+ !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
+ BUG_ON(m.data_type);
+ bucket_cmpxchg(g, m, m.gen++);
+ percpu_up_read(&c->mark_lock);
+ goto out;
+ }
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
- if (m.journal_seq_valid) {
- u64 journal_seq = atomic64_read(&c->journal.seq);
- u64 bucket_seq = journal_seq;
+ /*
+ * If the read-only path is trying to shut down, we can't be generating
+ * new btree updates:
+ */
+ if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
+ ret = 1;
+ goto out;
+ }
- bucket_seq &= ~((u64) U16_MAX);
- bucket_seq |= m.journal_seq;
+ bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
+retry:
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
- if (bucket_seq > journal_seq)
- bucket_seq -= 1 << 16;
+ percpu_down_read(&c->mark_lock);
+ g = bucket(ca, iter->pos.offset);
+ m = READ_ONCE(g->mark);
+ u = alloc_mem_to_key(iter, g, m);
+
+ percpu_up_read(&c->mark_lock);
+
+ invalidating_cached_data = u.cached_sectors != 0;
+
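+	/* Invalidate the bucket: bump the gen and clear the data counters: */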
+ u.gen++;
+ u.data_type = 0;
+ u.dirty_sectors = 0;
+ u.cached_sectors = 0;
+ u.read_time = atomic64_read(&c->io_clock[READ].now);
+ u.write_time = atomic64_read(&c->io_clock[WRITE].now);
+
+ bch2_alloc_pack(c, &a, u);
+ bch2_trans_update(trans, iter, &a.k,
+ BTREE_TRIGGER_BUCKET_INVALIDATE);
+
+ /*
+ * XXX:
+ * when using deferred btree updates, we have journal reclaim doing
+ * btree updates and thus requiring the allocator to make forward
+ * progress, and here the allocator is requiring space in the journal -
+ * so we need a journal pre-reservation:
+ */
+ ret = bch2_trans_commit(trans, NULL,
+ invalidating_cached_data ? journal_seq : NULL,
+ BTREE_INSERT_NOUNLOCK|
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_JOURNAL_RESERVED|
+ flags);
+ if (ret == -EINTR)
+ goto retry;
+out:
+ if (!ret) {
+ /* remove from alloc_heap: */
+ struct alloc_heap_entry e, *top = ca->alloc_heap.data;
- *flush_seq = max(*flush_seq, bucket_seq);
+ top->bucket++;
+ top->nr--;
+
+ if (!top->nr)
+ heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
+
+ /*
+ * Make sure we flush the last journal entry that updated this
+ * bucket (i.e. deleting the last reference) before writing to
+ * this bucket again:
+ */
+ *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
+ } else {
+ size_t b2;
+
+ /* remove from free_inc: */
+ percpu_down_read(&c->mark_lock);
+ spin_lock(&c->freelist_lock);
+
+ bch2_mark_alloc_bucket(c, ca, b, false,
+ gc_pos_alloc(c, NULL), 0);
+
+ BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
+ BUG_ON(b != b2);
+
+ spin_unlock(&c->freelist_lock);
+ percpu_up_read(&c->mark_lock);
}
- return m.cached_sectors != 0;
+ return ret < 0 ? ret : 0;
}
/*
*/
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
- struct btree_iter iter;
+ struct btree_trans trans;
+ struct btree_iter *iter;
u64 journal_seq = 0;
int ret = 0;
- long b;
- bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ bch2_trans_init(&trans, c, 0, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
+ POS(ca->dev_idx, 0),
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
/* Only use nowait if we've already invalidated at least one bucket: */
while (!ret &&
!fifo_full(&ca->free_inc) &&
- (b = next_alloc_bucket(ca)) >= 0) {
- bool must_flush =
- bch2_invalidate_one_bucket(c, ca, b, &journal_seq);
-
- ret = __bch2_alloc_write_key(c, ca, b, &iter,
- must_flush ? &journal_seq : NULL,
- !fifo_empty(&ca->free_inc) ? BTREE_INSERT_NOWAIT : 0);
- }
+ ca->alloc_heap.used)
+ ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
+ BTREE_INSERT_GC_LOCK_HELD|
+ (!fifo_empty(&ca->free_inc)
+ ? BTREE_INSERT_NOWAIT : 0));
- bch2_btree_iter_unlock(&iter);
+ bch2_trans_iter_put(&trans, iter);
+ bch2_trans_exit(&trans);
/* If we used NOWAIT, don't return the error: */
if (!fifo_empty(&ca->free_inc))
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&c->freelist_lock);
- for (i = 0; i < RESERVE_NR; i++)
+ for (i = 0; i < RESERVE_NR; i++) {
+
+ /*
+ * Don't strand buckets on the copygc freelist until
+ * after recovery is finished:
+ */
+ if (!test_bit(BCH_FS_STARTED, &c->flags) &&
+ i == RESERVE_MOVINGGC)
+ continue;
+
if (fifo_push(&ca->free[i], bucket)) {
fifo_pop(&ca->free_inc, bucket);
+
closure_wake_up(&c->freelist_wait);
+ ca->allocator_state = ALLOCATOR_RUNNING;
+
spin_unlock(&c->freelist_lock);
goto out;
}
+ }
+
+ if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
+ ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
+ closure_wake_up(&c->freelist_wait);
+ }
+
spin_unlock(&c->freelist_lock);
if ((current->flags & PF_KTHREAD) &&
return 0;
}
+static inline bool allocator_thread_running(struct bch_dev *ca)
+{
+ return ca->mi.state == BCH_MEMBER_STATE_rw &&
+ test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
+}
+
/**
* bch_allocator_thread - move buckets from free_inc to reserves
*
set_freezable();
while (1) {
+ if (!allocator_thread_running(ca)) {
+ ca->allocator_state = ALLOCATOR_STOPPED;
+ if (kthread_wait_freezable(allocator_thread_running(ca)))
+ break;
+ }
+
+ ca->allocator_state = ALLOCATOR_RUNNING;
+
cond_resched();
+ if (kthread_should_stop())
+ break;
pr_debug("discarding %zu invalidated buckets",
fifo_used(&ca->free_inc));
pr_debug("free_inc now empty");
do {
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
- up_read(&c->gc_lock);
- bch_err(ca, "gc failure");
- goto stop;
- }
-
/*
* Find some buckets that we can invalidate, either
* they're completely unused, or only contain clean data
*/
if (!nr ||
(nr < ALLOC_SCAN_BATCH(ca) &&
- !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
- ca->allocator_blocked = true;
- closure_wake_up(&c->freelist_wait);
-
+ !fifo_empty(&ca->free[RESERVE_NONE]))) {
ret = wait_buckets_available(c, ca);
if (ret) {
up_read(&c->gc_lock);
}
} while (!nr);
- ca->allocator_blocked = false;
up_read(&c->gc_lock);
pr_debug("%zu buckets to invalidate", nr);
stop:
pr_debug("alloc thread stopping (ret %i)", ret);
+ ca->allocator_state = ALLOCATOR_STOPPED;
+ closure_wake_up(&c->freelist_wait);
return 0;
}
void bch2_recalc_capacity(struct bch_fs *c)
{
struct bch_dev *ca;
- u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+ u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
+ unsigned bucket_size_max = 0;
unsigned long ra_pages = 0;
unsigned i, j;
for (j = 0; j < RESERVE_NONE; j++)
dev_reserve += ca->free[j].size;
- dev_reserve += ca->free_inc.size;
-
- dev_reserve += ARRAY_SIZE(c->write_points);
-
dev_reserve += 1; /* btree write point */
dev_reserve += 1; /* copygc write point */
dev_reserve += 1; /* rebalance write point */
- dev_reserve += WRITE_POINT_COUNT;
dev_reserve *= ca->mi.bucket_size;
- ca->copygc_threshold = dev_reserve;
+ copygc_threshold += dev_reserve;
capacity += bucket_to_sector(ca, ca->mi.nbuckets -
ca->mi.first_bucket);
reserved_sectors += dev_reserve * 2;
+
+ bucket_size_max = max_t(unsigned, bucket_size_max,
+ ca->mi.bucket_size);
}
gc_reserve = c->opts.gc_reserve_bytes
reserved_sectors = min(reserved_sectors, capacity);
+ c->copygc_threshold = copygc_threshold;
c->capacity = capacity - reserved_sectors;
- if (c->capacity) {
- bch2_io_timer_add(&c->io_clock[READ],
- &c->bucket_clock[READ].rescale);
- bch2_io_timer_add(&c->io_clock[WRITE],
- &c->bucket_clock[WRITE].rescale);
- } else {
- bch2_io_timer_del(&c->io_clock[READ],
- &c->bucket_clock[READ].rescale);
- bch2_io_timer_del(&c->io_clock[WRITE],
- &c->bucket_clock[WRITE].rescale);
- }
+ c->bucket_size_max = bucket_size_max;
	/* Wake up in case someone was waiting for buckets */
closure_wake_up(&c->freelist_wait);
for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
bch2_writepoint_stop(c, ca, &c->write_points[i]);
- bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+ bch2_writepoint_stop(c, ca, &c->copygc_write_point);
bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
bch2_writepoint_stop(c, ca, &c->btree_write_point);
}
mutex_unlock(&c->btree_reserve_cache_lock);
+ while (1) {
+ struct open_bucket *ob;
+
+ spin_lock(&c->freelist_lock);
+ if (!ca->open_buckets_partial_nr) {
+ spin_unlock(&c->freelist_lock);
+ break;
+ }
+ ob = c->open_buckets +
+ ca->open_buckets_partial[--ca->open_buckets_partial_nr];
+ ob->on_partial_list = false;
+ spin_unlock(&c->freelist_lock);
+
+ bch2_open_bucket_put(c, ob);
+ }
+
+ bch2_ec_stop_dev(c, ca);
+
/*
* Wake up threads that were blocked on allocation, so they can notice
* the device can no longer be removed and the capacity has changed:
set_bit(ca->dev_idx, c->rw_devs[i].d);
}
+void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
+{
+ if (ca->alloc_thread)
+ closure_wait_event(&c->freelist_wait,
+ ca->allocator_state != ALLOCATOR_RUNNING);
+}
+
/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
return 0;
p = kthread_create(bch2_allocator_thread, ca,
- "bch_alloc[%s]", ca->name);
- if (IS_ERR(p))
+ "bch-alloc/%s", ca->name);
+ if (IS_ERR(p)) {
+ bch_err(ca->fs, "error creating allocator thread: %li",
+ PTR_ERR(p));
return PTR_ERR(p);
+ }
get_task_struct(p);
rcu_assign_pointer(ca->alloc_thread, p);
return 0;
}
-static void flush_held_btree_writes(struct bch_fs *c)
-{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- bool flush_updates;
- size_t i, nr_pending_updates;
-
- clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
-again:
- pr_debug("flushing dirty btree nodes");
- cond_resched();
-
- flush_updates = false;
- nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);
-
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos)
- if (btree_node_dirty(b) && (!b->written || b->level)) {
- if (btree_node_may_write(b)) {
- rcu_read_unlock();
- btree_node_lock_type(c, b, SIX_LOCK_read);
- bch2_btree_node_write(c, b, SIX_LOCK_read);
- six_unlock_read(&b->lock);
- goto again;
- } else {
- flush_updates = true;
- }
- }
- rcu_read_unlock();
-
- if (c->btree_roots_dirty)
- bch2_journal_meta(&c->journal);
-
- /*
- * This is ugly, but it's needed to flush btree node writes
- * without spinning...
- */
- if (flush_updates) {
- closure_wait_event(&c->btree_interior_update_wait,
- bch2_btree_interior_updates_nr_pending(c) <
- nr_pending_updates);
- goto again;
- }
-
-}
-
-static void allocator_start_issue_discards(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- size_t bu;
-
- for_each_rw_member(ca, c, dev_iter)
- while (fifo_pop(&ca->free_inc, bu))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bu),
- ca->mi.bucket_size, GFP_NOIO, 0);
-}
-
-static int __bch2_fs_allocator_start(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- u64 journal_seq = 0;
- long bu;
- bool invalidating_data = false;
- int ret = 0;
-
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
- return -1;
-
- if (test_alloc_startup(c)) {
- invalidating_data = true;
- goto not_enough;
- }
-
- /* Scan for buckets that are already invalidated: */
- for_each_rw_member(ca, c, dev_iter) {
- struct btree_iter iter;
- struct bucket_mark m;
- struct bkey_s_c k;
-
- for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
- if (k.k->type != BCH_ALLOC)
- continue;
-
- bu = k.k->p.offset;
- m = READ_ONCE(bucket(ca, bu)->mark);
-
- if (!is_available_bucket(m) || m.cached_sectors)
- continue;
-
- percpu_down_read_preempt_disable(&c->usage_lock);
- bch2_mark_alloc_bucket(c, ca, bu, true,
- gc_pos_alloc(c, NULL),
- BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
- BCH_BUCKET_MARK_GC_LOCK_HELD);
- percpu_up_read_preempt_enable(&c->usage_lock);
-
- fifo_push(&ca->free_inc, bu);
-
- if (fifo_full(&ca->free_inc))
- break;
- }
- bch2_btree_iter_unlock(&iter);
- }
-
- /* did we find enough buckets? */
- for_each_rw_member(ca, c, dev_iter)
- if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
- percpu_ref_put(&ca->io_ref);
- goto not_enough;
- }
-
- return 0;
-not_enough:
- pr_debug("did not find enough empty buckets; issuing discards");
-
- /* clear out free_inc, we'll be using it again below: */
- for_each_rw_member(ca, c, dev_iter)
- discard_invalidated_buckets(c, ca);
-
- pr_debug("scanning for reclaimable buckets");
-
- for_each_rw_member(ca, c, dev_iter) {
- find_reclaimable_buckets(c, ca);
-
- while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
- (bu = next_alloc_bucket(ca)) >= 0) {
- invalidating_data |=
- bch2_invalidate_one_bucket(c, ca, bu, &journal_seq);
-
- fifo_push(&ca->free[RESERVE_BTREE], bu);
- set_bit(bu, ca->buckets_dirty);
- }
- }
-
- pr_debug("done scanning for reclaimable buckets");
-
- /*
- * We're moving buckets to freelists _before_ they've been marked as
- * invalidated on disk - we have to so that we can allocate new btree
- * nodes to mark them as invalidated on disk.
- *
- * However, we can't _write_ to any of these buckets yet - they might
- * have cached data in them, which is live until they're marked as
- * invalidated on disk:
- */
- if (invalidating_data) {
- BUG();
- pr_info("holding writes");
- pr_debug("invalidating existing data");
- set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
- } else {
- pr_debug("issuing discards");
- allocator_start_issue_discards(c);
- }
-
- /*
- * XXX: it's possible for this to deadlock waiting on journal reclaim,
- * since we're holding btree writes. What then?
- */
- ret = bch2_alloc_write(c);
- if (ret)
- return ret;
-
- if (invalidating_data) {
- pr_debug("flushing journal");
-
- ret = bch2_journal_flush_seq(&c->journal, journal_seq);
- if (ret)
- return ret;
-
- pr_debug("issuing discards");
- allocator_start_issue_discards(c);
- }
-
- set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
-
- /* now flush dirty btree nodes: */
- if (invalidating_data)
- flush_held_btree_writes(c);
-
- return 0;
-}
-
-int bch2_fs_allocator_start(struct bch_fs *c)
+void bch2_fs_allocator_background_init(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
- int ret;
-
- down_read(&c->gc_lock);
- ret = __bch2_fs_allocator_start(c);
- up_read(&c->gc_lock);
-
- if (ret)
- return ret;
-
- for_each_rw_member(ca, c, i) {
- ret = bch2_dev_allocator_start(ca);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- return ret;
- }
- }
-
- return bch2_alloc_write(c);
-}
-
-void bch2_fs_allocator_init(struct bch_fs *c)
-{
- struct open_bucket *ob;
- struct write_point *wp;
-
- mutex_init(&c->write_points_hash_lock);
spin_lock_init(&c->freelist_lock);
- bch2_bucket_clock_init(c, READ);
- bch2_bucket_clock_init(c, WRITE);
-
- /* open bucket 0 is a sentinal NULL: */
- spin_lock_init(&c->open_buckets[0].lock);
-
- for (ob = c->open_buckets + 1;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
- spin_lock_init(&ob->lock);
- c->open_buckets_nr_free++;
-
- ob->freelist = c->open_buckets_freelist;
- c->open_buckets_freelist = ob - c->open_buckets;
- }
-
- writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
- writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
-
- for (wp = c->write_points;
- wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
- writepoint_init(wp, BCH_DATA_USER);
-
- wp->last_used = sched_clock();
- wp->write_point = (unsigned long) wp;
- hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
- }
c->pd_controllers_update_seconds = 5;
INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);