#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
+#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
+#include "lru.h"
#include "recovery.h"
+#include "varint.h"
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
-static const char * const bch2_alloc_field_names[] = {
-#define x(name, bytes) #name,
- BCH_ALLOC_FIELDS()
+/* Persistent alloc info: */
+
+static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
+#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
+ BCH_ALLOC_FIELDS_V1()
#undef x
- NULL
};
-static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
-
-/* Ratelimiting/PD controllers */
-
-static void pd_controllers_update(struct work_struct *work)
-{
- struct bch_fs *c = container_of(to_delayed_work(work),
- struct bch_fs,
- pd_controllers_update);
- struct bch_dev *ca;
- unsigned i;
-
- for_each_member_device(ca, c, i) {
- struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
-
- u64 free = bucket_to_sector(ca,
- __dev_buckets_free(ca, stats)) << 9;
- /*
- * Bytes of internal fragmentation, which can be
- * reclaimed by copy GC
- */
- s64 fragmented = (bucket_to_sector(ca,
- stats.buckets[BCH_DATA_USER] +
- stats.buckets[BCH_DATA_CACHED]) -
- (stats.sectors[BCH_DATA_USER] +
- stats.sectors[BCH_DATA_CACHED])) << 9;
-
- fragmented = max(0LL, fragmented);
-
- bch2_pd_controller_update(&ca->copygc_pd,
- free, fragmented, -1);
- }
-
- schedule_delayed_work(&c->pd_controllers_update,
- c->pd_controllers_update_seconds * HZ);
-}
-
-/* Persistent alloc info: */
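+/*
+ * In-memory, unpacked form of the versioned on-disk alloc key formats;
+ * fields not present in an older key version unpack to 0:
+ */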
+struct bkey_alloc_unpacked {
+ u64 journal_seq;
+ u8 gen;
+ u8 oldest_gen;
+ u8 data_type;
+ bool need_discard:1;
+ bool need_inc_gen:1;
+#define x(_name, _bits) u##_bits _name;
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+};
-static inline u64 get_alloc_field(const struct bch_alloc *a,
- const void **p, unsigned field)
+static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
+ const void **p, unsigned field)
{
- unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
+ unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
 u64 v;
 if (!(a->fields & (1 << field)))
 return 0;
 switch (bytes) {
 case 1: v = *((const u8 *) *p); break;
 case 2: v = le16_to_cpup(*p); break;
 case 4: v = le32_to_cpup(*p); break;
 case 8: v = le64_to_cpup(*p); break;
 default: BUG();
 }
 *p += bytes;
 return v;
}
-static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
- unsigned field, u64 v)
+static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
+ unsigned field, u64 v)
{
- unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
+ unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
 if (!v)
 return;
 a->v.fields |= 1 << field;
 switch (bytes) {
 case 1: *((u8 *) *p) = v; break;
 case 2: *((__le16 *) *p) = cpu_to_le16(v); break;
 case 4: *((__le32 *) *p) = cpu_to_le32(v); break;
 case 8: *((__le64 *) *p) = cpu_to_le64(v); break;
 default: BUG();
 }
 *p += bytes;
}
-struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
+static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
{
- struct bkey_alloc_unpacked ret = { .gen = 0 };
-
- if (k.k->type == KEY_TYPE_alloc) {
- const struct bch_alloc *a = bkey_s_c_to_alloc(k).v;
- const void *d = a->data;
- unsigned idx = 0;
+ const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
+ const void *d = in->data;
+ unsigned idx = 0;
- ret.gen = a->gen;
+ out->gen = in->gen;
-#define x(_name, _bits) ret._name = get_alloc_field(a, &d, idx++);
- BCH_ALLOC_FIELDS()
+#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
+ BCH_ALLOC_FIELDS_V1()
#undef x
- }
- return ret;
}
-void bch2_alloc_pack(struct bkey_i_alloc *dst,
- const struct bkey_alloc_unpacked src)
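+/* v2: fixed gen/oldest_gen/data_type header, then BCH_ALLOC_FIELDS_V2() as varints: */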
+static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
{
- unsigned idx = 0;
- void *d = dst->v.data;
-
- dst->v.fields = 0;
- dst->v.gen = src.gen;
+ struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
-#define x(_name, _bits) put_alloc_field(dst, &d, idx++, src._name);
- BCH_ALLOC_FIELDS()
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
#undef x
-
- set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
+ return 0;
}
-static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
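+/* v3: the v2 layout, plus journal_seq and the need_discard/need_inc_gen flags: */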
+static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
{
- unsigned i, bytes = offsetof(struct bch_alloc, data);
-
- for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
- if (a->fields & (1 << i))
- bytes += BCH_ALLOC_FIELD_BYTES[i];
+ struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
- return DIV_ROUND_UP(bytes, sizeof(u64));
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+ out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
+ out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
+ out->journal_seq = le64_to_cpu(a.v->journal_seq);
+
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ return 0;
}
-const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
+static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
- struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
+ struct bkey_alloc_unpacked ret = { .gen = 0 };
- if (k.k->p.inode >= c->sb.nr_devices ||
- !c->devs[k.k->p.inode])
- return "invalid device";
-
- /* allow for unknown fields */
- if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
- return "incorrect value size";
+ switch (k.k->type) {
+ case KEY_TYPE_alloc:
+ bch2_alloc_unpack_v1(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v2:
+ bch2_alloc_unpack_v2(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v3:
+ bch2_alloc_unpack_v3(&ret, k);
+ break;
+ }
- return NULL;
+ return ret;
}
-void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
+static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
- struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
- const void *d = a.v->data;
- unsigned i;
+ unsigned i, bytes = offsetof(struct bch_alloc, data);
- pr_buf(out, "gen %u", a.v->gen);
+ for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
+ if (a->fields & (1 << i))
+ bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
- for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
- if (a.v->fields & (1 << i))
- pr_buf(out, " %s %llu",
- bch2_alloc_field_names[i],
- get_alloc_field(a.v, &d, i));
+ return DIV_ROUND_UP(bytes, sizeof(u64));
}
-int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
+int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ int rw, struct printbuf *err)
{
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bkey_s_c k;
- struct bch_dev *ca;
- struct journal_key *j;
- unsigned i;
- int ret;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
- bch2_mark_key(c, k, 0, 0, NULL, 0,
- BCH_BUCKET_MARK_ALLOC_READ|
- BCH_BUCKET_MARK_NOATOMIC);
+ struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
- ret = bch2_trans_exit(&trans) ?: ret;
- if (ret) {
- bch_err(c, "error reading alloc info: %i", ret);
- return ret;
+ /* allow for unknown fields */
+ if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
+ prt_printf(err, "incorrect value size (%zu < %u)",
+ bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+ return -EINVAL;
}
- for_each_journal_key(*journal_keys, j)
- if (j->btree_id == BTREE_ID_ALLOC)
- bch2_mark_key(c, bkey_i_to_s_c(j->k),
- 0, 0, NULL, 0,
- BCH_BUCKET_MARK_ALLOC_READ|
- BCH_BUCKET_MARK_NOATOMIC);
-
- percpu_down_write(&c->mark_lock);
- bch2_dev_usage_from_buckets(c);
- percpu_up_write(&c->mark_lock);
+ return 0;
+}
- mutex_lock(&c->bucket_clock[READ].lock);
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- bch2_recalc_oldest_io(c, ca, READ);
- up_read(&ca->bucket_lock);
- }
- mutex_unlock(&c->bucket_clock[READ].lock);
+int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ int rw, struct printbuf *err)
+{
+ struct bkey_alloc_unpacked u;
- mutex_lock(&c->bucket_clock[WRITE].lock);
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- bch2_recalc_oldest_io(c, ca, WRITE);
- up_read(&ca->bucket_lock);
+ if (bch2_alloc_unpack_v2(&u, k)) {
+ prt_printf(err, "unpack error");
+ return -EINVAL;
}
- mutex_unlock(&c->bucket_clock[WRITE].lock);
return 0;
}
-int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ int rw, struct printbuf *err)
{
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bch_dev *ca;
- int ret;
-
- if (k->k.p.inode >= c->sb.nr_devices ||
- !c->devs[k->k.p.inode])
- return 0;
-
- ca = bch_dev_bkey_exists(c, k->k.p.inode);
-
- if (k->k.p.offset >= ca->mi.nbuckets)
- return 0;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
- BTREE_ITER_INTENT);
-
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- goto err;
+ struct bkey_alloc_unpacked u;
- /* check buckets_written with btree node locked: */
- if (test_bit(k->k.p.offset, ca->buckets_written)) {
- ret = 0;
- goto err;
+ if (bch2_alloc_unpack_v3(&u, k)) {
+ prt_printf(err, "unpack error");
+ return -EINVAL;
}
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
-
- ret = bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_JOURNAL_REPLAY|
- BTREE_INSERT_NOMARK);
-err:
- bch2_trans_exit(&trans);
- return ret;
+ return 0;
}
-int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
+int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ int rw, struct printbuf *err)
{
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bucket_array *buckets;
- struct bch_dev *ca;
- struct bucket *g;
- struct bucket_mark m, new;
- struct bkey_alloc_unpacked old_u, new_u;
- __BKEY_PADDED(k, 8) alloc_key; /* hack: */
- struct bkey_i_alloc *a;
- struct bkey_s_c k;
- unsigned i;
- size_t b;
- int ret = 0;
-
- BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
-
- bch2_trans_init(&trans, c, 0, 0);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
- for_each_rw_member(ca, c, i) {
- down_read(&ca->bucket_lock);
-restart:
- buckets = bucket_array(ca);
-
- for (b = buckets->first_bucket;
- b < buckets->nbuckets;
- b++) {
- if (!buckets->b[b].mark.dirty)
- continue;
-
- bch2_btree_iter_set_pos(iter, POS(i, b));
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- old_u = bch2_alloc_unpack(k);
-
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
- m = READ_ONCE(g->mark);
- new_u = alloc_mem_to_key(g, m);
- percpu_up_read(&c->mark_lock);
-
- if (!m.dirty)
- continue;
+ if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
+ prt_printf(err, "bad val size (%lu != %u)",
+ bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
+ return -EINVAL;
+ }
- if ((flags & BTREE_INSERT_LAZY_RW) &&
- percpu_ref_is_zero(&c->writes)) {
- up_read(&ca->bucket_lock);
- bch2_trans_unlock(&trans);
+ if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
+ prt_printf(err, "invalid backpointers_start");
+ return -EINVAL;
+ }
- ret = bch2_fs_read_write_early(c);
- down_read(&ca->bucket_lock);
+ if (rw == WRITE) {
+ if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
+ prt_printf(err, "invalid data type (got %u should be %u)",
+ a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+ return -EINVAL;
+ }
- if (ret)
- goto err;
- goto restart;
+ switch (a.v->data_type) {
+ case BCH_DATA_free:
+ case BCH_DATA_need_gc_gens:
+ case BCH_DATA_need_discard:
+ if (a.v->dirty_sectors ||
+ a.v->cached_sectors ||
+ a.v->stripe) {
+ prt_printf(err, "empty data type free but have data");
+ return -EINVAL;
}
-
- a = bkey_alloc_init(&alloc_key.k);
- a->k.p = iter->pos;
- bch2_alloc_pack(a, new_u);
-
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
- ret = bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_NOMARK|
- flags);
-err:
- if (ret && !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) {
- bch_err(c, "error %i writing alloc info", ret);
- printk(KERN_CONT "dev %llu bucket %llu\n",
- iter->pos.inode, iter->pos.offset);
- printk(KERN_CONT "gen %u -> %u\n", old_u.gen, new_u.gen);
-#define x(_name, _bits) printk(KERN_CONT #_name " %u -> %u\n", old_u._name, new_u._name);
- BCH_ALLOC_FIELDS()
-#undef x
+ break;
+ case BCH_DATA_sb:
+ case BCH_DATA_journal:
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ if (!a.v->dirty_sectors) {
+ prt_printf(err, "data_type %s but dirty_sectors==0",
+ bch2_data_types[a.v->data_type]);
+ return -EINVAL;
+ }
+ break;
+ case BCH_DATA_cached:
+ if (!a.v->cached_sectors ||
+ a.v->dirty_sectors ||
+ a.v->stripe) {
+ prt_printf(err, "data type inconsistency");
+ return -EINVAL;
}
- if (ret)
- break;
-
- new = m;
- new.dirty = false;
- atomic64_cmpxchg(&g->_mark.v, m.v.counter, new.v.counter);
-
- if (ca->buckets_written)
- set_bit(b, ca->buckets_written);
-
- bch2_trans_cond_resched(&trans);
- *wrote = true;
- }
- up_read(&ca->bucket_lock);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
+ if (!a.v->io_time[READ] &&
+ test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
+ prt_printf(err, "cached bucket with read_time == 0");
+ return -EINVAL;
+ }
+ break;
+ case BCH_DATA_stripe:
+ if (!a.v->stripe) {
+ prt_printf(err, "data_type %s but stripe==0",
+ bch2_data_types[a.v->data_type]);
+ return -EINVAL;
+ }
break;
}
}
- bch2_trans_exit(&trans);
+ return 0;
+}
- return ret;
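+/* Byte-reverse the low 40 bits, for the 40-bit backpointer bucket_offset fields: */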
+static inline u64 swab40(u64 x)
+{
+ return (((x & 0x00000000ffULL) << 32)|
+ ((x & 0x000000ff00ULL) << 16)|
+ ((x & 0x0000ff0000ULL) >> 0)|
+ ((x & 0x00ff000000ULL) >> 16)|
+ ((x & 0xff00000000ULL) >> 32));
}
-/* Bucket IO clocks: */
+void bch2_alloc_v4_swab(struct bkey_s k)
+{
+ struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
+ struct bch_backpointer *bp, *bps;
+
+ a->journal_seq = swab64(a->journal_seq);
+ a->flags = swab32(a->flags);
+ a->dirty_sectors = swab32(a->dirty_sectors);
+ a->cached_sectors = swab32(a->cached_sectors);
+ a->io_time[0] = swab64(a->io_time[0]);
+ a->io_time[1] = swab64(a->io_time[1]);
+ a->stripe = swab32(a->stripe);
+ a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+
+ bps = alloc_v4_backpointers(a);
+ for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
+ bp->bucket_offset = swab40(bp->bucket_offset);
+ bp->bucket_len = swab32(bp->bucket_len);
+ bch2_bpos_swab(&bp->pos);
+ }
+}
-static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
+void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
- struct bucket_clock *clock = &c->bucket_clock[rw];
- struct bucket_array *buckets = bucket_array(ca);
- struct bucket *g;
- u16 max_last_io = 0;
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a = &_a;
+ const struct bch_backpointer *bps;
unsigned i;
- lockdep_assert_held(&c->bucket_clock[rw].lock);
-
- /* Recalculate max_last_io for this device: */
- for_each_bucket(g, buckets)
- max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
-
- ca->max_last_bucket_io[rw] = max_last_io;
-
- /* Recalculate global max_last_io: */
- max_last_io = 0;
+ if (k.k->type == KEY_TYPE_alloc_v4)
+ a = bkey_s_c_to_alloc_v4(k).v;
+ else
+ bch2_alloc_to_v4(k, &_a);
+
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "gen %u oldest_gen %u data_type %s",
+ a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
+ prt_newline(out);
+ prt_printf(out, "journal_seq %llu", a->journal_seq);
+ prt_newline(out);
+ prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
+ prt_newline(out);
+ prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
+ prt_newline(out);
+ prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
+ prt_newline(out);
+ prt_printf(out, "cached_sectors %u", a->cached_sectors);
+ prt_newline(out);
+ prt_printf(out, "stripe %u", a->stripe);
+ prt_newline(out);
+ prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
+ prt_newline(out);
+ prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
+ prt_newline(out);
+ prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
+ prt_newline(out);
+ prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
+ printbuf_indent_add(out, 2);
+
+ bps = alloc_v4_backpointers_c(a);
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
+ prt_newline(out);
+ bch2_backpointer_to_text(out, &bps[i]);
+ }
- for_each_member_device(ca, c, i)
- max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
+ printbuf_indent_sub(out, 4);
+}
- clock->max_last_io = max_last_io;
+void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+{
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ int d;
+
+ *out = *bkey_s_c_to_alloc_v4(k).v;
+
+ d = (int) BCH_ALLOC_V4_U64s -
+ (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
+ if (unlikely(d > 0)) {
+ memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
+ 0,
+ d * sizeof(u64));
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ }
+ } else {
+ struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
+
+ *out = (struct bch_alloc_v4) {
+ .journal_seq = u.journal_seq,
+ .flags = u.need_discard,
+ .gen = u.gen,
+ .oldest_gen = u.oldest_gen,
+ .data_type = u.data_type,
+ .stripe_redundancy = u.stripe_redundancy,
+ .dirty_sectors = u.dirty_sectors,
+ .cached_sectors = u.cached_sectors,
+ .io_time[READ] = u.read_time,
+ .io_time[WRITE] = u.write_time,
+ .stripe = u.stripe,
+ };
+
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ }
}
-static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
+static noinline struct bkey_i_alloc_v4 *
+__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
- struct bucket_clock *clock = &c->bucket_clock[rw];
- struct bucket_array *buckets;
- struct bch_dev *ca;
- struct bucket *g;
- unsigned i;
+ struct bkey_i_alloc_v4 *ret;
+ unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
+ ? bkey_bytes(k.k)
+ : sizeof(struct bkey_i_alloc_v4);
- trace_rescale_prios(c);
+ /*
+ * Reserve space for one more backpointer here, so that a single
+ * backpointer can be added without having to reallocate:
+ */
+ ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
+ if (IS_ERR(ret))
+ return ret;
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ struct bch_backpointer *src, *dst;
- for_each_bucket(g, buckets)
- g->io_time[rw] = clock->hand -
- bucket_last_io(c, g, rw) / 2;
+ bkey_reassemble(&ret->k_i, k);
- bch2_recalc_oldest_io(c, ca, rw);
+ src = alloc_v4_backpointers(&ret->v);
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
+ dst = alloc_v4_backpointers(&ret->v);
- up_read(&ca->bucket_lock);
+ memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
+ sizeof(struct bch_backpointer));
+ memset(src, 0, dst - src);
+ set_alloc_v4_u64s(ret);
+ } else {
+ bkey_alloc_v4_init(&ret->k_i);
+ ret->k.p = k.k->p;
+ bch2_alloc_to_v4(k, &ret->v);
}
+ return ret;
}
-static inline u64 bucket_clock_freq(u64 capacity)
+static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
- return max(capacity >> 10, 2028ULL);
+ if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
+ BCH_ALLOC_V4_BACKPOINTERS_START(bkey_s_c_to_alloc_v4(k).v) == BCH_ALLOC_V4_U64s) {
+ /*
+ * Reserve space for one more backpointer here, so that a single
+ * backpointer can be added without having to reallocate:
+ */
+ struct bkey_i_alloc_v4 *ret =
+ bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
+ if (!IS_ERR(ret))
+ bkey_reassemble(&ret->k_i, k);
+ return ret;
+ }
+
+ return __bch2_alloc_to_v4_mut(trans, k);
}
-static void bch2_inc_clock_hand(struct io_timer *timer)
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
- struct bucket_clock *clock = container_of(timer,
- struct bucket_clock, rescale);
- struct bch_fs *c = container_of(clock,
- struct bch_fs, bucket_clock[clock->rw]);
- struct bch_dev *ca;
- u64 capacity;
- unsigned i;
+ return bch2_alloc_to_v4_mut_inlined(trans, k);
+}
- mutex_lock(&clock->lock);
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
+{
+ struct bkey_s_c k;
+ struct bkey_i_alloc_v4 *a;
+ int ret;
- /* if clock cannot be advanced more, rescale prio */
- if (clock->max_last_io >= U16_MAX - 2)
- bch2_rescale_bucket_io_times(c, clock->rw);
+ bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
+ BTREE_ITER_WITH_UPDATES|
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch2_trans_iter_exit(trans, iter);
+ return ERR_PTR(ret);
+ }
- BUG_ON(clock->max_last_io >= U16_MAX - 2);
+ a = bch2_alloc_to_v4_mut_inlined(trans, k);
+ if (IS_ERR(a))
+ bch2_trans_iter_exit(trans, iter);
+ return a;
+}
- for_each_member_device(ca, c, i)
- ca->max_last_bucket_io[clock->rw]++;
- clock->max_last_io++;
- clock->hand++;
+int bch2_alloc_read(struct bch_fs *c)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 a;
+ struct bch_dev *ca;
+ int ret;
- mutex_unlock(&clock->lock);
+ bch2_trans_init(&trans, c, 0, 0);
- capacity = READ_ONCE(c->capacity);
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_bucket_exists(c, k.k->p))
+ continue;
- if (!capacity)
- return;
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ bch2_alloc_to_v4(k, &a);
- /*
- * we only increment when 0.1% of the filesystem capacity has been read
- * or written too, this determines if it's time
- *
- * XXX: we shouldn't really be going off of the capacity of devices in
- * RW mode (that will be 0 when we're RO, yet we can still service
- * reads)
- */
- timer->expire += bucket_clock_freq(capacity);
+ *bucket_gen(ca, k.k->p.offset) = a.gen;
+ }
+ bch2_trans_iter_exit(&trans, &iter);
- bch2_io_timer_add(&c->io_clock[clock->rw], timer);
-}
+ bch2_trans_exit(&trans);
-static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
-{
- struct bucket_clock *clock = &c->bucket_clock[rw];
+ if (ret)
+ bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
- clock->hand = 1;
- clock->rw = rw;
- clock->rescale.fn = bch2_inc_clock_hand;
- clock->rescale.expire = bucket_clock_freq(c->capacity);
- mutex_init(&clock->lock);
+ return ret;
}
-/* Background allocator thread: */
-
-/*
- * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
- * (marking them as invalidated on disk), then optionally issues discard
- * commands to the newly free buckets, then puts them on the various freelists.
- */
+/* Free space/discard btree: */
-#define BUCKET_GC_GEN_MAX 96U
-
-/**
- * wait_buckets_available - wait on reclaimable buckets
- *
- * If there aren't enough available buckets to fill up free_inc, wait until
- * there are.
- */
-static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
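+/*
+ * Keep the need_discard/freespace btrees in sync with an alloc key: buckets
+ * needing discard get a need_discard entry, free buckets a freespace entry
+ * (with the bucket's genbits encoded in the high bits of the key's offset):
+ */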
+static int bch2_bucket_do_index(struct btree_trans *trans,
+ struct bkey_s_c alloc_k,
+ const struct bch_alloc_v4 *a,
+ bool set)
{
- unsigned long gc_count = c->gc_count;
- int ret = 0;
-
- ca->allocator_state = ALLOCATOR_BLOCKED;
- closure_wake_up(&c->freelist_wait);
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+ struct btree_iter iter;
+ struct bkey_s_c old;
+ struct bkey_i *k;
+ enum btree_id btree;
+ enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
+ enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
+ struct printbuf buf = PRINTBUF;
+ int ret;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- ret = 1;
- break;
- }
+ if (a->data_type != BCH_DATA_free &&
+ a->data_type != BCH_DATA_need_discard)
+ return 0;
- if (gc_count != c->gc_count)
- ca->inc_gen_really_needs_gc = 0;
+ k = bch2_trans_kmalloc(trans, sizeof(*k));
+ if (IS_ERR(k))
+ return PTR_ERR(k);
- if ((ssize_t) (dev_buckets_available(c, ca) -
- ca->inc_gen_really_needs_gc) >=
- (ssize_t) fifo_free(&ca->free_inc))
- break;
+ bkey_init(&k->k);
+ k->k.type = new_type;
- up_read(&c->gc_lock);
- schedule();
- try_to_freeze();
- down_read(&c->gc_lock);
+ switch (a->data_type) {
+ case BCH_DATA_free:
+ btree = BTREE_ID_freespace;
+ k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
+ bch2_key_resize(&k->k, 1);
+ break;
+ case BCH_DATA_need_discard:
+ btree = BTREE_ID_need_discard;
+ k->k.p = alloc_k.k->p;
+ break;
+ default:
+ return 0;
}
- __set_current_state(TASK_RUNNING);
- ca->allocator_state = ALLOCATOR_RUNNING;
- closure_wake_up(&c->freelist_wait);
+ bch2_trans_iter_init(trans, &iter, btree,
+ bkey_start_pos(&k->k),
+ BTREE_ITER_INTENT);
+ old = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(old);
+ if (ret)
+ goto err;
+
+ if (ca->mi.freespace_initialized &&
+ bch2_trans_inconsistent_on(old.k->type != old_type, trans,
+ "incorrect key when %s %s btree (got %s should be %s)\n"
+ " for %s",
+ set ? "setting" : "clearing",
+ bch2_btree_ids[btree],
+ bch2_bkey_types[old.k->type],
+ bch2_bkey_types[old_type],
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ ret = -EIO;
+ goto err;
+ }
+ ret = bch2_trans_update(trans, &iter, k, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
return ret;
}
-static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
- size_t bucket,
- struct bucket_mark mark)
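+/*
+ * Transactional trigger for alloc keys: fixes up the data type, keeps the
+ * need_discard/freespace btrees and the LRU in sync with the new key, and
+ * bumps io_time and gen as needed:
+ */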
+int bch2_trans_mark_alloc(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
{
- u8 gc_gen;
+ struct bch_fs *c = trans->c;
+ struct bch_alloc_v4 old_a, *new_a;
+ u64 old_lru, new_lru;
+ int ret = 0;
- if (!is_available_bucket(mark))
- return false;
+ /*
+ * Deletion only happens in the device removal path, with
+ * BTREE_TRIGGER_NORUN:
+ */
+ BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
- if (ca->buckets_nouse &&
- test_bit(bucket, ca->buckets_nouse))
- return false;
+ bch2_alloc_to_v4(old, &old_a);
+ new_a = &bkey_i_to_alloc_v4(new)->v;
- gc_gen = bucket_gc_gen(ca, bucket);
+ new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
- if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
- ca->inc_gen_needs_gc++;
+ if (new_a->dirty_sectors > old_a.dirty_sectors ||
+ new_a->cached_sectors > old_a.cached_sectors) {
+ new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+ new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+ SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+ }
- if (gc_gen >= BUCKET_GC_GEN_MAX)
- ca->inc_gen_really_needs_gc++;
+ if (data_type_is_empty(new_a->data_type) &&
+ BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+ !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
+ new_a->gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+ }
- return gc_gen < BUCKET_GC_GEN_MAX;
-}
+ if (old_a.data_type != new_a->data_type ||
+ (new_a->data_type == BCH_DATA_free &&
+ alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
+ ret = bch2_bucket_do_index(trans, old, &old_a, false) ?:
+ bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
+ if (ret)
+ return ret;
+ }
-/*
- * Determines what order we're going to reuse buckets, smallest bucket_key()
- * first.
- *
- *
- * - We take into account the read prio of the bucket, which gives us an
- * indication of how hot the data is -- we scale the prio so that the prio
- * farthest from the clock is worth 1/8th of the closest.
- *
- * - The number of sectors of cached data in the bucket, which gives us an
- * indication of the cost in cache misses this eviction will cause.
- *
- * - If hotness * sectors used compares equal, we pick the bucket with the
- * smallest bucket_gc_gen() - since incrementing the same bucket's generation
- * number repeatedly forces us to run mark and sweep gc to avoid generation
- * number wraparound.
- */
-
-static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct bucket_mark m)
-{
- unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
- unsigned max_last_io = ca->max_last_bucket_io[READ];
+ if (new_a->data_type == BCH_DATA_cached &&
+ !new_a->io_time[READ])
+ new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- /*
- * Time since last read, scaled to [0, 8) where larger value indicates
- * more recently read data:
- */
- unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
+ old_lru = alloc_lru_idx(old_a);
+ new_lru = alloc_lru_idx(*new_a);
- /* How much we want to keep the data in this bucket: */
- unsigned long data_wantness =
- (hotness + 1) * bucket_sectors_used(m);
+ if (old_lru != new_lru) {
+ ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
+ old_lru, &new_lru, old);
+ if (ret)
+ return ret;
- unsigned long needs_journal_commit =
- bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
+ if (new_a->data_type == BCH_DATA_cached)
+ new_a->io_time[READ] = new_lru;
+ }
- return (data_wantness << 9) |
- (needs_journal_commit << 8) |
- (bucket_gc_gen(ca, b) / 16);
+ return 0;
}
-static inline int bucket_alloc_cmp(alloc_heap *h,
- struct alloc_heap_entry l,
- struct alloc_heap_entry r)
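+/*
+ * fsck: verify that each alloc key has matching need_discard and freespace
+ * btree entries, creating or correcting them if not:
+ */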
+static int bch2_check_alloc_key(struct btree_trans *trans,
+ struct btree_iter *alloc_iter,
+ struct btree_iter *discard_iter,
+ struct btree_iter *freespace_iter)
{
- return cmp_int(l.key, r.key) ?:
- cmp_int(r.nr, l.nr) ?:
- cmp_int(l.bucket, r.bucket);
-}
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca;
+ struct bch_alloc_v4 a;
+ unsigned discard_key_type, freespace_key_type;
+ struct bkey_s_c alloc_k, k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
-static inline int bucket_idx_cmp(const void *_l, const void *_r)
-{
- const struct alloc_heap_entry *l = _l, *r = _r;
+ alloc_k = bch2_dev_bucket_exists(c, alloc_iter->pos)
+ ? bch2_btree_iter_peek_slot(alloc_iter)
+ : bch2_btree_iter_peek(alloc_iter);
+ if (!alloc_k.k)
+ return 1;
- return cmp_int(l->bucket, r->bucket);
-}
+ ret = bkey_err(alloc_k);
+ if (ret)
+ return ret;
-static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bucket_array *buckets;
- struct alloc_heap_entry e = { 0 };
- size_t b, i, nr = 0;
+ if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+ "alloc key for invalid device:bucket %llu:%llu",
+ alloc_k.k->p.inode, alloc_k.k->p.offset))
+ return bch2_btree_delete_at(trans, alloc_iter, 0);
- ca->alloc_heap.used = 0;
+ ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+ if (!ca->mi.freespace_initialized)
+ return 0;
- mutex_lock(&c->bucket_clock[READ].lock);
- down_read(&ca->bucket_lock);
+ bch2_alloc_to_v4(alloc_k, &a);
- buckets = bucket_array(ca);
+ discard_key_type = a.data_type == BCH_DATA_need_discard
+ ? KEY_TYPE_set : 0;
+ freespace_key_type = a.data_type == BCH_DATA_free
+ ? KEY_TYPE_set : 0;
- bch2_recalc_oldest_io(c, ca, READ);
+ bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
+ bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));
- /*
- * Find buckets with lowest read priority, by building a maxheap sorted
- * by read priority and repeatedly replacing the maximum element until
- * all buckets have been visited.
- */
- for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
- struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
- unsigned long key = bucket_sort_key(c, ca, b, m);
+ k = bch2_btree_iter_peek_slot(discard_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
- if (!bch2_can_invalidate_bucket(ca, b, m))
- continue;
+ if (k.k->type != discard_key_type &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
+ " %s",
+ bch2_bkey_types[k.k->type],
+ bch2_bkey_types[discard_key_type],
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
- if (e.nr && e.bucket + e.nr == b && e.key == key) {
- e.nr++;
- } else {
- if (e.nr)
- heap_add_or_replace(&ca->alloc_heap, e,
- -bucket_alloc_cmp, NULL);
-
- e = (struct alloc_heap_entry) {
- .bucket = b,
- .nr = 1,
- .key = key,
- };
- }
+ bkey_init(&update->k);
+ update->k.type = discard_key_type;
+ update->k.p = discard_iter->pos;
- cond_resched();
+ ret = bch2_trans_update(trans, discard_iter, update, 0);
+ if (ret)
+ goto err;
}
- if (e.nr)
- heap_add_or_replace(&ca->alloc_heap, e,
- -bucket_alloc_cmp, NULL);
+ k = bch2_btree_iter_peek_slot(freespace_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
- for (i = 0; i < ca->alloc_heap.used; i++)
- nr += ca->alloc_heap.data[i].nr;
+ if (k.k->type != freespace_key_type &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
+ " %s",
+ bch2_bkey_types[k.k->type],
+ bch2_bkey_types[freespace_key_type],
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
- while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
- nr -= ca->alloc_heap.data[0].nr;
- heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
- }
+ bkey_init(&update->k);
+ update->k.type = freespace_key_type;
+ update->k.p = freespace_iter->pos;
+ bch2_key_resize(&update->k, 1);
- up_read(&ca->bucket_lock);
- mutex_unlock(&c->bucket_clock[READ].lock);
+ ret = bch2_trans_update(trans, freespace_iter, update, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
}
-static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
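+/*
+ * fsck, the other direction: verify that every need_discard/freespace entry
+ * points to an alloc key in the corresponding state, deleting it if not:
+ */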
+static int bch2_check_discard_freespace_key(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- struct bucket_array *buckets = bucket_array(ca);
- struct bucket_mark m;
- size_t b, start;
-
- if (ca->fifo_last_bucket < ca->mi.first_bucket ||
- ca->fifo_last_bucket >= ca->mi.nbuckets)
- ca->fifo_last_bucket = ca->mi.first_bucket;
-
- start = ca->fifo_last_bucket;
+ struct bch_fs *c = trans->c;
+ struct btree_iter alloc_iter;
+ struct bkey_s_c alloc_k;
+ struct bch_alloc_v4 a;
+ u64 genbits;
+ struct bpos pos;
+ enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
+ ? BCH_DATA_need_discard
+ : BCH_DATA_free;
+ struct printbuf buf = PRINTBUF;
+ int ret;
- do {
- ca->fifo_last_bucket++;
- if (ca->fifo_last_bucket == ca->mi.nbuckets)
- ca->fifo_last_bucket = ca->mi.first_bucket;
+ pos = iter->pos;
+ pos.offset &= ~(~0ULL << 56);
+ genbits = iter->pos.offset & (~0ULL << 56);
- b = ca->fifo_last_bucket;
- m = READ_ONCE(buckets->b[b].mark);
+ bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
- if (bch2_can_invalidate_bucket(ca, b, m)) {
- struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
+ if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+ "entry in %s btree for nonexistant dev:bucket %llu:%llu",
+ bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
+ goto delete;
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
- if (heap_full(&ca->alloc_heap))
- break;
- }
+ alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
+ ret = bkey_err(alloc_k);
+ if (ret)
+ goto err;
- cond_resched();
- } while (ca->fifo_last_bucket != start);
+ bch2_alloc_to_v4(alloc_k, &a);
+
+ if (fsck_err_on(a.data_type != state ||
+ (state == BCH_DATA_free &&
+ genbits != alloc_freespace_genbits(a)), c,
+ "%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)",
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+ bch2_btree_ids[iter->btree_id],
+ a.data_type == state,
+ genbits >> 56, alloc_freespace_genbits(a) >> 56))
+ goto delete;
+out:
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ printbuf_exit(&buf);
+ return ret;
+delete:
+ ret = bch2_btree_delete_extent_at(trans, iter,
+ iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);
+ goto out;
}
-static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
+int bch2_check_alloc_info(struct bch_fs *c)
{
- struct bucket_array *buckets = bucket_array(ca);
- struct bucket_mark m;
- size_t checked, i;
-
- for (checked = 0;
- checked < ca->mi.nbuckets / 2;
- checked++) {
- size_t b = bch2_rand_range(ca->mi.nbuckets -
- ca->mi.first_bucket) +
- ca->mi.first_bucket;
-
- m = READ_ONCE(buckets->b[b].mark);
+ struct btree_trans trans;
+ struct btree_iter iter, discard_iter, freespace_iter;
+ struct bkey_s_c k;
+ int ret = 0;
- if (bch2_can_invalidate_bucket(ca, b, m)) {
- struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
+ bch2_trans_init(&trans, c, 0, 0);
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
- if (heap_full(&ca->alloc_heap))
- break;
- }
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ while (1) {
+ ret = commit_do(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ bch2_check_alloc_key(&trans, &iter,
+ &discard_iter,
+ &freespace_iter));
+ if (ret)
+ break;
- cond_resched();
+ bch2_btree_iter_advance(&iter);
}
+ bch2_trans_iter_exit(&trans, &freespace_iter);
+ bch2_trans_iter_exit(&trans, &discard_iter);
+ bch2_trans_iter_exit(&trans, &iter);
- sort(ca->alloc_heap.data,
- ca->alloc_heap.used,
- sizeof(ca->alloc_heap.data[0]),
- bucket_idx_cmp, NULL);
+ if (ret < 0)
+ goto err;
- /* remove duplicates: */
- for (i = 0; i + 1 < ca->alloc_heap.used; i++)
- if (ca->alloc_heap.data[i].bucket ==
- ca->alloc_heap.data[i + 1].bucket)
- ca->alloc_heap.data[i].nr = 0;
+ ret = for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_need_discard, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_discard_freespace_key(&trans, &iter)) ?:
+ for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_freespace, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_discard_freespace_key(&trans, &iter));
+err:
+ bch2_trans_exit(&trans);
+ return ret < 0 ? ret : 0;
}
-static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
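+/* fsck: verify each cached bucket has an lru entry pointing back at it: */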
+static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
+ struct btree_iter *alloc_iter)
{
- size_t i, nr = 0;
+ struct bch_fs *c = trans->c;
+ struct btree_iter lru_iter;
+ struct bch_alloc_v4 a;
+ struct bkey_s_c alloc_k, k;
+ struct printbuf buf = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ int ret;
- ca->inc_gen_needs_gc = 0;
+ alloc_k = bch2_btree_iter_peek(alloc_iter);
+ if (!alloc_k.k)
+ return 0;
- switch (ca->mi.replacement) {
- case CACHE_REPLACEMENT_LRU:
- find_reclaimable_buckets_lru(c, ca);
- break;
- case CACHE_REPLACEMENT_FIFO:
- find_reclaimable_buckets_fifo(c, ca);
- break;
- case CACHE_REPLACEMENT_RANDOM:
- find_reclaimable_buckets_random(c, ca);
- break;
- }
+ ret = bkey_err(alloc_k);
+ if (ret)
+ return ret;
- heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
+ bch2_alloc_to_v4(alloc_k, &a);
- for (i = 0; i < ca->alloc_heap.used; i++)
- nr += ca->alloc_heap.data[i].nr;
+ if (a.data_type != BCH_DATA_cached)
+ return 0;
- return nr;
-}
+ bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
+ POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
-static inline long next_alloc_bucket(struct bch_dev *ca)
-{
- struct alloc_heap_entry e, *top = ca->alloc_heap.data;
+ k = bch2_btree_iter_peek_slot(&lru_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(!a.io_time[READ], c,
+ "cached bucket with read_time 0\n"
+ " %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
+ fsck_err_on(k.k->type != KEY_TYPE_lru ||
+ le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
+ "incorrect/missing lru entry\n"
+ " %s\n"
+ " %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+ (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
+ u64 read_time = a.io_time[READ];
+
+ if (!a.io_time[READ])
+ a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+
+ ret = bch2_lru_set(trans,
+ alloc_k.k->p.inode,
+ alloc_k.k->p.offset,
+ &a.io_time[READ]);
+ if (ret)
+ goto err;
- while (ca->alloc_heap.used) {
- if (top->nr) {
- size_t b = top->bucket;
+ if (a.io_time[READ] != read_time) {
+ struct bkey_i_alloc_v4 *a_mut =
+ bch2_alloc_to_v4_mut(trans, alloc_k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ goto err;
- top->bucket++;
- top->nr--;
- return b;
+ a_mut->v.io_time[READ] = a.io_time[READ];
+ ret = bch2_trans_update(trans, alloc_iter,
+ &a_mut->k_i, BTREE_TRIGGER_NORUN);
+ if (ret)
+ goto err;
}
-
- heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
}
-
- return -1;
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &lru_iter);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf);
+ return ret;
}
-/*
- * returns sequence number of most recent journal entry that updated this
- * bucket:
- */
-static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
+int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
- if (m.journal_seq_valid) {
- u64 journal_seq = atomic64_read(&c->journal.seq);
- u64 bucket_seq = journal_seq;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
- bucket_seq &= ~((u64) U16_MAX);
- bucket_seq |= m.journal_seq;
+ bch2_trans_init(&trans, c, 0, 0);
- if (bucket_seq > journal_seq)
- bucket_seq -= 1 << 16;
+ for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+ POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_alloc_to_lru_ref(&trans, &iter));
- return bucket_seq;
- } else {
- return 0;
- }
+ bch2_trans_exit(&trans);
+ return ret < 0 ? ret : 0;
}
-static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
- struct bch_dev *ca,
- struct btree_iter *iter,
- u64 *journal_seq, unsigned flags)
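+/*
+ * Discard a single bucket from the need_discard btree, skipping buckets that
+ * are currently open or still awaiting a journal flush, then clear its
+ * need_discard flag:
+ */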
+static int bch2_discard_one_bucket(struct btree_trans *trans,
+ struct btree_iter *need_discard_iter,
+ struct bpos *discard_pos_done,
+ u64 *seen,
+ u64 *open,
+ u64 *need_journal_commit,
+ u64 *discarded)
{
-#if 0
- __BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
-#else
- /* hack: */
- __BKEY_PADDED(k, 8) alloc_key;
-#endif
struct bch_fs *c = trans->c;
- struct bkey_i_alloc *a;
- struct bkey_alloc_unpacked u;
- struct bucket *g;
- struct bucket_mark m;
+ struct bpos pos = need_discard_iter->pos;
+ struct btree_iter iter = { NULL };
struct bkey_s_c k;
- bool invalidating_cached_data;
- size_t b;
- int ret;
-
- BUG_ON(!ca->alloc_heap.used ||
- !ca->alloc_heap.data[0].nr);
- b = ca->alloc_heap.data[0].bucket;
-
- /* first, put on free_inc and mark as owned by allocator: */
- percpu_down_read(&c->mark_lock);
- spin_lock(&c->freelist_lock);
-
- verify_not_on_freelist(c, ca, b);
-
- BUG_ON(!fifo_push(&ca->free_inc, b));
+ struct bch_dev *ca;
+ struct bkey_i_alloc_v4 *a;
+ struct printbuf buf = PRINTBUF;
+ bool did_discard = false;
+ int ret = 0;
- bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+ ca = bch_dev_bkey_exists(c, pos.inode);
+ if (!percpu_ref_tryget(&ca->io_ref)) {
+ bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
+ return 0;
+ }
- spin_unlock(&c->freelist_lock);
- percpu_up_read(&c->mark_lock);
+ if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
+ (*open)++;
+ goto out;
+ }
- BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
+ if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk,
+ pos.inode, pos.offset)) {
+ (*need_journal_commit)++;
+ goto out;
+ }
- bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
-retry:
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ need_discard_iter->pos,
+ BTREE_ITER_CACHED);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
- return ret;
-
- /*
- * The allocator has to start before journal replay is finished - thus,
- * we have to trust the in memory bucket @m, not the version in the
- * btree:
- */
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
- m = READ_ONCE(g->mark);
- u = alloc_mem_to_key(g, m);
- percpu_up_read(&c->mark_lock);
+ goto out;
- invalidating_cached_data = m.cached_sectors != 0;
-
- u.gen++;
- u.data_type = 0;
- u.dirty_sectors = 0;
- u.cached_sectors = 0;
- u.read_time = c->bucket_clock[READ].hand;
- u.write_time = c->bucket_clock[WRITE].hand;
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
- a = bkey_alloc_init(&alloc_key.k);
- a->k.p = iter->pos;
- bch2_alloc_pack(a, u);
+ if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+ a->v.gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ goto write;
+ }
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+ if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
+ "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+ "%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ goto out;
+ }
- /*
- * XXX:
- * when using deferred btree updates, we have journal reclaim doing
- * btree updates and thus requiring the allocator to make forward
- * progress, and here the allocator is requiring space in the journal -
- * so we need a journal pre-reservation:
- */
- ret = bch2_trans_commit(trans, NULL,
- invalidating_cached_data ? journal_seq : NULL,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOUNLOCK|
- BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_USE_ALLOC_RESERVE|
- BTREE_INSERT_BUCKET_INVALIDATE|
- flags);
- if (ret == -EINTR)
- goto retry;
-
- if (!ret) {
- /* remove from alloc_heap: */
- struct alloc_heap_entry e, *top = ca->alloc_heap.data;
-
- top->bucket++;
- top->nr--;
-
- if (!top->nr)
- heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
-
- /* with btree still locked: */
- if (ca->buckets_written)
- set_bit(b, ca->buckets_written);
+ if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
+ "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = -EIO;
+ goto out;
+ }
+ if (bkey_cmp(*discard_pos_done, iter.pos) &&
+ ca->mi.discard && !c->opts.nochanges) {
/*
- * Make sure we flush the last journal entry that updated this
- * bucket (i.e. deleting the last reference) before writing to
- * this bucket again:
+ * This works without any other locks because this is the only
+ * thread that removes items from the need_discard tree
*/
- *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
- } else {
- size_t b2;
+ bch2_trans_unlock(trans);
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ k.k->p.offset * ca->mi.bucket_size,
+ ca->mi.bucket_size,
+ GFP_KERNEL);
- /* remove from free_inc: */
- percpu_down_read(&c->mark_lock);
- spin_lock(&c->freelist_lock);
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ goto out;
+ }
- bch2_mark_alloc_bucket(c, ca, b, false,
- gc_pos_alloc(c, NULL), 0);
+ *discard_pos_done = iter.pos;
+ did_discard = true;
- BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
- BUG_ON(b != b2);
+ SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
+ a->v.data_type = alloc_data_type(a->v, a->v.data_type);
+write:
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto out;
- spin_unlock(&c->freelist_lock);
- percpu_up_read(&c->mark_lock);
+ if (did_discard) {
+ this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
+ (*discarded)++;
}
-
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ percpu_ref_put(&ca->io_ref);
+ printbuf_exit(&buf);
return ret;
}
-static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t bucket, u64 *flush_seq)
+static void bch2_do_discards_work(struct work_struct *work)
{
- struct bucket_mark m;
-
- percpu_down_read(&c->mark_lock);
- spin_lock(&c->freelist_lock);
+ struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
+ struct bpos discard_pos_done = POS_MAX;
+ int ret;
- bch2_invalidate_bucket(c, ca, bucket, &m);
+ bch2_trans_init(&trans, c, 0, 0);
- verify_not_on_freelist(c, ca, bucket);
- BUG_ON(!fifo_push(&ca->free_inc, bucket));
+ /*
+ * We're doing the commit in bch2_discard_one_bucket instead of using
+ * for_each_btree_key_commit() so that we can increment counters after
+ * successful commit:
+ */
+ ret = for_each_btree_key2(&trans, iter,
+ BTREE_ID_need_discard, POS_MIN, 0, k,
+ bch2_discard_one_bucket(&trans, &iter, &discard_pos_done,
+ &seen,
+ &open,
+ &need_journal_commit,
+ &discarded));
- spin_unlock(&c->freelist_lock);
+ bch2_trans_exit(&trans);
- bucket_io_clock_reset(c, ca, bucket, READ);
- bucket_io_clock_reset(c, ca, bucket, WRITE);
+ if (need_journal_commit * 2 > seen)
+ bch2_journal_flush_async(&c->journal, NULL);
- percpu_up_read(&c->mark_lock);
+ percpu_ref_put(&c->writes);
- *flush_seq = max(*flush_seq, bucket_journal_seq(c, m));
+ trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
+ bch2_err_str(ret));
+}
- return m.cached_sectors != 0;
+void bch2_do_discards(struct bch_fs *c)
+{
+ if (percpu_ref_tryget_live(&c->writes) &&
+ !queue_work(system_long_wq, &c->discard_work))
+ percpu_ref_put(&c->writes);
}
-/*
- * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
- */
-static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
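+/*
+ * Invalidate the cached bucket this lru entry points to: bump the gen and
+ * clear the bucket's counters so it can be reused:
+ */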
+static int invalidate_one_bucket(struct btree_trans *trans,
+ struct btree_iter *lru_iter, struct bkey_s_c k,
+ unsigned dev_idx, s64 *nr_to_invalidate)
{
- struct btree_trans trans;
- struct btree_iter *iter;
- u64 journal_seq = 0;
+ struct bch_fs *c = trans->c;
+ struct btree_iter alloc_iter = { NULL };
+ struct bkey_i_alloc_v4 *a;
+ struct bpos bucket;
+ struct printbuf buf = PRINTBUF;
+ unsigned cached_sectors;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
- POS(ca->dev_idx, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)
+ return 1;
- /* Only use nowait if we've already invalidated at least one bucket: */
- while (!ret &&
- !fifo_full(&ca->free_inc) &&
- ca->alloc_heap.used)
- ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
- BTREE_INSERT_GC_LOCK_HELD|
- (!fifo_empty(&ca->free_inc)
- ? BTREE_INSERT_NOWAIT : 0));
+ if (k.k->type != KEY_TYPE_lru) {
+ prt_printf(&buf, "non lru key in lru btree:\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
- bch2_trans_exit(&trans);
+ if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
+ bch_err(c, "%s", buf.buf);
+ } else {
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ret = -EINVAL;
+ }
- /* If we used NOWAIT, don't return the error: */
- if (!fifo_empty(&ca->free_inc))
- ret = 0;
- if (ret) {
- bch_err(ca, "error invalidating buckets: %i", ret);
- return ret;
+ goto out;
}
- if (journal_seq)
- ret = bch2_journal_flush_seq(&c->journal, journal_seq);
- if (ret) {
- bch_err(ca, "journal error: %i", ret);
- return ret;
+ bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));
+
+ a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
+
+ if (k.k->p.offset != alloc_lru_idx(a->v)) {
+ prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+
+ if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
+ bch_err(c, "%s", buf.buf);
+ } else {
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ret = -EINVAL;
+ }
+
+ goto out;
}
- return 0;
+ if (!a->v.cached_sectors)
+ bch_err(c, "invalidating empty bucket, confused");
+
+ cached_sectors = a->v.cached_sectors;
+
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ a->v.gen++;
+ a->v.data_type = 0;
+ a->v.dirty_sectors = 0;
+ a->v.cached_sectors = 0;
+ a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
+
+ ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
+ BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto out;
+
+ trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
+ --*nr_to_invalidate;
+out:
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ printbuf_exit(&buf);
+ return ret;
}
-static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
+static void bch2_do_invalidates_work(struct work_struct *work)
{
+ struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
+ struct bch_dev *ca;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
unsigned i;
int ret = 0;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock(&c->freelist_lock);
- for (i = 0; i < RESERVE_NR; i++)
- if (fifo_push(&ca->free[i], bucket)) {
- fifo_pop(&ca->free_inc, bucket);
-
- closure_wake_up(&c->freelist_wait);
- ca->allocator_state = ALLOCATOR_RUNNING;
-
- spin_unlock(&c->freelist_lock);
- goto out;
- }
+ bch2_trans_init(&trans, c, 0, 0);
- if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
- ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
- closure_wake_up(&c->freelist_wait);
- }
+ for_each_member_device(ca, c, i) {
+ s64 nr_to_invalidate =
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
- spin_unlock(&c->freelist_lock);
+ ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
+ POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
+ invalidate_one_bucket(&trans, &iter, k, ca->dev_idx, &nr_to_invalidate));
- if ((current->flags & PF_KTHREAD) &&
- kthread_should_stop()) {
- ret = 1;
+ if (ret < 0) {
+ percpu_ref_put(&ca->ref);
break;
}
-
- schedule();
- try_to_freeze();
}
-out:
- __set_current_state(TASK_RUNNING);
- return ret;
+
+ bch2_trans_exit(&trans);
+ percpu_ref_put(&c->writes);
}
-/*
- * Pulls buckets off free_inc, discards them (if enabled), then adds them to
- * freelists, waiting until there's room if necessary:
- */
-static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
+void bch2_do_invalidates(struct bch_fs *c)
{
- while (!fifo_empty(&ca->free_inc)) {
- size_t bucket = fifo_peek(&ca->free_inc);
+ if (percpu_ref_tryget_live(&c->writes) &&
+ !queue_work(system_long_wq, &c->invalidate_work))
+ percpu_ref_put(&c->writes);
+}
- if (ca->mi.discard &&
- blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bucket),
- ca->mi.bucket_size, GFP_NOIO, 0);
+static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k, struct bch_dev *ca)
+{
+ struct bch_alloc_v4 a;
- if (push_invalidated_bucket(c, ca, bucket))
- return 1;
- }
+ if (iter->pos.offset >= ca->mi.nbuckets)
+ return 1;
- return 0;
+ bch2_alloc_to_v4(k, &a);
+ return bch2_bucket_do_index(trans, k, &a, true);
}
-/**
- * bch_allocator_thread - move buckets from free_inc to reserves
- *
- * The free_inc FIFO is populated by find_reclaimable_buckets(), and
- * the reserves are depleted by bucket allocation. When we run out
- * of free_inc, try to invalidate some buckets and write out
- * prios and gens.
- */
-static int bch2_allocator_thread(void *arg)
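+/*
+ * Walk a device's alloc keys to populate the freespace/need_discard btrees,
+ * then mark the device done in the superblock member info:
+ */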
+static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
{
- struct bch_dev *ca = arg;
- struct bch_fs *c = ca->fs;
- size_t nr;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_member *m;
int ret;
- set_freezable();
- ca->allocator_state = ALLOCATOR_RUNNING;
+ bch2_trans_init(&trans, c, 0, 0);
- while (1) {
- cond_resched();
+ ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW,
+ bucket_freespace_init(&trans, &iter, k, ca));
- pr_debug("discarding %zu invalidated buckets",
- fifo_used(&ca->free_inc));
+ bch2_trans_exit(&trans);
- ret = discard_invalidated_buckets(c, ca);
- if (ret)
- goto stop;
+ if (ret < 0) {
+ bch_err(ca, "error initializing free space: %s", bch2_err_str(ret));
+ return ret;
+ }
- down_read(&c->gc_lock);
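+	/*
+	 * Flag the device so this isn't redone on every mount; the superblock
+	 * itself is written out by the caller:
+	 */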
+ mutex_lock(&c->sb_lock);
+ m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
+ SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
+ mutex_unlock(&c->sb_lock);
- ret = bch2_invalidate_buckets(c, ca);
- if (ret) {
- up_read(&c->gc_lock);
- goto stop;
- }
+ return 0;
+}
- if (!fifo_empty(&ca->free_inc)) {
- up_read(&c->gc_lock);
- continue;
- }
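+/* Initialize the freespace btrees for any devices that don't yet have them: */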
+int bch2_fs_freespace_init(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret = 0;
+ bool doing_init = false;
+
+ /*
+ * We can crash during the device add path, so we need to check this on
+ * every mount:
+ */
- pr_debug("free_inc now empty");
+ for_each_member_device(ca, c, i) {
+ if (ca->mi.freespace_initialized)
+ continue;
- do {
- /*
- * Find some buckets that we can invalidate, either
- * they're completely unused, or only contain clean data
- * that's been written back to the backing device or
- * another cache tier
- */
+ if (!doing_init) {
+ bch_info(c, "initializing freespace");
+ doing_init = true;
+ }
- pr_debug("scanning for reclaimable buckets");
+ ret = bch2_dev_freespace_init(c, ca);
+ if (ret) {
+ percpu_ref_put(&ca->ref);
+ return ret;
+ }
+ }
- nr = find_reclaimable_buckets(c, ca);
+ if (doing_init) {
+ mutex_lock(&c->sb_lock);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
- pr_debug("found %zu buckets", nr);
+ bch_verbose(c, "done initializing freespace");
+ }
- trace_alloc_batch(ca, nr, ca->alloc_heap.size);
+ return ret;
+}
- if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
- ca->inc_gen_really_needs_gc) &&
- c->gc_thread) {
- atomic_inc(&c->kick_gc);
- wake_up_process(c->gc_thread);
- }
+/* Bucket IO clocks: */
- /*
- * If we found any buckets, we have to invalidate them
- * before we scan for more - but if we didn't find very
- * many we may want to wait on more buckets being
- * available so we don't spin:
- */
- if (!nr ||
- (nr < ALLOC_SCAN_BATCH(ca) &&
- !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
- ret = wait_buckets_available(c, ca);
- if (ret) {
- up_read(&c->gc_lock);
- goto stop;
- }
- }
- } while (!nr);
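+/*
+ * Reset a bucket's io_time for the given clock (READ or WRITE) to the current
+ * IO clock, committing the update to the alloc btree:
+ */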
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+ size_t bucket_nr, int rw)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ u64 now;
+ int ret = 0;
- up_read(&c->gc_lock);
+ a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
- pr_debug("%zu buckets to invalidate", nr);
+ now = atomic64_read(&c->io_clock[rw].now);
+ if (a->v.io_time[rw] == now)
+ goto out;
- /*
- * alloc_heap is now full of newly-invalidated buckets: next,
- * write out the new bucket gens:
- */
- }
+ a->v.io_time[rw] = now;
-stop:
- pr_debug("alloc thread stopping (ret %i)", ret);
- ca->allocator_state = ALLOCATOR_STOPPED;
- closure_wake_up(&c->freelist_wait);
- return 0;
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
}
/* Startup/shutdown (ro/rw): */
u64 capacity = 0, reserved_sectors = 0, gc_reserve;
unsigned bucket_size_max = 0;
unsigned long ra_pages = 0;
- unsigned i, j;
+ unsigned i;
lockdep_assert_held(&c->state_lock);
for_each_online_member(ca, c, i) {
- struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
+ struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
ra_pages += bdi->ra_pages;
}
* allocations for foreground writes must wait -
* not -ENOSPC calculations.
*/
- for (j = 0; j < RESERVE_NONE; j++)
- dev_reserve += ca->free[j].size;
+
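+		/* buckets reserved for btree node allocations: */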
+ dev_reserve += ca->nr_btree_reserve * 2;
+ dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
dev_reserve += 1; /* btree write point */
dev_reserve += 1; /* copygc write point */
dev_reserve *= ca->mi.bucket_size;
- ca->copygc_threshold = dev_reserve;
-
capacity += bucket_to_sector(ca, ca->mi.nbuckets -
ca->mi.first_bucket);
c->bucket_size_max = bucket_size_max;
- if (c->capacity) {
- bch2_io_timer_add(&c->io_clock[READ],
- &c->bucket_clock[READ].rescale);
- bch2_io_timer_add(&c->io_clock[WRITE],
- &c->bucket_clock[WRITE].rescale);
- } else {
- bch2_io_timer_del(&c->io_clock[READ],
- &c->bucket_clock[READ].rescale);
- bch2_io_timer_del(&c->io_clock[WRITE],
- &c->bucket_clock[WRITE].rescale);
- }
-
	/* Wake up in case someone was waiting for buckets */
closure_wake_up(&c->freelist_wait);
}
ob++) {
spin_lock(&ob->lock);
if (ob->valid && !ob->on_partial_list &&
- ob->ptr.dev == ca->dev_idx)
+ ob->dev == ca->dev_idx)
ret = true;
spin_unlock(&ob->lock);
}
{
unsigned i;
- BUG_ON(ca->alloc_thread);
-
/* First, remove device from allocation groups: */
for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
bch2_writepoint_stop(c, ca, &c->write_points[i]);
- bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+ bch2_writepoint_stop(c, ca, &c->copygc_write_point);
bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
bch2_writepoint_stop(c, ca, &c->btree_write_point);
set_bit(ca->dev_idx, c->rw_devs[i].d);
}
-void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
-{
- if (ca->alloc_thread)
- closure_wait_event(&c->freelist_wait,
- ca->allocator_state != ALLOCATOR_RUNNING);
-}
-
-/* stop allocator thread: */
-void bch2_dev_allocator_stop(struct bch_dev *ca)
-{
- struct task_struct *p;
-
- p = rcu_dereference_protected(ca->alloc_thread, 1);
- ca->alloc_thread = NULL;
-
- /*
- * We need an rcu barrier between setting ca->alloc_thread = NULL and
- * the thread shutting down to avoid bch2_wake_allocator() racing:
- *
- * XXX: it would be better to have the rcu barrier be asynchronous
- * instead of blocking us here
- */
- synchronize_rcu();
-
- if (p) {
- kthread_stop(p);
- put_task_struct(p);
- }
-}
-
-/* start allocator thread: */
-int bch2_dev_allocator_start(struct bch_dev *ca)
-{
- struct task_struct *p;
-
- /*
- * allocator thread already started?
- */
- if (ca->alloc_thread)
- return 0;
-
- p = kthread_create(bch2_allocator_thread, ca,
- "bch_alloc[%s]", ca->name);
- if (IS_ERR(p))
- return PTR_ERR(p);
-
- get_task_struct(p);
- rcu_assign_pointer(ca->alloc_thread, p);
- wake_up_process(p);
- return 0;
-}
-
-static bool flush_held_btree_writes(struct bch_fs *c)
-{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- bool nodes_unwritten;
- size_t i;
-again:
- cond_resched();
- nodes_unwritten = false;
-
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos)
- if (btree_node_need_write(b)) {
- if (btree_node_may_write(b)) {
- rcu_read_unlock();
- btree_node_lock_type(c, b, SIX_LOCK_read);
- bch2_btree_node_write(c, b, SIX_LOCK_read);
- six_unlock_read(&b->lock);
- goto again;
- } else {
- nodes_unwritten = true;
- }
- }
- rcu_read_unlock();
-
- if (c->btree_roots_dirty) {
- bch2_journal_meta(&c->journal);
- goto again;
- }
-
- return !nodes_unwritten &&
- !bch2_btree_interior_updates_nr_pending(c);
-}
-
-static void allocator_start_issue_discards(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- size_t bu;
-
- for_each_rw_member(ca, c, dev_iter)
- while (fifo_pop(&ca->free_inc, bu))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bu),
- ca->mi.bucket_size, GFP_NOIO, 0);
-}
-
-static int resize_free_inc(struct bch_dev *ca)
-{
- alloc_fifo free_inc;
-
- if (!fifo_full(&ca->free_inc))
- return 0;
-
- if (!init_fifo(&free_inc,
- ca->free_inc.size * 2,
- GFP_KERNEL))
- return -ENOMEM;
-
- fifo_move(&free_inc, &ca->free_inc);
- swap(free_inc, ca->free_inc);
- free_fifo(&free_inc);
- return 0;
-}
-
-static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- bool ret = true;
-
- if (test_alloc_startup(c))
- return false;
-
- down_read(&c->gc_lock);
-
- /* Scan for buckets that are already invalidated: */
- for_each_rw_member(ca, c, dev_iter) {
- struct bucket_array *buckets;
- struct bucket_mark m;
- long bu;
-
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for (bu = buckets->first_bucket;
- bu < buckets->nbuckets; bu++) {
- m = READ_ONCE(buckets->b[bu].mark);
-
- if (!buckets->b[bu].gen_valid ||
- !is_available_bucket(m) ||
- m.cached_sectors ||
- (ca->buckets_nouse &&
- test_bit(bu, ca->buckets_nouse)))
- continue;
-
- percpu_down_read(&c->mark_lock);
- bch2_mark_alloc_bucket(c, ca, bu, true,
- gc_pos_alloc(c, NULL), 0);
- percpu_up_read(&c->mark_lock);
-
- fifo_push(&ca->free_inc, bu);
-
- discard_invalidated_buckets(c, ca);
-
- if (fifo_full(&ca->free[RESERVE_BTREE]))
- break;
- }
- up_read(&ca->bucket_lock);
- }
-
- up_read(&c->gc_lock);
-
- /* did we find enough buckets? */
- for_each_rw_member(ca, c, dev_iter)
- if (!fifo_full(&ca->free[RESERVE_BTREE]))
- ret = false;
-
- return ret;
-}
-
-int bch2_fs_allocator_start(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- u64 journal_seq = 0;
- bool wrote;
- long bu;
- int ret = 0;
-
- if (!test_alloc_startup(c) &&
- bch2_fs_allocator_start_fast(c))
- return 0;
-
- pr_debug("not enough empty buckets; scanning for reclaimable buckets");
-
- /*
- * We're moving buckets to freelists _before_ they've been marked as
- * invalidated on disk - we have to so that we can allocate new btree
- * nodes to mark them as invalidated on disk.
- *
- * However, we can't _write_ to any of these buckets yet - they might
- * have cached data in them, which is live until they're marked as
- * invalidated on disk:
- */
- set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
-
- down_read(&c->gc_lock);
- do {
- wrote = false;
-
- for_each_rw_member(ca, c, dev_iter) {
- find_reclaimable_buckets(c, ca);
-
- while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
- (bu = next_alloc_bucket(ca)) >= 0) {
- ret = resize_free_inc(ca);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- up_read(&c->gc_lock);
- goto err;
- }
-
- bch2_invalidate_one_bucket(c, ca, bu,
- &journal_seq);
-
- fifo_push(&ca->free[RESERVE_BTREE], bu);
- }
- }
-
- pr_debug("done scanning for reclaimable buckets");
-
- /*
- * XXX: it's possible for this to deadlock waiting on journal reclaim,
- * since we're holding btree writes. What then?
- */
- ret = bch2_alloc_write(c,
- BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_USE_ALLOC_RESERVE|
- BTREE_INSERT_NOWAIT, &wrote);
-
- /*
- * If bch2_alloc_write() did anything, it may have used some
- * buckets, and we need the RESERVE_BTREE freelist full - so we
- * need to loop and scan again.
- * And if it errored, it may have been because there weren't
- * enough buckets, so just scan and loop again as long as it
- * made some progress:
- */
- } while (wrote);
- up_read(&c->gc_lock);
-
- if (ret)
- goto err;
-
- pr_debug("flushing journal");
-
- ret = bch2_journal_flush(&c->journal);
- if (ret)
- goto err;
-
- pr_debug("issuing discards");
- allocator_start_issue_discards(c);
-err:
- clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
- closure_wait_event(&c->btree_interior_update_wait,
- flush_held_btree_writes(c));
-
- return ret;
-}
-
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
spin_lock_init(&c->freelist_lock);
- bch2_bucket_clock_init(c, READ);
- bch2_bucket_clock_init(c, WRITE);
-
- c->pd_controllers_update_seconds = 5;
- INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
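+	/* discards and invalidates now run out of workqueues, not allocator threads: */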
+ INIT_WORK(&c->discard_work, bch2_do_discards_work);
+ INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}