#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
}
int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
prt_printf(err, "incorrect value size (%zu < %u)",
bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
return 0;
}
int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_alloc_unpacked u;
if (bch2_alloc_unpack_v2(&u, k)) {
prt_printf(err, "unpack error");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
return 0;
}
int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_alloc_unpacked u;
if (bch2_alloc_unpack_v3(&u, k)) {
prt_printf(err, "unpack error");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
return 0;
}
int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+ int rw = flags & WRITE;
if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
prt_printf(err, "bad val size (%lu != %u)",
bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
prt_printf(err, "invalid backpointers_start");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
+ }
+
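+	/*
+	 * Once fsck's backpointers pass (BCH_FS_CHECK_BACKPOINTERS_DONE) has
+	 * completed, the backpointers stored in an alloc key may not account
+	 * for more sectors than the bucket's dirty_sectors:
+	 */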
+ if (rw == WRITE &&
+ !(flags & BKEY_INVALID_FROM_JOURNAL) &&
+ test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ unsigned i, bp_len = 0;
+
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
+ bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;
+
+ if (bp_len > a.v->dirty_sectors) {
+ prt_printf(err, "too many backpointers");
+ return -BCH_ERR_invalid_bkey;
+ }
}
if (rw == WRITE) {
if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
prt_printf(err, "invalid data type (got %u should be %u)",
a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
switch (a.v->data_type) {
a.v->cached_sectors ||
a.v->stripe) {
prt_printf(err, "empty data type free but have data");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
break;
case BCH_DATA_sb:
if (!a.v->dirty_sectors) {
prt_printf(err, "data_type %s but dirty_sectors==0",
bch2_data_types[a.v->data_type]);
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
break;
case BCH_DATA_cached:
a.v->dirty_sectors ||
a.v->stripe) {
prt_printf(err, "data type inconsistency");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
if (!a.v->io_time[READ] &&
test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
prt_printf(err, "cached bucket with read_time == 0");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
break;
case BCH_DATA_stripe:
if (!a.v->stripe) {
prt_printf(err, "data_type %s but stripe==0",
bch2_data_types[a.v->data_type]);
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
break;
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a = &_a;
- const struct bch_backpointer *bps;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
unsigned i;
- if (k.k->type == KEY_TYPE_alloc_v4)
- a = bkey_s_c_to_alloc_v4(k).v;
- else
- bch2_alloc_to_v4(k, &_a);
-
prt_newline(out);
printbuf_indent_add(out, 2);
prt_printf(out, "gen %u oldest_gen %u data_type %s",
- a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
+ a->gen, a->oldest_gen,
+ a->data_type < BCH_DATA_NR
+ ? bch2_data_types[a->data_type]
+ : "(invalid data type)");
prt_newline(out);
prt_printf(out, "journal_seq %llu", a->journal_seq);
prt_newline(out);
prt_newline(out);
prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
prt_newline(out);
- prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
- printbuf_indent_add(out, 2);
+ prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
+ prt_newline(out);
+ prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+ prt_newline(out);
+
+ if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
+ struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
+ const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
+
+ prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
+ printbuf_indent_add(out, 2);
+
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
+ prt_newline(out);
+ bch2_backpointer_to_text(out, &bps[i]);
+ }
- bps = alloc_v4_backpointers_c(a);
- for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
- prt_newline(out);
- bch2_backpointer_to_text(out, &bps[i]);
+ printbuf_indent_sub(out, 2);
}
- printbuf_indent_sub(out, 4);
+ printbuf_indent_sub(out, 2);
}
-void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
if (k.k->type == KEY_TYPE_alloc_v4) {
- int d;
+ void *src, *dst;
*out = *bkey_s_c_to_alloc_v4(k).v;
- d = (int) BCH_ALLOC_V4_U64s -
- (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
- if (unlikely(d > 0)) {
- memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
- 0,
- d * sizeof(u64));
- SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
- }
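+		/*
+		 * Move backpointers_start up to the current BCH_ALLOC_V4_U64s,
+		 * zeroing out any fields that were added after this key was
+		 * written:
+		 */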
+ src = alloc_v4_backpointers(out);
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ dst = alloc_v4_backpointers(out);
+
+ if (src < dst)
+ memset(src, 0, dst - src);
} else {
struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
struct bkey_i_alloc_v4 *ret;
- unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
- ? bkey_bytes(k.k)
- : sizeof(struct bkey_i_alloc_v4);
-
- /*
- * Reserve space for one more backpointer here:
- * Not sketchy at doing it this way, nope...
- */
- ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
- if (IS_ERR(ret))
- return ret;
-
if (k.k->type == KEY_TYPE_alloc_v4) {
- struct bch_backpointer *src, *dst;
+ struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+ unsigned bytes = sizeof(struct bkey_i_alloc_v4) +
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) *
+ sizeof(struct bch_backpointer);
+ void *src, *dst;
+
+ /*
+ * Reserve space for one more backpointer here:
+ * Not sketchy at doing it this way, nope...
+ */
+ ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
+ if (IS_ERR(ret))
+ return ret;
bkey_reassemble(&ret->k_i, k);
memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
sizeof(struct bch_backpointer));
- memset(src, 0, dst - src);
+ if (src < dst)
+ memset(src, 0, dst - src);
set_alloc_v4_u64s(ret);
} else {
+ ret = bch2_trans_kmalloc(trans, sizeof(struct bkey_i_alloc_v4) +
+ sizeof(struct bch_backpointer));
+ if (IS_ERR(ret))
+ return ret;
+
bkey_alloc_v4_init(&ret->k_i);
ret->k.p = k.k->p;
bch2_alloc_to_v4(k, &ret->v);
* Not sketchy at doing it this way, nope...
*/
struct bkey_i_alloc_v4 *ret =
- bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
+ bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
if (!IS_ERR(ret))
bkey_reassemble(&ret->k_i, k);
return ret;
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
- if (ret) {
- bch2_trans_iter_exit(trans, iter);
- return ERR_PTR(ret);
- }
+ if (unlikely(ret))
+ goto err;
a = bch2_alloc_to_v4_mut_inlined(trans, k);
- if (IS_ERR(a))
- bch2_trans_iter_exit(trans, iter);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (unlikely(ret))
+ goto err;
return a;
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return ERR_PTR(ret);
}
int bch2_alloc_read(struct bch_fs *c)
continue;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
- bch2_alloc_to_v4(k, &a);
- *bucket_gen(ca, k.k->p.offset) = a.gen;
+ *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
+ }
+ bch2_trans_iter_exit(&trans, &iter);
+
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
+
+ return ret;
+}
+
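+/*
+ * Each bucket_gens key holds the gens for KEY_TYPE_BUCKET_GENS_NR consecutive
+ * buckets: the low KEY_TYPE_BUCKET_GENS_BITS bits of a bucket's offset index
+ * into the gens array, the remaining bits give the bucket_gens key's position.
+ * E.g. with KEY_TYPE_BUCKET_GENS_BITS == 8, bucket 5:1000 maps to bucket_gens
+ * pos 5:3 (1000 >> 8), offset 232 (1000 & 255):
+ */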
+static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
+{
+ *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
+
+ pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
+ return pos;
+}
+
+static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
+{
+ pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
+ pos.offset += offset;
+ return pos;
+}
+
+static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
+{
+ return k.k->type == KEY_TYPE_bucket_gens
+ ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
+ : 0;
+}
+
+int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ unsigned flags, struct printbuf *err)
+{
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
+ prt_printf(err, "bad val size (%lu != %zu)",
+ bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
+ if (i)
+ prt_char(out, ' ');
+ prt_printf(out, "%u", g.v->gens[i]);
+ }
+}
+
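+/*
+ * Walk the alloc btree and build the initial bucket_gens btree, packing the
+ * gens of consecutive buckets into one bucket_gens key and committing each key
+ * once we've moved past it:
+ */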
+int bch2_bucket_gens_init(struct bch_fs *c)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 a;
+ struct bkey_i_bucket_gens g;
+ bool have_bucket_gens_key = false;
+ unsigned offset;
+ struct bpos pos;
+ u8 gen;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_bucket_exists(c, k.k->p))
+ continue;
+
+ gen = bch2_alloc_to_v4(k, &a)->gen;
+ pos = alloc_gens_pos(iter.pos, &offset);
+
+		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
+ ret = commit_do(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+ if (ret)
+ break;
+ have_bucket_gens_key = false;
+ }
+
+ if (!have_bucket_gens_key) {
+ bkey_bucket_gens_init(&g.k_i);
+ g.k.p = pos;
+ have_bucket_gens_key = true;
+ }
+
+ g.v.gens[offset] = gen;
+ }
+ bch2_trans_iter_exit(&trans, &iter);
+
+ if (have_bucket_gens_key && !ret)
+ ret = commit_do(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
+
+ return ret;
+}
+
+int bch2_bucket_gens_read(struct bch_fs *c)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ const struct bch_bucket_gens *g;
+ struct bch_dev *ca;
+ u64 b;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+ u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+
+ if (k.k->type != KEY_TYPE_bucket_gens)
+ continue;
+
+ g = bkey_s_c_to_bucket_gens(k).v;
+
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_exists2(c, k.k->p.inode))
+ continue;
+
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+ for (b = max_t(u64, ca->mi.first_bucket, start);
+ b < min_t(u64, ca->mi.nbuckets, end);
+ b++)
+ *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
}
bch2_trans_iter_exit(&trans, &iter);
a->data_type != BCH_DATA_need_discard)
return 0;
- k = bch2_trans_kmalloc(trans, sizeof(*k));
+ k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
if (IS_ERR(k))
return PTR_ERR(k);
goto err;
if (ca->mi.freespace_initialized &&
+ test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
bch2_trans_inconsistent_on(old.k->type != old_type, trans,
"incorrect key when %s %s btree (got %s should be %s)\n"
" for %s",
return ret;
}
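+/*
+ * Keep the bucket_gens btree in sync when a bucket's gen changes: read the
+ * bucket_gens key covering this bucket (creating it if absent) and update the
+ * one gen byte:
+ */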
+static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
+ struct bpos bucket, u8 gen)
+{
+ struct btree_iter iter;
+ unsigned offset;
+ struct bpos pos = alloc_gens_pos(bucket, &offset);
+ struct bkey_i_bucket_gens *g;
+ struct bkey_s_c k;
+ int ret;
+
+ g = bch2_trans_kmalloc(trans, sizeof(*g));
+ ret = PTR_ERR_OR_ZERO(g);
+ if (ret)
+ return ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_bucket_gens, pos,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_WITH_UPDATES);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_bucket_gens) {
+ bkey_bucket_gens_init(&g->k_i);
+ g->k.p = iter.pos;
+ } else {
+ bkey_reassemble(&g->k_i, k);
+ }
+
+ g->v.gens[offset] = gen;
+
+ ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
int bch2_trans_mark_alloc(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bch_alloc_v4 old_a, *new_a;
+ struct bch_alloc_v4 old_a_convert, *new_a;
+ const struct bch_alloc_v4 *old_a;
u64 old_lru, new_lru;
int ret = 0;
*/
BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
- bch2_alloc_to_v4(old, &old_a);
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
new_a = &bkey_i_to_alloc_v4(new)->v;
new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
- if (new_a->dirty_sectors > old_a.dirty_sectors ||
- new_a->cached_sectors > old_a.cached_sectors) {
+ if (new_a->dirty_sectors > old_a->dirty_sectors ||
+ new_a->cached_sectors > old_a->cached_sectors) {
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
	new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
}
- if (old_a.data_type != new_a->data_type ||
+ if (old_a->data_type != new_a->data_type ||
(new_a->data_type == BCH_DATA_free &&
- alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
- ret = bch2_bucket_do_index(trans, old, &old_a, false) ?:
+ alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+ ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
if (ret)
return ret;
!new_a->io_time[READ])
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- old_lru = alloc_lru_idx(old_a);
- new_lru = alloc_lru_idx(*new_a);
+ old_lru = alloc_lru_idx_read(*old_a);
+ new_lru = alloc_lru_idx_read(*new_a);
if (old_lru != new_lru) {
- ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
- old_lru, &new_lru, old);
+ ret = bch2_lru_change(trans, new->k.p.inode,
+ bucket_to_u64(new->k.p),
+ old_lru, new_lru);
if (ret)
return ret;
+ }
+
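+	/*
+	 * The fragmentation LRU lives under the reserved index
+	 * BCH_LRU_FRAGMENTATION_START, keyed by bucket:
+	 */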
+ new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+ bch_dev_bkey_exists(c, new->k.p.inode));
- if (new_a->data_type == BCH_DATA_cached)
- new_a->io_time[READ] = new_lru;
+ if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+ ret = bch2_lru_change(trans,
+ BCH_LRU_FRAGMENTATION_START,
+ bucket_to_u64(new->k.p),
+ old_a->fragmentation_lru, new_a->fragmentation_lru);
+ if (ret)
+ return ret;
+ }
+
+ if (old_a->gen != new_a->gen) {
+ ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
+ if (ret)
+ return ret;
}
return 0;
}
+/*
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * extents style btrees, but works on non-extents btrees:
+ */
+struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+{
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+ if (bkey_err(k))
+ return k;
+
+ if (k.k->type) {
+ return k;
+ } else {
+ struct btree_iter iter2;
+ struct bpos next;
+
+ bch2_trans_copy_iter(&iter2, iter);
+ k = bch2_btree_iter_peek_upto(&iter2,
+ bkey_min(bkey_min(end,
+ iter->path->l[0].b->key.k.p),
+ POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1)));
+ next = iter2.pos;
+ bch2_trans_iter_exit(iter->trans, &iter2);
+
+ BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
+
+ if (bkey_err(k))
+ return k;
+
+ bkey_init(hole);
+ hole->p = iter->pos;
+
+ bch2_key_resize(hole, next.offset - iter->pos.offset);
+ return (struct bkey_s_c) { hole, NULL };
+ }
+}
+
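+/*
+ * Round @bucket up to the next position that's a live bucket on some device;
+ * returns false when we've run out of devices:
+ */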
+static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
+{
+ struct bch_dev *ca;
+ unsigned iter;
+
+ if (bch2_dev_bucket_exists(c, *bucket))
+ return true;
+
+ if (bch2_dev_exists2(c, bucket->inode)) {
+ ca = bch_dev_bkey_exists(c, bucket->inode);
+
+ if (bucket->offset < ca->mi.first_bucket) {
+ bucket->offset = ca->mi.first_bucket;
+ return true;
+ }
+
+ bucket->inode++;
+ bucket->offset = 0;
+ }
+
+ rcu_read_lock();
+ iter = bucket->inode;
+ ca = __bch2_next_dev(c, &iter, NULL);
+ if (ca)
+ *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
+ rcu_read_unlock();
+
+ return ca != NULL;
+}
+
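+/*
+ * Like bch2_get_key_or_hole(), but holes are clamped to ranges of buckets that
+ * actually exist, skipping over gaps between devices:
+ */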
+struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+{
+ struct bch_fs *c = iter->trans->c;
+ struct bkey_s_c k;
+again:
+ k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+ if (bkey_err(k))
+ return k;
+
+ if (!k.k->type) {
+ struct bpos bucket = bkey_start_pos(k.k);
+
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ if (!next_bucket(c, &bucket))
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter, bucket);
+ goto again;
+ }
+
+ if (!bch2_dev_bucket_exists(c, k.k->p)) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+
+ bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
+ }
+ }
+
+ return k;
+}
+
static int bch2_check_alloc_key(struct btree_trans *trans,
+ struct bkey_s_c alloc_k,
struct btree_iter *alloc_iter,
struct btree_iter *discard_iter,
- struct btree_iter *freespace_iter)
+ struct btree_iter *freespace_iter,
+ struct btree_iter *bucket_gens_iter)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
unsigned discard_key_type, freespace_key_type;
- struct bkey_s_c alloc_k, k;
+ unsigned gens_offset;
+ struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
int ret;
- alloc_k = bch2_dev_bucket_exists(c, alloc_iter->pos)
- ? bch2_btree_iter_peek_slot(alloc_iter)
- : bch2_btree_iter_peek(alloc_iter);
- if (!alloc_k.k)
- return 1;
-
- ret = bkey_err(alloc_k);
- if (ret)
- return ret;
-
if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
"alloc key for invalid device:bucket %llu:%llu",
alloc_k.k->p.inode, alloc_k.k->p.offset))
if (!ca->mi.freespace_initialized)
return 0;
- bch2_alloc_to_v4(alloc_k, &a);
-
- discard_key_type = a.data_type == BCH_DATA_need_discard
- ? KEY_TYPE_set : 0;
- freespace_key_type = a.data_type == BCH_DATA_free
- ? KEY_TYPE_set : 0;
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
+ discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));
-
k = bch2_btree_iter_peek_slot(discard_iter);
ret = bkey_err(k);
if (ret)
goto err;
}
+ freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
+ bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
k = bch2_btree_iter_peek_slot(freespace_iter);
ret = bkey_err(k);
if (ret)
if (ret)
goto err;
}
+
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (a->gen != alloc_gen(k, gens_offset) &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+ " %s",
+ alloc_gen(k, gens_offset), a->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i_bucket_gens *g =
+ bch2_trans_kmalloc(trans, sizeof(*g));
+
+ ret = PTR_ERR_OR_ZERO(g);
+ if (ret)
+ goto err;
+
+ if (k.k->type == KEY_TYPE_bucket_gens) {
+ bkey_reassemble(&g->k_i, k);
+ } else {
+ bkey_bucket_gens_init(&g->k_i);
+ g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
+ }
+
+ g->v.gens[gens_offset] = a->gen;
+
+ ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
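+/*
+ * A hole in the alloc btree means the bucket is unallocated; check that the
+ * corresponding range is marked in the freespace btree, inserting the missing
+ * KEY_TYPE_set key if not:
+ */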
+static int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *freespace_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ ca = bch_dev_bkey_exists(c, start.inode);
+ if (!ca->mi.freespace_initialized)
+ return 0;
+
+ bch2_btree_iter_set_pos(freespace_iter, start);
+
+ k = bch2_btree_iter_peek_slot(freespace_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ *end = bkey_min(k.k->p, *end);
+
+ if (k.k->type != KEY_TYPE_set &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "hole in alloc btree missing in freespace btree\n"
+ " device %llu buckets %llu-%llu",
+ freespace_iter->pos.inode,
+ freespace_iter->pos.offset,
+ end->offset))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
+
+ bkey_init(&update->k);
+ update->k.type = KEY_TYPE_set;
+ update->k.p = freespace_iter->pos;
+ bch2_key_resize(&update->k,
+ min_t(u64, U32_MAX, end->offset -
+ freespace_iter->pos.offset));
+
+ ret = bch2_trans_update(trans, freespace_iter, update, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *bucket_gens_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ unsigned i, gens_offset, gens_end_offset;
+ int ret;
+
+ if (c->sb.version < bcachefs_metadata_version_bucket_gens &&
+ !c->opts.version_upgrade)
+ return 0;
+
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
+ alloc_gens_pos(*end, &gens_end_offset)))
+ gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
+
+ if (k.k->type == KEY_TYPE_bucket_gens) {
+ struct bkey_i_bucket_gens g;
+ bool need_update = false;
+
+ bkey_reassemble(&g.k_i, k);
+
+ for (i = gens_offset; i < gens_end_offset; i++) {
+ if (fsck_err_on(g.v.gens[i], c,
+ "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
+ bucket_gens_pos_to_alloc(k.k->p, i).inode,
+ bucket_gens_pos_to_alloc(k.k->p, i).offset,
+ g.v.gens[i])) {
+ g.v.gens[i] = 0;
+ need_update = true;
+ }
+ }
+
+ if (need_update) {
+ struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));
+
+ ret = PTR_ERR_OR_ZERO(k);
+ if (ret)
+ goto err;
+
+ memcpy(k, &g, sizeof(g));
+
+ ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
+ if (ret)
+ goto err;
+ }
+ }
+
+ *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
printbuf_exit(&buf);
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter;
struct bkey_s_c alloc_k;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
u64 genbits;
struct bpos pos;
enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
if (ret)
goto err;
- bch2_alloc_to_v4(alloc_k, &a);
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
- if (fsck_err_on(a.data_type != state ||
+ if (fsck_err_on(a->data_type != state ||
(state == BCH_DATA_free &&
- genbits != alloc_freespace_genbits(a)), c,
+ genbits != alloc_freespace_genbits(*a)), c,
"%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)",
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
bch2_btree_ids[iter->btree_id],
- a.data_type == state,
- genbits >> 56, alloc_freespace_genbits(a) >> 56))
+ a->data_type == state,
+ genbits >> 56, alloc_freespace_genbits(*a) >> 56))
goto delete;
out:
err:
goto out;
}
+/*
+ * We've already checked that generation numbers in the bucket_gens btree are
+ * valid for buckets that exist; this just checks for keys for nonexistent
+ * buckets.
+ */
+static int bch2_check_bucket_gens_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i_bucket_gens g;
+ struct bch_dev *ca;
+ u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+ u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+ u64 b;
+ bool need_update = false;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
+ bkey_reassemble(&g.k_i, k);
+
+ if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+ "bucket_gens key for invalid device:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ goto out;
+ }
+
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ if (fsck_err_on(end <= ca->mi.first_bucket ||
+ start >= ca->mi.nbuckets, c,
+ "bucket_gens key for invalid buckets:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ goto out;
+ }
+
+ for (b = start; b < ca->mi.first_bucket; b++)
+ if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ "bucket_gens key has nonzero gen for invalid bucket")) {
+ g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+ need_update = true;
+ }
+
+ for (b = ca->mi.nbuckets; b < end; b++)
+ if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ "bucket_gens key has nonzero gen for invalid bucket")) {
+ g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+ need_update = true;
+ }
+
+ if (need_update) {
+ struct bkey_i *k;
+
+ k = bch2_trans_kmalloc(trans, sizeof(g));
+ ret = PTR_ERR_OR_ZERO(k);
+ if (ret)
+ goto out;
+
+ memcpy(k, &g, sizeof(g));
+ ret = bch2_trans_update(trans, iter, k, 0);
+ }
+out:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
int bch2_check_alloc_info(struct bch_fs *c)
{
struct btree_trans trans;
- struct btree_iter iter, discard_iter, freespace_iter;
+ struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
+ struct bkey hole;
struct bkey_s_c k;
int ret = 0;
BTREE_ITER_PREFETCH);
bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(&trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH);
+
while (1) {
- ret = commit_do(&trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW,
- bch2_check_alloc_key(&trans, &iter,
- &discard_iter,
- &freespace_iter));
+ struct bpos next;
+
+ bch2_trans_begin(&trans);
+
+ k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
+ ret = bkey_err(k);
if (ret)
+ goto bkey_err;
+
+ if (!k.k)
break;
- bch2_btree_iter_advance(&iter);
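+		/*
+		 * Live alloc keys are checked individually; holes are checked
+		 * as whole ranges against the freespace and bucket_gens
+		 * btrees:
+		 */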
+ if (k.k->type) {
+ next = bpos_nosnap_successor(k.k->p);
+
+ ret = bch2_check_alloc_key(&trans,
+ k, &iter,
+ &discard_iter,
+ &freespace_iter,
+ &bucket_gens_iter);
+ if (ret)
+ goto bkey_err;
+ } else {
+ next = k.k->p;
+
+ ret = bch2_check_alloc_hole_freespace(&trans,
+ bkey_start_pos(k.k),
+ &next,
+ &freespace_iter) ?:
+ bch2_check_alloc_hole_bucket_gens(&trans,
+ bkey_start_pos(k.k),
+ &next,
+ &bucket_gens_iter);
+ if (ret)
+ goto bkey_err;
+ }
+
+ ret = bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_pos(&iter, next);
+bkey_err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
}
+ bch2_trans_iter_exit(&trans, &bucket_gens_iter);
bch2_trans_iter_exit(&trans, &freespace_iter);
bch2_trans_iter_exit(&trans, &discard_iter);
bch2_trans_iter_exit(&trans, &iter);
BTREE_ID_freespace, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- bch2_check_discard_freespace_key(&trans, &iter));
+ bch2_check_discard_freespace_key(&trans, &iter)) ?:
+ for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_bucket_gens_key(&trans, &iter, k));
err:
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;
{
struct bch_fs *c = trans->c;
struct btree_iter lru_iter;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
int ret;
alloc_k = bch2_btree_iter_peek(alloc_iter);
if (ret)
return ret;
- bch2_alloc_to_v4(alloc_k, &a);
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
- if (a.data_type != BCH_DATA_cached)
+ if (a->data_type != BCH_DATA_cached)
return 0;
bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
- POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
-
+ lru_pos(alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ a->io_time[READ]), 0);
k = bch2_btree_iter_peek_slot(&lru_iter);
ret = bkey_err(k);
if (ret)
goto err;
- if (fsck_err_on(!a.io_time[READ], c,
+ if (fsck_err_on(!a->io_time[READ], c,
"cached bucket with read_time 0\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
- fsck_err_on(k.k->type != KEY_TYPE_lru ||
- le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
- "incorrect/missing lru entry\n"
- " %s\n"
+ fsck_err_on(k.k->type != KEY_TYPE_set, c,
+ "missing lru entry\n"
" %s",
(printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
- (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
- u64 read_time = a.io_time[READ];
-
- if (!a.io_time[READ])
- a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ u64 read_time = a->io_time[READ] ?:
+ atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_set(trans,
alloc_k.k->p.inode,
- alloc_k.k->p.offset,
- &a.io_time[READ]);
+ bucket_to_u64(alloc_k.k->p),
+ read_time);
if (ret)
goto err;
- if (a.io_time[READ] != read_time) {
+ if (a->io_time[READ] != read_time) {
struct bkey_i_alloc_v4 *a_mut =
bch2_alloc_to_v4_mut(trans, alloc_k);
ret = PTR_ERR_OR_ZERO(a_mut);
if (ret)
goto err;
- a_mut->v.io_time[READ] = a.io_time[READ];
+ a_mut->v.io_time[READ] = read_time;
ret = bch2_trans_update(trans, alloc_iter,
&a_mut->k_i, BTREE_TRIGGER_NORUN);
if (ret)
err:
fsck_err:
bch2_trans_iter_exit(trans, &lru_iter);
- printbuf_exit(&buf2);
printbuf_exit(&buf);
return ret;
}
struct bch_dev *ca;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
- bool did_discard = false;
int ret = 0;
ca = bch_dev_bkey_exists(c, pos.inode);
goto write;
}
- if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
- "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
- "%s",
- a->v.journal_seq,
- c->journal.flushed_seq_ondisk,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+ if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ bch2_trans_inconsistent(trans,
+ "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+ "%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
goto out;
}
- if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
- "bucket incorrectly set in need_discard btree\n"
- "%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ if (a->v.data_type != BCH_DATA_need_discard) {
+ if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ bch2_trans_inconsistent(trans,
+ "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
+
goto out;
}
- if (bkey_cmp(*discard_pos_done, iter.pos) &&
+ if (!bkey_eq(*discard_pos_done, iter.pos) &&
ca->mi.discard && !c->opts.nochanges) {
/*
* This works without any other locks because this is the only
k.k->p.offset * ca->mi.bucket_size,
ca->mi.bucket_size,
GFP_KERNEL);
+ *discard_pos_done = iter.pos;
- ret = bch2_trans_relock(trans);
+ ret = bch2_trans_relock_notrace(trans);
if (ret)
goto out;
}
- *discard_pos_done = iter.pos;
- did_discard = true;
-
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
if (ret)
goto out;
- if (did_discard) {
- this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
- (*discarded)++;
- }
+ this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
+ (*discarded)++;
out:
+ (*seen)++;
bch2_trans_iter_exit(trans, &iter);
percpu_ref_put(&ca->io_ref);
printbuf_exit(&buf);
if (need_journal_commit * 2 > seen)
bch2_journal_flush_async(&c->journal, NULL);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
bch2_err_str(ret));
void bch2_do_discards(struct bch_fs *c)
{
- if (percpu_ref_tryget_live(&c->writes) &&
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
!queue_work(system_long_wq, &c->discard_work))
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
static int invalidate_one_bucket(struct btree_trans *trans,
- struct btree_iter *lru_iter, struct bkey_s_c k,
- unsigned dev_idx, s64 *nr_to_invalidate)
+ struct btree_iter *lru_iter,
+ struct bkey_s_c lru_k,
+ s64 *nr_to_invalidate)
{
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter = { NULL };
- struct bkey_i_alloc_v4 *a;
- struct bpos bucket;
+ struct bkey_i_alloc_v4 *a = NULL;
struct printbuf buf = PRINTBUF;
+ struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
unsigned cached_sectors;
int ret = 0;
- if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)
+ if (*nr_to_invalidate <= 0)
return 1;
- if (k.k->type != KEY_TYPE_lru) {
- prt_printf(&buf, "non lru key in lru btree:\n ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ret = -EINVAL;
- }
-
- goto out;
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ prt_str(&buf, "lru entry points to invalid bucket");
+ goto err;
}
- bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));
+ if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+ return 0;
a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
- if (k.k->p.offset != alloc_lru_idx(a->v)) {
- prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ret = -EINVAL;
- }
-
+ /* We expect harmless races here due to the btree write buffer: */
+ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
goto out;
- }
+
+ BUG_ON(a->v.data_type != BCH_DATA_cached);
if (!a->v.cached_sectors)
bch_err(c, "invalidating empty bucket, confused");
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
+err:
+ prt_str(&buf, "\n lru key: ");
+ bch2_bkey_val_to_text(&buf, c, lru_k);
+
+ prt_str(&buf, "\n lru entry: ");
+ bch2_lru_pos_to_text(&buf, lru_iter->pos);
+
+ prt_str(&buf, "\n alloc key: ");
+ if (!a)
+ bch2_bpos_to_text(&buf, bucket);
+ else
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+
+ bch_err(c, "%s", buf.buf);
+ if (test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
+ ret = -EINVAL;
+ }
+
+ goto out;
}
static void bch2_do_invalidates_work(struct work_struct *work)
bch2_trans_init(&trans, c, 0, 0);
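+	/*
+	 * LRU btree updates go through the btree write buffer; flush it first
+	 * so that we see current LRU entries:
+	 */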
+ ret = bch2_btree_write_buffer_flush(&trans);
+ if (ret)
+ goto err;
+
for_each_member_device(ca, c, i) {
s64 nr_to_invalidate =
should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
- POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
- invalidate_one_bucket(&trans, &iter, k, ca->dev_idx, &nr_to_invalidate));
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0, 0),
+ lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+ BTREE_ITER_INTENT, k,
+ invalidate_one_bucket(&trans, &iter, k, &nr_to_invalidate));
if (ret < 0) {
percpu_ref_put(&ca->ref);
break;
}
}
-
+err:
bch2_trans_exit(&trans);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
{
- if (percpu_ref_tryget_live(&c->writes) &&
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
!queue_work(system_long_wq, &c->invalidate_work))
- percpu_ref_put(&c->writes);
-}
-
-static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k, struct bch_dev *ca)
-{
- struct bch_alloc_v4 a;
-
- if (iter->pos.offset >= ca->mi.nbuckets)
- return 1;
-
- bch2_alloc_to_v4(k, &a);
- return bch2_bucket_do_index(trans, k, &a, true);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ struct bkey hole;
+ struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
struct bch_member *m;
int ret;
bch2_trans_init(&trans, c, 0, 0);
- ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
- POS(ca->dev_idx, ca->mi.first_bucket),
- BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_LAZY_RW,
- bucket_freespace_init(&trans, &iter, k, ca));
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ BTREE_ITER_PREFETCH);
+ /*
+ * Scan the alloc btree for every bucket on @ca, and add buckets to the
+ * freespace/need_discard/need_gc_gens btrees as needed:
+ */
+ while (1) {
+ bch2_trans_begin(&trans);
+
+ if (bkey_ge(iter.pos, end)) {
+ ret = 0;
+ break;
+ }
+
+ k = bch2_get_key_or_hole(&iter, end, &hole);
+ ret = bkey_err(k);
+ if (ret)
+ goto bkey_err;
+
+ if (k.k->type) {
+ /*
+ * We process live keys in the alloc btree one at a
+ * time:
+ */
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+ ret = bch2_bucket_do_index(&trans, k, a, true) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_advance(&iter);
+ } else {
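+			/*
+			 * When we have a hole, a whole range of empty buckets
+			 * can be marked in the freespace btree with a single
+			 * KEY_TYPE_set key:
+			 */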
+ struct bkey_i *freespace;
+
+ freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
+ ret = PTR_ERR_OR_ZERO(freespace);
+ if (ret)
+ goto bkey_err;
+
+ bkey_init(&freespace->k);
+ freespace->k.type = KEY_TYPE_set;
+ freespace->k.p = k.k->p;
+ freespace->k.size = k.k->size;
+
+ ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace, 0) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_pos(&iter, k.k->p);
+ }
+bkey_err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
if (ret < 0) {
*/
bch2_recalc_capacity(c);
- /* Next, close write points that point to this device... */
- for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
- bch2_writepoint_stop(c, ca, &c->write_points[i]);
-
- bch2_writepoint_stop(c, ca, &c->copygc_write_point);
- bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
- bch2_writepoint_stop(c, ca, &c->btree_write_point);
-
- mutex_lock(&c->btree_reserve_cache_lock);
- while (c->btree_reserve_cache_nr) {
- struct btree_alloc *a =
- &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
- bch2_open_buckets_put(c, &a->ob);
- }
- mutex_unlock(&c->btree_reserve_cache_lock);
-
- while (1) {
- struct open_bucket *ob;
-
- spin_lock(&c->freelist_lock);
- if (!ca->open_buckets_partial_nr) {
- spin_unlock(&c->freelist_lock);
- break;
- }
- ob = c->open_buckets +
- ca->open_buckets_partial[--ca->open_buckets_partial_nr];
- ob->on_partial_list = false;
- spin_unlock(&c->freelist_lock);
-
- bch2_open_bucket_put(c, ob);
- }
-
- bch2_ec_stop_dev(c, ca);
+ bch2_open_buckets_stop(c, ca, false);
/*
* Wake up threads that were blocked on allocation, so they can notice