#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
}
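/*
 * The _invalid() hooks below switch from a bare rw int to a flags word:
 * READ/WRITE still travel in the low bit, and BKEY_INVALID_FROM_JOURNAL
 * marks keys seen during journal replay, which may predate
 * BCH_FS_CHECK_BACKPOINTERS_DONE and therefore must be exempt from the
 * backpointer checks in bch2_alloc_v4_invalid() below.
 */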
int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
}
int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_alloc_unpacked u;
}
int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_alloc_unpacked u;
}
int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+ int rw = flags & WRITE;
if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
prt_printf(err, "bad val size (%lu != %u)",
return -BCH_ERR_invalid_bkey;
}
- /*
- * XXX this is wrong, we'll be checking updates that happened from
- * before BCH_FS_CHECK_BACKPOINTERS_DONE
- */
- if (rw == WRITE && test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ if (rw == WRITE &&
+ !(flags & BKEY_INVALID_FROM_JOURNAL) &&
+ test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
unsigned i, bp_len = 0;
for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
printbuf_indent_add(out, 2);
prt_printf(out, "gen %u oldest_gen %u data_type %s",
- a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
+ a->gen, a->oldest_gen,
+ a->data_type < BCH_DATA_NR
+ ? bch2_data_types[a->data_type]
+ : "(invalid data type)");
prt_newline(out);
prt_printf(out, "journal_seq %llu", a->journal_seq);
prt_newline(out);
prt_newline(out);
prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
prt_newline(out);
+ prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
+ prt_newline(out);
+ prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+ prt_newline(out);
- if (k.k->type == KEY_TYPE_alloc_v4) {
+ if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
- prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a_raw.v));
- prt_newline(out);
-
prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
printbuf_indent_add(out, 2);
}
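/*
 * Backpointer printing is now gated on BCH_ALLOC_V4_NR_BACKPOINTERS()
 * rather than on the key type, so v4 keys carrying no backpointers skip
 * this block entirely; bp_start is printed unconditionally above.
 */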
int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+ unsigned flags, struct printbuf *err)
{
if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
prt_printf(err, "bad val size (%lu != %zu)",
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
- __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i));
+ __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
if (ret)
break;
have_bucket_gens_key = false;
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
- __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i));
+ __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
bch2_trans_exit(&trans);
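/*
 * __bch2_btree_insert() grew a flags parameter; both callers above pass 0,
 * i.e. no special update flags. A sketch of the assumed new signature:
 *
 *	int __bch2_btree_insert(struct btree_trans *, enum btree_id,
 *				struct bkey_i *, enum btree_update_flags);
 */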
!new_a->io_time[READ])
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- old_lru = alloc_lru_idx(*old_a);
- new_lru = alloc_lru_idx(*new_a);
+ old_lru = alloc_lru_idx_read(*old_a);
+ new_lru = alloc_lru_idx_read(*new_a);
if (old_lru != new_lru) {
- ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
- old_lru, &new_lru, old);
+ ret = bch2_lru_change(trans, new->k.p.inode,
+ bucket_to_u64(new->k.p),
+ old_lru, new_lru);
if (ret)
return ret;
+ }
- if (new_a->data_type == BCH_DATA_cached)
- new_a->io_time[READ] = new_lru;
+ new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+ bch_dev_bkey_exists(c, new->k.p.inode));
+
+ if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+ ret = bch2_lru_change(trans,
+ BCH_LRU_FRAGMENTATION_START,
+ bucket_to_u64(new->k.p),
+ old_a->fragmentation_lru, new_a->fragmentation_lru);
+ if (ret)
+ return ret;
}
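/*
 * Bucket fragmentation is tracked in the same lru btree: those entries
 * live under the reserved BCH_LRU_FRAGMENTATION_START slot (where the
 * device index would normally go), keyed by bucket_to_u64(), so a cached
 * bucket can sit on both the read-time lru and the fragmentation lru at
 * once.
 */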
if (old_a->gen != new_a->gen) {
iter = bucket->inode;
ca = __bch2_next_dev(c, &iter, NULL);
if (ca)
- bucket->offset = ca->mi.first_bucket;
+ *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
rcu_read_unlock();
return ca != NULL;
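/*
 * Previously only bucket->offset was reset here, leaving bucket->inode
 * pointing at the device we just finished; assigning a full POS() advances
 * both the device index and the starting bucket when stepping to the next
 * member device.
 */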
const struct bch_alloc_v4 *a;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
int ret;
alloc_k = bch2_btree_iter_peek(alloc_iter);
if (!alloc_k.k)
	return 0;
bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
- POS(alloc_k.k->p.inode, a->io_time[READ]), 0);
-
+ lru_pos(alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ a->io_time[READ]), 0);
k = bch2_btree_iter_peek_slot(&lru_iter);
ret = bkey_err(k);
if (ret)
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
- fsck_err_on(k.k->type != KEY_TYPE_lru ||
- le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
- "incorrect/missing lru entry\n"
- " %s\n"
+ fsck_err_on(k.k->type != KEY_TYPE_set, c,
+ "missing lru entry\n"
" %s",
(printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
- (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
u64 read_time = a->io_time[READ] ?:
atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_set(trans,
alloc_k.k->p.inode,
- alloc_k.k->p.offset,
- &read_time);
+ bucket_to_u64(alloc_k.k->p),
+ read_time);
if (ret)
goto err;
err:
fsck_err:
bch2_trans_iter_exit(trans, &lru_iter);
- printbuf_exit(&buf2);
printbuf_exit(&buf);
return ret;
}
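/*
 * lru entries are now plain KEY_TYPE_set keys whose position encodes the
 * whole entry: lru_pos(device, bucket_to_u64(bucket), time). A minimal
 * lookup sketch, assuming the lru_pos()/bucket_to_u64() helpers used
 * above:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
 *			     lru_pos(bucket.inode,
 *				     bucket_to_u64(bucket),
 *				     time), 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 */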
struct bch_dev *ca;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
- bool did_discard = false;
int ret = 0;
ca = bch_dev_bkey_exists(c, pos.inode);
k.k->p.offset * ca->mi.bucket_size,
ca->mi.bucket_size,
GFP_KERNEL);
+ *discard_pos_done = iter.pos;
- ret = bch2_trans_relock(trans);
+ ret = bch2_trans_relock_notrace(trans);
if (ret)
goto out;
}
- *discard_pos_done = iter.pos;
- did_discard = true;
-
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
if (ret)
goto out;
- if (did_discard) {
- this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
- (*discarded)++;
- }
+ this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
+ (*discarded)++;
out:
+ (*seen)++;
bch2_trans_iter_exit(trans, &iter);
percpu_ref_put(&ca->io_ref);
printbuf_exit(&buf);
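/*
 * did_discard is gone: if we reach the commit, the discard already
 * happened, so the counter bump is unconditional; *discard_pos_done is
 * recorded before retaking btree locks, and *seen is now counted on the
 * way out. bch2_trans_relock_notrace() presumably suppresses the tracing
 * noise from an expected relock failure here.
 */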
if (need_journal_commit * 2 > seen)
bch2_journal_flush_async(&c->journal, NULL);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
bch2_err_str(ret));
void bch2_do_discards(struct bch_fs *c)
{
- if (percpu_ref_tryget_live(&c->writes) &&
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
!queue_work(system_long_wq, &c->discard_work))
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
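/*
 * The global c->writes percpu ref is replaced by named write refs;
 * bch2_write_ref_tryget()/bch2_write_ref_put() take an enum
 * (BCH_WRITE_REF_discard here), presumably so a leaked ref can be
 * attributed to the subsystem that took it.
 */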
static int invalidate_one_bucket(struct btree_trans *trans,
- struct btree_iter *lru_iter, struct bkey_s_c k,
- unsigned dev_idx, s64 *nr_to_invalidate)
+ struct btree_iter *lru_iter,
+ struct bkey_s_c lru_k,
+ s64 *nr_to_invalidate)
{
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter = { NULL };
- struct bkey_i_alloc_v4 *a;
- struct bpos bucket;
+ struct bkey_i_alloc_v4 *a = NULL;
struct printbuf buf = PRINTBUF;
+ struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
unsigned cached_sectors;
int ret = 0;
- if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)
+ if (*nr_to_invalidate <= 0)
return 1;
- if (k.k->type != KEY_TYPE_lru) {
- prt_printf(&buf, "non lru key in lru btree:\n ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ret = -EINVAL;
- }
-
- goto out;
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ prt_str(&buf, "lru entry points to invalid bucket");
+ goto err;
}
- bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));
+ if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+ return 0;
a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
- if (k.k->p.offset != alloc_lru_idx(a->v)) {
- prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ret = -EINVAL;
- }
-
+ /* We expect harmless races here due to the btree write buffer: */
+ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
goto out;
- }
+
+ BUG_ON(a->v.data_type != BCH_DATA_cached);
if (!a->v.cached_sectors)
bch_err(c, "invalidating empty bucket, confused");
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
+err:
+ prt_str(&buf, "\n lru key: ");
+ bch2_bkey_val_to_text(&buf, c, lru_k);
+
+ prt_str(&buf, "\n lru entry: ");
+ bch2_lru_pos_to_text(&buf, lru_iter->pos);
+
+ prt_str(&buf, "\n alloc key: ");
+ if (!a)
+ bch2_bpos_to_text(&buf, bucket);
+ else
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+
+ bch_err(c, "%s", buf.buf);
+ if (test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
+ ret = -EINVAL;
+ }
+
+ goto out;
}
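/*
 * Bad lru entries are only escalated to bch2_inconsistent_error() once
 * BCH_FS_CHECK_LRUS_DONE is set; before lru repair has run they are
 * logged and skipped rather than treated as filesystem corruption.
 */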
static void bch2_do_invalidates_work(struct work_struct *work)
bch2_trans_init(&trans, c, 0, 0);
+ ret = bch2_btree_write_buffer_flush(&trans);
+ if (ret)
+ goto err;
+
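/*
 * lru updates now go through the btree write buffer, so flush it first to
 * get a current view of the lru btree before walking it.
 */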
for_each_member_device(ca, c, i) {
s64 nr_to_invalidate =
should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
- POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
- invalidate_one_bucket(&trans, &iter, k, ca->dev_idx, &nr_to_invalidate));
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0, 0),
+ lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+ BTREE_ITER_INTENT, k,
+ invalidate_one_bucket(&trans, &iter, k, &nr_to_invalidate));
if (ret < 0) {
percpu_ref_put(&ca->ref);
break;
}
}
-
+err:
bch2_trans_exit(&trans);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
{
- if (percpu_ref_tryget_live(&c->writes) &&
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
!queue_work(system_long_wq, &c->invalidate_work))
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
freespace->k.p = k.k->p;
freespace->k.size = k.k->size;
- ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace) ?:
+ ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace, 0) ?:
bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL);
*/
bch2_recalc_capacity(c);
- /* Next, close write points that point to this device... */
- for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
- bch2_writepoint_stop(c, ca, &c->write_points[i]);
-
- bch2_writepoint_stop(c, ca, &c->copygc_write_point);
- bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
- bch2_writepoint_stop(c, ca, &c->btree_write_point);
-
- mutex_lock(&c->btree_reserve_cache_lock);
- while (c->btree_reserve_cache_nr) {
- struct btree_alloc *a =
- &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
- bch2_open_buckets_put(c, &a->ob);
- }
- mutex_unlock(&c->btree_reserve_cache_lock);
-
- while (1) {
- struct open_bucket *ob;
-
- spin_lock(&c->freelist_lock);
- if (!ca->open_buckets_partial_nr) {
- spin_unlock(&c->freelist_lock);
- break;
- }
- ob = c->open_buckets +
- ca->open_buckets_partial[--ca->open_buckets_partial_nr];
- ob->on_partial_list = false;
- spin_unlock(&c->freelist_lock);
-
- bch2_open_bucket_put(c, ob);
- }
-
- bch2_ec_stop_dev(c, ca);
+ bch2_open_buckets_stop(c, ca, false);
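/*
 * The open-coded teardown of write points, the btree reserve cache and
 * partial open buckets is consolidated into bch2_open_buckets_stop(); the
 * third argument (false here) presumably selects whether only
 * erasure-coded buckets are stopped.
 */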
/*
* Wake up threads that were blocked on allocation, so they can notice