/* erasure coding */
#include "bcachefs.h"
+#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
+#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
-#include "io.h"
+#include "io_read.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
/* Stripes btree keys: */
-int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
- int rw, struct printbuf *err)
+int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+ int ret = 0;
- if (!bkey_cmp(k.k->p, POS_MIN)) {
- prt_printf(err, "stripe at POS_MIN");
- return -EINVAL;
- }
-
- if (k.k->p.inode) {
- prt_printf(err, "nonzero inode field");
- return -EINVAL;
- }
-
- if (bkey_val_bytes(k.k) < sizeof(*s)) {
- prt_printf(err, "incorrect value size (%zu < %zu)",
- bkey_val_bytes(k.k), sizeof(*s));
- return -EINVAL;
- }
+ bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
+ bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
+ stripe_pos_bad,
+ "stripe at bad pos");
- if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
- prt_printf(err, "incorrect value size (%zu < %u)",
- bkey_val_u64s(k.k), stripe_val_u64s(s));
- return -EINVAL;
- }
+ bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
+ stripe_val_size_bad,
+ "incorrect value size (%zu < %u)",
+ bkey_val_u64s(k.k), stripe_val_u64s(s));
- return bch2_bkey_ptrs_invalid(c, k, rw, err);
+ ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+ return ret;
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- unsigned i;
+ unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
s->algorithm,
le16_to_cpu(s->sectors),
- s->nr_blocks - s->nr_redundant,
+ nr_data,
s->nr_redundant,
s->csum_type,
1U << s->csum_granularity_bits);
- for (i = 0; i < s->nr_blocks; i++)
- prt_printf(out, " %u:%llu:%u", s->ptrs[i].dev,
- (u64) s->ptrs[i].offset,
- stripe_blockcount_get(s, i));
+ for (i = 0; i < s->nr_blocks; i++) {
+ const struct bch_extent_ptr *ptr = s->ptrs + i;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ u32 offset;
+ u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+ prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
+ if (i < nr_data)
+ prt_printf(out, "#%u", stripe_blockcount_get(s, i));
+ prt_printf(out, " gen %u", ptr->gen);
+ if (ptr_stale(ca, ptr))
+ prt_printf(out, " stale");
+ }
+}
+
+/* Triggers: */
+
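+/*
+ * Transactional trigger for one stripe block: update the alloc key for the
+ * backing bucket, setting (or clearing, on deletion) its stripe membership
+ * fields and accounting parity sectors in dirty_sectors:
+ */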
+static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c_stripe s,
+ unsigned idx, bool deleting)
+{
+ struct bch_fs *c = trans->c;
+ const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity : 0;
+ s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
+ int ret = 0;
+
+ if (deleting)
+ sectors = -sectors;
+
+ a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ ret = bch2_check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
+ a->v.gen, a->v.data_type,
+ a->v.dirty_sectors);
+ if (ret)
+ goto err;
+
+ if (!deleting) {
+ if (bch2_trans_inconsistent_on(a->v.stripe ||
+ a->v.stripe_redundancy, trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_type_str(a->v.data_type),
+ a->v.dirty_sectors,
+ a->v.stripe, s.k->p.offset)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_type_str(a->v.data_type),
+ a->v.dirty_sectors,
+ s.k->p.offset)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ a->v.stripe = s.k->p.offset;
+ a->v.stripe_redundancy = s.v->nr_redundant;
+ a->v.data_type = BCH_DATA_stripe;
+ } else {
+ if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
+ a->v.stripe_redundancy != s.v->nr_redundant, trans,
+ "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ s.k->p.offset, a->v.stripe)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ a->v.stripe = 0;
+ a->v.stripe_redundancy = 0;
+ a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
+ }
+
+ a->v.dirty_sectors += sectors;
+ if (data_type)
+ a->v.data_type = !deleting ? data_type : 0;
+
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
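+/*
+ * GC-only counterpart of the transactional trigger: applies the same checks
+ * and accounting to the in-memory gc bucket array, under mark_lock:
+ */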
+static int mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned ptr_idx,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+ unsigned nr_data = s->nr_blocks - s->nr_redundant;
+ bool parity = ptr_idx >= nr_data;
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
+ s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
+ const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket old, new, *g;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+	/* XXX doesn't handle deletion */
+
+ percpu_down_read(&c->mark_lock);
+ g = PTR_GC_BUCKET(ca, ptr);
+
+ if (g->dirty_sectors ||
+ (g->stripe && g->stripe != k.k->p.offset)) {
+ bch2_fs_inconsistent(c,
+ "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
+ ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ bucket_lock(g);
+ old = *g;
+
+ ret = bch2_check_bucket_ref(trans, k, ptr, sectors, data_type,
+ g->gen, g->data_type,
+ g->dirty_sectors);
+	if (ret)
+		goto err_unlock;
+
+ g->data_type = data_type;
+ g->dirty_sectors += sectors;
+
+ g->stripe = k.k->p.offset;
+ g->stripe_redundancy = s->nr_redundant;
+ new = *g;
+err_unlock:
+	bucket_unlock(g);
+	if (!ret)
+		bch2_dev_usage_update_m(c, ca, &old, &new);
+err:
+	percpu_up_read(&c->mark_lock);
+ printbuf_exit(&buf);
+ return ret;
+}
+
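+/*
+ * Stripe trigger: called transactionally (update the replicas list and the
+ * alloc keys for each block's bucket), atomically (keep the in-memory stripes
+ * radix tree and heap in sync), and from gc (rebuild gc_stripes and re-mark
+ * buckets):
+ */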
+int bch2_trigger_stripe(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s _new,
+ unsigned flags)
+{
+ struct bkey_s_c new = _new.s_c;
+ struct bch_fs *c = trans->c;
+ u64 idx = new.k->p.offset;
+ const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(old).v : NULL;
+ const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(new).v : NULL;
+
+ if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ /*
+ * If the pointers aren't changing, we don't need to do anything:
+ */
+ if (new_s && old_s &&
+ new_s->nr_blocks == old_s->nr_blocks &&
+ new_s->nr_redundant == old_s->nr_redundant &&
+ !memcmp(old_s->ptrs, new_s->ptrs,
+ new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
+ return 0;
+
+ BUG_ON(new_s && old_s &&
+ (new_s->nr_blocks != old_s->nr_blocks ||
+ new_s->nr_redundant != old_s->nr_redundant));
+
+ if (new_s) {
+ s64 sectors = le16_to_cpu(new_s->sectors);
+
+ struct bch_replicas_padded r;
+ bch2_bkey_to_replicas(&r.e, new);
+ int ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
+ if (ret)
+ return ret;
+ }
+
+ if (old_s) {
+ s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
+
+ struct bch_replicas_padded r;
+ bch2_bkey_to_replicas(&r.e, old);
+ int ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
+ if (ret)
+ return ret;
+ }
+
+ unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
+ for (unsigned i = 0; i < nr_blocks; i++) {
+ if (new_s && old_s &&
+ !memcmp(&new_s->ptrs[i],
+ &old_s->ptrs[i],
+ sizeof(new_s->ptrs[i])))
+ continue;
+
+ if (new_s) {
+ int ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(new), i, false);
+ if (ret)
+ return ret;
+ }
+
+ if (old_s) {
+ int ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(old), i, true);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ if (flags & BTREE_TRIGGER_ATOMIC) {
+ struct stripe *m = genradix_ptr(&c->stripes, idx);
+
+ if (!m) {
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf1, c, old);
+ bch2_bkey_val_to_text(&buf2, c, new);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
+ "old %s\n"
+ "new %s", idx, buf1.buf, buf2.buf);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ bch2_inconsistent_error(c);
+ return -1;
+ }
+
+ if (!new_s) {
+ bch2_stripes_heap_del(c, m, idx);
+
+ memset(m, 0, sizeof(*m));
+ } else {
+ m->sectors = le16_to_cpu(new_s->sectors);
+ m->algorithm = new_s->algorithm;
+ m->nr_blocks = new_s->nr_blocks;
+ m->nr_redundant = new_s->nr_redundant;
+ m->blocks_nonempty = 0;
+
+ for (unsigned i = 0; i < new_s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
+
+ if (!old_s)
+ bch2_stripes_heap_insert(c, m, idx);
+ else
+ bch2_stripes_heap_update(c, m, idx);
+ }
+ }
+
+ if (flags & BTREE_TRIGGER_GC) {
+ struct gc_stripe *m =
+ genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
+
+ if (!m) {
+ bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+ idx);
+ return -BCH_ERR_ENOMEM_mark_stripe;
+ }
+ /*
+ * This will be wrong when we bring back runtime gc: we should
+ * be unmarking the old key and then marking the new key
+ */
+ m->alive = true;
+ m->sectors = le16_to_cpu(new_s->sectors);
+ m->nr_blocks = new_s->nr_blocks;
+ m->nr_redundant = new_s->nr_redundant;
+
+ for (unsigned i = 0; i < new_s->nr_blocks; i++)
+ m->ptrs[i] = new_s->ptrs[i];
+
+ bch2_bkey_to_replicas(&m->r.e, new);
+
+ /*
+ * gc recalculates this field from stripe ptr
+ * references:
+ */
+ memset(m->block_sectors, 0, sizeof(m->block_sectors));
+
+ for (unsigned i = 0; i < new_s->nr_blocks; i++) {
+ int ret = mark_stripe_bucket(trans, new, i, flags);
+ if (ret)
+ return ret;
+ }
+
+ int ret = bch2_update_replicas(c, new, &m->r.e,
+ ((s64) m->sectors * m->nr_redundant),
+ 0, true);
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, new);
+ bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+ printbuf_exit(&buf);
+ return ret;
+ }
+ }
+
+ return 0;
}
/* returns blocknr in stripe that we matched: */
struct bkey_s_c k, unsigned *block)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
bkey_for_each_ptr(ptrs, ptr)
static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
- unsigned i;
+ if (buf->key.k.type == KEY_TYPE_stripe) {
+ struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
+ unsigned i;
- for (i = 0; i < buf->key.v.nr_blocks; i++) {
- kvpfree(buf->data[i], buf->size << 9);
- buf->data[i] = NULL;
+ for (i = 0; i < s->v.nr_blocks; i++) {
+ kvfree(buf->data[i]);
+ buf->data[i] = NULL;
+ }
}
}
+/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
- unsigned offset, unsigned size)
+ unsigned offset, unsigned size)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned csum_granularity = 1U << v->csum_granularity_bits;
unsigned end = offset + size;
unsigned i;
memset(buf->valid, 0xFF, sizeof(buf->valid));
- for (i = 0; i < buf->key.v.nr_blocks; i++) {
- buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+ for (i = 0; i < v->nr_blocks; i++) {
+ buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
if (!buf->data[i])
goto err;
}
return 0;
err:
ec_stripe_buf_exit(buf);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_stripe_buf;
}
/* Checksumming: */
static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
unsigned block, unsigned offset)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned csum_granularity = 1 << v->csum_granularity_bits;
unsigned end = buf->offset + buf->size;
unsigned len = min(csum_granularity, end - offset);
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned i, j, csums_per_device = stripe_csums_per_device(v);
if (!v->csum_type)
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned csum_granularity = 1 << v->csum_granularity_bits;
unsigned i;
struct bch_csum got = ec_block_checksum(buf, i, offset);
if (bch2_crc_cmp(want, got)) {
- struct printbuf buf2 = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));
+ struct printbuf err = PRINTBUF;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
+
+ prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
+ want.hi, want.lo,
+ got.hi, got.lo,
+ bch2_csum_types[v->csum_type]);
+ prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
+ bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
+ bch_err_ratelimited(ca, "%s", err.buf);
+ printbuf_exit(&err);
- bch_err_ratelimited(c,
- "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
- (void *) _RET_IP_, i, j, v->csum_type,
- want.lo, got.lo, buf2.buf);
- printbuf_exit(&buf2);
clear_bit(i, buf->valid);
+
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
break;
}
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned nr_data = v->nr_blocks - v->nr_redundant;
unsigned bytes = le16_to_cpu(v->sectors) << 9;
static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
- return buf->key.v.nr_blocks -
- bitmap_weight(buf->valid, buf->key.v.nr_blocks);
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+
+ return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
unsigned nr_data = v->nr_blocks - v->nr_redundant;
unsigned bytes = buf->size << 9;
static void ec_block_endio(struct bio *bio)
{
struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
- struct bch_stripe *v = &ec_bio->buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
- if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
+ if (bch2_dev_io_err_on(bio->bi_status, ca,
+ bio_data_dir(bio)
+ ? BCH_MEMBER_ERROR_write
+ : BCH_MEMBER_ERROR_read,
+ "erasure coding %s error: %s",
bio_data_dir(bio) ? "write" : "read",
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
- unsigned rw, unsigned idx, struct closure *cl)
+ blk_opf_t opf, unsigned idx, struct closure *cl)
{
- struct bch_stripe *v = &buf->key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned offset = 0, bytes = buf->size << 9;
struct bch_extent_ptr *ptr = &v->ptrs[idx];
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
+ enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
+ int rw = op_is_write(opf);
if (ptr_stale(ca, ptr)) {
bch_err_ratelimited(c,
ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
nr_iovecs,
- rw,
+ opf,
GFP_KERNEL,
&c->ec_bioset),
struct ec_bio, bio);
percpu_ref_put(&ca->io_ref);
}
-static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
+static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
+ struct ec_stripe_buf *stripe)
{
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
- bch2_trans_init(&trans, c, 0, 0);
- bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes,
- POS(0, idx), BTREE_ITER_SLOTS);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+ POS(0, idx), BTREE_ITER_SLOTS);
ret = bkey_err(k);
if (ret)
goto err;
ret = -ENOENT;
goto err;
}
- bkey_reassemble(&stripe->key.k_i, k);
+ bkey_reassemble(&stripe->key, k);
err:
- bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
/* recovery read path: */
-int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
+int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
{
+ struct bch_fs *c = trans->c;
struct ec_stripe_buf *buf;
struct closure cl;
struct bch_stripe *v;
BUG_ON(!rbio->pick.has_ec);
- buf = kzalloc(sizeof(*buf), GFP_NOIO);
+ buf = kzalloc(sizeof(*buf), GFP_NOFS);
if (!buf)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_ec_read_extent;
- ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
+ ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
if (ret) {
bch_err_ratelimited(c,
"error doing reconstruct read: error %i looking up stripe", ret);
+		kfree(buf);
		return -EIO;
}
- v = &buf->key.v;
+ v = &bkey_i_to_stripe(&buf->key)->v;
if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
bch_err_ratelimited(c,
if (idx >= h->size) {
if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
- spin_lock(&c->ec_stripes_heap_lock);
+ mutex_lock(&c->ec_stripes_heap_lock);
if (n.size > h->size) {
memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
n.used = h->used;
swap(*h, n);
}
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
free_heap(&n);
}
if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
return 0;
}
static int ec_stripe_mem_alloc(struct btree_trans *trans,
struct btree_iter *iter)
{
- size_t idx = iter->pos.offset;
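+	/*
+	 * Try a nonblocking allocation with btree locks held; on failure,
+	 * allocate_dropping_locks_errcode() drops locks and retries with
+	 * GFP_KERNEL:
+	 */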
+ return allocate_dropping_locks_errcode(trans,
+ __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
+}
- if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
- return 0;
+/*
+ * Hash table of open stripes:
+ * Stripes that are being created or modified are kept in a hash table, so that
+ * stripe deletion can skip them.
+ */
+
+static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+ unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+ struct ec_stripe_new *s;
+
+ hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
+ if (s->idx == idx)
+ return true;
+ return false;
+}
+
+static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+ bool ret = false;
+
+ spin_lock(&c->ec_stripes_new_lock);
+ ret = __bch2_stripe_is_open(c, idx);
+ spin_unlock(&c->ec_stripes_new_lock);
+
+ return ret;
+}
+
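+/* Claim a stripe index for writing; fails if it's already open: */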
+static bool bch2_try_open_stripe(struct bch_fs *c,
+ struct ec_stripe_new *s,
+ u64 idx)
+{
+ bool ret;
+
+ spin_lock(&c->ec_stripes_new_lock);
+ ret = !__bch2_stripe_is_open(c, idx);
+ if (ret) {
+ unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+
+ s->idx = idx;
+ hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
+ }
+ spin_unlock(&c->ec_stripes_new_lock);
+
+ return ret;
+}
+
+static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
+{
+ BUG_ON(!s->idx);
- bch2_trans_unlock(trans);
+ spin_lock(&c->ec_stripes_new_lock);
+ hlist_del_init(&s->hash);
+ spin_unlock(&c->ec_stripes_new_lock);
- return __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
- bch2_trans_relock(trans);
+ s->idx = 0;
}
-static ssize_t stripe_idx_to_delete(struct bch_fs *c)
+/* Heap of all existing stripes, ordered by blocks_nonempty */
+
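+/*
+ * Returns the stripe at the top of the heap if it's empty and not currently
+ * open, 0 otherwise:
+ */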
+static u64 stripe_idx_to_delete(struct bch_fs *c)
{
ec_stripes_heap *h = &c->ec_stripes_heap;
- return h->used && h->data[0].blocks_nonempty == 0
- ? h->data[0].idx : -1;
+ lockdep_assert_held(&c->ec_stripes_heap_lock);
+
+ if (h->used &&
+ h->data[0].blocks_nonempty == 0 &&
+ !bch2_stripe_is_open(c, h->data[0].idx))
+ return h->data[0].idx;
+
+ return 0;
}
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
ec_stripes_heap *h = &c->ec_stripes_heap;
struct stripe *m = genradix_ptr(&c->stripes, idx);
- BUG_ON(!m->alive);
BUG_ON(m->heap_idx >= h->used);
BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_del(struct bch_fs *c,
struct stripe *m, size_t idx)
{
- if (!m->on_heap)
- return;
-
- m->on_heap = false;
-
+ mutex_lock(&c->ec_stripes_heap_lock);
heap_verify_backpointer(c, idx);
heap_del(&c->ec_stripes_heap, m->heap_idx,
ec_stripes_heap_cmp,
ec_stripes_heap_set_backpointer);
+ mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
struct stripe *m, size_t idx)
{
- if (m->on_heap)
- return;
-
+ mutex_lock(&c->ec_stripes_heap_lock);
BUG_ON(heap_full(&c->ec_stripes_heap));
- m->on_heap = true;
-
heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
.idx = idx,
.blocks_nonempty = m->blocks_nonempty,
ec_stripes_heap_set_backpointer);
heap_verify_backpointer(c, idx);
+ mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_stripes_heap_update(struct bch_fs *c,
struct stripe *m, size_t idx)
{
ec_stripes_heap *h = &c->ec_stripes_heap;
+ bool do_deletes;
size_t i;
- if (!m->on_heap)
- return;
-
+ mutex_lock(&c->ec_stripes_heap_lock);
heap_verify_backpointer(c, idx);
h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
heap_verify_backpointer(c, idx);
- if (stripe_idx_to_delete(c) >= 0 &&
- !percpu_ref_is_dying(&c->writes))
- schedule_work(&c->ec_stripe_delete_work);
+ do_deletes = stripe_idx_to_delete(c) != 0;
+ mutex_unlock(&c->ec_stripes_heap_lock);
+
+ if (do_deletes)
+ bch2_do_stripe_deletes(c);
}
/* stripe deletion */
-static int ec_stripe_delete(struct bch_fs *c, size_t idx)
+static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
- return bch2_btree_delete_range(c, BTREE_ID_stripes,
- POS(0, idx),
- POS(0, idx + 1),
- 0, NULL);
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_stripe s;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
+ BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_stripe) {
+ bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ s = bkey_s_c_to_stripe(k);
+ for (unsigned i = 0; i < s.v->nr_blocks; i++)
+ if (stripe_blockcount_get(s.v, i)) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
+ printbuf_exit(&buf);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
}
static void ec_stripe_delete_work(struct work_struct *work)
{
struct bch_fs *c =
container_of(work, struct bch_fs, ec_stripe_delete_work);
- ssize_t idx;
while (1) {
- spin_lock(&c->ec_stripes_heap_lock);
- idx = stripe_idx_to_delete(c);
- if (idx < 0) {
- spin_unlock(&c->ec_stripes_heap_lock);
- break;
- }
+ mutex_lock(&c->ec_stripes_heap_lock);
+ u64 idx = stripe_idx_to_delete(c);
+ mutex_unlock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_del(c, genradix_ptr(&c->stripes, idx), idx);
- spin_unlock(&c->ec_stripes_heap_lock);
+ if (!idx)
+ break;
- if (ec_stripe_delete(c, idx))
+ int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ ec_stripe_delete(trans, idx));
+ bch_err_fn(c, ret);
+ if (ret)
break;
}
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
+}
+
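+/*
+ * Kick off the stripe delete worker: the write ref is dropped by the worker,
+ * or here if the work was already queued:
+ */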
+void bch2_do_stripe_deletes(struct bch_fs *c)
+{
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
+ !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
/* stripe creation: */
-static int ec_stripe_bkey_insert(struct btree_trans *trans,
- struct bkey_i_stripe *stripe,
- struct disk_reservation *res)
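+/*
+ * Create a new stripe key, or update an existing one, preserving the
+ * per-block sector counts from the old key:
+ */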
+static int ec_stripe_key_update(struct btree_trans *trans,
+ struct bkey_i_stripe *new,
+ bool create)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
- struct bpos min_pos = POS(0, 1);
- struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
int ret;
- for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
- if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
- if (start_pos.offset) {
- start_pos = min_pos;
- bch2_btree_iter_set_pos(&iter, start_pos);
- continue;
- }
-
- ret = -BCH_ERR_ENOSPC_stripe_create;
- break;
- }
-
- if (bkey_deleted(k.k))
- break;
- }
-
- c->ec_stripe_hint = iter.pos.offset;
-
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+ new->k.p, BTREE_ITER_INTENT);
+ ret = bkey_err(k);
if (ret)
goto err;
- ret = ec_stripe_mem_alloc(trans, &iter);
- if (ret)
+ if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
+ bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
+ create ? "creating" : "updating",
+ bch2_bkey_types[k.k->type]);
+ ret = -EINVAL;
goto err;
+ }
- stripe->k.p = iter.pos;
-
- ret = bch2_trans_update(trans, &iter, &stripe->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
-}
-
-static int ec_stripe_bkey_update(struct btree_trans *trans,
- struct bkey_i_stripe *new,
- struct disk_reservation *res)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- const struct bch_stripe *existing;
- unsigned i;
- int ret;
+ if (k.k->type == KEY_TYPE_stripe) {
+ const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
+ unsigned i;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes,
- new->k.p, BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
+ if (old->nr_blocks != new->v.nr_blocks) {
+ bch_err(c, "error updating stripe: nr_blocks does not match");
+ ret = -EINVAL;
+ goto err;
+ }
- if (!k.k || k.k->type != KEY_TYPE_stripe) {
- bch_err(trans->c, "error updating stripe: not found");
- ret = -ENOENT;
- goto err;
- }
+ for (i = 0; i < new->v.nr_blocks; i++) {
+ unsigned v = stripe_blockcount_get(old, i);
- existing = bkey_s_c_to_stripe(k).v;
+ BUG_ON(v &&
+ (old->ptrs[i].dev != new->v.ptrs[i].dev ||
+ old->ptrs[i].gen != new->v.ptrs[i].gen ||
+ old->ptrs[i].offset != new->v.ptrs[i].offset));
- if (existing->nr_blocks != new->v.nr_blocks) {
- bch_err(trans->c, "error updating stripe: nr_blocks does not match");
- ret = -EINVAL;
- goto err;
+ stripe_blockcount_set(&new->v, i, v);
+ }
}
- for (i = 0; i < new->v.nr_blocks; i++)
- stripe_blockcount_set(&new->v, i,
- stripe_blockcount_get(existing, i));
-
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
-static void extent_stripe_ptr_add(struct bkey_s_extent e,
- struct ec_stripe_buf *s,
- struct bch_extent_ptr *ptr,
- unsigned block)
-{
- struct bch_extent_stripe_ptr *dst = (void *) ptr;
- union bch_extent_entry *end = extent_entry_last(e);
-
- memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
- e.k->u64s += sizeof(*dst) / sizeof(u64);
-
- *dst = (struct bch_extent_stripe_ptr) {
- .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
- .block = block,
- .redundancy = s->key.v.nr_redundant,
- .idx = s->key.k.p.offset,
- };
-}
-
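+/*
+ * Process one backpointer for @bucket: rewrite the extent it points to with a
+ * stripe pointer entry for the new stripe, dropping now-redundant replicas on
+ * other devices:
+ */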
static int ec_stripe_update_extent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct ec_stripe_buf *s)
+ struct bpos bucket, u8 gen,
+ struct ec_stripe_buf *s,
+ struct bpos *bp_pos)
{
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+ struct bch_fs *c = trans->c;
+ struct bch_backpointer bp;
+ struct btree_iter iter;
+ struct bkey_s_c k;
const struct bch_extent_ptr *ptr_c;
struct bch_extent_ptr *ptr, *ec_ptr = NULL;
+ struct bch_extent_stripe_ptr stripe_ptr;
struct bkey_i *n;
int ret, dev, block;
- if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+ ret = bch2_get_next_backpointer(trans, bucket, gen,
+ bp_pos, &bp, BTREE_ITER_CACHED);
+ if (ret)
+ return ret;
+ if (bpos_eq(*bp_pos, SPOS_MAX))
+ return 0;
+
+ if (bp.level) {
+ struct printbuf buf = PRINTBUF;
+ struct btree_iter node_iter;
+ struct btree *b;
+
+ b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
+ bch2_trans_iter_exit(trans, &node_iter);
+
+ if (!b)
+ return 0;
+
+ prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
+ bch2_backpointer_to_text(&buf, &bp);
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -EIO;
+ }
+
+ k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+ if (!k.k) {
+ /*
+ * extent no longer exists - we could flush the btree
+ * write buffer and retry to verify, but no need:
+ */
return 0;
+ }
- ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
+ if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+ goto out;
+
+ ptr_c = bkey_matches_stripe(v, k, &block);
/*
* It doesn't generally make sense to erasure code cached ptrs:
* XXX: should we be incrementing a counter?
*/
if (!ptr_c || ptr_c->cached)
- return 0;
+ goto out;
- dev = s->key.v.ptrs[block].dev;
+ dev = v->ptrs[block].dev;
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
ret = PTR_ERR_OR_ZERO(n);
if (ret)
- return ret;
+ goto out;
bkey_reassemble(n, k);
bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
- ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
+ ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
BUG_ON(!ec_ptr);
- extent_stripe_ptr_add(bkey_i_to_s_extent(n), s, ec_ptr, block);
+ stripe_ptr = (struct bch_extent_stripe_ptr) {
+ .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
+ .block = block,
+ .redundancy = v->nr_redundant,
+ .idx = s->key.k.p.offset,
+ };
- return bch2_trans_update(trans, iter, n, 0);
+ __extent_entry_insert(n,
+ (union bch_extent_entry *) ec_ptr,
+ (union bch_extent_entry *) &stripe_ptr);
+
+ ret = bch2_trans_update(trans, &iter, n, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
}
static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
unsigned block)
{
struct bch_fs *c = trans->c;
- struct bch_extent_ptr bucket = s->key.v.ptrs[block];
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+ struct bch_extent_ptr bucket = v->ptrs[block];
struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
- struct bch_backpointer bp;
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 bp_offset = 0;
+ struct bpos bp_pos = POS_MIN;
int ret = 0;
-retry:
- while (1) {
- bch2_trans_begin(trans);
-
- ret = bch2_get_next_backpointer(trans, bucket_pos, bucket.gen,
- &bp_offset, &bp,
- BTREE_ITER_CACHED);
- if (ret)
- break;
- if (bp_offset == U64_MAX)
- break;
-
- if (bch2_fs_inconsistent_on(bp.level, c, "found btree node in erasure coded bucket!?")) {
- ret = -EIO;
- break;
- }
- k = bch2_backpointer_get_key(trans, &iter, bucket_pos, bp_offset, bp);
- ret = bkey_err(k);
+ while (1) {
+ ret = commit_do(trans, NULL, NULL,
+ BCH_TRANS_COMMIT_no_check_rw|
+ BCH_TRANS_COMMIT_no_enospc,
+ ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
+ s, &bp_pos));
if (ret)
break;
- if (!k.k)
- continue;
-
- ret = ec_stripe_update_extent(trans, &iter, k, s);
- bch2_trans_iter_exit(trans, &iter);
- if (ret)
+ if (bkey_eq(bp_pos, POS_MAX))
break;
- bp_offset++;
+ bp_pos = bpos_nosnap_successor(bp_pos);
}
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
return ret;
}
static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
- struct btree_trans trans;
- struct bch_stripe *v = &s->key.v;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
+ ret = bch2_btree_write_buffer_flush_sync(trans);
+ if (ret)
+ goto err;
for (i = 0; i < nr_data; i++) {
- ret = ec_stripe_update_bucket(&trans, s, i);
+ ret = ec_stripe_update_bucket(trans, s, i);
if (ret)
break;
}
+err:
+ bch2_trans_put(trans);
+ return ret;
+}
- bch2_trans_exit(&trans);
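+/*
+ * Zero out the unwritten tail of an erasure coded bucket, in memory and on
+ * disk, so that parity is computed over fully-defined data:
+ */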
+static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
+ struct ec_stripe_new *s,
+ unsigned block,
+ struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ unsigned offset = ca->mi.bucket_size - ob->sectors_free;
+ int ret;
- return ret;
+ if (!bch2_dev_get_ioref(ca, WRITE)) {
+ s->err = -BCH_ERR_erofs_no_writes;
+ return;
+ }
+
+ memset(s->new_stripe.data[block] + (offset << 9),
+ 0,
+ ob->sectors_free << 9);
+
+ ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
+ ob->bucket * ca->mi.bucket_size + offset,
+ ob->sectors_free,
+ GFP_KERNEL, 0);
+
+ percpu_ref_put(&ca->io_ref);
+
+ if (ret)
+ s->err = ret;
+}
+
+void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
+{
+ if (s->idx)
+ bch2_stripe_close(c, s);
+ kfree(s);
}
/*
{
struct bch_fs *c = s->c;
struct open_bucket *ob;
- struct stripe *m;
- struct bch_stripe *v = &s->new_stripe.key.v;
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
int ret;
closure_sync(&s->iodone);
+ if (!s->err) {
+ for (i = 0; i < nr_data; i++)
+ if (s->blocks[i]) {
+ ob = c->open_buckets + s->blocks[i];
+
+ if (ob->sectors_free)
+ zero_out_rest_of_ec_bucket(c, s, i, ob);
+ }
+ }
+
if (s->err) {
- if (s->err != -EROFS)
+ if (!bch2_err_matches(s->err, EROFS))
bch_err(c, "error creating stripe: error writing data buckets");
goto err;
}
}
for (i = 0; i < nr_data; i++)
- if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
+ if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
swap(s->new_stripe.data[i],
s->existing_stripe.data[i]);
}
BUG_ON(!s->allocated);
-
- if (!percpu_ref_tryget_live(&c->writes))
- goto err;
+ BUG_ON(!s->idx);
ec_generate_ec(&s->new_stripe);
if (ec_nr_failed(&s->new_stripe)) {
bch_err(c, "error creating stripe: error writing redundancy buckets");
- goto err_put_writes;
+ goto err;
}
- ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
- s->have_existing_stripe
- ? ec_stripe_bkey_update(&trans, &s->new_stripe.key, &s->res)
- : ec_stripe_bkey_insert(&trans, &s->new_stripe.key, &s->res));
+ ret = bch2_trans_do(c, &s->res, NULL,
+ BCH_TRANS_COMMIT_no_check_rw|
+ BCH_TRANS_COMMIT_no_enospc,
+ ec_stripe_key_update(trans,
+ bkey_i_to_stripe(&s->new_stripe.key),
+ !s->have_existing_stripe));
+ bch_err_msg(c, ret, "creating stripe key");
if (ret) {
- bch_err(c, "error creating stripe: error creating stripe key");
- goto err_put_writes;
+ goto err;
}
ret = ec_stripe_update_extents(c, &s->new_stripe);
+ bch_err_msg(c, ret, "error updating extents");
if (ret)
- bch_err(c, "error creating stripe: error updating pointers: %s",
- bch2_err_str(ret));
-
- spin_lock(&c->ec_stripes_heap_lock);
- m = genradix_ptr(&c->stripes, s->new_stripe.key.k.p.offset);
-
- BUG_ON(m->on_heap);
- bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
- spin_unlock(&c->ec_stripes_heap_lock);
-err_put_writes:
- percpu_ref_put(&c->writes);
+ goto err;
err:
bch2_disk_reservation_put(c, &s->res);
}
}
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_del(&s->list);
+ mutex_unlock(&c->ec_stripe_new_lock);
+ wake_up(&c->ec_stripe_new_wait);
+
ec_stripe_buf_exit(&s->existing_stripe);
ec_stripe_buf_exit(&s->new_stripe);
closure_debug_destroy(&s->iodone);
- kfree(s);
+
+ ec_stripe_new_put(c, s, STRIPE_REF_stripe);
+}
+
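+/*
+ * Find a new stripe whose data writes have completed (io ref at zero) and
+ * which is ready to have its stripe key created:
+ */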
+static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
+{
+ struct ec_stripe_new *s;
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_for_each_entry(s, &c->ec_stripe_new_list, list)
+ if (!atomic_read(&s->ref[STRIPE_REF_io]))
+ goto out;
+ s = NULL;
+out:
+ mutex_unlock(&c->ec_stripe_new_lock);
+
+ return s;
}
static void ec_stripe_create_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work,
struct bch_fs, ec_stripe_create_work);
- struct ec_stripe_new *s, *n;
-restart:
- mutex_lock(&c->ec_stripe_new_lock);
- list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
- if (!atomic_read(&s->pin)) {
- list_del(&s->list);
- mutex_unlock(&c->ec_stripe_new_lock);
- ec_stripe_create(s);
- goto restart;
- }
- mutex_unlock(&c->ec_stripe_new_lock);
+ struct ec_stripe_new *s;
+
+ while ((s = get_pending_stripe(c)))
+ ec_stripe_create(s);
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
-static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
+void bch2_ec_do_stripe_creates(struct bch_fs *c)
{
- BUG_ON(atomic_read(&s->pin) <= 0);
+ bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
- if (atomic_dec_and_test(&s->pin)) {
- BUG_ON(!s->pending);
- queue_work(system_long_wq, &c->ec_stripe_create_work);
- }
+ if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
list_add(&s->list, &c->ec_stripe_new_list);
mutex_unlock(&c->ec_stripe_new_lock);
- ec_stripe_new_put(c, s);
-}
-
-/* have a full bucket - hand it off to be erasure coded: */
-void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
-{
- struct ec_stripe_new *s = ob->ec;
-
- if (ob->sectors_free)
- s->err = -1;
-
- ec_stripe_new_put(c, s);
+ ec_stripe_new_put(c, s, STRIPE_REF_io);
}
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
if (!ob)
return NULL;
+ BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
+
ca = bch_dev_bkey_exists(c, ob->dev);
offset = ca->mi.bucket_size - ob->sectors_free;
static unsigned pick_blocksize(struct bch_fs *c,
struct bch_devs_mask *devs)
{
- struct bch_dev *ca;
- unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
+ unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
struct {
unsigned nr, size;
} cur = { 0, 0 }, best = { 0, 0 };
- for_each_member_device_rcu(ca, c, i, devs)
+ for_each_member_device_rcu(c, ca, devs)
sizes[nr++] = ca->mi.bucket_size;
sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
- for (i = 0; i < nr; i++) {
+ for (unsigned i = 0; i < nr; i++) {
if (sizes[i] != cur.size) {
if (cur.nr > best.nr)
best = cur;
}
static void ec_stripe_key_init(struct bch_fs *c,
- struct bkey_i_stripe *s,
+ struct bkey_i *k,
unsigned nr_data,
unsigned nr_parity,
unsigned stripe_size)
{
+ struct bkey_i_stripe *s = bkey_stripe_init(k);
unsigned u64s;
- bkey_stripe_init(&s->k_i);
s->v.sectors = cpu_to_le16(stripe_size);
s->v.algorithm = 0;
s->v.nr_blocks = nr_data + nr_parity;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
mutex_init(&s->lock);
closure_init(&s->iodone, NULL);
- atomic_set(&s->pin, 1);
+ atomic_set(&s->ref[STRIPE_REF_stripe], 1);
+ atomic_set(&s->ref[STRIPE_REF_io], 1);
s->c = c;
s->h = h;
s->nr_data = min_t(unsigned, h->nr_active_devs,
BCH_BKEY_PTRS_MAX) - h->redundancy;
s->nr_parity = h->redundancy;
- ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
- s->nr_parity, h->blocksize);
+ ec_stripe_key_init(c, &s->new_stripe.key,
+ s->nr_data, s->nr_parity, h->blocksize);
h->s = s;
return 0;
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
unsigned algo, unsigned redundancy,
- bool copygc)
+ enum bch_watermark watermark)
{
struct ec_stripe_head *h;
- struct bch_dev *ca;
- unsigned i;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return NULL;
mutex_init(&h->lock);
- mutex_lock(&h->lock);
+ BUG_ON(!mutex_trylock(&h->lock));
h->target = target;
h->algo = algo;
h->redundancy = redundancy;
- h->copygc = copygc;
+ h->watermark = watermark;
rcu_read_lock();
h->devs = target_rw_devs(c, BCH_DATA_user, target);
- for_each_member_device_rcu(ca, c, i, &h->devs)
+ for_each_member_device_rcu(c, ca, &h->devs)
if (!ca->mi.durability)
- __clear_bit(i, h->devs.d);
+ __clear_bit(ca->dev_idx, h->devs.d);
h->blocksize = pick_blocksize(c, &h->devs);
- for_each_member_device_rcu(ca, c, i, &h->devs)
+ for_each_member_device_rcu(c, ca, &h->devs)
if (ca->mi.bucket_size == h->blocksize)
h->nr_active_devs++;
rcu_read_unlock();
+
+ /*
+ * If we only have redundancy + 1 devices, we're better off with just
+ * replication:
+ */
+ if (h->nr_active_devs < h->redundancy + 2)
+ bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
+ h->nr_active_devs, h->redundancy + 2);
+
list_add(&h->list, &c->ec_stripe_head_list);
return h;
}
mutex_unlock(&h->lock);
}
-struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
- unsigned target,
- unsigned algo,
- unsigned redundancy,
- bool copygc)
+static struct ec_stripe_head *
+__bch2_ec_stripe_head_get(struct btree_trans *trans,
+ unsigned target,
+ unsigned algo,
+ unsigned redundancy,
+ enum bch_watermark watermark)
{
+ struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
+ int ret;
if (!redundancy)
return NULL;
- mutex_lock(&c->ec_stripe_head_lock);
+ ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (test_bit(BCH_FS_going_ro, &c->flags)) {
+ h = ERR_PTR(-BCH_ERR_erofs_no_writes);
+ goto found;
+ }
+
list_for_each_entry(h, &c->ec_stripe_head_list, list)
if (h->target == target &&
h->algo == algo &&
h->redundancy == redundancy &&
- h->copygc == copygc) {
- mutex_lock(&h->lock);
+ h->watermark == watermark) {
+ ret = bch2_trans_mutex_lock(trans, &h->lock);
+ if (ret)
+ h = ERR_PTR(ret);
goto found;
}
- h = ec_new_stripe_head_alloc(c, target, algo, redundancy, copygc);
+ h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
+ if (!IS_ERR_OR_NULL(h) &&
+ h->nr_active_devs < h->redundancy + 2) {
+ mutex_unlock(&h->lock);
+ h = NULL;
+ }
mutex_unlock(&c->ec_stripe_head_lock);
return h;
}
-static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
- struct closure *cl)
+static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
+ enum bch_watermark watermark, struct closure *cl)
{
+ struct bch_fs *c = trans->c;
struct bch_devs_mask devs = h->devs;
struct open_bucket *ob;
struct open_buckets buckets;
+ struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
bool have_cache = true;
int ret = 0;
- for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
- if (test_bit(i, h->s->blocks_gotten)) {
- __clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
- if (i < h->s->nr_data)
- nr_have_data++;
- else
- nr_have_parity++;
- }
+ BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
+ BUG_ON(v->nr_redundant != h->s->nr_parity);
+
+ for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
+ __clear_bit(v->ptrs[i].dev, devs.d);
+ if (i < h->s->nr_data)
+ nr_have_data++;
+ else
+ nr_have_parity++;
}
BUG_ON(nr_have_data > h->s->nr_data);
buckets.nr = 0;
if (nr_have_parity < h->s->nr_parity) {
- ret = bch2_bucket_alloc_set(c, &buckets,
+ ret = bch2_bucket_alloc_set_trans(trans, &buckets,
&h->parity_stripe,
&devs,
h->s->nr_parity,
&nr_have_parity,
- &have_cache,
- h->copygc
- ? RESERVE_movinggc
- : RESERVE_none,
- 0,
+ &have_cache, 0,
+ BCH_DATA_parity,
+ watermark,
cl);
open_bucket_for_each(c, &buckets, ob, i) {
BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
h->s->blocks[j] = buckets.v[i];
- h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
+ v->ptrs[j] = bch2_ob_ptr(c, ob);
__set_bit(j, h->s->blocks_gotten);
}
buckets.nr = 0;
if (nr_have_data < h->s->nr_data) {
- ret = bch2_bucket_alloc_set(c, &buckets,
+ ret = bch2_bucket_alloc_set_trans(trans, &buckets,
&h->block_stripe,
&devs,
h->s->nr_data,
&nr_have_data,
- &have_cache,
- h->copygc
- ? RESERVE_movinggc
- : RESERVE_none,
- 0,
+ &have_cache, 0,
+ BCH_DATA_user,
+ watermark,
cl);
open_bucket_for_each(c, &buckets, ob, i) {
BUG_ON(j >= h->s->nr_data);
h->s->blocks[j] = buckets.v[i];
- h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
+ v->ptrs[j] = bch2_ob_ptr(c, ob);
__set_bit(j, h->s->blocks_gotten);
}
if (may_create_new_stripe(c))
return -1;
- spin_lock(&c->ec_stripes_heap_lock);
+ mutex_lock(&c->ec_stripes_heap_lock);
for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
/* No blocks worth reusing, stripe will just be deleted: */
if (!h->data[heap_idx].blocks_nonempty)
continue;
stripe_idx = h->data[heap_idx].idx;
+
m = genradix_ptr(&c->stripes, stripe_idx);
if (m->algorithm == head->algo &&
m->nr_redundant == head->redundancy &&
m->sectors == head->blocksize &&
- m->blocks_nonempty < m->nr_blocks - m->nr_redundant) {
- bch2_stripes_heap_del(c, m, stripe_idx);
+ m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
+ bch2_try_open_stripe(c, head->s, stripe_idx)) {
ret = stripe_idx;
break;
}
}
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
return ret;
}
-static int __bch2_ec_stripe_head_reuse(struct bch_fs *c,
- struct ec_stripe_head *h)
+static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
{
+ struct bch_fs *c = trans->c;
+ struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+ struct bch_stripe *existing_v;
unsigned i;
s64 idx;
int ret;
+ /*
+	 * If we can't allocate a new stripe, and there are no stripes with
+	 * empty blocks for us to reuse, that means we have to wait on copygc:
+ */
idx = get_existing_stripe(c, h);
if (idx < 0)
- return -BCH_ERR_ENOSPC_stripe_reuse;
+ return -BCH_ERR_stripe_alloc_blocked;
- h->s->have_existing_stripe = true;
- ret = get_stripe_key(c, idx, &h->s->existing_stripe);
+ ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
if (ret) {
- bch2_fs_fatal_error(c, "error reading stripe key: %i", ret);
+ bch2_stripe_close(c, h->s);
+ if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
return ret;
}
- if (ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize)) {
- /*
- * this is a problem: we have deleted from the
- * stripes heap already
- */
- BUG();
+ existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
+
+ BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
+ h->s->nr_data = existing_v->nr_blocks -
+ existing_v->nr_redundant;
+
+ ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ return ret;
}
BUG_ON(h->s->existing_stripe.size != h->blocksize);
- BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);
+ BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
+
+ /*
+ * Free buckets we initially allocated - they might conflict with
+ * blocks from the stripe we're reusing:
+ */
+ for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
+ bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
+ h->s->blocks[i] = 0;
+ }
+ memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
+ memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
- for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
- if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
+ for (i = 0; i < existing_v->nr_blocks; i++) {
+ if (stripe_blockcount_get(existing_v, i)) {
__set_bit(i, h->s->blocks_gotten);
__set_bit(i, h->s->blocks_allocated);
}
ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
}
- bkey_copy(&h->s->new_stripe.key.k_i,
- &h->s->existing_stripe.key.k_i);
+ bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
+ h->s->have_existing_stripe = true;
return 0;
}
-static int __bch2_ec_stripe_head_reserve(struct bch_fs *c,
- struct ec_stripe_head *h)
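+/*
+ * Get a disk reservation for the new stripe, and claim a free slot in the
+ * stripes btree, scanning upwards from ec_stripe_hint:
+ */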
+static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
{
- return bch2_disk_reservation_get(c, &h->s->res,
- h->blocksize,
- h->s->nr_parity, 0);
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bpos min_pos = POS(0, 1);
+ struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
+ int ret;
+
+ if (!h->s->res.sectors) {
+ ret = bch2_disk_reservation_get(c, &h->s->res,
+ h->blocksize,
+ h->s->nr_parity,
+ BCH_DISK_RESERVATION_NOFAIL);
+ if (ret)
+ return ret;
+ }
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+ if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
+ if (start_pos.offset) {
+ start_pos = min_pos;
+ bch2_btree_iter_set_pos(&iter, start_pos);
+ continue;
+ }
+
+ ret = -BCH_ERR_ENOSPC_stripe_create;
+ break;
+ }
+
+ if (bkey_deleted(k.k) &&
+ bch2_try_open_stripe(c, h->s, k.k->p.offset))
+ break;
+ }
+
+ c->ec_stripe_hint = iter.pos.offset;
+
+ if (ret)
+ goto err;
+
+ ret = ec_stripe_mem_alloc(trans, &iter);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ goto err;
+ }
+
+ h->s->new_stripe.key.k.p = iter.pos;
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+err:
+ bch2_disk_reservation_put(c, &h->s->res);
+ goto out;
}
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
unsigned target,
unsigned algo,
unsigned redundancy,
- bool copygc,
+ enum bch_watermark watermark,
struct closure *cl)
{
+ struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
+ bool waiting = false;
int ret;
- bool needs_stripe_new;
- h = __bch2_ec_stripe_head_get(c, target, algo, redundancy, copygc);
- if (!h) {
- bch_err(c, "no stripe head");
- return NULL;
- }
+ h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
+ if (IS_ERR_OR_NULL(h))
+ return h;
- needs_stripe_new = !h->s;
- if (needs_stripe_new) {
- if (ec_new_stripe_alloc(c, h)) {
- ret = -ENOMEM;
+ if (!h->s) {
+ ret = ec_new_stripe_alloc(c, h);
+ if (ret) {
bch_err(c, "failed to allocate new stripe");
goto err;
}
-
- if (ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize))
- BUG();
}
- /*
- * Try reserve a new stripe before reusing an
- * existing stripe. This will prevent unnecessary
- * read amplification during write oriented workloads.
- */
- ret = 0;
- if (!h->s->allocated && !h->s->res.sectors && !h->s->have_existing_stripe)
- ret = __bch2_ec_stripe_head_reserve(c, h);
- if (ret && needs_stripe_new)
- ret = __bch2_ec_stripe_head_reuse(c, h);
- if (ret) {
- bch_err_ratelimited(c, "failed to get stripe: %s", bch2_err_str(ret));
+ if (h->s->allocated)
+ goto allocated;
+
+ if (h->s->have_existing_stripe)
+ goto alloc_existing;
+
+ /* First, try to allocate a full stripe: */
+ ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
+ __bch2_ec_stripe_head_reserve(trans, h);
+ if (!ret)
+ goto allocate_buf;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, ENOMEM))
goto err;
- }
- if (!h->s->allocated) {
- ret = new_stripe_alloc_buckets(c, h, cl);
- if (ret)
+ /*
+ * Not enough buckets available for a full stripe: we must reuse an
+ * existing stripe:
+ */
+ while (1) {
+ ret = __bch2_ec_stripe_head_reuse(trans, h);
+ if (!ret)
+ break;
+ if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
goto err;
- h->s->allocated = true;
+ if (watermark == BCH_WATERMARK_copygc) {
+ ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
+ __bch2_ec_stripe_head_reserve(trans, h);
+ if (ret)
+ goto err;
+ goto allocate_buf;
+ }
+
+ /* XXX freelist_wait? */
+ closure_wait(&c->freelist_wait, cl);
+ waiting = true;
}
- return h;
+ if (waiting)
+ closure_wake_up(&c->freelist_wait);
+alloc_existing:
+ /*
+ * Retry allocating buckets, with the watermark for this
+ * particular write:
+ */
+ ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
+ if (ret)
+ goto err;
+allocate_buf:
+ ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
+ if (ret)
+ goto err;
+
+ h->s->allocated = true;
+allocated:
+ BUG_ON(!h->s->idx);
+ BUG_ON(!h->s->new_stripe.data[0]);
+ BUG_ON(trans->restarted);
+ return h;
err:
bch2_ec_stripe_head_put(c, h);
return ERR_PTR(ret);
}
-void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
+static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
struct ec_stripe_head *h;
struct open_bucket *ob;
mutex_lock(&c->ec_stripe_head_lock);
list_for_each_entry(h, &c->ec_stripe_head_list, list) {
-
mutex_lock(&h->lock);
if (!h->s)
goto unlock;
- for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
+ if (!ca)
+ goto found;
+
+ for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
if (!h->s->blocks[i])
continue;
}
goto unlock;
found:
- h->s->err = -EROFS;
+ h->s->err = -BCH_ERR_erofs_no_writes;
ec_stripe_set_pending(c, h);
unlock:
mutex_unlock(&h->lock);
mutex_unlock(&c->ec_stripe_head_lock);
}
-void bch2_stripes_heap_start(struct bch_fs *c)
+void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
- struct genradix_iter iter;
- struct stripe *m;
+ __bch2_ec_stop(c, ca);
+}
- genradix_for_each(&c->stripes, iter, m)
- if (m->alive)
- bch2_stripes_heap_insert(c, m, iter.pos);
+void bch2_fs_ec_stop(struct bch_fs *c)
+{
+ __bch2_ec_stop(c, NULL);
}
-int bch2_stripes_read(struct bch_fs *c)
+static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- const struct bch_stripe *s;
- struct stripe *m;
- unsigned i;
- int ret;
+ bool ret;
- bch2_trans_init(&trans, c, 0, 0);
+ mutex_lock(&c->ec_stripe_new_lock);
+ ret = list_empty(&c->ec_stripe_new_list);
+ mutex_unlock(&c->ec_stripe_new_lock);
- for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k, ret) {
- if (k.k->type != KEY_TYPE_stripe)
- continue;
+ return ret;
+}
- ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
- if (ret)
- break;
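+/* Wait for in-flight stripe creates to drain: */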
+void bch2_fs_ec_flush(struct bch_fs *c)
+{
+ wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
+}
- s = bkey_s_c_to_stripe(k).v;
+int bch2_stripes_read(struct bch_fs *c)
+{
+ int ret = bch2_trans_run(c,
+ for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ({
+ if (k.k->type != KEY_TYPE_stripe)
+ continue;
- m = genradix_ptr(&c->stripes, k.k->p.offset);
- m->alive = true;
- m->sectors = le16_to_cpu(s->sectors);
- m->algorithm = s->algorithm;
- m->nr_blocks = s->nr_blocks;
- m->nr_redundant = s->nr_redundant;
- m->blocks_nonempty = 0;
-
- for (i = 0; i < s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(s, i);
-
- spin_lock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_update(c, m, k.k->p.offset);
- spin_unlock(&c->ec_stripes_heap_lock);
- }
- bch2_trans_iter_exit(&trans, &iter);
+ ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
+ if (ret)
+ break;
- bch2_trans_exit(&trans);
+ const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- if (ret)
- bch_err(c, "error reading stripes: %i", ret);
+ struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
+ m->sectors = le16_to_cpu(s->sectors);
+ m->algorithm = s->algorithm;
+ m->nr_blocks = s->nr_blocks;
+ m->nr_redundant = s->nr_redundant;
+ m->blocks_nonempty = 0;
+
+ for (unsigned i = 0; i < s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+ bch2_stripes_heap_insert(c, m, k.k->p.offset);
+ 0;
+ })));
+ bch_err_fn(c, ret);
return ret;
}
struct stripe *m;
size_t i;
- spin_lock(&c->ec_stripes_heap_lock);
- for (i = 0; i < min_t(size_t, h->used, 20); i++) {
+ mutex_lock(&c->ec_stripes_heap_lock);
+ for (i = 0; i < min_t(size_t, h->used, 50); i++) {
m = genradix_ptr(&c->stripes, h->data[i].idx);
- prt_printf(out, "%zu %u/%u+%u\n", h->data[i].idx,
+ prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
h->data[i].blocks_nonempty,
m->nr_blocks - m->nr_redundant,
m->nr_redundant);
+ if (bch2_stripe_is_open(c, h->data[i].idx))
+ prt_str(out, " open");
+ prt_newline(out);
}
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->ec_stripe_head_lock);
list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- prt_printf(out, "target %u algo %u redundancy %u:\n",
- h->target, h->algo, h->redundancy);
+ prt_printf(out, "target %u algo %u redundancy %u %s:\n",
+ h->target, h->algo, h->redundancy,
+ bch2_watermarks[h->watermark]);
if (h->s)
- prt_printf(out, "\tpending: blocks %u+%u allocated %u\n",
- h->s->nr_data, h->s->nr_parity,
+ prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
+ h->s->idx, h->s->nr_data, h->s->nr_parity,
bitmap_weight(h->s->blocks_allocated,
h->s->nr_data));
}
mutex_unlock(&c->ec_stripe_head_lock);
+ prt_printf(out, "in flight:\n");
+
mutex_lock(&c->ec_stripe_new_lock);
list_for_each_entry(s, &c->ec_stripe_new_list, list) {
- prt_printf(out, "\tin flight: blocks %u+%u pin %u\n",
- s->nr_data, s->nr_parity,
- atomic_read(&s->pin));
+ prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
+ s->idx, s->nr_data, s->nr_parity,
+ atomic_read(&s->ref[STRIPE_REF_io]),
+ atomic_read(&s->ref[STRIPE_REF_stripe]),
+ bch2_watermarks[s->h->watermark]);
}
mutex_unlock(&c->ec_stripe_new_lock);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
struct ec_stripe_head *h;
+ unsigned i;
while (1) {
mutex_lock(&c->ec_stripe_head_lock);
if (!h)
break;
- BUG_ON(h->s);
+ if (h->s) {
+ for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
+ BUG_ON(h->s->blocks[i]);
+
+ kfree(h->s);
+ }
kfree(h);
}
void bch2_fs_ec_init_early(struct bch_fs *c)
{
+ spin_lock_init(&c->ec_stripes_new_lock);
+ mutex_init(&c->ec_stripes_heap_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_head_list);
+ mutex_init(&c->ec_stripe_head_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_new_list);
+ mutex_init(&c->ec_stripe_new_lock);
+ init_waitqueue_head(&c->ec_stripe_new_wait);
+
INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}