X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fec.c;h=8646856e4539eae46fed2634154ebb340f841a46;hb=bd9e0153342c51390ec655b4e78eda1aa1c32a84;hp=aa67af8b98160b0571ebc2bbbec8985880924e6d;hpb=33c91e2ff4e228cb618ca22d642a34ec1c2cf0ef;p=bcachefs-tools-debian

diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c
index aa67af8..8646856 100644
--- a/libbcachefs/ec.c
+++ b/libbcachefs/ec.c
@@ -4,16 +4,21 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
+#include "bkey_buf.h"
 #include "bset.h"
 #include "btree_gc.h"
 #include "btree_update.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
+#include "checksum.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "error.h"
-#include "io.h"
+#include "io_read.h"
 #include "keylist.h"
 #include "recovery.h"
+#include "replicas.h"
 #include "super-io.h"
 #include "util.h"
@@ -100,179 +105,191 @@ struct ec_bio {
 
 /* Stripes btree keys: */
 
-const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
+int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
+			enum bkey_invalid_flags flags,
+			struct printbuf *err)
 {
 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
 
-	if (k.k->p.inode)
-		return "invalid stripe key";
+	if (bkey_eq(k.k->p, POS_MIN)) {
+		prt_printf(err, "stripe at POS_MIN");
+		return -BCH_ERR_invalid_bkey;
+	}
 
-	if (bkey_val_bytes(k.k) < sizeof(*s))
-		return "incorrect value size";
+	if (k.k->p.inode) {
+		prt_printf(err, "nonzero inode field");
+		return -BCH_ERR_invalid_bkey;
+	}
 
-	if (bkey_val_bytes(k.k) < sizeof(*s) ||
-	    bkey_val_u64s(k.k) < stripe_val_u64s(s))
-		return "incorrect value size";
+	if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
+		prt_printf(err, "incorrect value size (%zu < %u)",
+			   bkey_val_u64s(k.k), stripe_val_u64s(s));
+		return -BCH_ERR_invalid_bkey;
+	}
 
-	return bch2_bkey_ptrs_invalid(c, k);
+	return bch2_bkey_ptrs_invalid(c, k, flags, err);
 }
 
 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 			 struct bkey_s_c k)
 {
 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
-	unsigned i;
+	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
 
-	pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
+	prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
 	       s->algorithm,
 	       le16_to_cpu(s->sectors),
-	       s->nr_blocks - s->nr_redundant,
+	       nr_data,
 	       s->nr_redundant,
 	       s->csum_type,
 	       1U << s->csum_granularity_bits);
 
-	for (i = 0; i < s->nr_blocks; i++)
-		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
-		       (u64) s->ptrs[i].offset,
-		       stripe_blockcount_get(s, i));
-
-	bch2_bkey_ptrs_to_text(out, c, k);
+	for (i = 0; i < s->nr_blocks; i++) {
+		const struct bch_extent_ptr *ptr = s->ptrs + i;
+		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		u32 offset;
+		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+		prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
+		if (i < nr_data)
+			prt_printf(out, "#%u", stripe_blockcount_get(s, i));
+		if (ptr_stale(ca, ptr))
+			prt_printf(out, " stale");
+	}
 }
 
-static int ptr_matches_stripe(struct bch_fs *c,
-			      struct bch_stripe *v,
-			      const struct bch_extent_ptr *ptr)
+/* returns blocknr in stripe that we matched: */
+static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
+						struct bkey_s_c k, unsigned *block)
 {
-	unsigned i;
-
-	for (i = 0; i < v->nr_blocks - v->nr_redundant; i++) {
-		const struct bch_extent_ptr *ptr2 = v->ptrs + i;
-
-		if (ptr->dev == ptr2->dev &&
-		    ptr->gen == ptr2->gen &&
-		    ptr->offset >= ptr2->offset &&
-		    ptr->offset < ptr2->offset + le16_to_cpu(v->sectors))
-			return i;
-	}
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const struct bch_extent_ptr *ptr;
+	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+
+	bkey_for_each_ptr(ptrs, ptr)
+		for (i = 0; i < nr_data; i++)
+			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
+						      le16_to_cpu(s->sectors))) {
+				*block = i;
+				return ptr;
+			}
 
-	return -1;
+	return NULL;
 }
 
-static int extent_matches_stripe(struct bch_fs *c,
-				 struct bch_stripe *v,
-				 struct bkey_s_c k)
+static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
 {
-	struct bkey_s_c_extent e;
-	const struct bch_extent_ptr *ptr;
-	int idx;
-
-	if (!bkey_extent_is_data(k.k))
-		return -1;
+	switch (k.k->type) {
+	case KEY_TYPE_extent: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const union bch_extent_entry *entry;
 
-	e = bkey_s_c_to_extent(k);
+		extent_for_each_entry(e, entry)
+			if (extent_entry_type(entry) ==
+			    BCH_EXTENT_ENTRY_stripe_ptr &&
+			    entry->stripe_ptr.idx == idx)
+				return true;
 
-	extent_for_each_ptr(e, ptr) {
-		idx = ptr_matches_stripe(c, v, ptr);
-		if (idx >= 0)
-			return idx;
+		break;
+	}
 	}
 
-	return -1;
+	return false;
 }
 
-static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
-{
-	struct bkey_s_c_extent e;
-	const union bch_extent_entry *entry;
-
-	if (!bkey_extent_is_data(k.k))
-		return false;
-
-	e = bkey_s_c_to_extent(k);
+/* Stripe bufs: */
 
-	extent_for_each_entry(e, entry)
-		if (extent_entry_type(entry) ==
-		    BCH_EXTENT_ENTRY_stripe_ptr &&
-		    entry->stripe_ptr.idx == idx)
-			return true;
+static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
+{
+	if (buf->key.k.type == KEY_TYPE_stripe) {
+		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
+		unsigned i;
 
-	return false;
+		for (i = 0; i < s->v.nr_blocks; i++) {
+			kvpfree(buf->data[i], buf->size << 9);
+			buf->data[i] = NULL;
+		}
+	}
 }
 
-static void ec_stripe_key_init(struct bch_fs *c,
-			       struct bkey_i_stripe *s,
-			       struct open_buckets *blocks,
-			       struct open_buckets *parity,
-			       unsigned stripe_size)
+/* XXX: this is a non-mempoolified memory allocation: */
+static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
+			      unsigned offset, unsigned size)
 {
-	struct open_bucket *ob;
-	unsigned i, u64s;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+	unsigned csum_granularity = 1U << v->csum_granularity_bits;
+	unsigned end = offset + size;
+	unsigned i;
 
-	bkey_stripe_init(&s->k_i);
-	s->v.sectors			= cpu_to_le16(stripe_size);
-	s->v.algorithm			= 0;
-	s->v.nr_blocks			= parity->nr + blocks->nr;
-	s->v.nr_redundant		= parity->nr;
-	s->v.csum_granularity_bits	= ilog2(c->sb.encoded_extent_max);
-	s->v.csum_type			= BCH_CSUM_CRC32C;
-	s->v.pad			= 0;
+	BUG_ON(end > le16_to_cpu(v->sectors));
 
-	open_bucket_for_each(c, blocks, ob, i)
-		s->v.ptrs[i]			= ob->ptr;
+	offset	= round_down(offset, csum_granularity);
+	end	= min_t(unsigned, le16_to_cpu(v->sectors),
+			round_up(end, csum_granularity));
 
-	open_bucket_for_each(c, parity, ob, i)
-		s->v.ptrs[blocks->nr + i]	= ob->ptr;
+	buf->offset	= offset;
+	buf->size	= end - offset;
 
-	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
-		BUG_ON(1 << s->v.csum_granularity_bits >=
-		       le16_to_cpu(s->v.sectors) ||
-		       s->v.csum_granularity_bits == U8_MAX);
-		s->v.csum_granularity_bits++;
+	memset(buf->valid, 0xFF, sizeof(buf->valid));
+
+	for (i = 0; i < v->nr_blocks; i++) {
+		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+		if (!buf->data[i])
+			goto err;
 	}
 
-	set_bkey_val_u64s(&s->k, u64s);
+	return 0;
+err:
+	ec_stripe_buf_exit(buf);
+	return -BCH_ERR_ENOMEM_stripe_buf;
 }
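For reference: ec_stripe_buf_init() above widens the requested sector range so that both ends land on checksum-granularity boundaries, clamping the end to the stripe size. A standalone sketch of just that arithmetic, with illustrative constants, not part of the patch:

#include <assert.h>
#include <stdio.h>

static unsigned round_down_u(unsigned n, unsigned g) { return n - (n % g); }
static unsigned round_up_u(unsigned n, unsigned g)   { return round_down_u(n + g - 1, g); }

int main(void)
{
	unsigned csum_granularity_bits = 3;	/* 8-sector checksum chunks (made up) */
	unsigned g = 1U << csum_granularity_bits;
	unsigned stripe_sectors = 128;		/* le16_to_cpu(v->sectors) analogue */

	unsigned offset = 13, size = 20;	/* requested range */
	unsigned end = offset + size;

	assert(end <= stripe_sectors);		/* the BUG_ON() in the patch */
	offset = round_down_u(offset, g);	/* 13 -> 8 */
	end = round_up_u(end, g);		/* 33 -> 40 */
	if (end > stripe_sectors)
		end = stripe_sectors;		/* min_t() clamp */

	printf("buf covers sectors [%u, %u)\n", offset, end);
	return 0;
}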
 
 /* Checksumming: */
 
-static void ec_generate_checksums(struct ec_stripe_buf *buf)
+static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
+					 unsigned block, unsigned offset)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
-	unsigned csums_per_device = stripe_csums_per_device(v);
-	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
-	unsigned i, j;
+	unsigned end = buf->offset + buf->size;
+	unsigned len = min(csum_granularity, end - offset);
+
+	BUG_ON(offset >= end);
+	BUG_ON(offset < buf->offset);
+	BUG_ON(offset & (csum_granularity - 1));
+	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
+	       (len & (csum_granularity - 1)));
+
+	return bch2_checksum(NULL, v->csum_type,
+			     null_nonce(),
+			     buf->data[block] + ((offset - buf->offset) << 9),
+			     len << 9);
+}
+
+static void ec_generate_checksums(struct ec_stripe_buf *buf)
+{
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+	unsigned i, j, csums_per_device = stripe_csums_per_device(v);
 
-	if (!csum_bytes)
+	if (!v->csum_type)
 		return;
 
 	BUG_ON(buf->offset);
 	BUG_ON(buf->size != le16_to_cpu(v->sectors));
 
-	for (i = 0; i < v->nr_blocks; i++) {
-		for (j = 0; j < csums_per_device; j++) {
-			unsigned offset = j << v->csum_granularity_bits;
-			unsigned len = min(csum_granularity, buf->size - offset);
-
-			struct bch_csum csum =
-				bch2_checksum(NULL, v->csum_type,
-					      null_nonce(),
-					      buf->data[i] + (offset << 9),
-					      len << 9);
-
-			memcpy(stripe_csum(v, i, j), &csum, csum_bytes);
-		}
-	}
+	for (i = 0; i < v->nr_blocks; i++)
+		for (j = 0; j < csums_per_device; j++)
+			stripe_csum_set(v, i, j,
+				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
 }
 
 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
-	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
 	unsigned i;
 
-	if (!csum_bytes)
+	if (!v->csum_type)
 		return;
 
 	for (i = 0; i < v->nr_blocks; i++) {
@@ -285,21 +302,19 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 		while (offset < end) {
 			unsigned j = offset >> v->csum_granularity_bits;
 			unsigned len = min(csum_granularity, end - offset);
-			struct bch_csum csum;
+			struct bch_csum want = stripe_csum_get(v, i, j);
+			struct bch_csum got = ec_block_checksum(buf, i, offset);
 
-			BUG_ON(offset & (csum_granularity - 1));
-			BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
-			       ((offset + len) & (csum_granularity - 1)));
+			if (bch2_crc_cmp(want, got)) {
+				struct printbuf buf2 = PRINTBUF;
 
-			csum = bch2_checksum(NULL, v->csum_type,
-					     null_nonce(),
-					     buf->data[i] + ((offset - buf->offset) << 9),
-					     len << 9);
+				bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));
 
-			if (memcmp(stripe_csum(v, i, j), &csum, csum_bytes)) {
-				__bcache_io_error(c,
-					"checksum error while doing reconstruct read (%u:%u)",
-					i, j);
+				bch_err_ratelimited(c,
+					"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
+					(void *) _RET_IP_, i, j, v->csum_type,
+					want.lo, got.lo, buf2.buf);
+				printbuf_exit(&buf2);
 				clear_bit(i, buf->valid);
 				break;
 			}
@@ -313,32 +328,29 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 
 static void ec_generate_ec(struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
 	unsigned bytes = le16_to_cpu(v->sectors) << 9;
 
 	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
 }
 
-static unsigned __ec_nr_failed(struct ec_stripe_buf *buf, unsigned nr)
-{
-	return nr - bitmap_weight(buf->valid, nr);
-}
-
 static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
 {
-	return __ec_nr_failed(buf, buf->key.v.nr_blocks);
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+
+	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
 }
 
 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
-	unsigned i, failed[EC_STRIPE_MAX], nr_failed = 0;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
 	unsigned bytes = buf->size << 9;
 
 	if (ec_nr_failed(buf) > v->nr_redundant) {
-		__bcache_io_error(c,
+		bch_err_ratelimited(c,
 			"error doing reconstruct read: unable to read enough blocks");
 		return -1;
 	}
@@ -356,38 +368,66 @@ static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
 
 static void ec_block_endio(struct bio *bio)
 {
 	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
+	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
+	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
 	struct bch_dev *ca = ec_bio->ca;
 	struct closure *cl = bio->bi_private;
 
-	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding"))
+	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
+			       bio_data_dir(bio) ? "write" : "read",
+			       bch2_blk_status_to_str(bio->bi_status)))
 		clear_bit(ec_bio->idx, ec_bio->buf->valid);
 
+	if (ptr_stale(ca, ptr)) {
+		bch_err_ratelimited(ca->fs,
+			"error %s stripe: stale pointer after io",
+			bio_data_dir(bio) == READ ? "reading from" : "writing to");
+		clear_bit(ec_bio->idx, ec_bio->buf->valid);
+	}
+
 	bio_put(&ec_bio->bio);
 	percpu_ref_put(&ca->io_ref);
 	closure_put(cl);
 }
 
 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
-			unsigned rw, unsigned idx, struct closure *cl)
+			blk_opf_t opf, unsigned idx, struct closure *cl)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned offset = 0, bytes = buf->size << 9;
 	struct bch_extent_ptr *ptr = &v->ptrs[idx];
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
+		? BCH_DATA_user
+		: BCH_DATA_parity;
+	int rw = op_is_write(opf);
+
+	if (ptr_stale(ca, ptr)) {
+		bch_err_ratelimited(c,
+			"error %s stripe: stale pointer",
+			rw == READ ? "reading from" : "writing to");
+		clear_bit(idx, buf->valid);
+		return;
+	}
 
 	if (!bch2_dev_get_ioref(ca, rw)) {
 		clear_bit(idx, buf->valid);
 		return;
 	}
 
+	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
+
 	while (offset < bytes) {
-		unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
+		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
 					   DIV_ROUND_UP(bytes, PAGE_SIZE));
 		unsigned b = min_t(size_t, bytes - offset,
 				   nr_iovecs << PAGE_SHIFT);
 		struct ec_bio *ec_bio;
 
-		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
+		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
+						       nr_iovecs,
+						       opf,
+						       GFP_KERNEL,
 						       &c->ec_bioset),
 				      struct ec_bio, bio);
 
@@ -395,9 +435,6 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 		ec_bio->buf			= buf;
 		ec_bio->idx			= idx;
 
-		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
-		bio_set_op_attrs(&ec_bio->bio, rw, 0);
-
 		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
 		ec_bio->bio.bi_end_io		= ec_block_endio;
 		ec_bio->bio.bi_private		= cl;
@@ -415,93 +452,86 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 	percpu_ref_put(&ca->io_ref);
 }
 
+static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
+				struct ec_stripe_buf *stripe)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret;
+
+	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+			       POS(0, idx), BTREE_ITER_SLOTS);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+	if (k.k->type != KEY_TYPE_stripe) {
+		ret = -ENOENT;
+		goto err;
+	}
+	bkey_reassemble(&stripe->key, k);
+err:
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+}
+
+static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
+{
+	return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe));
+}
+
 /* recovery read path: */
 int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
 	struct ec_stripe_buf *buf;
 	struct closure cl;
-	struct bkey_s_c k;
 	struct bch_stripe *v;
-	unsigned stripe_idx;
-	unsigned offset, end;
-	unsigned i, nr_data, csum_granularity;
-	int ret = 0, idx;
+	unsigned i, offset;
+	int ret = 0;
 
 	closure_init_stack(&cl);
 
-	BUG_ON(!rbio->pick.idx ||
-	       rbio->pick.idx - 1 >= rbio->pick.ec_nr);
+	BUG_ON(!rbio->pick.has_ec);
 
-	stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx;
-
-	buf = kzalloc(sizeof(*buf), GFP_NOIO);
+	buf = kzalloc(sizeof(*buf), GFP_NOFS);
 	if (!buf)
-		return -ENOMEM;
-
-	bch2_trans_init(&trans, c, 0, 0);
+		return -BCH_ERR_ENOMEM_ec_read_extent;
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC,
-				   POS(0, stripe_idx),
-				   BTREE_ITER_SLOTS);
-	k = bch2_btree_iter_peek_slot(iter);
-	if (bkey_err(k) || k.k->type != KEY_TYPE_stripe) {
-		__bcache_io_error(c,
-			"error doing reconstruct read: stripe not found");
+	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
+	if (ret) {
+		bch_err_ratelimited(c,
+			"error doing reconstruct read: error %i looking up stripe", ret);
 		kfree(buf);
-		return bch2_trans_exit(&trans) ?: -EIO;
+		return -EIO;
 	}
 
-	bkey_reassemble(&buf->key.k_i, k);
-	bch2_trans_exit(&trans);
-
-	v = &buf->key.v;
-
-	nr_data = v->nr_blocks - v->nr_redundant;
+	v = &bkey_i_to_stripe(&buf->key)->v;
 
-	idx = ptr_matches_stripe(c, v, &rbio->pick.ptr);
-	BUG_ON(idx < 0);
-
-	csum_granularity = 1U << v->csum_granularity_bits;
-
-	offset	= rbio->bio.bi_iter.bi_sector - v->ptrs[idx].offset;
-	end	= offset + bio_sectors(&rbio->bio);
-
-	BUG_ON(end > le16_to_cpu(v->sectors));
-
-	buf->offset	= round_down(offset, csum_granularity);
-	buf->size	= min_t(unsigned, le16_to_cpu(v->sectors),
-				round_up(end, csum_granularity)) - buf->offset;
-
-	for (i = 0; i < v->nr_blocks; i++) {
-		buf->data[i] = kmalloc(buf->size << 9, GFP_NOIO);
-		if (!buf->data[i]) {
-			ret = -ENOMEM;
-			goto err;
-		}
+	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
+		bch_err_ratelimited(c,
+			"error doing reconstruct read: pointer doesn't match stripe");
+		ret = -EIO;
+		goto err;
 	}
 
-	memset(buf->valid, 0xFF, sizeof(buf->valid));
-
-	for (i = 0; i < v->nr_blocks; i++) {
-		struct bch_extent_ptr *ptr = v->ptrs + i;
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
+	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
+		bch_err_ratelimited(c,
+			"error doing reconstruct read: read is bigger than stripe");
+		ret = -EIO;
+		goto err;
+	}
 
-		if (ptr_stale(ca, ptr)) {
-			__bcache_io_error(c,
-				"error doing reconstruct read: stale pointer");
-			clear_bit(i, buf->valid);
-			continue;
-		}
+	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
+	if (ret)
+		goto err;
 
+	for (i = 0; i < v->nr_blocks; i++)
 		ec_block_io(c, buf, REQ_OP_READ, i, &cl);
-	}
 
 	closure_sync(&cl);
 
 	if (ec_nr_failed(buf) > v->nr_redundant) {
-		__bcache_io_error(c,
+		bch_err_ratelimited(c,
 			"error doing reconstruct read: unable to read enough blocks");
 		ret = -EIO;
 		goto err;
@@ -514,10 +544,9 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 		goto err;
 
 	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
-		      buf->data[idx] + ((offset - buf->offset) << 9));
+		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
 err:
-	for (i = 0; i < v->nr_blocks; i++)
-		kfree(buf->data[i]);
+	ec_stripe_buf_exit(buf);
 	kfree(buf);
 	return ret;
 }
@@ -530,53 +559,108 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 
 	if (idx >= h->size) {
 		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
-		spin_lock(&c->ec_stripes_heap_lock);
+		mutex_lock(&c->ec_stripes_heap_lock);
 		if (n.size > h->size) {
 			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
 			n.used = h->used;
 			swap(*h, n);
 		}
-		spin_unlock(&c->ec_stripes_heap_lock);
+		mutex_unlock(&c->ec_stripes_heap_lock);
 
 		free_heap(&n);
 	}
 
-	if (!genradix_ptr_alloc(&c->stripes[0], idx, gfp))
-		return -ENOMEM;
+	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
+		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
 	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
-	    !genradix_ptr_alloc(&c->stripes[1], idx, gfp))
-		return -ENOMEM;
+	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
+		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
 	return 0;
 }
 
-static int ec_stripe_mem_alloc(struct bch_fs *c,
+static int ec_stripe_mem_alloc(struct btree_trans *trans,
 			       struct btree_iter *iter)
 {
-	size_t idx = iter->pos.offset;
-	int ret = 0;
-
-	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT))
-		return ret;
-
-	bch2_trans_unlock(iter->trans);
-	ret = -EINTR;
-
-	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
-		return ret;
-
-	return -ENOMEM;
+	return allocate_dropping_locks_errcode(trans,
+			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
 }
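The raid_gen()/raid_rec() calls used by ec_generate_ec() and ec_do_recov() above come from the linked raid library and handle multiple redundant blocks. For intuition only, here is a self-contained single-parity sketch of the same generate/recover idea using plain XOR; this is an illustration, not the library's implementation:

#include <stdio.h>
#include <string.h>

#define NR_DATA		3
#define BLOCK_BYTES	8

/* parity[j] = XOR of data[i][j]: what generation does for one redundant block */
static void xor_parity_gen(unsigned char data[NR_DATA][BLOCK_BYTES],
			   unsigned char parity[BLOCK_BYTES])
{
	memset(parity, 0, BLOCK_BYTES);
	for (int i = 0; i < NR_DATA; i++)
		for (int j = 0; j < BLOCK_BYTES; j++)
			parity[j] ^= data[i][j];
}

/* rebuild data[failed] from the parity and the surviving blocks */
static void xor_parity_rec(unsigned failed,
			   unsigned char data[NR_DATA][BLOCK_BYTES],
			   unsigned char parity[BLOCK_BYTES])
{
	memcpy(data[failed], parity, BLOCK_BYTES);
	for (int i = 0; i < NR_DATA; i++)
		if (i != failed)
			for (int j = 0; j < BLOCK_BYTES; j++)
				data[failed][j] ^= data[i][j];
}

int main(void)
{
	unsigned char data[NR_DATA][BLOCK_BYTES] = { "block0", "block1", "block2" };
	unsigned char parity[BLOCK_BYTES];

	xor_parity_gen(data, parity);
	memset(data[1], 0, BLOCK_BYTES);	/* lose one block */
	xor_parity_rec(1, data, parity);
	printf("recovered: %s\n", (char *) data[1]);	/* prints "block1" */
	return 0;
}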
+ */ + +static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx) +{ + unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new))); + struct ec_stripe_new *s; + + hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash) + if (s->idx == idx) + return true; + return false; +} + +static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx) +{ + bool ret = false; - bch2_trans_unlock(iter->trans); - ret = -EINTR; + spin_lock(&c->ec_stripes_new_lock); + ret = __bch2_stripe_is_open(c, idx); + spin_unlock(&c->ec_stripes_new_lock); - if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL)) - return ret; + return ret; +} + +static bool bch2_try_open_stripe(struct bch_fs *c, + struct ec_stripe_new *s, + u64 idx) +{ + bool ret; + + spin_lock(&c->ec_stripes_new_lock); + ret = !__bch2_stripe_is_open(c, idx); + if (ret) { + unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new))); + + s->idx = idx; + hlist_add_head(&s->hash, &c->ec_stripes_new[hash]); + } + spin_unlock(&c->ec_stripes_new_lock); - return -ENOMEM; + return ret; +} + +static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s) +{ + BUG_ON(!s->idx); + + spin_lock(&c->ec_stripes_new_lock); + hlist_del_init(&s->hash); + spin_unlock(&c->ec_stripes_new_lock); + + s->idx = 0; } -static ssize_t stripe_idx_to_delete(struct bch_fs *c) +/* Heap of all existing stripes, ordered by blocks_nonempty */ + +static u64 stripe_idx_to_delete(struct bch_fs *c) { ec_stripes_heap *h = &c->ec_stripes_heap; - return h->used && h->data[0].blocks_nonempty == 0 - ? h->data[0].idx : -1; + lockdep_assert_held(&c->ec_stripes_heap_lock); + + if (h->used && + h->data[0].blocks_nonempty == 0 && + !bch2_stripe_is_open(c, h->data[0].idx)) + return h->data[0].idx; + + return 0; } static inline int ec_stripes_heap_cmp(ec_stripes_heap *h, @@ -592,60 +676,34 @@ static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h, { struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap); - genradix_ptr(&c->stripes[0], h->data[i].idx)->heap_idx = i; + genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i; } static void heap_verify_backpointer(struct bch_fs *c, size_t idx) { ec_stripes_heap *h = &c->ec_stripes_heap; - struct stripe *m = genradix_ptr(&c->stripes[0], idx); + struct stripe *m = genradix_ptr(&c->stripes, idx); - BUG_ON(!m->alive); BUG_ON(m->heap_idx >= h->used); BUG_ON(h->data[m->heap_idx].idx != idx); } -void bch2_stripes_heap_update(struct bch_fs *c, - struct stripe *m, size_t idx) -{ - ec_stripes_heap *h = &c->ec_stripes_heap; - size_t i; - - if (m->alive) { - heap_verify_backpointer(c, idx); - - h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty; - - i = m->heap_idx; - heap_sift_up(h, i, ec_stripes_heap_cmp, - ec_stripes_heap_set_backpointer); - heap_sift_down(h, i, ec_stripes_heap_cmp, - ec_stripes_heap_set_backpointer); - - heap_verify_backpointer(c, idx); - } else { - bch2_stripes_heap_insert(c, m, idx); - } - - if (stripe_idx_to_delete(c) >= 0 && - !percpu_ref_is_dying(&c->writes)) - schedule_work(&c->ec_stripe_delete_work); -} - void bch2_stripes_heap_del(struct bch_fs *c, struct stripe *m, size_t idx) { + mutex_lock(&c->ec_stripes_heap_lock); heap_verify_backpointer(c, idx); - m->alive = false; heap_del(&c->ec_stripes_heap, m->heap_idx, ec_stripes_heap_cmp, ec_stripes_heap_set_backpointer); + mutex_unlock(&c->ec_stripes_heap_lock); } void bch2_stripes_heap_insert(struct bch_fs *c, struct stripe *m, size_t idx) { + mutex_lock(&c->ec_stripes_heap_lock); BUG_ON(heap_full(&c->ec_stripes_heap)); 
heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) { @@ -654,283 +712,503 @@ void bch2_stripes_heap_insert(struct bch_fs *c, }), ec_stripes_heap_cmp, ec_stripes_heap_set_backpointer); - m->alive = true; heap_verify_backpointer(c, idx); + mutex_unlock(&c->ec_stripes_heap_lock); +} + +void bch2_stripes_heap_update(struct bch_fs *c, + struct stripe *m, size_t idx) +{ + ec_stripes_heap *h = &c->ec_stripes_heap; + bool do_deletes; + size_t i; + + mutex_lock(&c->ec_stripes_heap_lock); + heap_verify_backpointer(c, idx); + + h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty; + + i = m->heap_idx; + heap_sift_up(h, i, ec_stripes_heap_cmp, + ec_stripes_heap_set_backpointer); + heap_sift_down(h, i, ec_stripes_heap_cmp, + ec_stripes_heap_set_backpointer); + + heap_verify_backpointer(c, idx); + + do_deletes = stripe_idx_to_delete(c) != 0; + mutex_unlock(&c->ec_stripes_heap_lock); + + if (do_deletes) + bch2_do_stripe_deletes(c); } /* stripe deletion */ -static int ec_stripe_delete(struct bch_fs *c, size_t idx) +static int ec_stripe_delete(struct btree_trans *trans, u64 idx) { - return bch2_btree_delete_range(c, BTREE_ID_EC, - POS(0, idx), - POS(0, idx + 1), - NULL); + struct bch_fs *c = trans->c; + struct btree_iter iter; + struct bkey_s_c k; + struct bkey_s_c_stripe s; + int ret; + + k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx), + BTREE_ITER_INTENT); + ret = bkey_err(k); + if (ret) + goto err; + + if (k.k->type != KEY_TYPE_stripe) { + bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx); + ret = -EINVAL; + goto err; + } + + s = bkey_s_c_to_stripe(k); + for (unsigned i = 0; i < s.v->nr_blocks; i++) + if (stripe_blockcount_get(s.v, i)) { + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, k); + bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf); + printbuf_exit(&buf); + ret = -EINVAL; + goto err; + } + + ret = bch2_btree_delete_at(trans, &iter, 0); +err: + bch2_trans_iter_exit(trans, &iter); + return ret; } static void ec_stripe_delete_work(struct work_struct *work) { struct bch_fs *c = container_of(work, struct bch_fs, ec_stripe_delete_work); - ssize_t idx; - - down_read(&c->gc_lock); - mutex_lock(&c->ec_stripe_create_lock); + struct btree_trans *trans = bch2_trans_get(c); + int ret; + u64 idx; while (1) { - spin_lock(&c->ec_stripes_heap_lock); + mutex_lock(&c->ec_stripes_heap_lock); idx = stripe_idx_to_delete(c); - spin_unlock(&c->ec_stripes_heap_lock); + mutex_unlock(&c->ec_stripes_heap_lock); - if (idx < 0) + if (!idx) break; - if (ec_stripe_delete(c, idx)) + ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL, + ec_stripe_delete(trans, idx)); + if (ret) { + bch_err_fn(c, ret); break; + } } - mutex_unlock(&c->ec_stripe_create_lock); - up_read(&c->gc_lock); + bch2_trans_put(trans); + + bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); +} + +void bch2_do_stripe_deletes(struct bch_fs *c) +{ + if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) && + !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work)) + bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); } /* stripe creation: */ -static int ec_stripe_bkey_insert(struct bch_fs *c, - struct bkey_i_stripe *stripe) +static int ec_stripe_key_update(struct btree_trans *trans, + struct bkey_i_stripe *new, + bool create) { - struct btree_trans trans; - struct btree_iter *iter; + struct bch_fs *c = trans->c; + struct btree_iter iter; struct bkey_s_c k; int ret; - bch2_trans_init(&trans, c, 0, 0); -retry: - bch2_trans_begin(&trans); - - /* XXX: 
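The heap above keeps, via ec_stripes_heap_set_backpointer(), a per-stripe backpointer to its heap slot; that is what lets bch2_stripes_heap_update() resift a single entry in place and lets stripe_idx_to_delete() peek at the top. A toy userspace model of a min-heap with such backpointers, illustrative only and not the kernel's generic heap implementation:

#include <stdio.h>

#define MAX_STRIPES 16

struct entry { unsigned idx, blocks_nonempty; };

static struct entry heap[MAX_STRIPES];
static unsigned heap_used;
static unsigned heap_idx[MAX_STRIPES];	/* backpointers: stripe idx -> heap pos */

static void set_entry(unsigned pos, struct entry e)
{
	heap[pos] = e;
	heap_idx[e.idx] = pos;		/* the "set_backpointer" step */
}

static void swap_entries(unsigned a, unsigned b)
{
	struct entry t = heap[a];
	set_entry(a, heap[b]);
	set_entry(b, t);
}

static void sift_up(unsigned pos)
{
	while (pos && heap[pos].blocks_nonempty < heap[(pos - 1) / 2].blocks_nonempty) {
		swap_entries(pos, (pos - 1) / 2);
		pos = (pos - 1) / 2;
	}
}

static void sift_down(unsigned pos)
{
	while (1) {
		unsigned l = 2 * pos + 1, r = l + 1, min = pos;

		if (l < heap_used && heap[l].blocks_nonempty < heap[min].blocks_nonempty)
			min = l;
		if (r < heap_used && heap[r].blocks_nonempty < heap[min].blocks_nonempty)
			min = r;
		if (min == pos)
			return;
		swap_entries(pos, min);
		pos = min;
	}
}

static void heap_insert(unsigned idx, unsigned blocks_nonempty)
{
	set_entry(heap_used, (struct entry) { idx, blocks_nonempty });
	sift_up(heap_used++);
}

/* analogue of bch2_stripes_heap_update(): fix up one entry in place */
static void heap_update(unsigned idx, unsigned blocks_nonempty)
{
	unsigned pos = heap_idx[idx];	/* O(1) thanks to the backpointer */

	heap[pos].blocks_nonempty = blocks_nonempty;
	sift_up(pos);
	sift_down(pos);
}

int main(void)
{
	heap_insert(0, 3);
	heap_insert(1, 2);
	heap_insert(2, 5);
	heap_update(2, 0);		/* stripe 2 becomes empty... */
	printf("top: stripe %u (%u nonempty)\n",	/* ...and surfaces at the top, */
	       heap[0].idx, heap[0].blocks_nonempty);	/* ready for deletion */
	return 0;
}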
 
 /* stripe creation: */
 
-static int ec_stripe_bkey_insert(struct bch_fs *c,
-				 struct bkey_i_stripe *stripe)
+static int ec_stripe_key_update(struct btree_trans *trans,
+				struct bkey_i_stripe *new,
+				bool create)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_trans_init(&trans, c, 0, 0);
-retry:
-	bch2_trans_begin(&trans);
-
-	/* XXX: start pos hint */
-	for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN,
-			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
-			break;
+	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+			       new->k.p, BTREE_ITER_INTENT);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
 
-		if (bkey_deleted(k.k))
-			goto found_slot;
+	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
+		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
+				     create ? "creating" : "updating",
+				     bch2_bkey_types[k.k->type]);
+		ret = -EINVAL;
+		goto err;
 	}
 
-	if (!ret)
-		ret = -ENOSPC;
-	goto err;
-found_slot:
-	ret = ec_stripe_mem_alloc(c, iter);
-	if (ret)
-		goto err;
+	if (k.k->type == KEY_TYPE_stripe) {
+		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
+		unsigned i;
 
-	stripe->k.p = iter->pos;
+		if (old->nr_blocks != new->v.nr_blocks) {
+			bch_err(c, "error updating stripe: nr_blocks does not match");
+			ret = -EINVAL;
+			goto err;
+		}
 
-	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));
+		for (i = 0; i < new->v.nr_blocks; i++) {
+			unsigned v = stripe_blockcount_get(old, i);
 
-	ret = bch2_trans_commit(&trans, NULL, NULL,
-				BTREE_INSERT_ATOMIC|
-				BTREE_INSERT_NOFAIL);
-err:
-	if (ret == -EINTR)
-		goto retry;
-	bch2_trans_exit(&trans);
+			BUG_ON(v &&
+			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
+				old->ptrs[i].gen != new->v.ptrs[i].gen ||
+				old->ptrs[i].offset != new->v.ptrs[i].offset));
 
+			stripe_blockcount_set(&new->v, i, v);
+		}
+	}
+
+	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
+err:
+	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
 
-static void extent_stripe_ptr_add(struct bkey_s_extent e,
-				  struct ec_stripe_buf *s,
-				  struct bch_extent_ptr *ptr,
-				  unsigned block)
+static int ec_stripe_update_extent(struct btree_trans *trans,
+				   struct bpos bucket, u8 gen,
+				   struct ec_stripe_buf *s,
+				   struct bpos *bp_pos)
 {
-	struct bch_extent_stripe_ptr *dst = (void *) ptr;
-	union bch_extent_entry *end = extent_entry_last(e);
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+	struct bch_fs *c = trans->c;
+	struct bch_backpointer bp;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	const struct bch_extent_ptr *ptr_c;
+	struct bch_extent_ptr *ptr, *ec_ptr = NULL;
+	struct bch_extent_stripe_ptr stripe_ptr;
+	struct bkey_i *n;
+	int ret, dev, block;
+
+	ret = bch2_get_next_backpointer(trans, bucket, gen,
+					bp_pos, &bp, BTREE_ITER_CACHED);
+	if (ret)
+		return ret;
+	if (bpos_eq(*bp_pos, SPOS_MAX))
+		return 0;
 
-	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
-	e.k->u64s += sizeof(*dst) / sizeof(u64);
+	if (bp.level) {
+		struct printbuf buf = PRINTBUF;
+		struct btree_iter node_iter;
+		struct btree *b;
 
-	*dst = (struct bch_extent_stripe_ptr) {
-		.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
-		.block		= block,
-		.idx		= s->key.k.p.offset,
-	};
-}
+		b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
+		bch2_trans_iter_exit(trans, &node_iter);
 
-static int ec_stripe_update_ptrs(struct bch_fs *c,
-				 struct ec_stripe_buf *s,
-				 struct bkey *pos)
-{
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bkey_s_c k;
-	struct bkey_s_extent e;
-	struct bch_extent_ptr *ptr;
-	BKEY_PADDED(k) tmp;
-	int ret = 0, dev, idx;
-
-	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
-				   bkey_start_pos(pos),
-				   BTREE_ITER_INTENT);
-
-	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = bkey_err(k)) &&
-	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
-		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
-			bch2_btree_iter_next(iter);
-			continue;
-		}
+		if (!b)
+			return 0;
 
-		idx = extent_matches_stripe(c, &s->key.v, k);
-		if (idx < 0) {
-			bch2_btree_iter_next(iter);
-			continue;
-		}
+		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
+		bch2_backpointer_to_text(&buf, &bp);
 
-		bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
+		bch2_fs_inconsistent(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+		return -EIO;
+	}
 
-		dev = s->key.v.ptrs[idx].dev;
+	k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
+	ret = bkey_err(k);
+	if (ret)
+		return ret;
+	if (!k.k) {
+		/*
+		 * extent no longer exists - we could flush the btree
+		 * write buffer and retry to verify, but no need:
+		 */
+		return 0;
+	}
 
-		bkey_reassemble(&tmp.k, k);
-		e = bkey_i_to_s_extent(&tmp.k);
+	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+		goto out;
 
-		extent_for_each_ptr(e, ptr)
-			if (ptr->dev != dev)
-				ptr->cached = true;
+	ptr_c = bkey_matches_stripe(v, k, &block);
+	/*
+	 * It doesn't generally make sense to erasure code cached ptrs:
+	 * XXX: should we be incrementing a counter?
+	 */
+	if (!ptr_c || ptr_c->cached)
+		goto out;
 
-		ptr = (void *) bch2_extent_has_device(e.c, dev);
-		BUG_ON(!ptr);
+	dev = v->ptrs[block].dev;
 
-		extent_stripe_ptr_add(e, s, ptr, idx);
+	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
+	ret = PTR_ERR_OR_ZERO(n);
+	if (ret)
+		goto out;
 
-		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));
+	bkey_reassemble(n, k);
 
-		ret = bch2_trans_commit(&trans, NULL, NULL,
-					BTREE_INSERT_ATOMIC|
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_USE_RESERVE);
-		if (ret == -EINTR)
-			ret = 0;
-		if (ret)
-			break;
-	}
+	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
+	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
+	BUG_ON(!ec_ptr);
 
-	bch2_trans_exit(&trans);
+	stripe_ptr = (struct bch_extent_stripe_ptr) {
+		.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
+		.block		= block,
+		.redundancy	= v->nr_redundant,
+		.idx		= s->key.k.p.offset,
+	};
 
+	__extent_entry_insert(n,
+			(union bch_extent_entry *) ec_ptr,
+			(union bch_extent_entry *) &stripe_ptr);
+
+	ret = bch2_trans_update(trans, &iter, n, 0);
+out:
+	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
 
-/*
- * data buckets of new stripe all written: create the stripe
+static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
+				   unsigned block)
+{
+	struct bch_fs *c = trans->c;
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+	struct bch_extent_ptr bucket = v->ptrs[block];
+	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
+	struct bpos bp_pos = POS_MIN;
+	int ret = 0;
+
+	while (1) {
+		ret = commit_do(trans, NULL, NULL,
+				BTREE_INSERT_NOCHECK_RW|
+				BTREE_INSERT_NOFAIL,
+			ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
+						s, &bp_pos));
+		if (ret)
+			break;
+		if (bkey_eq(bp_pos, POS_MAX))
+			break;
+
+		bp_pos = bpos_nosnap_successor(bp_pos);
+	}
+
+	return ret;
+}
+
+static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
+{
+	struct btree_trans *trans = bch2_trans_get(c);
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
+	int ret = 0;
+
+	ret = bch2_btree_write_buffer_flush(trans);
+	if (ret)
+		goto err;
+
+	for (i = 0; i < nr_data; i++) {
+		ret = ec_stripe_update_bucket(trans, s, i);
+		if (ret)
+			break;
+	}
+err:
+	bch2_trans_put(trans);
+
+	return ret;
+}
+
+static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
+				       struct ec_stripe_new *s,
+				       unsigned block,
+				       struct open_bucket *ob)
+{
+	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
+	int ret;
+
+	if (!bch2_dev_get_ioref(ca, WRITE)) {
+		s->err = -BCH_ERR_erofs_no_writes;
+		return;
+	}
+
+	memset(s->new_stripe.data[block] + (offset << 9),
+	       0,
+	       ob->sectors_free << 9);
+
+	ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
+			ob->bucket * ca->mi.bucket_size + offset,
+			ob->sectors_free,
+			GFP_KERNEL, 0);
+
+	percpu_ref_put(&ca->io_ref);
+
+	if (ret)
+		s->err = ret;
+}
+
+void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
+{
+	if (s->idx)
+		bch2_stripe_close(c, s);
+	kfree(s);
+}
+
+/*
+ * data buckets of new stripe all written: create the stripe
  */
 static void ec_stripe_create(struct ec_stripe_new *s)
 {
 	struct bch_fs *c = s->c;
 	struct open_bucket *ob;
-	struct bkey_i *k;
-	struct bch_stripe *v = &s->stripe.key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
-	struct closure cl;
 	int ret;
 
 	BUG_ON(s->h->s == s);
 
-	closure_init_stack(&cl);
+	closure_sync(&s->iodone);
+
+	if (!s->err) {
+		for (i = 0; i < nr_data; i++)
+			if (s->blocks[i]) {
+				ob = c->open_buckets + s->blocks[i];
+
+				if (ob->sectors_free)
+					zero_out_rest_of_ec_bucket(c, s, i, ob);
+			}
+	}
 
 	if (s->err) {
-		bch_err(c, "error creating stripe: error writing data buckets");
+		if (!bch2_err_matches(s->err, EROFS))
+			bch_err(c, "error creating stripe: error writing data buckets");
 		goto err;
 	}
 
-	if (!percpu_ref_tryget(&c->writes))
-		goto err;
+	if (s->have_existing_stripe) {
+		ec_validate_checksums(c, &s->existing_stripe);
+
+		if (ec_do_recov(c, &s->existing_stripe)) {
+			bch_err(c, "error creating stripe: error reading existing stripe");
+			goto err;
+		}
 
-	BUG_ON(bitmap_weight(s->blocks_allocated,
-			     s->blocks.nr) != s->blocks.nr);
+		for (i = 0; i < nr_data; i++)
+			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
+				swap(s->new_stripe.data[i],
+				     s->existing_stripe.data[i]);
 
-	ec_generate_ec(&s->stripe);
+		ec_stripe_buf_exit(&s->existing_stripe);
+	}
 
-	ec_generate_checksums(&s->stripe);
+	BUG_ON(!s->allocated);
+	BUG_ON(!s->idx);
 
-	/* write p/q: */
-	for (i = nr_data; i < v->nr_blocks; i++)
-		ec_block_io(c, &s->stripe, REQ_OP_WRITE, i, &cl);
+	ec_generate_ec(&s->new_stripe);
 
-	closure_sync(&cl);
+	ec_generate_checksums(&s->new_stripe);
 
+	/* write p/q: */
 	for (i = nr_data; i < v->nr_blocks; i++)
-		if (!test_bit(i, s->stripe.valid)) {
-			bch_err(c, "error creating stripe: error writing redundancy buckets");
-			goto err_put_writes;
-		}
+		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
+	closure_sync(&s->iodone);
 
-	mutex_lock(&c->ec_stripe_create_lock);
+	if (ec_nr_failed(&s->new_stripe)) {
+		bch_err(c, "error creating stripe: error writing redundancy buckets");
+		goto err;
+	}
 
-	ret = ec_stripe_bkey_insert(c, &s->stripe.key);
+	ret = bch2_trans_do(c, &s->res, NULL,
+			    BTREE_INSERT_NOCHECK_RW|
+			    BTREE_INSERT_NOFAIL,
+			    ec_stripe_key_update(trans,
+					bkey_i_to_stripe(&s->new_stripe.key),
+					!s->have_existing_stripe));
 	if (ret) {
 		bch_err(c, "error creating stripe: error creating stripe key");
-		goto err_unlock;
+		goto err;
 	}
 
-	for_each_keylist_key(&s->keys, k) {
-		ret = ec_stripe_update_ptrs(c, &s->stripe, &k->k);
-		if (ret)
-			break;
+	ret = ec_stripe_update_extents(c, &s->new_stripe);
+	if (ret) {
+		bch_err_msg(c, ret, "creating stripe: error updating pointers");
+		goto err;
 	}
-
-err_unlock:
-	mutex_unlock(&c->ec_stripe_create_lock);
-err_put_writes:
-	percpu_ref_put(&c->writes);
 err:
-	open_bucket_for_each(c, &s->blocks, ob, i) {
-		ob->ec = NULL;
-		__bch2_open_bucket_put(c, ob);
-	}
-
-	bch2_open_buckets_put(c, &s->parity);
+	bch2_disk_reservation_put(c, &s->res);
 
-	bch2_keylist_free(&s->keys, s->inline_keys);
+	for (i = 0; i < v->nr_blocks; i++)
+		if (s->blocks[i]) {
+			ob = c->open_buckets + s->blocks[i];
+
+			if (i < nr_data) {
+				ob->ec = NULL;
+				__bch2_open_bucket_put(c, ob);
+			} else {
+				bch2_open_bucket_put(c, ob);
+			}
+		}
 
-	mutex_lock(&s->h->lock);
+	mutex_lock(&c->ec_stripe_new_lock);
 	list_del(&s->list);
-	mutex_unlock(&s->h->lock);
+	mutex_unlock(&c->ec_stripe_new_lock);
+	wake_up(&c->ec_stripe_new_wait);
 
-	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
-		kvpfree(s->stripe.data[i], s->stripe.size << 9);
-	kfree(s);
+	ec_stripe_buf_exit(&s->existing_stripe);
+	ec_stripe_buf_exit(&s->new_stripe);
+	closure_debug_destroy(&s->iodone);
+
+	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
 }
 
-static struct ec_stripe_new *ec_stripe_set_pending(struct ec_stripe_head *h)
+static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
 {
-	struct ec_stripe_new *s = h->s;
+	struct ec_stripe_new *s;
 
-	list_add(&s->list, &h->stripes);
-	h->s = NULL;
+	mutex_lock(&c->ec_stripe_new_lock);
+	list_for_each_entry(s, &c->ec_stripe_new_list, list)
+		if (!atomic_read(&s->ref[STRIPE_REF_io]))
+			goto out;
+	s = NULL;
+out:
+	mutex_unlock(&c->ec_stripe_new_lock);
 
 	return s;
 }
 
-static void ec_stripe_new_put(struct ec_stripe_new *s)
+static void ec_stripe_create_work(struct work_struct *work)
 {
-	BUG_ON(atomic_read(&s->pin) <= 0);
-	if (atomic_dec_and_test(&s->pin))
+	struct bch_fs *c = container_of(work,
+		struct bch_fs, ec_stripe_create_work);
+	struct ec_stripe_new *s;
+
+	while ((s = get_pending_stripe(c)))
 		ec_stripe_create(s);
+
+	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
 }
 
-/* have a full bucket - hand it off to be erasure coded: */
-void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
+void bch2_ec_do_stripe_creates(struct bch_fs *c)
 {
-	struct ec_stripe_new *s = ob->ec;
+	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
+
+	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
+		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
+}
+
+static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
+{
+	struct ec_stripe_new *s = h->s;
 
-	if (ob->sectors_free)
-		s->err = -1;
+	BUG_ON(!s->allocated && !s->err);
 
-	ec_stripe_new_put(s);
+	h->s		= NULL;
+	s->pending	= true;
+
+	mutex_lock(&c->ec_stripe_new_lock);
+	list_add(&s->list, &c->ec_stripe_new_list);
+	mutex_unlock(&c->ec_stripe_new_lock);
+
+	ec_stripe_new_put(c, s, STRIPE_REF_io);
 }
 
 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
@@ -949,36 +1227,12 @@ void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
 	if (!ob)
 		return NULL;
 
-	ca	= bch_dev_bkey_exists(c, ob->ptr.dev);
-	offset	= ca->mi.bucket_size - ob->sectors_free;
-
-	return ob->ec->stripe.data[ob->ec_idx] + (offset << 9);
-}
-
-void bch2_ec_add_backpointer(struct bch_fs *c, struct write_point *wp,
-			     struct bpos pos, unsigned sectors)
-{
-	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
-	struct ec_stripe_new *ec;
-
-	if (!ob)
-		return;
-
-	ec = ob->ec;
-	mutex_lock(&ec->lock);
-
-	if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
-				 ARRAY_SIZE(ec->inline_keys),
-				 BKEY_U64s)) {
-		BUG();
-	}
+	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
 
-	bkey_init(&ec->keys.top->k);
-	ec->keys.top->k.p	= pos;
-	bch2_key_resize(&ec->keys.top->k, sectors);
-	bch2_keylist_push(&ec->keys);
+	ca	= bch_dev_bkey_exists(c, ob->dev);
+	offset	= ca->mi.bucket_size - ob->sectors_free;
 
-	mutex_unlock(&ec->lock);
+	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
 }
 
 static int unsigned_cmp(const void *_l, const void *_r)
@@ -1022,59 +1276,69 @@ static unsigned pick_blocksize(struct bch_fs *c,
 	return best.size;
 }
 
-int bch2_ec_stripe_new_alloc(struct bch_fs *c, struct ec_stripe_head *h)
+static bool may_create_new_stripe(struct bch_fs *c)
+{
+	return false;
+}
+
+static void ec_stripe_key_init(struct bch_fs *c,
+			       struct bkey_i *k,
+			       unsigned nr_data,
+			       unsigned nr_parity,
+			       unsigned stripe_size)
+{
+	struct bkey_i_stripe *s = bkey_stripe_init(k);
+	unsigned u64s;
+
+	s->v.sectors			= cpu_to_le16(stripe_size);
+	s->v.algorithm			= 0;
+	s->v.nr_blocks			= nr_data + nr_parity;
+	s->v.nr_redundant		= nr_parity;
+	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
+	s->v.csum_type			= BCH_CSUM_crc32c;
+	s->v.pad			= 0;
+
+	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
+		BUG_ON(1 << s->v.csum_granularity_bits >=
+		       le16_to_cpu(s->v.sectors) ||
+		       s->v.csum_granularity_bits == U8_MAX);
+		s->v.csum_granularity_bits++;
+	}
+
+	set_bkey_val_u64s(&s->k, u64s);
+}
+
+static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 {
 	struct ec_stripe_new *s;
-	unsigned i;
 
-	BUG_ON(h->parity.nr != h->redundancy);
-	BUG_ON(!h->blocks.nr);
-	BUG_ON(h->parity.nr + h->blocks.nr > EC_STRIPE_MAX);
 	lockdep_assert_held(&h->lock);
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
 
 	mutex_init(&s->lock);
-	atomic_set(&s->pin, 1);
+	closure_init(&s->iodone, NULL);
+	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
+	atomic_set(&s->ref[STRIPE_REF_io], 1);
 	s->c		= c;
 	s->h		= h;
-	s->blocks	= h->blocks;
-	s->parity	= h->parity;
-
-	memset(&h->blocks, 0, sizeof(h->blocks));
-	memset(&h->parity, 0, sizeof(h->parity));
-
-	bch2_keylist_init(&s->keys, s->inline_keys);
-
-	s->stripe.offset	= 0;
-	s->stripe.size		= h->blocksize;
-	memset(s->stripe.valid, 0xFF, sizeof(s->stripe.valid));
+	s->nr_data	= min_t(unsigned, h->nr_active_devs,
+				BCH_BKEY_PTRS_MAX) - h->redundancy;
+	s->nr_parity	= h->redundancy;
 
-	ec_stripe_key_init(c, &s->stripe.key,
-			   &s->blocks, &s->parity,
-			   h->blocksize);
-
-	for (i = 0; i < s->stripe.key.v.nr_blocks; i++) {
-		s->stripe.data[i] = kvpmalloc(s->stripe.size << 9, GFP_KERNEL);
-		if (!s->stripe.data[i])
-			goto err;
-	}
+	ec_stripe_key_init(c, &s->new_stripe.key,
+			   s->nr_data, s->nr_parity, h->blocksize);
 
 	h->s = s;
-
 	return 0;
-err:
-	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
-		kvpfree(s->stripe.data[i], s->stripe.size << 9);
-	kfree(s);
-	return -ENOMEM;
 }
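ec_stripe_key_init() above keeps doubling the checksum granularity until the stripe key's value fits in BKEY_VAL_U64s_MAX worth of u64s, since coarser granularity means fewer stored checksums. A sketch of that sizing loop with made-up size constants; the real stripe_val_u64s() layout differs and the helper below is a mock:

#include <stdio.h>

#define BKEY_VAL_U64S_MAX	255	/* illustrative limit, not the real constant */

/* mock of stripe_val_u64s(): fixed fields plus one checksum per chunk per block */
static unsigned mock_stripe_val_u64s(unsigned nr_blocks, unsigned sectors,
				     unsigned csum_granularity_bits,
				     unsigned csum_bytes)
{
	unsigned csums_per_block = (sectors + (1U << csum_granularity_bits) - 1)
		>> csum_granularity_bits;
	unsigned bytes = 32 /* rough header + ptrs */ +
		nr_blocks * csums_per_block * csum_bytes;

	return (bytes + 7) / 8;
}

int main(void)
{
	unsigned nr_blocks = 6, sectors = 2048, csum_bytes = 4 /* crc32c */;
	unsigned bits = 0;

	/* the while loop from ec_stripe_key_init(), in miniature */
	while (mock_stripe_val_u64s(nr_blocks, sectors, bits, csum_bytes) >
	       BKEY_VAL_U64S_MAX)
		bits++;

	printf("csum_granularity_bits = %u (%u-sector checksum chunks)\n",
	       bits, 1U << bits);
	return 0;
}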
 
 static struct ec_stripe_head *
 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
-			 unsigned algo, unsigned redundancy)
+			 unsigned algo, unsigned redundancy,
+			 enum bch_watermark watermark)
 {
 	struct ec_stripe_head *h;
 	struct bch_dev *ca;
@@ -1085,15 +1349,15 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 		return NULL;
 
 	mutex_init(&h->lock);
-	mutex_lock(&h->lock);
-	INIT_LIST_HEAD(&h->stripes);
+	BUG_ON(!mutex_trylock(&h->lock));
 
 	h->target	= target;
 	h->algo		= algo;
 	h->redundancy	= redundancy;
+	h->watermark	= watermark;
 
 	rcu_read_lock();
-	h->devs = target_rw_devs(c, BCH_DATA_USER, target);
+	h->devs = target_rw_devs(c, BCH_DATA_user, target);
 
 	for_each_member_device_rcu(ca, c, i, &h->devs)
 		if (!ca->mi.durability)
@@ -1106,266 +1370,597 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 			h->nr_active_devs++;
 
 	rcu_read_unlock();
-	list_add(&h->list, &c->ec_new_stripe_list);
+	list_add(&h->list, &c->ec_stripe_head_list);
+
 	return h;
 }
 
-void bch2_ec_stripe_head_put(struct ec_stripe_head *h)
+void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
 {
-	struct ec_stripe_new *s = NULL;
-
 	if (h->s &&
+	    h->s->allocated &&
 	    bitmap_weight(h->s->blocks_allocated,
-			  h->s->blocks.nr) == h->s->blocks.nr)
-		s = ec_stripe_set_pending(h);
+			  h->s->nr_data) == h->s->nr_data)
+		ec_stripe_set_pending(c, h);
 
 	mutex_unlock(&h->lock);
-
-	if (s)
-		ec_stripe_new_put(s);
 }
 
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
-					       unsigned target,
-					       unsigned algo,
-					       unsigned redundancy)
+static struct ec_stripe_head *
+__bch2_ec_stripe_head_get(struct btree_trans *trans,
+			  unsigned target,
+			  unsigned algo,
+			  unsigned redundancy,
+			  enum bch_watermark watermark)
 {
+	struct bch_fs *c = trans->c;
 	struct ec_stripe_head *h;
+	int ret;
 
 	if (!redundancy)
 		return NULL;
 
-	mutex_lock(&c->ec_new_stripe_lock);
-	list_for_each_entry(h, &c->ec_new_stripe_list, list)
+	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (test_bit(BCH_FS_GOING_RO, &c->flags)) {
+		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
+		goto found;
+	}
+
+	list_for_each_entry(h, &c->ec_stripe_head_list, list)
 		if (h->target		== target &&
 		    h->algo		== algo &&
-		    h->redundancy	== redundancy) {
-			mutex_lock(&h->lock);
+		    h->redundancy	== redundancy &&
+		    h->watermark	== watermark) {
+			ret = bch2_trans_mutex_lock(trans, &h->lock);
+			if (ret)
+				h = ERR_PTR(ret);
 			goto found;
 		}
 
-	h = ec_new_stripe_head_alloc(c, target, algo, redundancy);
+	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
 found:
-	mutex_unlock(&c->ec_new_stripe_lock);
+	mutex_unlock(&c->ec_stripe_head_lock);
 	return h;
 }
 
-void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
+static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
+				    enum bch_watermark watermark, struct closure *cl)
 {
-	struct ec_stripe_head *h;
+	struct bch_fs *c = trans->c;
+	struct bch_devs_mask devs = h->devs;
 	struct open_bucket *ob;
-	unsigned i;
+	struct open_buckets buckets;
+	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
+	bool have_cache = true;
+	int ret = 0;
 
-	mutex_lock(&c->ec_new_stripe_lock);
-	list_for_each_entry(h, &c->ec_new_stripe_list, list) {
-		struct ec_stripe_new *s = NULL;
+	BUG_ON(v->nr_blocks	!= h->s->nr_data + h->s->nr_parity);
+	BUG_ON(v->nr_redundant	!= h->s->nr_parity);
 
-		mutex_lock(&h->lock);
-		bch2_open_buckets_stop_dev(c, ca,
-					   &h->blocks,
-					   BCH_DATA_USER);
-		bch2_open_buckets_stop_dev(c, ca,
-					   &h->parity,
-					   BCH_DATA_USER);
+	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
+		__clear_bit(v->ptrs[i].dev, devs.d);
+		if (i < h->s->nr_data)
+			nr_have_data++;
+		else
+			nr_have_parity++;
+	}
 
-		if (!h->s)
-			goto unlock;
+	BUG_ON(nr_have_data	> h->s->nr_data);
+	BUG_ON(nr_have_parity	> h->s->nr_parity);
+
+	buckets.nr = 0;
+	if (nr_have_parity < h->s->nr_parity) {
+		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
+					    &h->parity_stripe,
+					    &devs,
+					    h->s->nr_parity,
+					    &nr_have_parity,
+					    &have_cache, 0,
+					    BCH_DATA_parity,
+					    watermark,
+					    cl);
+
+		open_bucket_for_each(c, &buckets, ob, i) {
+			j = find_next_zero_bit(h->s->blocks_gotten,
+					       h->s->nr_data + h->s->nr_parity,
+					       h->s->nr_data);
+			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
+
+			h->s->blocks[j] = buckets.v[i];
+			v->ptrs[j] = bch2_ob_ptr(c, ob);
+			__set_bit(j, h->s->blocks_gotten);
+		}
 
-		open_bucket_for_each(c, &h->s->blocks, ob, i)
-			if (ob->ptr.dev == ca->dev_idx)
-				goto found;
-		open_bucket_for_each(c, &h->s->parity, ob, i)
-			if (ob->ptr.dev == ca->dev_idx)
-				goto found;
-		goto unlock;
-found:
-		h->s->err = -1;
-		s = ec_stripe_set_pending(h);
-unlock:
-		mutex_unlock(&h->lock);
+		if (ret)
+			return ret;
+	}
 
-		if (s)
-			ec_stripe_new_put(s);
+	buckets.nr = 0;
+	if (nr_have_data < h->s->nr_data) {
+		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
+					    &h->block_stripe,
+					    &devs,
+					    h->s->nr_data,
+					    &nr_have_data,
+					    &have_cache, 0,
+					    BCH_DATA_user,
+					    watermark,
+					    cl);
+
+		open_bucket_for_each(c, &buckets, ob, i) {
+			j = find_next_zero_bit(h->s->blocks_gotten,
+					       h->s->nr_data, 0);
+			BUG_ON(j >= h->s->nr_data);
+
+			h->s->blocks[j] = buckets.v[i];
+			v->ptrs[j] = bch2_ob_ptr(c, ob);
+			__set_bit(j, h->s->blocks_gotten);
+		}
+
+		if (ret)
+			return ret;
 	}
-	mutex_unlock(&c->ec_new_stripe_lock);
+
+	return 0;
+}
+
+/* XXX: doesn't obey target: */
+static s64 get_existing_stripe(struct bch_fs *c,
+			       struct ec_stripe_head *head)
+{
+	ec_stripes_heap *h = &c->ec_stripes_heap;
+	struct stripe *m;
+	size_t heap_idx;
+	u64 stripe_idx;
+	s64 ret = -1;
+
+	if (may_create_new_stripe(c))
+		return -1;
+
+	mutex_lock(&c->ec_stripes_heap_lock);
+	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
+		/* No blocks worth reusing, stripe will just be deleted: */
+		if (!h->data[heap_idx].blocks_nonempty)
+			continue;
+
+		stripe_idx = h->data[heap_idx].idx;
+
+		m = genradix_ptr(&c->stripes, stripe_idx);
+
+		if (m->algorithm	== head->algo &&
+		    m->nr_redundant	== head->redundancy &&
+		    m->sectors		== head->blocksize &&
+		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
+		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
+			ret = stripe_idx;
+			break;
+		}
+	}
+	mutex_unlock(&c->ec_stripes_heap_lock);
+	return ret;
 }
 
-static int __bch2_stripe_write_key(struct btree_trans *trans,
-				   struct btree_iter *iter,
-				   struct stripe *m,
-				   size_t idx,
-				   struct bkey_i_stripe *new_key,
-				   unsigned flags)
+static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k;
+	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+	struct bch_stripe *existing_v;
 	unsigned i;
+	s64 idx;
 	int ret;
 
-	bch2_btree_iter_set_pos(iter, POS(0, idx));
+	/*
+	 * If we can't allocate a new stripe, and there's no stripes with empty
+	 * blocks for us to reuse, that means we have to wait on copygc:
+	 */
+	idx = get_existing_stripe(c, h);
+	if (idx < 0)
+		return -BCH_ERR_stripe_alloc_blocked;
 
-	k = bch2_btree_iter_peek_slot(iter);
-	ret = bkey_err(k);
-	if (ret)
+	ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
+	if (ret) {
+		bch2_stripe_close(c, h->s);
+		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
 		return ret;
+	}
 
-	if (k.k->type != KEY_TYPE_stripe)
-		return -EIO;
+	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
 
-	bkey_reassemble(&new_key->k_i, k);
+	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
+	h->s->nr_data = existing_v->nr_blocks -
+		existing_v->nr_redundant;
 
-	spin_lock(&c->ec_stripes_heap_lock);
+	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
+	if (ret) {
+		bch2_stripe_close(c, h->s);
+		return ret;
+	}
 
-	for (i = 0; i < new_key->v.nr_blocks; i++)
-		stripe_blockcount_set(&new_key->v, i,
-				      m->block_sectors[i]);
-	m->dirty = false;
+	BUG_ON(h->s->existing_stripe.size != h->blocksize);
+	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
 
-	spin_unlock(&c->ec_stripes_heap_lock);
+	/*
+	 * Free buckets we initially allocated - they might conflict with
+	 * blocks from the stripe we're reusing:
+	 */
+	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
+		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
+		h->s->blocks[i] = 0;
+	}
+	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
+	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+	for (i = 0; i < existing_v->nr_blocks; i++) {
+		if (stripe_blockcount_get(existing_v, i)) {
+			__set_bit(i, h->s->blocks_gotten);
+			__set_bit(i, h->s->blocks_allocated);
+		}
 
-	return bch2_trans_commit(trans, NULL, NULL,
-				 BTREE_INSERT_NOFAIL|flags);
-}
+		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
+	}
 
-int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
-{
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct genradix_iter giter;
-	struct bkey_i_stripe *new_key;
-	struct stripe *m;
-	int ret = 0;
+	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
+	h->s->have_existing_stripe = true;
 
-	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
-	BUG_ON(!new_key);
+	return 0;
+}
 
-	bch2_trans_init(&trans, c, 0, 0);
+static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bpos min_pos = POS(0, 1);
+	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
+	int ret;
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
-				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	if (!h->s->res.sectors) {
+		ret = bch2_disk_reservation_get(c, &h->s->res,
+					h->blocksize,
+					h->s->nr_parity,
+					BCH_DISK_RESERVATION_NOFAIL);
+		if (ret)
+			return ret;
+	}
 
-	genradix_for_each(&c->stripes[0], giter, m) {
-		if (!m->dirty)
-			continue;
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
+			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
+			if (start_pos.offset) {
+				start_pos = min_pos;
+				bch2_btree_iter_set_pos(&iter, start_pos);
+				continue;
+			}
 
-		ret = __bch2_stripe_write_key(&trans, iter, m, giter.pos,
-					      new_key, flags);
-		if (ret)
+			ret = -BCH_ERR_ENOSPC_stripe_create;
 			break;
+		}
 
-		*wrote = true;
+		if (bkey_deleted(k.k) &&
+		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
+			break;
 	}
 
-	bch2_trans_exit(&trans);
+	c->ec_stripe_hint = iter.pos.offset;
 
-	kfree(new_key);
+	if (ret)
+		goto err;
 
+	ret = ec_stripe_mem_alloc(trans, &iter);
+	if (ret) {
+		bch2_stripe_close(c, h->s);
+		goto err;
+	}
+
+	h->s->new_stripe.key.k.p = iter.pos;
+out:
+	bch2_trans_iter_exit(trans, &iter);
 	return ret;
+err:
+	bch2_disk_reservation_put(c, &h->s->res);
+	goto out;
 }
 
-int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
+					       unsigned target,
+					       unsigned algo,
+					       unsigned redundancy,
+					       enum bch_watermark watermark,
+					       struct closure *cl)
 {
-	struct journal_key *i;
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct bkey_s_c k;
+	struct bch_fs *c = trans->c;
+	struct ec_stripe_head *h;
+	bool waiting = false;
 	int ret;
 
-	ret = bch2_fs_ec_start(c);
+	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
+	if (!h)
+		bch_err(c, "no stripe head");
+	if (IS_ERR_OR_NULL(h))
+		return h;
+
+	if (!h->s) {
+		ret = ec_new_stripe_alloc(c, h);
+		if (ret) {
+			bch_err(c, "failed to allocate new stripe");
+			goto err;
+		}
+	}
+
+	if (h->s->allocated)
+		goto allocated;
+
+	if (h->s->have_existing_stripe)
+		goto alloc_existing;
+
+	/* First, try to allocate a full stripe: */
+	ret =   new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
+		__bch2_ec_stripe_head_reserve(trans, h);
+	if (!ret)
+		goto allocate_buf;
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+	    bch2_err_matches(ret, ENOMEM))
+		goto err;
+
+	/*
+	 * Not enough buckets available for a full stripe: we must reuse an
+	 * existing stripe:
+	 */
+	while (1) {
+		ret = __bch2_ec_stripe_head_reuse(trans, h);
+		if (!ret)
+			break;
+		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
+			goto err;
+
+		if (watermark == BCH_WATERMARK_copygc) {
+			ret =   new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
+				__bch2_ec_stripe_head_reserve(trans, h);
+			if (ret)
+				goto err;
+			goto allocate_buf;
+		}
+
+		/* XXX freelist_wait? */
+		closure_wait(&c->freelist_wait, cl);
+		waiting = true;
+	}
+
+	if (waiting)
+		closure_wake_up(&c->freelist_wait);
+alloc_existing:
+	/*
+	 * Retry allocating buckets, with the watermark for this
+	 * particular write:
+	 */
+	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
 	if (ret)
-		return ret;
+		goto err;
 
-	bch2_trans_init(&trans, c, 0, 0);
+allocate_buf:
+	ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
+	if (ret)
+		goto err;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
-		bch2_mark_key(c, k, 0, NULL, 0,
-			      BCH_BUCKET_MARK_ALLOC_READ|
-			      BCH_BUCKET_MARK_NOATOMIC);
+	h->s->allocated = true;
+allocated:
+	BUG_ON(!h->s->idx);
+	BUG_ON(!h->s->new_stripe.data[0]);
+	BUG_ON(trans->restarted);
+	return h;
+err:
+	bch2_ec_stripe_head_put(c, h);
+	return ERR_PTR(ret);
+}
 
-	ret = bch2_trans_exit(&trans) ?: ret;
-	if (ret) {
-		bch_err(c, "error reading stripes: %i", ret);
-		return ret;
+static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
+{
+	struct ec_stripe_head *h;
+	struct open_bucket *ob;
+	unsigned i;
+
+	mutex_lock(&c->ec_stripe_head_lock);
+	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
+		mutex_lock(&h->lock);
+		if (!h->s)
+			goto unlock;
+
+		if (!ca)
+			goto found;
+
+		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
+			if (!h->s->blocks[i])
+				continue;
+
+			ob = c->open_buckets + h->s->blocks[i];
+			if (ob->dev == ca->dev_idx)
+				goto found;
+		}
+		goto unlock;
+found:
+		h->s->err = -BCH_ERR_erofs_no_writes;
+		ec_stripe_set_pending(c, h);
+unlock:
+		mutex_unlock(&h->lock);
 	}
+	mutex_unlock(&c->ec_stripe_head_lock);
+}
 
-	for_each_journal_key(*journal_keys, i)
-		if (i->btree_id == BTREE_ID_EC)
-			bch2_mark_key(c, bkey_i_to_s_c(i->k),
-				      0, NULL, 0,
-				      BCH_BUCKET_MARK_ALLOC_READ|
-				      BCH_BUCKET_MARK_NOATOMIC);
+void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
+{
+	__bch2_ec_stop(c, ca);
+}
 
-	return 0;
+void bch2_fs_ec_stop(struct bch_fs *c)
+{
+	__bch2_ec_stop(c, NULL);
 }
 
-int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
+static bool bch2_fs_ec_flush_done(struct bch_fs *c)
 {
-	struct btree_trans trans;
-	struct btree_iter *iter;
+	bool ret;
+
+	mutex_lock(&c->ec_stripe_new_lock);
+	ret = list_empty(&c->ec_stripe_new_list);
+	mutex_unlock(&c->ec_stripe_new_lock);
+
+	return ret;
+}
+
+void bch2_fs_ec_flush(struct bch_fs *c)
+{
+	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
+}
+
+int bch2_stripes_read(struct bch_fs *c)
+{
+	struct btree_trans *trans = bch2_trans_get(c);
+	struct btree_iter iter;
 	struct bkey_s_c k;
-	size_t i, idx = 0;
-	int ret = 0;
+	const struct bch_stripe *s;
+	struct stripe *m;
+	unsigned i;
+	int ret;
+
+	for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
+			   BTREE_ITER_PREFETCH, k, ret) {
+		if (k.k->type != KEY_TYPE_stripe)
+			continue;
+
+		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
+		if (ret)
+			break;
 
-	bch2_trans_init(&trans, c, 0, 0);
+		s = bkey_s_c_to_stripe(k).v;
 
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);
+		m = genradix_ptr(&c->stripes, k.k->p.offset);
+		m->sectors	= le16_to_cpu(s->sectors);
+		m->algorithm	= s->algorithm;
+		m->nr_blocks	= s->nr_blocks;
+		m->nr_redundant	= s->nr_redundant;
+		m->blocks_nonempty = 0;
+
+		for (i = 0; i < s->nr_blocks; i++)
+			m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+
+		bch2_stripes_heap_insert(c, m, k.k->p.offset);
+	}
+	bch2_trans_iter_exit(trans, &iter);
+
+	bch2_trans_put(trans);
 
-	k = bch2_btree_iter_prev(iter);
-	if (!IS_ERR_OR_NULL(k.k))
-		idx = k.k->p.offset + 1;
-	ret = bch2_trans_exit(&trans);
 	if (ret)
-		return ret;
+		bch_err_fn(c, ret);
 
-	if (!gc &&
-	    !init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
-		       GFP_KERNEL))
-		return -ENOMEM;
-#if 0
-	ret = genradix_prealloc(&c->stripes[gc], idx, GFP_KERNEL);
-#else
-	for (i = 0; i < idx; i++)
-		if (!genradix_ptr_alloc(&c->stripes[gc], i, GFP_KERNEL))
-			return -ENOMEM;
-#endif
-	return 0;
+	return ret;
 }
 
-int bch2_fs_ec_start(struct bch_fs *c)
+void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
 {
-	return bch2_ec_mem_alloc(c, false);
+	ec_stripes_heap *h = &c->ec_stripes_heap;
+	struct stripe *m;
+	size_t i;
+
+	mutex_lock(&c->ec_stripes_heap_lock);
+	for (i = 0; i < min_t(size_t, h->used, 50); i++) {
+		m = genradix_ptr(&c->stripes, h->data[i].idx);
+
+		prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
+		       h->data[i].blocks_nonempty,
+		       m->nr_blocks - m->nr_redundant,
+		       m->nr_redundant);
+		if (bch2_stripe_is_open(c, h->data[i].idx))
+			prt_str(out, " open");
+		prt_newline(out);
+	}
+	mutex_unlock(&c->ec_stripes_heap_lock);
+}
+
+void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct ec_stripe_head *h;
+	struct ec_stripe_new *s;
+
+	mutex_lock(&c->ec_stripe_head_lock);
+	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
+		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
+		       h->target, h->algo, h->redundancy,
+		       bch2_watermarks[h->watermark]);
+
+		if (h->s)
+			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
+			       h->s->idx, h->s->nr_data, h->s->nr_parity,
+			       bitmap_weight(h->s->blocks_allocated,
+					     h->s->nr_data));
+	}
+	mutex_unlock(&c->ec_stripe_head_lock);
+
+	prt_printf(out, "in flight:\n");
+
+	mutex_lock(&c->ec_stripe_new_lock);
+	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
+		prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
+		       s->idx, s->nr_data, s->nr_parity,
+		       atomic_read(&s->ref[STRIPE_REF_io]),
+		       atomic_read(&s->ref[STRIPE_REF_stripe]),
+		       bch2_watermarks[s->h->watermark]);
+	}
+	mutex_unlock(&c->ec_stripe_new_lock);
 }
 
 void bch2_fs_ec_exit(struct bch_fs *c)
 {
 	struct ec_stripe_head *h;
+	unsigned i;
 
 	while (1) {
-		mutex_lock(&c->ec_new_stripe_lock);
-		h = list_first_entry_or_null(&c->ec_new_stripe_list,
+		mutex_lock(&c->ec_stripe_head_lock);
+		h = list_first_entry_or_null(&c->ec_stripe_head_list,
 					     struct ec_stripe_head, list);
 		if (h)
 			list_del(&h->list);
-		mutex_unlock(&c->ec_new_stripe_lock);
+		mutex_unlock(&c->ec_stripe_head_lock);
 		if (!h)
 			break;
 
-		BUG_ON(h->s);
-		BUG_ON(!list_empty(&h->stripes));
+		if (h->s) {
+			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
+				BUG_ON(h->s->blocks[i]);
+
+			kfree(h->s);
+		}
 		kfree(h);
 	}
 
+	BUG_ON(!list_empty(&c->ec_stripe_new_list));
+
 	free_heap(&c->ec_stripes_heap);
-	genradix_free(&c->stripes[0]);
+	genradix_free(&c->stripes);
 	bioset_exit(&c->ec_bioset);
 }
 
-int bch2_fs_ec_init(struct bch_fs *c)
+void bch2_fs_ec_init_early(struct bch_fs *c)
 {
+	spin_lock_init(&c->ec_stripes_new_lock);
+	mutex_init(&c->ec_stripes_heap_lock);
+
+	INIT_LIST_HEAD(&c->ec_stripe_head_list);
+	mutex_init(&c->ec_stripe_head_lock);
+
+	INIT_LIST_HEAD(&c->ec_stripe_new_list);
+	mutex_init(&c->ec_stripe_new_lock);
+	init_waitqueue_head(&c->ec_stripe_new_wait);
+
+	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
 	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
+}
 
+int bch2_fs_ec_init(struct bch_fs *c)
+{
 	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
 			   BIOSET_NEED_BVECS);
 }
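Taken together, a writer would obtain stripe buckets through the head API added by this patch roughly as follows. This is a hypothetical caller for illustration (example_alloc_ec_bucket() is not a function in the patch, and it assumes the usual fs/bcachefs kernel context); error handling is abbreviated:

static int example_alloc_ec_bucket(struct btree_trans *trans,
				   unsigned target, unsigned redundancy,
				   struct closure *cl)
{
	struct ec_stripe_head *h;

	/* algo 0 is the only algorithm; watermark choice depends on the write */
	h = bch2_ec_stripe_head_get(trans, target, 0, redundancy,
				    BCH_WATERMARK_stripe, cl);
	if (!h)
		return 0;		/* no redundancy requested: nothing to do */
	if (IS_ERR(h))
		return PTR_ERR(h);

	/*
	 * On success h->s is an open ec_stripe_new with buckets allocated
	 * (h->s->blocks[]); the caller writes into them via
	 * bch2_writepoint_ec_buf(), and once the data buckets fill up the
	 * stripe is completed asynchronously by ec_stripe_create().
	 */
	bch2_ec_stripe_head_put(trans->c, h);
	return 0;
}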