// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_update.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/sort.h>
#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
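/*
 * Note: raid5_recov() relies on the usual XOR parity identity. With
 * parity P = D0 ^ D1 ^ ... ^ D(n-1), any single missing block can be
 * rebuilt from the survivors:
 *
 *	Dk = P ^ D0 ^ ... ^ D(k-1) ^ D(k+1) ^ ... ^ D(n-1)
 *
 * The swap()s let the failed block's buffer double as the XOR
 * accumulator, so recovery and parity generation share one code path.
 */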
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		BUG_ON(np < 2);

		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
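/*
 * Note: with nd data blocks, p at index nd and q at index nd + 1, the
 * two-failure cases (ir[] sorted ascending) break down as:
 *
 *	data + data	-> raid6_2data_recov()
 *	data + p	-> raid6_datap_recov()
 *	data + q	-> rebuild data via XOR with p, then regenerate q
 *	p + q		-> regenerate both via raid_gen()
 */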
#else

#include <raid/raid.h>

#endif
struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */
const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (!bkey_cmp(k.k->p, POS_MIN))
		return "stripe at pos 0";

	if (k.k->p.inode)
		return "invalid stripe key";

	if (bkey_val_bytes(k.k) < sizeof(*s) ||
	    bkey_val_u64s(k.k) < stripe_val_u64s(s))
		return "incorrect value size";

	return bch2_bkey_ptrs_invalid(c, k);
}
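/*
 * Note: a KEY_TYPE_stripe value is a fixed struct bch_stripe header
 * followed by nr_blocks extent pointers and then the per-block
 * checksums, so the fixed-size check alone isn't enough -
 * stripe_val_u64s() accounts for the variable-length tail. The
 * short-circuit above guarantees the header is present before
 * stripe_val_u64s() reads nr_blocks.
 */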
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i;

	pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
	       s->algorithm,
	       le16_to_cpu(s->sectors),
	       s->nr_blocks - s->nr_redundant,
	       s->nr_redundant,
	       s->csum_type,
	       1U << s->csum_granularity_bits);

	for (i = 0; i < s->nr_blocks; i++)
		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
		       (u64) s->ptrs[i].offset,
		       stripe_blockcount_get(s, i));
}
/* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}
/* Stripe bufs: */

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	unsigned i;

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		kvpfree(buf->data[i], buf->size << 9);
		buf->data[i] = NULL;
	}
}
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -ENOMEM;
}
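/*
 * Worked example: with csum_granularity_bits = 6 (64-sector chunks), a
 * request for offset 100, size 8 rounds to
 *
 *	offset	= round_down(100, 64)		= 64
 *	end	= round_up(100 + 8, 64)		= 128
 *
 * so the buffer covers sectors [64, 128) and every checksummed chunk it
 * touches can be read and verified in full.
 */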
/* Checksumming: */

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset	>= end);
	BUG_ON(offset	<  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}
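/*
 * Note: checksums are stored per block, one per csum_granularity chunk:
 * chunk j of block i covers sectors
 * [j << csum_granularity_bits, (j + 1) << csum_granularity_bits), and
 * csums_per_device = DIV_ROUND_UP(sectors, 1 << csum_granularity_bits).
 */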
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				char buf2[200];

				bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&buf->key.k_i));

				bch_err_ratelimited(c,
					"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
					(void *) _RET_IP_, i, j, v->csum_type,
					want.lo, got.lo, buf2);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}
/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}
static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	return buf->key.v.nr_blocks -
		bitmap_weight(buf->valid, buf->key.v.nr_blocks);
}
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}
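/*
 * Note: only failed *data* blocks are collected into failed[] - a lost
 * parity block doesn't need reconstructing to satisfy a read, and can
 * always be regenerated from the data later. The guard above still counts
 * all failures, since more than nr_redundant lost blocks of any kind is
 * unrecoverable.
 */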
/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &ec_bio->buf->key.v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale pointer after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to");
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			unsigned rw, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer",
				    rw == READ ? "reading from" : "writing to");
		clear_bit(idx, buf->valid);
		return;
	}

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca	= ca;
		ec_bio->buf	= buf;
		ec_bio->idx	= idx;

		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
		bio_set_op_attrs(&ec_bio->bio, rw, 0);

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
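/*
 * Note: a stripe block can be larger than a single bio can map, so the
 * buffer is submitted as a chain of bios, each holding its own closure
 * and io_ref references (dropped in ec_block_endio()); the final
 * percpu_ref_put() here pairs with the ref taken by
 * bch2_dev_get_ioref().
 */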
static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes,
			     POS(0, idx), BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key.k_i, k);
err:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &buf->key.v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}
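/*
 * Flow summary: a reconstruct read fetches the slice of every stripe
 * block covering the failed extent, verifies per-chunk checksums,
 * rebuilds missing blocks from parity, then copies just the requested
 * range back into the original read bio.
 */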
/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -ENOMEM;

		spin_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		spin_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -ENOMEM;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -ENOMEM;

	return 0;
}
static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	size_t idx = iter->pos.offset;
	int ret = 0;

	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
		return ret;

	bch2_trans_unlock(trans);
	ret = -EINTR;

	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
		return ret;

	return -ENOMEM;
}
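/*
 * Note: the GFP_NOWAIT attempt avoids blocking while btree locks are
 * held; if it fails we drop the locks, retry with GFP_KERNEL, and
 * return -EINTR so the transaction gets restarted.
 */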
static ssize_t stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	return h->used && h->data[0].blocks_nonempty == 0
		? h->data[0].idx : -1;
}
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}
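/*
 * Note: this is cmp_int() on blocks_nonempty, making the stripes heap
 * effectively a min-heap keyed by how many blocks still hold data -
 * stripes with nothing left bubble to data[0], where
 * stripe_idx_to_delete() and get_existing_stripe() look for them.
 */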
static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	BUG_ON(heap_full(&c->ec_stripes_heap));

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
}
void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	size_t i;

	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h,	  i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	if (stripe_idx_to_delete(c) >= 0 &&
	    !percpu_ref_is_dying(&c->writes))
		schedule_work(&c->ec_stripe_delete_work);
}
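/*
 * Invariant: every sift through the heap runs the backpointer callback,
 * so stripe->heap_idx always tracks the entry's current heap position;
 * heap_verify_backpointer() checks both directions of that mapping
 * before and after each update.
 */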
/* stripe deletion */

static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
	return bch2_btree_delete_range(c, BTREE_ID_stripes,
				       POS(0, idx),
				       POS(0, idx + 1),
				       NULL);
}
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	ssize_t idx;

	while (1) {
		spin_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		if (idx < 0) {
			spin_unlock(&c->ec_stripes_heap_lock);
			break;
		}

		bch2_stripes_heap_del(c, genradix_ptr(&c->stripes, idx), idx);
		spin_unlock(&c->ec_stripes_heap_lock);

		if (ec_stripe_delete(c, idx))
			break;
	}
}
/* stripe creation: */

static int ec_stripe_bkey_insert(struct btree_trans *trans,
				 struct bkey_i_stripe *stripe,
				 struct disk_reservation *res)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	for_each_btree_key(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -ENOSPC;
			break;
		}

		if (bkey_deleted(k.k))
			goto found_slot;
	}

	goto err;
found_slot:
	start_pos = iter.pos;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret)
		goto err;

	stripe->k.p = iter.pos;

	ret = bch2_trans_update(trans, &iter, &stripe->k_i, 0);

	c->ec_stripe_hint = start_pos.offset;
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
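/*
 * Note: c->ec_stripe_hint remembers where the last empty slot was found,
 * so successive stripe creations don't rescan the btree from the start;
 * the scan wraps back to POS(0, 1) once before giving up with -ENOSPC.
 */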
static int ec_stripe_bkey_update(struct btree_trans *trans,
				 struct bkey_i_stripe *new,
				 struct disk_reservation *res)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_stripe *existing;
	unsigned i;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes,
			     new->k.p, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k || k.k->type != KEY_TYPE_stripe) {
		bch_err(trans->c, "error updating stripe: not found");
		ret = -ENOENT;
		goto err;
	}

	existing = bkey_s_c_to_stripe(k).v;

	if (existing->nr_blocks != new->v.nr_blocks) {
		bch_err(trans->c, "error updating stripe: nr_blocks does not match");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < new->v.nr_blocks; i++)
		stripe_blockcount_set(&new->v, i,
			stripe_blockcount_get(existing, i));

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static void extent_stripe_ptr_add(struct bkey_s_extent e,
				  struct ec_stripe_buf *s,
				  struct bch_extent_ptr *ptr,
				  unsigned block)
{
	struct bch_extent_stripe_ptr *dst = (void *) ptr;
	union bch_extent_entry *end = extent_entry_last(e);

	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
	e.k->u64s += sizeof(*dst) / sizeof(u64);

	*dst = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= s->key.v.nr_redundant,
		.idx		= s->key.k.p.offset,
	};
}
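/*
 * Note: the memmove_u64s_up() shifts everything from the extent pointer
 * to the end of the key up by one u64, so the new stripe_ptr entry lands
 * directly before the pointer it annotates - matching how the decode
 * path attributes stripe_ptr entries to the ptr entry that follows them.
 */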
static int ec_stripe_update_ptrs(struct bch_fs *c,
				 struct ec_stripe_buf *s,
				 struct bkey *pos)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
	struct bkey_buf sk;
	struct bpos next_pos;
	unsigned block;
	int ret = 0, dev;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	/* XXX this doesn't support the reflink btree */

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     bkey_start_pos(pos),
			     BTREE_ITER_INTENT);

	while (bch2_trans_begin(&trans),
	       (k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
		const struct bch_extent_ptr *ptr_c;
		struct bch_extent_ptr *ptr, *ec_ptr = NULL;

		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
		/*
		 * It doesn't generally make sense to erasure code cached ptrs:
		 * XXX: should we be incrementing a counter?
		 */
		if (!ptr_c || ptr_c->cached) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		dev = s->key.v.ptrs[block].dev;

		bch2_bkey_buf_reassemble(&sk, c, k);
		e = bkey_i_to_s_extent(sk.k);

		bch2_bkey_drop_ptrs(e.s, ptr, ptr->dev != dev);
		ec_ptr = (void *) bch2_bkey_has_device(e.s_c, dev);
		BUG_ON(!ec_ptr);

		extent_stripe_ptr_add(e, s, ec_ptr, block);

		bch2_btree_iter_set_pos(&iter, bkey_start_pos(&sk.k->k));
		next_pos = sk.k->k.p;

		ret   = bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(&trans, &iter, sk.k, 0) ?:
			bch2_trans_commit(&trans, NULL, NULL,
					  BTREE_INSERT_NOFAIL);
		if (!ret)
			bch2_btree_iter_set_pos(&iter, next_pos);

		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}
/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bkey_i *k;
	struct stripe *m;
	struct bch_stripe *v = &s->new_stripe.key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (s->err) {
		if (s->err != -EROFS)
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);

	if (!percpu_ref_tryget(&c->writes))
		goto err;

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err_put_writes;
	}

	ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
			    s->have_existing_stripe
			    ? ec_stripe_bkey_update(&trans, &s->new_stripe.key, &s->res)
			    : ec_stripe_bkey_insert(&trans, &s->new_stripe.key, &s->res));
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err_put_writes;
	}

	for_each_keylist_key(&s->keys, k) {
		ret = ec_stripe_update_ptrs(c, &s->new_stripe, &k->k);
		if (ret) {
			bch_err(c, "error creating stripe: error %i updating pointers", ret);
			break;
		}
	}

	spin_lock(&c->ec_stripes_heap_lock);
	m = genradix_ptr(&c->stripes, s->new_stripe.key.k.p.offset);

	bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
	spin_unlock(&c->ec_stripes_heap_lock);
err_put_writes:
	percpu_ref_put(&c->writes);
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	bch2_keylist_free(&s->keys, s->inline_keys);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);
	kfree(s);
}
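/*
 * Flow summary: by the time ec_stripe_create() runs, the data buckets
 * have already been filled by normal foreground writes. This path only
 * reads back any reused blocks from an existing stripe, computes p/q,
 * writes the parity buckets, inserts or updates the stripe key, and
 * rewrites the covered extents to carry stripe pointers.
 */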
static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s, *n;
restart:
	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->pin)) {
			list_del(&s->list);
			mutex_unlock(&c->ec_stripe_new_lock);
			ec_stripe_create(s);
			goto restart;
		}
	mutex_unlock(&c->ec_stripe_new_lock);
}
static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(atomic_read(&s->pin) <= 0);

	if (atomic_dec_and_test(&s->pin)) {
		BUG_ON(!s->pending);
		queue_work(system_long_wq, &c->ec_stripe_create_work);
	}
}
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s);
}
/* have a full bucket - hand it off to be erasure coded: */
void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	if (ob->sectors_free)
		s->err = -1;

	ec_stripe_new_put(c, s);
}
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	ca	= bch_dev_bkey_exists(c, ob->ptr.dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}
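/*
 * Note: foreground writes to an EC bucket are mirrored into the new
 * stripe's in-memory buffer via this pointer, so the data blocks never
 * have to be read back from disk before parity can be computed.
 */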
void bch2_ob_add_backpointer(struct bch_fs *c, struct open_bucket *ob,
			     struct bkey *k)
{
	struct ec_stripe_new *ec = ob->ec;

	if (!ec)
		return;

	mutex_lock(&ec->lock);

	if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
				 ARRAY_SIZE(ec->inline_keys),
				 BKEY_U64s)) {
		BUG();
	}

	bkey_init(&ec->keys.top->k);
	ec->keys.top->k.p	= k->p;
	ec->keys.top->k.size	= k->size;
	bch2_keylist_push(&ec->keys);

	mutex_unlock(&ec->lock);
}
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}
static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i_stripe *s,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	unsigned u64s;

	bkey_stripe_init(&s->k_i);
	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->sb.encoded_extent_max);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
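/*
 * Note: if the per-chunk checksums would make the stripe key bigger than
 * a bkey value can hold, the checksum granularity is doubled (fewer,
 * coarser checksums) until the key fits.
 */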
static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->pin, 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	bch2_keylist_init(&s->keys, s->inline_keys);

	ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
			   s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 bool copygc)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	mutex_lock(&h->lock);

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->copygc	= copygc;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}
void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}
struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
						 unsigned target,
						 unsigned algo,
						 unsigned redundancy,
						 bool copygc)
{
	struct ec_stripe_head *h;

	if (!redundancy)
		return NULL;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->copygc		== copygc) {
			mutex_lock(&h->lock);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, copygc);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
				    struct closure *cl)
{
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
		if (test_bit(i, h->s->blocks_gotten)) {
			__clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
			if (i < h->s->nr_data)
				nr_have_data++;
			else
				nr_have_parity++;
		}
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

	percpu_down_read(&c->mark_lock);

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache,
					    h->copygc
					    ? RESERVE_MOVINGGC
					    : RESERVE_NONE,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = ob->ptr;
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			goto err;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache,
					    h->copygc
					    ? RESERVE_MOVINGGC
					    : RESERVE_NONE,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = ob->ptr;
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			goto err;
	}
err:
	percpu_up_read(&c->mark_lock);
	return ret;
}
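/*
 * Note: within a stripe, data blocks occupy indices [0, nr_data) and
 * parity blocks [nr_data, nr_data + nr_parity), which is why the two
 * find_next_zero_bit() searches above scan different ranges of
 * blocks_gotten.
 */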
/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	spin_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;
		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant) {
			bch2_stripes_heap_del(c, m, stripe_idx);
			ret = stripe_idx;
			break;
		}
	}
	spin_unlock(&c->ec_stripes_heap_lock);
	return ret;
}
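/*
 * Note: a stripe qualifies for reuse when it matches the head's geometry
 * and still has at least one empty data block to fill; fully-empty
 * stripes are skipped since they're about to be deleted outright.
 */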
static int __bch2_ec_stripe_head_reuse(struct bch_fs *c,
				       struct ec_stripe_head *h)
{
	unsigned i;
	s64 idx;
	int ret;

	idx = get_existing_stripe(c, h);
	if (idx < 0) {
		bch_err(c, "failed to find an existing stripe");
		return -ENOSPC;
	}

	h->s->have_existing_stripe = true;
	ret = get_stripe_key(c, idx, &h->s->existing_stripe);
	if (ret) {
		bch2_fs_fatal_error(c, "error reading stripe key: %i", ret);
		return ret;
	}

	if (ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize)) {
		/*
		 * this is a problem: we have deleted from the
		 * stripes heap already
		 */
		BUG();
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);

	for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
		if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key.k_i,
		  &h->s->existing_stripe.key.k_i);

	return 0;
}
static int __bch2_ec_stripe_head_reserve(struct bch_fs *c,
					 struct ec_stripe_head *h)
{
	int ret;

	ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity, 0);
	if (ret) {
		/*
		 * This means we need to wait for copygc to
		 * empty out buckets from existing stripes:
		 */
		bch_err(c, "failed to reserve stripe");
	}

	return ret;
}
struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       bool copygc,
					       struct closure *cl)
{
	struct ec_stripe_head *h;
	int ret;
	bool needs_stripe_new;

	h = __bch2_ec_stripe_head_get(c, target, algo, redundancy, copygc);
	if (!h) {
		bch_err(c, "no stripe head");
		return NULL;
	}

	needs_stripe_new = !h->s;
	if (needs_stripe_new) {
		if (ec_new_stripe_alloc(c, h)) {
			ret = -ENOMEM;
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}

		if (ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize))
			BUG();
	}

	/*
	 * Try reserve a new stripe before reusing an
	 * existing stripe. This will prevent unnecessary
	 * read amplification during write oriented workloads.
	 */
	ret = 0;
	if (!h->s->allocated && !h->s->res.sectors && !h->s->have_existing_stripe)
		ret = __bch2_ec_stripe_head_reserve(c, h);
	if (ret && needs_stripe_new)
		ret = __bch2_ec_stripe_head_reuse(c, h);
	if (ret)
		goto err;

	if (!h->s->allocated) {
		ret = new_stripe_alloc_buckets(c, h, cl);
		if (ret)
			goto err;

		h->s->allocated = true;
	}

	return h;

err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -EROFS;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}
void bch2_stripes_heap_start(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct stripe *m;

	genradix_for_each(&c->stripes, iter, m)
		if (m->alive)
			bch2_stripes_heap_insert(c, m, iter.pos);
}
static int bch2_stripes_read_fn(struct btree_trans *trans, struct bkey_s_c k)
{
	const struct bch_stripe *s;
	struct bch_fs *c = trans->c;
	struct stripe *m;
	unsigned i;
	int ret = 0;

	if (k.k->type != KEY_TYPE_stripe)
		return 0;

	ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
	if (ret)
		return ret;

	s = bkey_s_c_to_stripe(k).v;

	m = genradix_ptr(&c->stripes, k.k->p.offset);
	m->alive	= true;
	m->sectors	= le16_to_cpu(s->sectors);
	m->algorithm	= s->algorithm;
	m->nr_blocks	= s->nr_blocks;
	m->nr_redundant	= s->nr_redundant;
	m->blocks_nonempty = 0;

	for (i = 0; i < s->nr_blocks; i++)
		m->blocks_nonempty += !!stripe_blockcount_get(s, i);

	spin_lock(&c->ec_stripes_heap_lock);
	bch2_stripes_heap_update(c, m, k.k->p.offset);
	spin_unlock(&c->ec_stripes_heap_lock);

	return ret;
}
int bch2_stripes_read(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_stripes,
					  bch2_stripes_read_fn);
	bch2_trans_exit(&trans);
	if (ret)
		bch_err(c, "error reading stripes: %i", ret);

	return ret;
}
int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t i, idx = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes, POS(0, U64_MAX), 0);

	k = bch2_btree_iter_prev(&iter);
	ret = bkey_err(k);
	if (!ret && k.k)
		idx = k.k->p.offset + 1;

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (!idx)
		return 0;

	if (!gc &&
	    !init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
		       GFP_KERNEL))
		return -ENOMEM;

	for (i = 0; i < idx; i++)
		if (!gc
		    ? !genradix_ptr_alloc(&c->stripes, i, GFP_KERNEL)
		    : !genradix_ptr_alloc(&c->gc_stripes, i, GFP_KERNEL))
			return -ENOMEM;

	return 0;
}
void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	spin_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 20); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		pr_buf(out, "%zu %u/%u+%u\n", h->data[i].idx,
		       h->data[i].blocks_nonempty,
		       m->nr_blocks - m->nr_redundant,
		       m->nr_redundant);
	}
	spin_unlock(&c->ec_stripes_heap_lock);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		pr_buf(out, "target %u algo %u redundancy %u:\n",
		       h->target, h->algo, h->redundancy);

		if (h->s)
			pr_buf(out, "\tpending: blocks %u+%u allocated %u\n",
			       h->s->nr_data, h->s->nr_parity,
			       bitmap_weight(h->s->blocks_allocated,
					     h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		pr_buf(out, "\tin flight: blocks %u+%u pin %u\n",
		       s->nr_data, s->nr_parity,
		       atomic_read(&s->pin));
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		BUG_ON(h->s);
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}
int bch2_fs_ec_init(struct bch_fs *c)
{
	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);

	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}