// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "keylist.h"
#include "recovery.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}

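/*
 * raid5_recov() doubles as parity generation and single-block recovery:
 * XORing together every block except the failed one (with the failed slot
 * temporarily swapped into data[0]) reproduces its contents.
 * Illustrative call, assuming a 4+1 stripe where block 2 was lost:
 *
 *	raid5_recov(5, 2, bytes, data);	// rebuilds data[2] in place
 */
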
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}

static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		BUG_ON(np < 2);

		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}

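/*
 * Example (illustrative): with a 4+2 stripe (nd = 4, np = 2), rebuilding
 * data blocks 1 and 3 after a double failure would look like:
 *
 *	int failed[2] = { 1, 3 };
 *	raid_rec(2, failed, 4, 2, bytes, v);
 *
 * ir[] must be sorted; which raid6 recovery path runs depends on whether
 * the failed indices fall in the data blocks, p, or q.
 */
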
#else

#include <raid/raid.h>

#endif

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};

/* Stripes btree keys: */

const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (k.k->p.inode)
		return "invalid stripe key";

	if (bkey_val_bytes(k.k) < sizeof(*s) ||
	    bkey_val_u64s(k.k) < stripe_val_u64s(s))
		return "incorrect value size";

	return bch2_bkey_ptrs_invalid(c, k);
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i;

	pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
	       s->algorithm,
	       le16_to_cpu(s->sectors),
	       s->nr_blocks - s->nr_redundant,
	       s->nr_redundant,
	       s->csum_type,
	       1U << s->csum_granularity_bits);

	for (i = 0; i < s->nr_blocks; i++)
		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
		       (u64) s->ptrs[i].offset,
		       stripe_blockcount_get(s, i));
}

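/*
 * Sample output (illustrative values) for a 4+2 stripe:
 *
 *	algo 0 sectors 128 blocks 4:2 csum 5 gran 64 0:256:32 1:512:32 ...
 *
 * where each trailing triple is dev:offset:blockcount for one block.
 */
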
/* returns blocknr in stripe that we matched: */
static int bkey_matches_stripe(struct bch_stripe *s,
			       struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(s, ptr, i))
				return i;

	return -1;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;
		break;
	}
	}

	return false;
}

static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	unsigned i;

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		kvpfree(buf->data[i], buf->size << 9);
		buf->data[i] = NULL;
	}
}

static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -ENOMEM;
}

static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset <  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}

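/*
 * Checksums cover csum_granularity-sized chunks of each block: e.g. a
 * 128 sector stripe with csum_granularity_bits = 6 (64 sector granules)
 * stores two checksums per block; only the final granule of the stripe
 * may be short. (Illustrative numbers.)
 */
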
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				bch_err_ratelimited(c,
					"stripe checksum error at %u:%u: csum type %u, expected %llx got %llx",
					i, j, v->csum_type,
					want.lo, got.lo);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}

/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	return buf->key.v.nr_blocks -
		bitmap_weight(buf->valid, buf->key.v.nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}

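/*
 * Typical reconstruct-read sequence (a sketch of how the helpers above
 * fit together; see bch2_ec_read_extent() below for the real caller):
 *
 *	ec_stripe_buf_init(buf, offset, size);
 *	for (i = 0; i < v->nr_blocks; i++)
 *		ec_block_io(c, buf, REQ_OP_READ, i, &cl);
 *	closure_sync(&cl);
 *	ec_validate_checksums(c, buf);
 *	ec_do_recov(c, buf);
 */
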
/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}

static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			unsigned rw, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer",
				    rw == READ ? "reading from" : "writing to");
		clear_bit(idx, buf->valid);
		return;
	}

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;

		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
		bio_set_op_attrs(&ec_bio->bio, rw, 0);

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}

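/*
 * Note: the loop above splits a block into multiple bios because a single
 * bio is limited to BIO_MAX_PAGES pages; each ec_bio takes its own device
 * ioref and closure reference, dropped in ec_block_endio().
 */
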
static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, idx), BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key.k_i, k);
err:
	bch2_trans_exit(&trans);
	return ret;
}

/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &buf->key.v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}

/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -ENOMEM;

		spin_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		spin_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes[0], idx, gfp))
		return -ENOMEM;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->stripes[1], idx, gfp))
		return -ENOMEM;

	return 0;
}

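/*
 * The heap is grown by allocating a new heap outside the lock and
 * memcpy()ing the old contents over, so the resize never has to allocate
 * while holding ec_stripes_heap_lock.
 */
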
static int ec_stripe_mem_alloc(struct bch_fs *c,
			       struct btree_iter *iter)
{
	size_t idx = iter->pos.offset;
	int ret = 0;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
		return ret;

	bch2_trans_unlock(iter->trans);
	ret = -EINTR;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
		return ret;

	return -ENOMEM;
}

static ssize_t stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	return h->used && h->data[0].blocks_nonempty == 0
		? h->data[0].idx : -1;
}

static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes[0], h->data[i].idx)->heap_idx = i;
}

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes[0], idx);

	BUG_ON(!m->on_heap);
	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	if (!m->on_heap)
		return;

	m->on_heap = false;

	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	if (m->on_heap)
		return;

	BUG_ON(heap_full(&c->ec_stripes_heap));

	m->on_heap = true;

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
}

void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	size_t i;

	if (!m->on_heap)
		return;

	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h, i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	if (stripe_idx_to_delete(c) >= 0 &&
	    !percpu_ref_is_dying(&c->writes))
		schedule_work(&c->ec_stripe_delete_work);
}

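/*
 * The heap orders stripes by blocks_nonempty, smallest first, so the root
 * (h->data[0]) is always the best candidate for deletion or reuse: a
 * stripe whose blocks are all empty can simply be deleted, which is why
 * an update kicks off ec_stripe_delete_work when the root hits zero.
 */
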
/* stripe deletion */

static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
	return bch2_btree_delete_range(c, BTREE_ID_EC,
				       POS(0, idx),
				       POS(0, idx + 1),
				       NULL);
}

static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	ssize_t idx;

	while (1) {
		spin_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		if (idx < 0) {
			spin_unlock(&c->ec_stripes_heap_lock);
			break;
		}

		bch2_stripes_heap_del(c, genradix_ptr(&c->stripes[0], idx), idx);
		spin_unlock(&c->ec_stripes_heap_lock);

		if (ec_stripe_delete(c, idx))
			break;
	}
}

684 /* stripe creation: */
686 static int ec_stripe_bkey_insert(struct bch_fs *c,
687 struct bkey_i_stripe *stripe,
688 struct disk_reservation *res)
690 struct btree_trans trans;
691 struct btree_iter *iter;
693 struct bpos min_pos = POS(0, 1);
694 struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
697 bch2_trans_init(&trans, c, 0, 0);
699 bch2_trans_begin(&trans);
701 for_each_btree_key(&trans, iter, BTREE_ID_EC, start_pos,
702 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
703 if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
704 if (start_pos.offset) {
706 bch2_btree_iter_set_pos(iter, start_pos);
714 if (bkey_deleted(k.k))
720 start_pos = iter->pos;
722 ret = ec_stripe_mem_alloc(c, iter);
726 stripe->k.p = iter->pos;
728 bch2_trans_update(&trans, iter, &stripe->k_i, 0);
730 ret = bch2_trans_commit(&trans, res, NULL,
731 BTREE_INSERT_NOFAIL);
733 bch2_trans_iter_put(&trans, iter);
738 c->ec_stripe_hint = ret ? start_pos.offset : start_pos.offset + 1;
739 bch2_trans_exit(&trans);
static int ec_stripe_bkey_update(struct btree_trans *trans,
				 struct bkey_i_stripe *new)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	const struct bch_stripe *existing;
	unsigned i;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_EC,
				   new->k.p, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k || k.k->type != KEY_TYPE_stripe) {
		bch_err(c, "error updating stripe: not found");
		ret = -ENOENT;
		goto err;
	}

	existing = bkey_s_c_to_stripe(k).v;

	if (existing->nr_blocks != new->v.nr_blocks) {
		bch_err(c, "error updating stripe: nr_blocks does not match");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < new->v.nr_blocks; i++)
		stripe_blockcount_set(&new->v, i,
			stripe_blockcount_get(existing, i));

	bch2_trans_update(trans, iter, &new->k_i, 0);
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static void extent_stripe_ptr_add(struct bkey_s_extent e,
				  struct ec_stripe_buf *s,
				  struct bch_extent_ptr *ptr,
				  unsigned block)
{
	struct bch_extent_stripe_ptr *dst = (void *) ptr;
	union bch_extent_entry *end = extent_entry_last(e);

	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
	e.k->u64s += sizeof(*dst) / sizeof(u64);

	*dst = (struct bch_extent_stripe_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block	= block,
		.idx	= s->key.k.p.offset,
	};
}

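/*
 * Layout sketch: the stripe ptr entry is inserted immediately after the
 * extent ptr it annotates, by shifting the tail of the value up by one
 * entry:
 *
 *	before:	... ptr(dev) | rest ...
 *	after:	... ptr(dev) | stripe_ptr(block, idx) | rest ...
 */
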
static int ec_stripe_update_ptrs(struct bch_fs *c,
				 struct ec_stripe_buf *s,
				 struct bkey *pos)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
	struct bkey_buf sk;
	int ret = 0, dev, block;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	/* XXX this doesn't support the reflink btree */

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(pos),
				   BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
		struct bch_extent_ptr *ptr, *ec_ptr = NULL;

		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
			bch2_btree_iter_next(iter);
			continue;
		}

		block = bkey_matches_stripe(&s->key.v, k);
		if (block < 0) {
			bch2_btree_iter_next(iter);
			continue;
		}

		dev = s->key.v.ptrs[block].dev;

		bch2_bkey_buf_reassemble(&sk, c, k);
		e = bkey_i_to_s_extent(sk.k);

		bch2_bkey_drop_ptrs(e.s, ptr, ptr->dev != dev);
		ec_ptr = (void *) bch2_bkey_has_device(e.s_c, dev);
		BUG_ON(!ec_ptr);

		extent_stripe_ptr_add(e, s, ec_ptr, block);

		bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
		bch2_trans_update(&trans, iter, sk.k, 0);

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bkey_i *k;
	struct stripe *m;
	struct bch_stripe *v = &s->new_stripe.key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (s->err) {
		if (s->err != -EROFS)
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);

	if (!percpu_ref_tryget(&c->writes))
		goto err;

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err_put_writes;
	}

	ret = s->have_existing_stripe
		? bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
				ec_stripe_bkey_update(&trans, &s->new_stripe.key))
		: ec_stripe_bkey_insert(c, &s->new_stripe.key, &s->res);
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err_put_writes;
	}

	for_each_keylist_key(&s->keys, k) {
		ret = ec_stripe_update_ptrs(c, &s->new_stripe, &k->k);
		if (ret) {
			bch_err(c, "error creating stripe: error %i updating pointers", ret);
			break;
		}
	}

	spin_lock(&c->ec_stripes_heap_lock);
	m = genradix_ptr(&c->stripes[0], s->new_stripe.key.k.p.offset);

	BUG_ON(m->on_heap);
	bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
	spin_unlock(&c->ec_stripes_heap_lock);
err_put_writes:
	percpu_ref_put(&c->writes);
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	bch2_keylist_free(&s->keys, s->inline_keys);

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);

	kfree(s);
}

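/*
 * Summary of the path above: once all data buckets are written,
 * ec_stripe_create() (re)reads any blocks taken from an existing stripe,
 * generates parity and checksums, writes p/q, inserts or updates the
 * stripe key, and finally rewrites the extents on the keylist to point at
 * the stripe before releasing the open buckets.
 */
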
static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s, *n;
restart:
	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->pin)) {
			list_del(&s->list);
			mutex_unlock(&c->ec_stripe_new_lock);
			ec_stripe_create(s);
			goto restart;
		}
	mutex_unlock(&c->ec_stripe_new_lock);
}

static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(atomic_read(&s->pin) <= 0);

	if (atomic_dec_and_test(&s->pin)) {
		BUG_ON(!s->pending);
		queue_work(system_long_wq, &c->ec_stripe_create_work);
	}
}

static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s);
}

/* have a full bucket - hand it off to be erasure coded: */
void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	if (ob->sectors_free)
		s->err = -1;

	ec_stripe_new_put(c, s);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}

void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	ca	= bch_dev_bkey_exists(c, ob->ptr.dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}

void bch2_ec_add_backpointer(struct bch_fs *c, struct write_point *wp,
			     struct bpos pos, unsigned sectors)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct ec_stripe_new *ec;

	if (!ob)
		return;

	ec = ob->ec;
	mutex_lock(&ec->lock);

	if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
				 ARRAY_SIZE(ec->inline_keys),
				 BKEY_U64s)) {
		BUG();
	}

	bkey_init(&ec->keys.top->k);
	ec->keys.top->k.p	= pos;
	bch2_key_resize(&ec->keys.top->k, sectors);
	bch2_keylist_push(&ec->keys);

	mutex_unlock(&ec->lock);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}

/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}

static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i_stripe *s,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	unsigned u64s;

	bkey_stripe_init(&s->k_i);
	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->sb.encoded_extent_max);
	s->v.csum_type			= BCH_CSUM_CRC32C;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}

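/*
 * Worked example (illustrative numbers): with encoded_extent_max = 128
 * sectors, csum_granularity_bits starts at ilog2(128) = 7; if the
 * resulting value would exceed BKEY_VAL_U64s_MAX, the granularity is
 * doubled until the checksums fit, trading checksum resolution for key
 * size.
 */
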
static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->pin, 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	bch2_keylist_init(&s->keys, s->inline_keys);

	ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
			   s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}

static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 bool copygc)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	mutex_lock(&h->lock);

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->copygc	= copygc;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}

void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}

struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
						 unsigned target,
						 unsigned algo,
						 unsigned redundancy,
						 bool copygc)
{
	struct ec_stripe_head *h;

	if (!redundancy)
		return NULL;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->copygc		== copygc) {
			mutex_lock(&h->lock);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, copygc);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}

static enum bucket_alloc_ret
new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
			 struct closure *cl)
{
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	enum bucket_alloc_ret ret = ALLOC_SUCCESS;

	for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
		if (test_bit(i, h->s->blocks_gotten)) {
			__clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
			if (i < h->s->nr_data)
				nr_have_data++;
			else
				nr_have_parity++;
		}
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache,
					    h->copygc
					    ? RESERVE_MOVINGGC
					    : RESERVE_NONE,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = ob->ptr;
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			goto err;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache,
					    h->copygc
					    ? RESERVE_MOVINGGC
					    : RESERVE_NONE,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = ob->ptr;
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			goto err;
	}
err:
	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);
	return ret;
}

/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;

	if (may_create_new_stripe(c))
		return -1;

	spin_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;
		m = genradix_ptr(&c->stripes[0], stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant) {
			bch2_stripes_heap_del(c, m, stripe_idx);
			spin_unlock(&c->ec_stripes_heap_lock);
			return stripe_idx;
		}
	}

	spin_unlock(&c->ec_stripes_heap_lock);
	return -1;
}

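/*
 * Reuse policy: since may_create_new_stripe() currently returns false,
 * new stripes are always built on top of an existing, partially empty
 * stripe of matching geometry when one exists; its non-empty blocks are
 * kept and only the empty slots are reallocated.
 */
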
struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       bool copygc,
					       struct closure *cl)
{
	struct ec_stripe_head *h;
	unsigned i;
	s64 idx;
	int ret;

	h = __bch2_ec_stripe_head_get(c, target, algo, redundancy, copygc);
	if (!h) {
		bch_err(c, "no stripe head");
		return NULL;
	}

	if (!h->s) {
		if (ec_new_stripe_alloc(c, h)) {
			bch2_ec_stripe_head_put(c, h);
			bch_err(c, "failed to allocate new stripe");
			return NULL;
		}

		idx = get_existing_stripe(c, h);
		if (idx >= 0) {
			h->s->have_existing_stripe = true;
			ret = get_stripe_key(c, idx, &h->s->existing_stripe);
			if (ret) {
				bch2_fs_fatal_error(c, "error reading stripe key: %i", ret);
				bch2_ec_stripe_head_put(c, h);
				return NULL;
			}

			if (ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize)) {
				/*
				 * this is a problem: we have deleted from the
				 * stripes heap already
				 */
				BUG();
			}

			BUG_ON(h->s->existing_stripe.size != h->blocksize);
			BUG_ON(h->s->existing_stripe.size !=
			       le16_to_cpu(h->s->existing_stripe.key.v.sectors));

			for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
				if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
					__set_bit(i, h->s->blocks_gotten);
					__set_bit(i, h->s->blocks_allocated);
				}

				ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
			}

			bkey_copy(&h->s->new_stripe.key.k_i,
				  &h->s->existing_stripe.key.k_i);
		}

		if (ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize))
			BUG();
	}

	if (!h->s->allocated) {
		if (!h->s->have_existing_stripe &&
		    !h->s->res.sectors) {
			ret = bch2_disk_reservation_get(c, &h->s->res,
					h->blocksize,
					h->s->nr_parity, 0);
			if (ret) {
				/*
				 * This means we need to wait for copygc to
				 * empty out buckets from existing stripes:
				 */
				bch2_ec_stripe_head_put(c, h);
				h = NULL;
				goto out;
			}
		}

		ret = new_stripe_alloc_buckets(c, h, cl);
		if (ret) {
			bch2_ec_stripe_head_put(c, h);
			h = ERR_PTR(-ret);
			goto out;
		}

		h->s->allocated = true;
	}
out:
	return h;
}

void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -EROFS;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}

void bch2_stripes_heap_start(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct stripe *m;

	genradix_for_each(&c->stripes[0], iter, m)
		if (m->alive)
			bch2_stripes_heap_insert(c, m, iter.pos);
}

static int __bch2_stripe_write_key(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct stripe *m,
				   size_t idx,
				   struct bkey_i_stripe *new_key)
{
	const struct bch_stripe *v;
	struct bkey_s_c k;
	unsigned i;
	int ret;

	bch2_btree_iter_set_pos(iter, POS(0, idx));

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_stripe)
		return -EIO;

	v = bkey_s_c_to_stripe(k).v;
	for (i = 0; i < v->nr_blocks; i++)
		if (m->block_sectors[i] != stripe_blockcount_get(v, i))
			goto write;
	return 0;
write:
	bkey_reassemble(&new_key->k_i, k);

	for (i = 0; i < new_key->v.nr_blocks; i++)
		stripe_blockcount_set(&new_key->v, i,
				      m->block_sectors[i]);

	bch2_trans_update(trans, iter, &new_key->k_i, 0);
	return 0;
}

int bch2_stripes_write(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct genradix_iter giter;
	struct bkey_i_stripe *new_key;
	struct stripe *m;
	int ret = 0;

	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
	BUG_ON(!new_key);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	genradix_for_each(&c->stripes[0], giter, m) {
		if (!m->alive)
			continue;

		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_NOFAIL|flags,
			__bch2_stripe_write_key(&trans, iter, m,
					giter.pos, new_key));

		if (ret)
			break;
	}

	bch2_trans_exit(&trans);

	kfree(new_key);

	return ret;
}

static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id,
				unsigned level, struct bkey_s_c k)
{
	int ret = 0;

	if (k.k->type == KEY_TYPE_stripe) {
		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
			bch2_mark_key(c, k, 0, 0, NULL, 0,
				      BTREE_TRIGGER_NOATOMIC);
		if (ret)
			return ret;
	}

	return ret;
}

int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
	int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_EC,
					      NULL, bch2_stripes_read_fn);
	if (ret)
		bch_err(c, "error reading stripes: %i", ret);

	return ret;
}

int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	size_t i, idx = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);

	k = bch2_btree_iter_prev(iter);
	if (!IS_ERR_OR_NULL(k.k))
		idx = k.k->p.offset + 1;
	ret = bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (!idx)
		return 0;

	if (!gc &&
	    !init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
		       GFP_KERNEL))
		return -ENOMEM;
#if 0
	ret = genradix_prealloc(&c->stripes[gc], idx, GFP_KERNEL);
#else
	for (i = 0; i < idx; i++)
		if (!genradix_ptr_alloc(&c->stripes[gc], i, GFP_KERNEL))
			return -ENOMEM;
#endif
	return 0;
}

void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	spin_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 20); i++) {
		m = genradix_ptr(&c->stripes[0], h->data[i].idx);

		pr_buf(out, "%zu %u/%u+%u\n", h->data[i].idx,
		       h->data[i].blocks_nonempty,
		       m->nr_blocks - m->nr_redundant,
		       m->nr_redundant);
	}
	spin_unlock(&c->ec_stripes_heap_lock);
}

void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		pr_buf(out, "target %u algo %u redundancy %u:\n",
		       h->target, h->algo, h->redundancy);

		if (h->s)
			pr_buf(out, "\tpending: blocks %u+%u allocated %u\n",
			       h->s->nr_data, h->s->nr_parity,
			       bitmap_weight(h->s->blocks_allocated,
					     h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		pr_buf(out, "\tin flight: blocks %u+%u pin %u\n",
		       s->nr_data, s->nr_parity,
		       atomic_read(&s->pin));
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}

void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		BUG_ON(h->s);
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes[0]);
	bioset_exit(&c->ec_bioset);
}

int bch2_fs_ec_init(struct bch_fs *c)
{
	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);

	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}