// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
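
/*
 * Simple XOR (RAID5-style) recovery: reconstruct a single failed block
 * by XORing together all the surviving blocks. The failed block is
 * temporarily swapped into slot 0 so the xor_blocks() loop can
 * accumulate into it in MAX_XOR_BLOCKS-sized batches.
 */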
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}
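
/*
 * Recover up to two failed blocks, given the sorted indices of the
 * failed blocks in ir[]: a single failure is repaired with XOR (or by
 * regenerating the q syndrome); double failures dispatch to the raid6
 * recovery helpers according to whether data, p or q was lost.
 */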
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
#else

#include <raid/raid.h>

#endif
struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};
/* Stripes btree keys: */

int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
			unsigned flags, struct printbuf *err)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (bkey_eq(k.k->p, POS_MIN)) {
		prt_printf(err, "stripe at POS_MIN");
		return -BCH_ERR_invalid_bkey;
	}

	if (k.k->p.inode) {
		prt_printf(err, "nonzero inode field");
		return -BCH_ERR_invalid_bkey;
	}

	if (bkey_val_bytes(k.k) < sizeof(*s)) {
		prt_printf(err, "incorrect value size (%zu < %zu)",
			   bkey_val_bytes(k.k), sizeof(*s));
		return -BCH_ERR_invalid_bkey;
	}

	if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
		prt_printf(err, "incorrect value size (%zu < %u)",
			   bkey_val_u64s(k.k), stripe_val_u64s(s));
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
		   s->algorithm,
		   le16_to_cpu(s->sectors),
		   s->nr_blocks - s->nr_redundant,
		   s->nr_redundant,
		   s->csum_type,
		   1U << s->csum_granularity_bits);

	for (i = 0; i < s->nr_blocks; i++)
		prt_printf(out, " %u:%llu:%u", s->ptrs[i].dev,
			   (u64) s->ptrs[i].offset,
			   stripe_blockcount_get(s, i));
}
/* Returns the matching pointer, setting *block to the stripe block number it matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}
static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	unsigned i;

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		kvpfree(buf->data[i], buf->size << 9);
		buf->data[i] = NULL;
	}
}
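
/*
 * Allocate per-block buffers covering the given range of the stripe;
 * the range is widened to checksum granularity boundaries, so whole
 * checksummed units can be verified and regenerated.
 */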
/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
			      unsigned offset, unsigned size)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1U << v->csum_granularity_bits;
	unsigned end = offset + size;
	unsigned i;

	BUG_ON(end > le16_to_cpu(v->sectors));

	offset	= round_down(offset, csum_granularity);
	end	= min_t(unsigned, le16_to_cpu(v->sectors),
			round_up(end, csum_granularity));

	buf->offset	= offset;
	buf->size	= end - offset;

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < buf->key.v.nr_blocks; i++) {
		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
		if (!buf->data[i])
			goto err;
	}

	return 0;
err:
	ec_stripe_buf_exit(buf);
	return -ENOMEM;
}
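
/* Checksumming: */

/*
 * Checksum a single checksum-granularity-sized unit of one block (the
 * final unit in a stripe may be smaller).
 */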
static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
					 unsigned block, unsigned offset)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned end = buf->offset + buf->size;
	unsigned len = min(csum_granularity, end - offset);

	BUG_ON(offset >= end);
	BUG_ON(offset <  buf->offset);
	BUG_ON(offset & (csum_granularity - 1));
	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
	       (len & (csum_granularity - 1)));

	return bch2_checksum(NULL, v->csum_type,
			     null_nonce(),
			     buf->data[block] + ((offset - buf->offset) << 9),
			     len << 9);
}
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, j, csums_per_device = stripe_csums_per_device(v);

	if (!v->csum_type)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++)
		for (j = 0; j < csums_per_device; j++)
			stripe_csum_set(v, i, j,
				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}
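
/*
 * Verify the checksums of the blocks we read; a block that fails
 * verification is marked invalid in buf->valid, so recovery will
 * rebuild it from the remaining blocks.
 */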
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned i;

	if (!v->csum_type)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum want = stripe_csum_get(v, i, j);
			struct bch_csum got = ec_block_checksum(buf, i, offset);

			if (bch2_crc_cmp(want, got)) {
				struct printbuf buf2 = PRINTBUF;

				bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));

				bch_err_ratelimited(c,
					"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
					(void *) _RET_IP_, i, j, v->csum_type,
					want.lo, got.lo, buf2.buf);
				printbuf_exit(&buf2);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}
/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}
static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	return buf->key.v.nr_blocks -
		bitmap_weight(buf->valid, buf->key.v.nr_blocks);
}
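
/*
 * Reconstruct failed blocks from the surviving ones; fails if more
 * blocks failed than we have redundancy.
 */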
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
	return 0;
}
static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_stripe *v = &ec_bio->buf->key.v;
	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(ca->fs,
				    "error %s stripe: stale pointer after io",
				    bio_data_dir(bio) == READ ? "reading from" : "writing to");
		clear_bit(ec_bio->idx, ec_bio->buf->valid);
	}

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}
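
/*
 * Read or write one block of a stripe, splitting the IO into as many
 * bios as needed; each completion is accounted against the passed-in
 * closure.
 */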
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			unsigned rw, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;

	if (ptr_stale(ca, ptr)) {
		bch_err_ratelimited(c,
				    "error %s stripe: stale pointer",
				    rw == READ ? "reading from" : "writing to");
		clear_bit(idx, buf->valid);
		return;
	}

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
						       nr_iovecs,
						       rw,
						       GFP_KERNEL,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca			= ca;
		ec_bio->buf			= buf;
		ec_bio->idx			= idx;

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes,
			     POS(0, idx), BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_stripe) {
		ret = -ENOENT;
		goto err;
	}
	bkey_reassemble(&stripe->key.k_i, k);
err:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bch_stripe *v;
	unsigned i, offset;
	int ret = 0;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
	if (ret) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: error %i looking up stripe", ret);
		kfree(buf);
		return -EIO;
	}

	v = &buf->key.v;

	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: pointer doesn't match stripe");
		ret = -EIO;
		goto err;
	}

	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: read is bigger than stripe");
		ret = -EIO;
		goto err;
	}

	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
	if (ret)
		goto err;

	for (i = 0; i < v->nr_blocks; i++)
		ec_block_io(c, buf, REQ_OP_READ, i, &cl);

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
	ec_stripe_buf_exit(buf);
	kfree(buf);
	return ret;
}
/* stripe bucket accounting: */
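
/*
 * The stripes heap tracks existing stripes, ordered by the number of
 * blocks that still hold live data: stripes with no live data sort to
 * the top, where the delete path can find them, and partially empty
 * stripes are candidates for reuse.
 */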
static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -ENOMEM;

		spin_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		spin_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
		return -ENOMEM;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
		return -ENOMEM;

	return 0;
}
static int ec_stripe_mem_alloc(struct btree_trans *trans,
			       struct btree_iter *iter)
{
	size_t idx = iter->pos.offset;

	if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
		return 0;

	bch2_trans_unlock(trans);

	return   __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
		bch2_trans_relock(trans);
}
static ssize_t stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	return h->used && h->data[0].blocks_nonempty == 0
		? h->data[0].idx : -1;
}
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(!m->alive);
	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	if (!m->on_heap)
		return;

	m->on_heap = false;

	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	if (m->on_heap)
		return;

	BUG_ON(heap_full(&c->ec_stripes_heap));

	m->on_heap = true;

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
}
void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	size_t i;

	if (!m->on_heap)
		return;

	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h,   i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	if (stripe_idx_to_delete(c) >= 0)
		bch2_do_stripe_deletes(c);
}
/* stripe deletion */
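
/*
 * Stripes with no blocks holding live data are deleted in the
 * background: they sort to the top of the stripes heap, and the delete
 * worker pops them off one at a time.
 */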
static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
	return bch2_btree_delete_range(c, BTREE_ID_stripes,
				       POS(0, idx),
				       POS(0, idx + 1),
				       0, NULL);
}
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	ssize_t idx;

	while (1) {
		spin_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		if (idx < 0) {
			spin_unlock(&c->ec_stripes_heap_lock);
			break;
		}

		bch2_stripes_heap_del(c, genradix_ptr(&c->stripes, idx), idx);
		spin_unlock(&c->ec_stripes_heap_lock);

		if (ec_stripe_delete(c, idx))
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !schedule_work(&c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
/* stripe creation: */
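
/*
 * New stripes are assembled in an ec_stripe_new hanging off an
 * ec_stripe_head: once all the data buckets have been written we
 * generate and write parity, insert (or update) the stripe key, and
 * rewrite the extents in the data buckets to carry stripe pointers.
 */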
static int ec_stripe_bkey_insert(struct btree_trans *trans,
				 struct bkey_i_stripe *stripe,
				 struct disk_reservation *res)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos min_pos = POS(0, 1);
	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
			if (start_pos.offset) {
				start_pos = min_pos;
				bch2_btree_iter_set_pos(&iter, start_pos);
				continue;
			}

			ret = -BCH_ERR_ENOSPC_stripe_create;
			break;
		}

		if (bkey_deleted(k.k))
			break;
	}

	c->ec_stripe_hint = iter.pos.offset;

	if (ret)
		goto err;

	ret = ec_stripe_mem_alloc(trans, &iter);
	if (ret)
		goto err;

	stripe->k.p = iter.pos;

	ret = bch2_trans_update(trans, &iter, &stripe->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
static int ec_stripe_bkey_update(struct btree_trans *trans,
				 struct bkey_i_stripe *new,
				 struct disk_reservation *res)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_stripe *existing;
	unsigned i;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes,
			     new->k.p, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k || k.k->type != KEY_TYPE_stripe) {
		bch_err(trans->c, "error updating stripe: not found");
		ret = -ENOENT;
		goto err;
	}

	existing = bkey_s_c_to_stripe(k).v;

	if (existing->nr_blocks != new->v.nr_blocks) {
		bch_err(trans->c, "error updating stripe: nr_blocks does not match");
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < new->v.nr_blocks; i++)
		stripe_blockcount_set(&new->v, i,
			stripe_blockcount_get(existing, i));

	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
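
/*
 * Splice a stripe pointer entry into an extent, immediately after the
 * device pointer it refers to, shifting the rest of the value up.
 */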
static void extent_stripe_ptr_add(struct bkey_s_extent e,
				  struct ec_stripe_buf *s,
				  struct bch_extent_ptr *ptr,
				  unsigned block)
{
	struct bch_extent_stripe_ptr *dst = (void *) ptr;
	union bch_extent_entry *end = extent_entry_last(e);

	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
	e.k->u64s += sizeof(*dst) / sizeof(u64);

	*dst = (struct bch_extent_stripe_ptr) {
		.type		= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block		= block,
		.redundancy	= s->key.v.nr_redundant,
		.idx		= s->key.k.p.offset,
	};
}
static int ec_stripe_update_extent(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct bkey_s_c k,
				   struct ec_stripe_buf *s)
{
	const struct bch_extent_ptr *ptr_c;
	struct bch_extent_ptr *ptr, *ec_ptr = NULL;
	struct bkey_i *n;
	unsigned block;
	int ret, dev;

	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
		return 0;

	ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
	/*
	 * It doesn't generally make sense to erasure code cached ptrs:
	 * XXX: should we be incrementing a counter?
	 */
	if (!ptr_c || ptr_c->cached)
		return 0;

	dev = s->key.v.ptrs[block].dev;

	n = bch2_bkey_make_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
	ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
	BUG_ON(!ec_ptr);

	extent_stripe_ptr_add(bkey_i_to_s_extent(n), s, ec_ptr, block);

	return bch2_trans_update(trans, iter, n, 0);
}
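
/*
 * Walk the backpointers of one data bucket, rewriting every extent
 * that lives in it to point at the new stripe.
 */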
static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
				   unsigned block)
{
	struct bch_fs *c = trans->c;
	struct bch_extent_ptr bucket = s->key.v.ptrs[block];
	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
	struct bch_backpointer bp;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 bp_offset = 0;
	int ret = 0;
retry:
	while (1) {
		bch2_trans_begin(trans);

		ret = bch2_get_next_backpointer(trans, bucket_pos, bucket.gen,
						&bp_offset, &bp);
		if (ret)
			break;
		if (bp_offset == U64_MAX)
			break;

		if (bch2_fs_inconsistent_on(bp.level, c, "found btree node in erasure coded bucket!?")) {
			ret = -EIO;
			break;
		}

		k = bch2_backpointer_get_key(trans, &iter, bucket_pos, bp_offset, bp);
		ret = bkey_err(k);
		if (ret)
			break;
		if (!k.k)
			continue;

		ret = ec_stripe_update_extent(trans, &iter, k, s);
		bch2_trans_iter_exit(trans, &iter);
		if (ret)
			break;

		bp_offset++;
	}

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	return ret;
}
static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
	struct btree_trans trans;
	struct bch_stripe *v = &s->key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	ret = bch2_btree_write_buffer_flush(&trans);
	if (ret)
		goto err;

	for (i = 0; i < nr_data; i++) {
		ret = ec_stripe_update_bucket(&trans, s, i);
		if (ret)
			break;
	}
err:
	bch2_trans_exit(&trans);

	return ret;
}
/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct stripe *m;
	struct bch_stripe *v = &s->new_stripe.key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	int ret;

	BUG_ON(s->h->s == s);

	closure_sync(&s->iodone);

	if (s->err) {
		if (!bch2_err_matches(s->err, EROFS))
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (s->have_existing_stripe) {
		ec_validate_checksums(c, &s->existing_stripe);

		if (ec_do_recov(c, &s->existing_stripe)) {
			bch_err(c, "error creating stripe: error reading existing stripe");
			goto err;
		}

		for (i = 0; i < nr_data; i++)
			if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
				swap(s->new_stripe.data[i],
				     s->existing_stripe.data[i]);

		ec_stripe_buf_exit(&s->existing_stripe);
	}

	BUG_ON(!s->allocated);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_create))
		goto err;

	ec_generate_ec(&s->new_stripe);

	ec_generate_checksums(&s->new_stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
	closure_sync(&s->iodone);

	if (ec_nr_failed(&s->new_stripe)) {
		bch_err(c, "error creating stripe: error writing redundancy buckets");
		goto err_put_writes;
	}

	ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
			    s->have_existing_stripe
			    ? ec_stripe_bkey_update(&trans, &s->new_stripe.key, &s->res)
			    : ec_stripe_bkey_insert(&trans, &s->new_stripe.key, &s->res));
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err_put_writes;
	}

	ret = ec_stripe_update_extents(c, &s->new_stripe);
	if (ret)
		bch_err(c, "error creating stripe: error updating pointers: %s",
			bch2_err_str(ret));

	spin_lock(&c->ec_stripes_heap_lock);
	m = genradix_ptr(&c->stripes, s->new_stripe.key.k.p.offset);

	BUG_ON(m->on_heap);
	bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
	spin_unlock(&c->ec_stripes_heap_lock);
err_put_writes:
	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
err:
	bch2_disk_reservation_put(c, &s->res);

	for (i = 0; i < v->nr_blocks; i++)
		if (s->blocks[i]) {
			ob = c->open_buckets + s->blocks[i];

			if (i < nr_data) {
				ob->ec = NULL;
				__bch2_open_bucket_put(c, ob);
			} else {
				bch2_open_bucket_put(c, ob);
			}
		}

	ec_stripe_buf_exit(&s->existing_stripe);
	ec_stripe_buf_exit(&s->new_stripe);
	closure_debug_destroy(&s->iodone);
	kfree(s);
}
static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s, *n;
restart:
	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->pin)) {
			list_del(&s->list);
			mutex_unlock(&c->ec_stripe_new_lock);
			ec_stripe_create(s);
			goto restart;
		}
	mutex_unlock(&c->ec_stripe_new_lock);
}
static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(atomic_read(&s->pin) <= 0);

	if (atomic_dec_and_test(&s->pin)) {
		BUG_ON(!s->pending);
		queue_work(system_long_wq, &c->ec_stripe_create_work);
	}
}
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s);
}
/* have a full bucket - hand it off to be erasure coded: */
void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	if (ob->sectors_free)
		s->err = -1;

	ec_stripe_new_put(c, s);
}
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	ca	= bch_dev_bkey_exists(c, ob->dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}
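
/*
 * Whether to prefer creating a brand new stripe over reusing an
 * existing one: currently a stub that always returns false, so
 * existing stripes are always considered for reuse first.
 */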
static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i_stripe *s,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	unsigned u64s;

	bkey_stripe_init(&s->k_i);
	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
	s->v.csum_type			= BCH_CSUM_crc32c;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	mutex_init(&s->lock);
	closure_init(&s->iodone, NULL);
	atomic_set(&s->pin, 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				BCH_BKEY_PTRS_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
			   s->nr_parity, h->blocksize);

	h->s = s;
	return 0;
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 bool copygc)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	mutex_lock(&h->lock);

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;
	h->copygc	= copygc;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}
void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->nr_data) == h->s->nr_data)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}
struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
						 unsigned target,
						 unsigned algo,
						 unsigned redundancy,
						 bool copygc)
{
	struct ec_stripe_head *h;

	if (!redundancy)
		return NULL;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy &&
		    h->copygc		== copygc) {
			mutex_lock(&h->lock);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, copygc);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
				    struct closure *cl)
{
	struct bch_devs_mask devs = h->devs;
	struct open_bucket *ob;
	struct open_buckets buckets;
	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
	bool have_cache = true;
	int ret = 0;

	for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
		if (test_bit(i, h->s->blocks_gotten)) {
			__clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
			if (i < h->s->nr_data)
				nr_have_data++;
			else
				nr_have_parity++;
		}
	}

	BUG_ON(nr_have_data	> h->s->nr_data);
	BUG_ON(nr_have_parity	> h->s->nr_parity);

	buckets.nr = 0;
	if (nr_have_parity < h->s->nr_parity) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->parity_stripe,
					    &devs,
					    h->s->nr_parity,
					    &nr_have_parity,
					    &have_cache,
					    h->copygc
					    ? RESERVE_movinggc
					    : RESERVE_none,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data + h->s->nr_parity,
					       h->s->nr_data);
			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	buckets.nr = 0;
	if (nr_have_data < h->s->nr_data) {
		ret = bch2_bucket_alloc_set(c, &buckets,
					    &h->block_stripe,
					    &devs,
					    h->s->nr_data,
					    &nr_have_data,
					    &have_cache,
					    h->copygc
					    ? RESERVE_movinggc
					    : RESERVE_none,
					    0,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
			j = find_next_zero_bit(h->s->blocks_gotten,
					       h->s->nr_data, 0);
			BUG_ON(j >= h->s->nr_data);

			h->s->blocks[j] = buckets.v[i];
			h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
			__set_bit(j, h->s->blocks_gotten);
		}

		if (ret)
			return ret;
	}

	return 0;
}
/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       struct ec_stripe_head *head)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;
	s64 ret = -1;

	if (may_create_new_stripe(c))
		return -1;

	spin_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		/* No blocks worth reusing, stripe will just be deleted: */
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;
		m = genradix_ptr(&c->stripes, stripe_idx);

		if (m->algorithm	== head->algo &&
		    m->nr_redundant	== head->redundancy &&
		    m->sectors		== head->blocksize &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant) {
			bch2_stripes_heap_del(c, m, stripe_idx);
			ret = stripe_idx;
			break;
		}
	}
	spin_unlock(&c->ec_stripes_heap_lock);
	return ret;
}
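
/*
 * Reuse a partially-empty existing stripe: read in its current
 * contents, mark the blocks that still hold data as allocated, and
 * take over its key for the stripe being built.
 */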
static int __bch2_ec_stripe_head_reuse(struct bch_fs *c,
				       struct ec_stripe_head *h)
{
	unsigned i;
	s64 idx;
	int ret;

	idx = get_existing_stripe(c, h);
	if (idx < 0)
		return -BCH_ERR_ENOSPC_stripe_reuse;

	h->s->have_existing_stripe = true;
	ret = get_stripe_key(c, idx, &h->s->existing_stripe);
	if (ret) {
		bch2_fs_fatal_error(c, "error reading stripe key: %i", ret);
		return ret;
	}

	if (ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize)) {
		/*
		 * this is a problem: we have deleted from the
		 * stripes heap already
		 */
		BUG();
	}

	BUG_ON(h->s->existing_stripe.size != h->blocksize);
	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors));

	for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
		if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
			__set_bit(i, h->s->blocks_gotten);
			__set_bit(i, h->s->blocks_allocated);
		}

		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
	}

	bkey_copy(&h->s->new_stripe.key.k_i,
		  &h->s->existing_stripe.key.k_i);

	return 0;
}
static int __bch2_ec_stripe_head_reserve(struct bch_fs *c,
					 struct ec_stripe_head *h)
{
	return bch2_disk_reservation_get(c, &h->s->res,
					 h->blocksize,
					 h->s->nr_parity, 0);
}
struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy,
					       bool copygc,
					       struct closure *cl)
{
	struct ec_stripe_head *h;
	int ret;
	bool needs_stripe_new;

	h = __bch2_ec_stripe_head_get(c, target, algo, redundancy, copygc);
	if (!h) {
		bch_err(c, "no stripe head");
		return NULL;
	}

	needs_stripe_new = !h->s;
	if (needs_stripe_new) {
		if (ec_new_stripe_alloc(c, h)) {
			ret = -ENOMEM;
			bch_err(c, "failed to allocate new stripe");
			goto err;
		}

		if (ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize))
			BUG();
	}

	/*
	 * Try to reserve a new stripe before reusing an
	 * existing stripe. This will prevent unnecessary
	 * read amplification during write oriented workloads.
	 */
	ret = 0;
	if (!h->s->allocated && !h->s->res.sectors && !h->s->have_existing_stripe)
		ret = __bch2_ec_stripe_head_reserve(c, h);
	if (ret && needs_stripe_new)
		ret = __bch2_ec_stripe_head_reuse(c, h);
	if (ret) {
		bch_err_ratelimited(c, "failed to get stripe: %s", bch2_err_str(ret));
		goto err;
	}

	if (!h->s->allocated) {
		ret = new_stripe_alloc_buckets(c, h, cl);
		if (ret)
			goto err;

		h->s->allocated = true;
	}

	return h;
err:
	bch2_ec_stripe_head_put(c, h);
	return ERR_PTR(ret);
}
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {

		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
			if (!h->s->blocks[i])
				continue;

			ob = c->open_buckets + h->s->blocks[i];
			if (ob->dev == ca->dev_idx)
				goto found;
		}
		goto unlock;
found:
		h->s->err = -EROFS;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}
void bch2_stripes_heap_start(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct stripe *m;

	genradix_for_each(&c->stripes, iter, m)
		if (m->alive)
			bch2_stripes_heap_insert(c, m, iter.pos);
}
int bch2_stripes_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	const struct bch_stripe *s;
	struct stripe *m;
	unsigned i;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		if (k.k->type != KEY_TYPE_stripe)
			continue;

		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
		if (ret)
			break;

		s = bkey_s_c_to_stripe(k).v;

		m = genradix_ptr(&c->stripes, k.k->p.offset);
		m->alive	= true;
		m->sectors	= le16_to_cpu(s->sectors);
		m->algorithm	= s->algorithm;
		m->nr_blocks	= s->nr_blocks;
		m->nr_redundant	= s->nr_redundant;
		m->blocks_nonempty = 0;

		for (i = 0; i < s->nr_blocks; i++)
			m->blocks_nonempty += !!stripe_blockcount_get(s, i);

		spin_lock(&c->ec_stripes_heap_lock);
		bch2_stripes_heap_update(c, m, k.k->p.offset);
		spin_unlock(&c->ec_stripes_heap_lock);
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading stripes: %i", ret);

	return ret;
}
void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	spin_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 20); i++) {
		m = genradix_ptr(&c->stripes, h->data[i].idx);

		prt_printf(out, "%zu %u/%u+%u\n", h->data[i].idx,
			   h->data[i].blocks_nonempty,
			   m->nr_blocks - m->nr_redundant,
			   m->nr_redundant);
	}
	spin_unlock(&c->ec_stripes_heap_lock);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "target %u algo %u redundancy %u:\n",
			   h->target, h->algo, h->redundancy);

		if (h->s)
			prt_printf(out, "\tpending: blocks %u+%u allocated %u\n",
				   h->s->nr_data, h->s->nr_parity,
				   bitmap_weight(h->s->blocks_allocated,
						 h->s->nr_data));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		prt_printf(out, "\tin flight: blocks %u+%u pin %u\n",
			   s->nr_data, s->nr_parity,
			   atomic_read(&s->pin));
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		BUG_ON(h->s);
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes);
	bioset_exit(&c->ec_bioset);
}
void bch2_fs_ec_init_early(struct bch_fs *c)
{
	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
}
int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}