1 // SPDX-License-Identifier: GPL-2.0
6 #include "alloc_foreground.h"
7 #include "backpointers.h"
11 #include "btree_update.h"
12 #include "btree_write_buffer.h"
14 #include "disk_groups.h"
24 #include <linux/sort.h>
28 #include <linux/raid/pq.h>
29 #include <linux/raid/xor.h>
31 static void raid5_recov(unsigned disks, unsigned failed_idx,
32 size_t size, void **data)
36 BUG_ON(failed_idx >= disks);
38 swap(data[0], data[failed_idx]);
39 memcpy(data[0], data[1], size);
42 nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
43 xor_blocks(nr, size, data[0], data + i);
47 swap(data[0], data[failed_idx]);
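/*
 * Why this works: with single parity, P = D_0 ^ D_1 ^ ... ^ D_{n-1}, so any
 * one missing block is just the xor of all the remaining blocks (P
 * included). A minimal sketch of the same idea, without the MAX_XOR_BLOCKS
 * batching raid5_recov() gets from xor_blocks() (names hypothetical):
 *
 *	static void xor_recov_sketch(unsigned nr, size_t size,
 *				     u8 **d, unsigned dead)
 *	{
 *		memset(d[dead], 0, size);
 *		for (unsigned i = 0; i < nr; i++)
 *			if (i != dead)
 *				for (size_t j = 0; j < size; j++)
 *					d[dead][j] ^= d[i][j];
 *	}
 *
 * raid5_recov() above gets the same result by swapping the dead block into
 * slot 0, memcpy-ing block 1 in, and xor-accumulating the rest into it.
 */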
50 static void raid_gen(int nd, int np, size_t size, void **v)
53 raid5_recov(nd + np, nd, size, v);
55 raid6_call.gen_syndrome(nd + np, size, v);
59 static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
66 raid5_recov(nd + 1, ir[0], size, v);
68 raid6_call.gen_syndrome(nd + np, size, v);
72 /* data + data failure */
73 raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
74 } else if (ir[0] < nd) {
75 /* data + p/q failure */
77 if (ir[1] == nd) /* data + p failure */
78 raid6_datap_recov(nd + np, size, ir[0], v);
79 else { /* data + q failure */
80 raid5_recov(nd + 1, ir[0], size, v);
81 raid6_call.gen_syndrome(nd + np, size, v);
84 raid_gen(nd, np, size, v);
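/*
 * Case analysis for two failures, with blocks laid out [D_0..D_{nd-1}, P, Q]
 * (P at index nd, Q at nd + 1): data+data takes the full RS double decode;
 * data+P uses raid6_datap_recov(), which rebuilds both from Q; data+Q first
 * rebuilds the data block from P by plain xor, then regenerates Q; and P+Q
 * (the final else) is just re-encoding from intact data via raid_gen().
 */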
94 #include <raid/raid.h>
100 struct ec_stripe_buf *buf;
105 /* Stripes btree keys: */
107 int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
108 unsigned flags, struct printbuf *err)
110 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
112 if (bkey_eq(k.k->p, POS_MIN)) {
113 prt_printf(err, "stripe at POS_MIN");
114 return -BCH_ERR_invalid_bkey;
118 prt_printf(err, "nonzero inode field");
119 return -BCH_ERR_invalid_bkey;
122 if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
123 prt_printf(err, "incorrect value size (%zu < %u)",
124 bkey_val_u64s(k.k), stripe_val_u64s(s));
125 return -BCH_ERR_invalid_bkey;
128 return bch2_bkey_ptrs_invalid(c, k, flags, err);
131 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
134 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
135 unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
137 prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
139 le16_to_cpu(s->sectors),
143 1U << s->csum_granularity_bits);
145 for (i = 0; i < s->nr_blocks; i++) {
146 const struct bch_extent_ptr *ptr = s->ptrs + i;
147 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
149 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
151 prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
153 prt_printf(out, "#%u", stripe_blockcount_get(s, i));
154 if (ptr_stale(ca, ptr))
155 prt_printf(out, " stale");
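/*
 * Rendered, a stripe key looks something like this (values illustrative):
 *
 *	algo 0 sectors 128 blocks 2:1 csum 1 gran 8 0:1024:0#128 1:2048:0#128 2:4096:0#0
 *
 * i.e. one "device:bucket:offset" entry per block, "#n" giving the sectors
 * written to that block, and " stale" appended when the bucket has since
 * been reused.
 */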
159 /* returns the matched extent ptr; *block is set to the blocknr in the stripe: */
160 static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
161 struct bkey_s_c k, unsigned *block)
163 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
164 const struct bch_extent_ptr *ptr;
165 unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
167 bkey_for_each_ptr(ptrs, ptr)
168 for (i = 0; i < nr_data; i++)
169 if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
170 le16_to_cpu(s->sectors))) {
178 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
181 case KEY_TYPE_extent: {
182 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
183 const union bch_extent_entry *entry;
185 extent_for_each_entry(e, entry)
186 if (extent_entry_type(entry) ==
187 BCH_EXTENT_ENTRY_stripe_ptr &&
188 entry->stripe_ptr.idx == idx)
200 static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
204 for (i = 0; i < buf->key.v.nr_blocks; i++) {
205 kvpfree(buf->data[i], buf->size << 9);
210 /* XXX: this is a non-mempoolified memory allocation: */
211 static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
212 unsigned offset, unsigned size)
214 struct bch_stripe *v = &buf->key.v;
215 unsigned csum_granularity = 1U << v->csum_granularity_bits;
216 unsigned end = offset + size;
219 BUG_ON(end > le16_to_cpu(v->sectors));
221 offset = round_down(offset, csum_granularity);
222 end = min_t(unsigned, le16_to_cpu(v->sectors),
223 round_up(end, csum_granularity));
225 buf->offset = offset;
226 buf->size = end - offset;
228 memset(buf->valid, 0xFF, sizeof(buf->valid));
230 for (i = 0; i < buf->key.v.nr_blocks; i++) {
231 buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
238 ec_stripe_buf_exit(buf);
239 return -BCH_ERR_ENOMEM_stripe_buf;
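/*
 * The requested range is widened to whole checksum granules so that
 * checksums can be verified and regenerated. E.g. with
 * csum_granularity_bits = 3 (8-sector granules), a request for offset 13
 * size 8 has end = 21 and becomes offset 8 size 16: round_down(13, 8) = 8,
 * round_up(21, 8) = 24 (values illustrative).
 */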
244 static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
245 unsigned block, unsigned offset)
247 struct bch_stripe *v = &buf->key.v;
248 unsigned csum_granularity = 1 << v->csum_granularity_bits;
249 unsigned end = buf->offset + buf->size;
250 unsigned len = min(csum_granularity, end - offset);
252 BUG_ON(offset >= end);
253 BUG_ON(offset < buf->offset);
254 BUG_ON(offset & (csum_granularity - 1));
255 BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
256 (len & (csum_granularity - 1)));
258 return bch2_checksum(NULL, v->csum_type,
260 buf->data[block] + ((offset - buf->offset) << 9),
264 static void ec_generate_checksums(struct ec_stripe_buf *buf)
266 struct bch_stripe *v = &buf->key.v;
267 unsigned i, j, csums_per_device = stripe_csums_per_device(v);
273 BUG_ON(buf->size != le16_to_cpu(v->sectors));
275 for (i = 0; i < v->nr_blocks; i++)
276 for (j = 0; j < csums_per_device; j++)
277 stripe_csum_set(v, i, j,
278 ec_block_checksum(buf, i, j << v->csum_granularity_bits));
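/*
 * The checksums form an nr_blocks x csums_per_device grid: entry (i, j)
 * covers sectors [j << csum_granularity_bits, (j + 1) <<
 * csum_granularity_bits) of block i, with the last granule truncated at
 * v->sectors (that's the len calculation in ec_block_checksum()).
 */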
281 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
283 struct bch_stripe *v = &buf->key.v;
284 unsigned csum_granularity = 1 << v->csum_granularity_bits;
290 for (i = 0; i < v->nr_blocks; i++) {
291 unsigned offset = buf->offset;
292 unsigned end = buf->offset + buf->size;
294 if (!test_bit(i, buf->valid))
297 while (offset < end) {
298 unsigned j = offset >> v->csum_granularity_bits;
299 unsigned len = min(csum_granularity, end - offset);
300 struct bch_csum want = stripe_csum_get(v, i, j);
301 struct bch_csum got = ec_block_checksum(buf, i, offset);
303 if (bch2_crc_cmp(want, got)) {
304 struct printbuf buf2 = PRINTBUF;
306 bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));
308 bch_err_ratelimited(c,
309 "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
310 (void *) _RET_IP_, i, j, v->csum_type,
311 want.lo, got.lo, buf2.buf);
312 printbuf_exit(&buf2);
313 clear_bit(i, buf->valid);
322 /* Erasure coding: */
324 static void ec_generate_ec(struct ec_stripe_buf *buf)
326 struct bch_stripe *v = &buf->key.v;
327 unsigned nr_data = v->nr_blocks - v->nr_redundant;
328 unsigned bytes = le16_to_cpu(v->sectors) << 9;
330 raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
333 static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
335 return buf->key.v.nr_blocks -
336 bitmap_weight(buf->valid, buf->key.v.nr_blocks);
339 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
341 struct bch_stripe *v = &buf->key.v;
342 unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
343 unsigned nr_data = v->nr_blocks - v->nr_redundant;
344 unsigned bytes = buf->size << 9;
346 if (ec_nr_failed(buf) > v->nr_redundant) {
347 bch_err_ratelimited(c,
348 "error doing reconstruct read: unable to read enough blocks");
352 for (i = 0; i < nr_data; i++)
353 if (!test_bit(i, buf->valid))
354 failed[nr_failed++] = i;
356 raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
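/*
 * Note failed[] only lists failed *data* blocks: both callers (reconstruct
 * reads, and reads of an existing stripe being reused) only need the data
 * rebuilt - parity is either unused there or about to be regenerated by
 * ec_generate_ec().
 */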
362 static void ec_block_endio(struct bio *bio)
364 struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
365 struct bch_stripe *v = &ec_bio->buf->key.v;
366 struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
367 struct bch_dev *ca = ec_bio->ca;
368 struct closure *cl = bio->bi_private;
370 if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
371 bio_data_dir(bio) ? "write" : "read",
372 bch2_blk_status_to_str(bio->bi_status)))
373 clear_bit(ec_bio->idx, ec_bio->buf->valid);
375 if (ptr_stale(ca, ptr)) {
376 bch_err_ratelimited(ca->fs,
377 "error %s stripe: stale pointer after io",
378 bio_data_dir(bio) == READ ? "reading from" : "writing to");
379 clear_bit(ec_bio->idx, ec_bio->buf->valid);
382 bio_put(&ec_bio->bio);
383 percpu_ref_put(&ca->io_ref);
387 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
388 unsigned rw, unsigned idx, struct closure *cl)
390 struct bch_stripe *v = &buf->key.v;
391 unsigned offset = 0, bytes = buf->size << 9;
392 struct bch_extent_ptr *ptr = &v->ptrs[idx];
393 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
394 enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
398 if (ptr_stale(ca, ptr)) {
399 bch_err_ratelimited(c,
400 "error %s stripe: stale pointer",
401 rw == READ ? "reading from" : "writing to");
402 clear_bit(idx, buf->valid);
406 if (!bch2_dev_get_ioref(ca, rw)) {
407 clear_bit(idx, buf->valid);
411 this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
413 while (offset < bytes) {
414 unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
415 DIV_ROUND_UP(bytes, PAGE_SIZE));
416 unsigned b = min_t(size_t, bytes - offset,
417 nr_iovecs << PAGE_SHIFT);
418 struct ec_bio *ec_bio;
420 ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
431 ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
432 ec_bio->bio.bi_end_io = ec_block_endio;
433 ec_bio->bio.bi_private = cl;
435 bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
438 percpu_ref_get(&ca->io_ref);
440 submit_bio(&ec_bio->bio);
445 percpu_ref_put(&ca->io_ref);
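/*
 * A stripe block can exceed what one bio can map, so the buffer goes out as
 * a chain of bios, each mapping at most BIO_MAX_VECS pages. E.g. assuming
 * 4K pages and BIO_MAX_VECS = 256, each bio covers up to 1MiB, so a 3MiB
 * block becomes three bios; each submission takes an io_ref that
 * ec_block_endio() drops.
 */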
448 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
449 struct ec_stripe_buf *stripe)
451 struct btree_iter iter;
455 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
456 POS(0, idx), BTREE_ITER_SLOTS);
460 if (k.k->type != KEY_TYPE_stripe) {
464 bkey_reassemble(&stripe->key.k_i, k);
466 bch2_trans_iter_exit(trans, &iter);
470 static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
472 return bch2_trans_run(c, get_stripe_key_trans(&trans, idx, stripe));
475 /* recovery read path: */
476 int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
478 struct ec_stripe_buf *buf;
480 struct bch_stripe *v;
484 closure_init_stack(&cl);
486 BUG_ON(!rbio->pick.has_ec);
488 buf = kzalloc(sizeof(*buf), GFP_NOIO);
490 return -BCH_ERR_ENOMEM_ec_read_extent;
492 ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
494 bch_err_ratelimited(c,
495 "error doing reconstruct read: error %i looking up stripe", ret);
502 if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
503 bch_err_ratelimited(c,
504 "error doing reconstruct read: pointer doesn't match stripe");
509 offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
510 if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
511 bch_err_ratelimited(c,
512 "error doing reconstruct read: read is bigger than stripe");
517 ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
521 for (i = 0; i < v->nr_blocks; i++)
522 ec_block_io(c, buf, REQ_OP_READ, i, &cl);
526 if (ec_nr_failed(buf) > v->nr_redundant) {
527 bch_err_ratelimited(c,
528 "error doing reconstruct read: unable to read enough blocks");
533 ec_validate_checksums(c, buf);
535 ret = ec_do_recov(c, buf);
539 memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
540 buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
542 ec_stripe_buf_exit(buf);
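/*
 * That's the whole reconstruct-read pipeline, in order: look up the stripe
 * key, size a granule-aligned buffer covering the extent, issue reads for
 * every block in the stripe, verify checksums (invalidating blocks that
 * fail), run RAID recovery, and copy the rebuilt range into the original
 * rbio.
 */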
547 /* stripe bucket accounting: */
549 static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
551 ec_stripes_heap n, *h = &c->ec_stripes_heap;
553 if (idx >= h->size) {
554 if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
555 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
557 mutex_lock(&c->ec_stripes_heap_lock);
558 if (n.size > h->size) {
559 memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
563 mutex_unlock(&c->ec_stripes_heap_lock);
568 if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
569 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
571 if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
572 !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
573 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
578 static int ec_stripe_mem_alloc(struct btree_trans *trans,
579 struct btree_iter *iter)
581 size_t idx = iter->pos.offset;
583 if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
586 bch2_trans_unlock(trans);
588 return __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
589 bch2_trans_relock(trans);
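/*
 * This is the standard pattern for allocating memory with btree locks held:
 * try GFP_NOWAIT first; only if that fails, drop the transaction's locks,
 * allocate with GFP_KERNEL (which may block), then relock (which may
 * restart the transaction). The shape, schematically (names hypothetical):
 *
 *	if (!alloc(GFP_NOWAIT|__GFP_NOWARN))
 *		return 0;
 *	bch2_trans_unlock(trans);
 *	return alloc(GFP_KERNEL) ?: bch2_trans_relock(trans);
 */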
593 * Hash table of open stripes:
594 * Stripes that are being created or modified are kept in a hash table, so that
595 * stripe deletion can skip them.
598 static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
600 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
601 struct ec_stripe_new *s;
603 hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
609 static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
613 spin_lock(&c->ec_stripes_new_lock);
614 ret = __bch2_stripe_is_open(c, idx);
615 spin_unlock(&c->ec_stripes_new_lock);
620 static bool bch2_try_open_stripe(struct bch_fs *c,
621 struct ec_stripe_new *s,
626 spin_lock(&c->ec_stripes_new_lock);
627 ret = !__bch2_stripe_is_open(c, idx);
629 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
632 hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
634 spin_unlock(&c->ec_stripes_new_lock);
639 static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
643 spin_lock(&c->ec_stripes_new_lock);
644 hlist_del_init(&s->hash);
645 spin_unlock(&c->ec_stripes_new_lock);
650 /* Heap of all existing stripes, ordered by blocks_nonempty */
652 static u64 stripe_idx_to_delete(struct bch_fs *c)
654 ec_stripes_heap *h = &c->ec_stripes_heap;
656 lockdep_assert_held(&c->ec_stripes_heap_lock);
659 h->data[0].blocks_nonempty == 0 &&
660 !bch2_stripe_is_open(c, h->data[0].idx))
661 return h->data[0].idx;
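/*
 * Because the heap is ordered by blocks_nonempty, the root is always the
 * best deletion candidate: if even it has live data, or is still open,
 * nothing is deletable and the fallthrough returns 0 (stripe index 0 is
 * never allocated - note the POS(0, 1) min_pos in
 * __bch2_ec_stripe_head_reserve()).
 */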
666 static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
667 struct ec_stripe_heap_entry l,
668 struct ec_stripe_heap_entry r)
670 return ((l.blocks_nonempty > r.blocks_nonempty) -
671 (l.blocks_nonempty < r.blocks_nonempty));
674 static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
677 struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
679 genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
682 static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
684 ec_stripes_heap *h = &c->ec_stripes_heap;
685 struct stripe *m = genradix_ptr(&c->stripes, idx);
687 BUG_ON(m->heap_idx >= h->used);
688 BUG_ON(h->data[m->heap_idx].idx != idx);
691 void bch2_stripes_heap_del(struct bch_fs *c,
692 struct stripe *m, size_t idx)
694 mutex_lock(&c->ec_stripes_heap_lock);
695 heap_verify_backpointer(c, idx);
697 heap_del(&c->ec_stripes_heap, m->heap_idx,
699 ec_stripes_heap_set_backpointer);
700 mutex_unlock(&c->ec_stripes_heap_lock);
703 void bch2_stripes_heap_insert(struct bch_fs *c,
704 struct stripe *m, size_t idx)
706 mutex_lock(&c->ec_stripes_heap_lock);
707 BUG_ON(heap_full(&c->ec_stripes_heap));
709 heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
711 .blocks_nonempty = m->blocks_nonempty,
714 ec_stripes_heap_set_backpointer);
716 heap_verify_backpointer(c, idx);
717 mutex_unlock(&c->ec_stripes_heap_lock);
720 void bch2_stripes_heap_update(struct bch_fs *c,
721 struct stripe *m, size_t idx)
723 ec_stripes_heap *h = &c->ec_stripes_heap;
727 mutex_lock(&c->ec_stripes_heap_lock);
728 heap_verify_backpointer(c, idx);
730 h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
733 heap_sift_up(h, i, ec_stripes_heap_cmp,
734 ec_stripes_heap_set_backpointer);
735 heap_sift_down(h, i, ec_stripes_heap_cmp,
736 ec_stripes_heap_set_backpointer);
738 heap_verify_backpointer(c, idx);
740 do_deletes = stripe_idx_to_delete(c) != 0;
741 mutex_unlock(&c->ec_stripes_heap_lock);
744 bch2_do_stripe_deletes(c);
747 /* stripe deletion */
749 static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
751 struct bch_fs *c = trans->c;
752 struct btree_iter iter;
754 struct bkey_s_c_stripe s;
757 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
763 if (k.k->type != KEY_TYPE_stripe) {
764 bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
769 s = bkey_s_c_to_stripe(k);
770 for (unsigned i = 0; i < s.v->nr_blocks; i++)
771 if (stripe_blockcount_get(s.v, i)) {
772 struct printbuf buf = PRINTBUF;
774 bch2_bkey_val_to_text(&buf, c, k);
775 bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
781 ret = bch2_btree_delete_at(trans, &iter, 0);
783 bch2_trans_iter_exit(trans, &iter);
787 static void ec_stripe_delete_work(struct work_struct *work)
790 container_of(work, struct bch_fs, ec_stripe_delete_work);
791 struct btree_trans trans;
795 bch2_trans_init(&trans, c, 0, 0);
798 mutex_lock(&c->ec_stripes_heap_lock);
799 idx = stripe_idx_to_delete(c);
800 mutex_unlock(&c->ec_stripes_heap_lock);
805 ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
806 ec_stripe_delete(&trans, idx));
808 bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
813 bch2_trans_exit(&trans);
815 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
818 void bch2_do_stripe_deletes(struct bch_fs *c)
820 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
821 !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
822 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
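/*
 * The write ref pins the filesystem writeable for the duration of the
 * worker; if the work item is already queued, queue_work() returns false
 * and we drop the ref immediately rather than leak it.
 */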
825 /* stripe creation: */
827 static int ec_stripe_key_update(struct btree_trans *trans,
828 struct bkey_i_stripe *new,
831 struct bch_fs *c = trans->c;
832 struct btree_iter iter;
836 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
837 new->k.p, BTREE_ITER_INTENT);
842 if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
843 bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
844 create ? "creating" : "updating",
845 bch2_bkey_types[k.k->type]);
850 if (k.k->type == KEY_TYPE_stripe) {
851 const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
854 if (old->nr_blocks != new->v.nr_blocks) {
855 bch_err(c, "error updating stripe: nr_blocks does not match");
860 for (i = 0; i < new->v.nr_blocks; i++) {
861 unsigned v = stripe_blockcount_get(old, i);
864 (old->ptrs[i].dev != new->v.ptrs[i].dev ||
865 old->ptrs[i].gen != new->v.ptrs[i].gen ||
866 old->ptrs[i].offset != new->v.ptrs[i].offset));
868 stripe_blockcount_set(&new->v, i, v);
872 ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
874 bch2_trans_iter_exit(trans, &iter);
878 static int ec_stripe_update_extent(struct btree_trans *trans,
879 struct bpos bucket, u8 gen,
880 struct ec_stripe_buf *s,
883 struct bch_fs *c = trans->c;
884 struct bch_backpointer bp;
885 struct btree_iter iter;
887 const struct bch_extent_ptr *ptr_c;
888 struct bch_extent_ptr *ptr, *ec_ptr = NULL;
889 struct bch_extent_stripe_ptr stripe_ptr;
893 ret = bch2_get_next_backpointer(trans, bucket, gen,
894 bp_pos, &bp, BTREE_ITER_CACHED);
897 if (bpos_eq(*bp_pos, SPOS_MAX))
901 struct printbuf buf = PRINTBUF;
902 struct btree_iter node_iter;
905 b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
906 bch2_trans_iter_exit(trans, &node_iter);
911 prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
912 bch2_backpointer_to_text(&buf, &bp);
914 bch2_fs_inconsistent(c, "%s", buf.buf);
919 k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
925 * extent no longer exists - we could flush the btree
926 * write buffer and retry to verify, but there's no need:
931 if (extent_has_stripe_ptr(k, s->key.k.p.offset))
934 ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
936 * It doesn't generally make sense to erasure code cached ptrs:
937 * XXX: should we be incrementing a counter?
939 if (!ptr_c || ptr_c->cached)
942 dev = s->key.v.ptrs[block].dev;
944 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
945 ret = PTR_ERR_OR_ZERO(n);
949 bkey_reassemble(n, k);
951 bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
952 ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
955 stripe_ptr = (struct bch_extent_stripe_ptr) {
956 .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
958 .redundancy = s->key.v.nr_redundant,
959 .idx = s->key.k.p.offset,
962 __extent_entry_insert(n,
963 (union bch_extent_entry *) ec_ptr,
964 (union bch_extent_entry *) &stripe_ptr);
966 ret = bch2_trans_update(trans, &iter, n, 0);
968 bch2_trans_iter_exit(trans, &iter);
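/*
 * The above retrofits a stripe pointer onto one extent found via a bucket
 * backpointer: extents already pointing into this stripe and cached
 * pointers are skipped; otherwise we drop every replica not on the stripe
 * block's device and splice a bch_extent_stripe_ptr entry into the extent
 * at the surviving pointer.
 */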
972 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
975 struct bch_fs *c = trans->c;
976 struct bch_extent_ptr bucket = s->key.v.ptrs[block];
977 struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
978 struct bpos bp_pos = POS_MIN;
982 ret = commit_do(trans, NULL, NULL,
983 BTREE_INSERT_NOCHECK_RW|
985 ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
989 if (bkey_eq(bp_pos, POS_MAX))
992 bp_pos = bpos_nosnap_successor(bp_pos);
998 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
1000 struct btree_trans trans;
1001 struct bch_stripe *v = &s->key.v;
1002 unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
1005 bch2_trans_init(&trans, c, 0, 0);
1007 ret = bch2_btree_write_buffer_flush(&trans);
1011 for (i = 0; i < nr_data; i++) {
1012 ret = ec_stripe_update_bucket(&trans, s, i);
1017 bch2_trans_exit(&trans);
1022 static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
1023 struct ec_stripe_new *s,
1025 struct open_bucket *ob)
1027 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1028 unsigned offset = ca->mi.bucket_size - ob->sectors_free;
1031 if (!bch2_dev_get_ioref(ca, WRITE)) {
1036 memset(s->new_stripe.data[block] + (offset << 9),
1038 ob->sectors_free << 9);
1040 ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
1041 ob->bucket * ca->mi.bucket_size + offset,
1045 percpu_ref_put(&ca->io_ref);
1051 void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
1054 bch2_stripe_close(c, s);
1059 * all data buckets of the new stripe have been written: create the stripe
1061 static void ec_stripe_create(struct ec_stripe_new *s)
1063 struct bch_fs *c = s->c;
1064 struct open_bucket *ob;
1065 struct bch_stripe *v = &s->new_stripe.key.v;
1066 unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
1069 BUG_ON(s->h->s == s);
1071 closure_sync(&s->iodone);
1074 for (i = 0; i < nr_data; i++)
1076 ob = c->open_buckets + s->blocks[i];
1078 if (ob->sectors_free)
1079 zero_out_rest_of_ec_bucket(c, s, i, ob);
1084 if (!bch2_err_matches(s->err, EROFS))
1085 bch_err(c, "error creating stripe: error writing data buckets");
1089 if (s->have_existing_stripe) {
1090 ec_validate_checksums(c, &s->existing_stripe);
1092 if (ec_do_recov(c, &s->existing_stripe)) {
1093 bch_err(c, "error creating stripe: error reading existing stripe");
1097 for (i = 0; i < nr_data; i++)
1098 if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
1099 swap(s->new_stripe.data[i],
1100 s->existing_stripe.data[i]);
1102 ec_stripe_buf_exit(&s->existing_stripe);
1105 BUG_ON(!s->allocated);
1108 ec_generate_ec(&s->new_stripe);
1110 ec_generate_checksums(&s->new_stripe);
1113 for (i = nr_data; i < v->nr_blocks; i++)
1114 ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
1115 closure_sync(&s->iodone);
1117 if (ec_nr_failed(&s->new_stripe)) {
1118 bch_err(c, "error creating stripe: error writing redundancy buckets");
1122 ret = bch2_trans_do(c, &s->res, NULL,
1123 BTREE_INSERT_NOCHECK_RW|
1124 BTREE_INSERT_NOFAIL,
1125 ec_stripe_key_update(&trans, &s->new_stripe.key,
1126 !s->have_existing_stripe));
1128 bch_err(c, "error creating stripe: error creating stripe key");
1132 ret = ec_stripe_update_extents(c, &s->new_stripe);
1134 bch_err(c, "error creating stripe: error updating pointers: %s",
1139 bch2_disk_reservation_put(c, &s->res);
1141 for (i = 0; i < v->nr_blocks; i++)
1143 ob = c->open_buckets + s->blocks[i];
1147 __bch2_open_bucket_put(c, ob);
1149 bch2_open_bucket_put(c, ob);
1153 mutex_lock(&c->ec_stripe_new_lock);
1155 mutex_unlock(&c->ec_stripe_new_lock);
1157 ec_stripe_buf_exit(&s->existing_stripe);
1158 ec_stripe_buf_exit(&s->new_stripe);
1159 closure_debug_destroy(&s->iodone);
1161 ec_stripe_new_put(c, s, STRIPE_REF_stripe);
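/*
 * So stripe creation, end to end: wait for the data bucket writes, zero any
 * unwritten tails, (on the reuse path) verify and if need be recover the
 * blocks read from the existing stripe, generate parity and checksums,
 * write out the parity blocks, insert or update the stripe key, and finally
 * rewrite the extents in the data buckets to carry stripe pointers.
 */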
1164 static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
1166 struct ec_stripe_new *s;
1168 mutex_lock(&c->ec_stripe_new_lock);
1169 list_for_each_entry(s, &c->ec_stripe_new_list, list)
1170 if (!atomic_read(&s->ref[STRIPE_REF_io]))
1174 mutex_unlock(&c->ec_stripe_new_lock);
1179 static void ec_stripe_create_work(struct work_struct *work)
1181 struct bch_fs *c = container_of(work,
1182 struct bch_fs, ec_stripe_create_work);
1183 struct ec_stripe_new *s;
1185 while ((s = get_pending_stripe(c)))
1186 ec_stripe_create(s);
1188 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1191 void bch2_ec_do_stripe_creates(struct bch_fs *c)
1193 bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
1195 if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
1196 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1199 static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
1201 struct ec_stripe_new *s = h->s;
1203 BUG_ON(!s->allocated && !s->err);
1208 mutex_lock(&c->ec_stripe_new_lock);
1209 list_add(&s->list, &c->ec_stripe_new_list);
1210 mutex_unlock(&c->ec_stripe_new_lock);
1212 ec_stripe_new_put(c, s, STRIPE_REF_io);
1215 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
1217 struct ec_stripe_new *s = ob->ec;
1222 void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
1224 struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
1231 BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
1233 ca = bch_dev_bkey_exists(c, ob->dev);
1234 offset = ca->mi.bucket_size - ob->sectors_free;
1236 return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
1239 static int unsigned_cmp(const void *_l, const void *_r)
1241 unsigned l = *((const unsigned *) _l);
1242 unsigned r = *((const unsigned *) _r);
1244 return cmp_int(l, r);
1247 /* pick most common bucket size: */
1248 static unsigned pick_blocksize(struct bch_fs *c,
1249 struct bch_devs_mask *devs)
1252 unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
1255 } cur = { 0, 0 }, best = { 0, 0 };
1257 for_each_member_device_rcu(ca, c, i, devs)
1258 sizes[nr++] = ca->mi.bucket_size;
1260 sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
1262 for (i = 0; i < nr; i++) {
1263 if (sizes[i] != cur.size) {
1264 if (cur.nr > best.nr)
1268 cur.size = sizes[i];
1274 if (cur.nr > best.nr)
1280 static bool may_create_new_stripe(struct bch_fs *c)
1285 static void ec_stripe_key_init(struct bch_fs *c,
1286 struct bkey_i_stripe *s,
1289 unsigned stripe_size)
1293 bkey_stripe_init(&s->k_i);
1294 s->v.sectors = cpu_to_le16(stripe_size);
1296 s->v.nr_blocks = nr_data + nr_parity;
1297 s->v.nr_redundant = nr_parity;
1298 s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
1299 s->v.csum_type = BCH_CSUM_crc32c;
1302 while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
1303 BUG_ON(1 << s->v.csum_granularity_bits >=
1304 le16_to_cpu(s->v.sectors) ||
1305 s->v.csum_granularity_bits == U8_MAX);
1306 s->v.csum_granularity_bits++;
1309 set_bkey_val_u64s(&s->k, u64s);
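/*
 * If the checksum grid would make the key bigger than BKEY_VAL_U64s_MAX,
 * coarsen the granularity until it fits: each doubling of the granule size
 * halves stripe_csums_per_device(). E.g. a 128-sector stripe at 8-sector
 * granules stores 16 checksums per block, at 16-sector granules only 8
 * (values illustrative).
 */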
1312 static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
1314 struct ec_stripe_new *s;
1316 lockdep_assert_held(&h->lock);
1318 s = kzalloc(sizeof(*s), GFP_KERNEL);
1320 return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
1322 mutex_init(&s->lock);
1323 closure_init(&s->iodone, NULL);
1324 atomic_set(&s->ref[STRIPE_REF_stripe], 1);
1325 atomic_set(&s->ref[STRIPE_REF_io], 1);
1328 s->nr_data = min_t(unsigned, h->nr_active_devs,
1329 BCH_BKEY_PTRS_MAX) - h->redundancy;
1330 s->nr_parity = h->redundancy;
1332 ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
1333 s->nr_parity, h->blocksize);
1339 static struct ec_stripe_head *
1340 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
1341 unsigned algo, unsigned redundancy,
1342 enum alloc_reserve reserve)
1344 struct ec_stripe_head *h;
1348 h = kzalloc(sizeof(*h), GFP_KERNEL);
1352 mutex_init(&h->lock);
1353 BUG_ON(!mutex_trylock(&h->lock));
1357 h->redundancy = redundancy;
1358 h->reserve = reserve;
1361 h->devs = target_rw_devs(c, BCH_DATA_user, target);
1363 for_each_member_device_rcu(ca, c, i, &h->devs)
1364 if (!ca->mi.durability)
1365 __clear_bit(i, h->devs.d);
1367 h->blocksize = pick_blocksize(c, &h->devs);
1369 for_each_member_device_rcu(ca, c, i, &h->devs)
1370 if (ca->mi.bucket_size == h->blocksize)
1371 h->nr_active_devs++;
1374 list_add(&h->list, &c->ec_stripe_head_list);
1378 void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
1382 bitmap_weight(h->s->blocks_allocated,
1383 h->s->nr_data) == h->s->nr_data)
1384 ec_stripe_set_pending(c, h);
1386 mutex_unlock(&h->lock);
1389 struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
1392 unsigned redundancy,
1393 enum alloc_reserve reserve)
1395 struct bch_fs *c = trans->c;
1396 struct ec_stripe_head *h;
1402 ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
1404 return ERR_PTR(ret);
1406 if (test_bit(BCH_FS_GOING_RO, &c->flags)) {
1407 h = ERR_PTR(-EROFS);
1411 list_for_each_entry(h, &c->ec_stripe_head_list, list)
1412 if (h->target == target &&
1414 h->redundancy == redundancy &&
1415 h->reserve == reserve) {
1416 ret = bch2_trans_mutex_lock(trans, &h->lock);
1422 h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
1424 mutex_unlock(&c->ec_stripe_head_lock);
1428 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
1429 enum alloc_reserve reserve, struct closure *cl)
1431 struct bch_fs *c = trans->c;
1432 struct bch_devs_mask devs = h->devs;
1433 struct open_bucket *ob;
1434 struct open_buckets buckets;
1435 unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
1436 bool have_cache = true;
1439 BUG_ON(h->s->new_stripe.key.v.nr_blocks != h->s->nr_data + h->s->nr_parity);
1440 BUG_ON(h->s->new_stripe.key.v.nr_redundant != h->s->nr_parity);
1442 for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
1443 __clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
1444 if (i < h->s->nr_data)
1450 BUG_ON(nr_have_data > h->s->nr_data);
1451 BUG_ON(nr_have_parity > h->s->nr_parity);
1454 if (nr_have_parity < h->s->nr_parity) {
1455 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1465 open_bucket_for_each(c, &buckets, ob, i) {
1466 j = find_next_zero_bit(h->s->blocks_gotten,
1467 h->s->nr_data + h->s->nr_parity,
1469 BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
1471 h->s->blocks[j] = buckets.v[i];
1472 h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
1473 __set_bit(j, h->s->blocks_gotten);
1481 if (nr_have_data < h->s->nr_data) {
1482 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1492 open_bucket_for_each(c, &buckets, ob, i) {
1493 j = find_next_zero_bit(h->s->blocks_gotten,
1495 BUG_ON(j >= h->s->nr_data);
1497 h->s->blocks[j] = buckets.v[i];
1498 h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
1499 __set_bit(j, h->s->blocks_gotten);
1509 /* XXX: doesn't obey target: */
1510 static s64 get_existing_stripe(struct bch_fs *c,
1511 struct ec_stripe_head *head)
1513 ec_stripes_heap *h = &c->ec_stripes_heap;
1519 if (may_create_new_stripe(c))
1522 mutex_lock(&c->ec_stripes_heap_lock);
1523 for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
1524 /* No blocks worth reusing, stripe will just be deleted: */
1525 if (!h->data[heap_idx].blocks_nonempty)
1528 stripe_idx = h->data[heap_idx].idx;
1530 m = genradix_ptr(&c->stripes, stripe_idx);
1532 if (m->algorithm == head->algo &&
1533 m->nr_redundant == head->redundancy &&
1534 m->sectors == head->blocksize &&
1535 m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
1536 bch2_try_open_stripe(c, head->s, stripe_idx)) {
1541 mutex_unlock(&c->ec_stripes_heap_lock);
1545 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
1547 struct bch_fs *c = trans->c;
1553 * If we can't allocate a new stripe, and there are no stripes with empty
1554 * blocks for us to reuse, that means we have to wait on copygc:
1556 idx = get_existing_stripe(c, h);
1558 return -BCH_ERR_stripe_alloc_blocked;
1560 ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
1562 bch2_stripe_close(c, h->s);
1563 if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
1564 bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
1568 BUG_ON(h->s->existing_stripe.key.v.nr_redundant != h->s->nr_parity);
1569 h->s->nr_data = h->s->existing_stripe.key.v.nr_blocks -
1570 h->s->existing_stripe.key.v.nr_redundant;
1572 ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
1574 bch2_stripe_close(c, h->s);
1578 BUG_ON(h->s->existing_stripe.size != h->blocksize);
1579 BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);
1582 * Free buckets we initially allocated - they might conflict with
1583 * blocks from the stripe we're reusing:
1585 for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
1586 bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
1587 h->s->blocks[i] = 0;
1589 memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
1590 memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
1592 for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
1593 if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
1594 __set_bit(i, h->s->blocks_gotten);
1595 __set_bit(i, h->s->blocks_allocated);
1598 ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
1601 bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i);
1602 h->s->have_existing_stripe = true;
1607 static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
1609 struct bch_fs *c = trans->c;
1610 struct btree_iter iter;
1612 struct bpos min_pos = POS(0, 1);
1613 struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
1616 if (!h->s->res.sectors) {
1617 ret = bch2_disk_reservation_get(c, &h->s->res,
1620 BCH_DISK_RESERVATION_NOFAIL);
1625 for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
1626 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
1627 if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
1628 if (start_pos.offset) {
1629 start_pos = min_pos;
1630 bch2_btree_iter_set_pos(&iter, start_pos);
1634 ret = -BCH_ERR_ENOSPC_stripe_create;
1638 if (bkey_deleted(k.k) &&
1639 bch2_try_open_stripe(c, h->s, k.k->p.offset))
1643 c->ec_stripe_hint = iter.pos.offset;
1648 ret = ec_stripe_mem_alloc(trans, &iter);
1650 bch2_stripe_close(c, h->s);
1654 h->s->new_stripe.key.k.p = iter.pos;
1656 bch2_trans_iter_exit(trans, &iter);
1659 bch2_disk_reservation_put(c, &h->s->res);
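/*
 * Stripe indices are allocated from a cursor, c->ec_stripe_hint: scan
 * forward from the hint for a deleted slot that bch2_try_open_stripe() can
 * claim, wrap around to POS(0, 1) once, and only then fail with
 * -BCH_ERR_ENOSPC_stripe_create; the scan never goes above U32_MAX.
 */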
1663 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
1666 unsigned redundancy,
1667 enum alloc_reserve reserve,
1670 struct bch_fs *c = trans->c;
1671 struct ec_stripe_head *h;
1672 bool waiting = false;
1675 h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
1677 bch_err(c, "no stripe head");
1678 if (IS_ERR_OR_NULL(h))
1682 ret = ec_new_stripe_alloc(c, h);
1684 bch_err(c, "failed to allocate new stripe");
1689 if (h->s->allocated)
1692 if (h->s->have_existing_stripe)
1693 goto alloc_existing;
1695 /* First, try to allocate a full stripe: */
1696 ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
1697 __bch2_ec_stripe_head_reserve(trans, h);
1700 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1701 bch2_err_matches(ret, ENOMEM))
1705 * Not enough buckets available for a full stripe: we must reuse an
1709 ret = __bch2_ec_stripe_head_reuse(trans, h);
1712 if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
1715 if (reserve == RESERVE_movinggc) {
1716 ret = new_stripe_alloc_buckets(trans, h, reserve, NULL) ?:
1717 __bch2_ec_stripe_head_reserve(trans, h);
1723 /* XXX freelist_wait? */
1724 closure_wait(&c->freelist_wait, cl);
1729 closure_wake_up(&c->freelist_wait);
1732 * Retry allocating buckets, with the reserve watermark for this
1735 ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
1740 ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
1744 h->s->allocated = true;
1747 BUG_ON(!h->s->new_stripe.data[0]);
1748 BUG_ON(trans->restarted);
1751 bch2_ec_stripe_head_put(c, h);
1752 return ERR_PTR(ret);
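/*
 * Allocation strategy above, in order of preference: a brand new full
 * stripe (RESERVE_stripe), then reuse of a partially empty existing stripe,
 * and only then waiting on copygc via freelist_wait (or returning
 * -BCH_ERR_stripe_alloc_blocked to a caller that didn't pass a closure) -
 * except that movinggc must not wait, so it retries the new-stripe path
 * with its own reserve instead.
 */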
1755 static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
1757 struct ec_stripe_head *h;
1758 struct open_bucket *ob;
1761 mutex_lock(&c->ec_stripe_head_lock);
1762 list_for_each_entry(h, &c->ec_stripe_head_list, list) {
1763 mutex_lock(&h->lock);
1770 for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
1771 if (!h->s->blocks[i])
1774 ob = c->open_buckets + h->s->blocks[i];
1775 if (ob->dev == ca->dev_idx)
1781 ec_stripe_set_pending(c, h);
1783 mutex_unlock(&h->lock);
1785 mutex_unlock(&c->ec_stripe_head_lock);
1788 void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
1790 __bch2_ec_stop(c, ca);
1793 void bch2_fs_ec_stop(struct bch_fs *c)
1795 __bch2_ec_stop(c, NULL);
1798 static bool bch2_fs_ec_flush_done(struct bch_fs *c)
1802 mutex_lock(&c->ec_stripe_new_lock);
1803 ret = list_empty(&c->ec_stripe_new_list);
1804 mutex_unlock(&c->ec_stripe_new_lock);
1809 void bch2_fs_ec_flush(struct bch_fs *c)
1811 wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
1814 int bch2_stripes_read(struct bch_fs *c)
1816 struct btree_trans trans;
1817 struct btree_iter iter;
1819 const struct bch_stripe *s;
1824 bch2_trans_init(&trans, c, 0, 0);
1826 for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
1827 BTREE_ITER_PREFETCH, k, ret) {
1828 if (k.k->type != KEY_TYPE_stripe)
1831 ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
1835 s = bkey_s_c_to_stripe(k).v;
1837 m = genradix_ptr(&c->stripes, k.k->p.offset);
1838 m->sectors = le16_to_cpu(s->sectors);
1839 m->algorithm = s->algorithm;
1840 m->nr_blocks = s->nr_blocks;
1841 m->nr_redundant = s->nr_redundant;
1842 m->blocks_nonempty = 0;
1844 for (i = 0; i < s->nr_blocks; i++)
1845 m->blocks_nonempty += !!stripe_blockcount_get(s, i);
1847 bch2_stripes_heap_insert(c, m, k.k->p.offset);
1849 bch2_trans_iter_exit(&trans, &iter);
1851 bch2_trans_exit(&trans);
1854 bch_err(c, "error reading stripes: %i", ret);
1859 void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
1861 ec_stripes_heap *h = &c->ec_stripes_heap;
1865 mutex_lock(&c->ec_stripes_heap_lock);
1866 for (i = 0; i < min_t(size_t, h->used, 50); i++) {
1867 m = genradix_ptr(&c->stripes, h->data[i].idx);
1869 prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
1870 h->data[i].blocks_nonempty,
1871 m->nr_blocks - m->nr_redundant,
1873 if (bch2_stripe_is_open(c, h->data[i].idx))
1874 prt_str(out, " open");
1877 mutex_unlock(&c->ec_stripes_heap_lock);
1880 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
1882 struct ec_stripe_head *h;
1883 struct ec_stripe_new *s;
1885 mutex_lock(&c->ec_stripe_head_lock);
1886 list_for_each_entry(h, &c->ec_stripe_head_list, list) {
1887 prt_printf(out, "target %u algo %u redundancy %u %s:\n",
1888 h->target, h->algo, h->redundancy,
1889 bch2_alloc_reserves[h->reserve]);
1892 prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
1893 h->s->idx, h->s->nr_data, h->s->nr_parity,
1894 bitmap_weight(h->s->blocks_allocated,
1897 mutex_unlock(&c->ec_stripe_head_lock);
1899 prt_printf(out, "in flight:\n");
1901 mutex_lock(&c->ec_stripe_new_lock);
1902 list_for_each_entry(s, &c->ec_stripe_new_list, list) {
1903 prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
1904 s->idx, s->nr_data, s->nr_parity,
1905 atomic_read(&s->ref[STRIPE_REF_io]),
1906 atomic_read(&s->ref[STRIPE_REF_stripe]),
1907 bch2_alloc_reserves[s->h->reserve]);
1909 mutex_unlock(&c->ec_stripe_new_lock);
1912 void bch2_fs_ec_exit(struct bch_fs *c)
1914 struct ec_stripe_head *h;
1918 mutex_lock(&c->ec_stripe_head_lock);
1919 h = list_first_entry_or_null(&c->ec_stripe_head_list,
1920 struct ec_stripe_head, list);
1923 mutex_unlock(&c->ec_stripe_head_lock);
1928 for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++)
1929 BUG_ON(h->s->blocks[i]);
1936 BUG_ON(!list_empty(&c->ec_stripe_new_list));
1938 free_heap(&c->ec_stripes_heap);
1939 genradix_free(&c->stripes);
1940 bioset_exit(&c->ec_bioset);
1943 void bch2_fs_ec_init_early(struct bch_fs *c)
1945 spin_lock_init(&c->ec_stripes_new_lock);
1946 mutex_init(&c->ec_stripes_heap_lock);
1948 INIT_LIST_HEAD(&c->ec_stripe_head_list);
1949 mutex_init(&c->ec_stripe_head_lock);
1951 INIT_LIST_HEAD(&c->ec_stripe_new_list);
1952 mutex_init(&c->ec_stripe_new_lock);
1953 init_waitqueue_head(&c->ec_stripe_new_wait);
1955 INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
1956 INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
1959 int bch2_fs_ec_init(struct bch_fs *c)
1961 return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),