// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "keylist.h"
#include "recovery.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
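/*
 * Note on the XOR recovery above: with a single parity block, P is the XOR of
 * all data blocks, so any one missing block is the XOR of P and the surviving
 * blocks. A byte-sized worked example (values illustrative only):
 *
 *	D0 = 0x0f, D1 = 0x35	=> P = D0 ^ D1 = 0x3a
 *	lose D1:		   D1 = P ^ D0 = 0x3a ^ 0x0f = 0x35
 *
 * raid5_recov() computes that XOR in MAX_XOR_BLOCKS-sized batches via
 * xor_blocks(), temporarily swapping the failed block into slot 0 so the
 * result accumulates directly into its buffer.
 */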
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		BUG_ON(np < 2);

		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			/* p + q failure: just regenerate both */
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
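/*
 * Summary of the failure cases handled above (the standard RAID6 taxonomy,
 * matching the lib/raid6 helpers): a single missing data block or P is
 * re-derived by XOR, and a single missing Q is simply regenerated. For double
 * failures, data+data uses raid6_2data_recov(), data+P uses
 * raid6_datap_recov(), data+Q recovers the data via XOR and then regenerates
 * Q, and P+Q are both regenerated from the intact data.
 */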
#else

#include <raid/raid.h>

#endif
struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};
/* Stripes btree keys: */
const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (k.k->p.inode)
		return "invalid stripe key";

	if (bkey_val_bytes(k.k) < sizeof(*s) ||
	    bkey_val_u64s(k.k) < stripe_val_u64s(s))
		return "incorrect value size";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i;

	pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
	       s->algorithm,
	       le16_to_cpu(s->sectors),
	       s->nr_blocks - s->nr_redundant,
	       s->nr_redundant,
	       s->csum_type,
	       1U << s->csum_granularity_bits);

	for (i = 0; i < s->nr_blocks; i++)
		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
		       (u64) s->ptrs[i].offset,
		       stripe_blockcount_get(s, i));

	bch2_bkey_ptrs_to_text(out, c, k);
}
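/*
 * Example of the formatting above for a 2+1 stripe (values illustrative):
 *
 *	algo 0 sectors 1024 blocks 2:1 csum 1 gran 64 0:512:1024 1:768:1024 2:256:0
 *
 * i.e. algorithm, stripe size, data:redundancy block counts, checksum type
 * and granularity, then dev:offset:live-sectors for each block.
 */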
static int ptr_matches_stripe(struct bch_fs *c,
			      struct bch_stripe *v,
			      const struct bch_extent_ptr *ptr)
{
	unsigned i;

	for (i = 0; i < v->nr_blocks - v->nr_redundant; i++) {
		const struct bch_extent_ptr *ptr2 = v->ptrs + i;

		if (ptr->dev == ptr2->dev &&
		    ptr->gen == ptr2->gen &&
		    ptr->offset >= ptr2->offset &&
		    ptr->offset <  ptr2->offset + le16_to_cpu(v->sectors))
			return i;
	}

	return -1;
}
static int extent_matches_stripe(struct bch_fs *c,
				 struct bch_stripe *v,
				 struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const struct bch_extent_ptr *ptr;
		int idx;

		extent_for_each_ptr(e, ptr) {
			idx = ptr_matches_stripe(c, v, ptr);
			if (idx >= 0)
				return idx;
		}
		break;
	}
	}

	return -1;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i_stripe *s,
			       struct open_buckets *blocks,
			       struct open_buckets *parity,
			       unsigned stripe_size)
{
	struct open_bucket *ob;
	unsigned i, u64s;

	bkey_stripe_init(&s->k_i);
	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= parity->nr + blocks->nr;
	s->v.nr_redundant		= parity->nr;
	s->v.csum_granularity_bits	= ilog2(c->sb.encoded_extent_max);
	s->v.csum_type			= BCH_CSUM_CRC32C;
	s->v.pad			= 0;

	open_bucket_for_each(c, blocks, ob, i)
		s->v.ptrs[i]			= ob->ptr;

	open_bucket_for_each(c, parity, ob, i)
		s->v.ptrs[blocks->nr + i]	= ob->ptr;

	/*
	 * If the key is too big to fit in a bkey, coarsen the checksum
	 * granularity until it does - fewer checksums per block means a
	 * smaller value:
	 */
	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
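/*
 * Rough illustration of the widening loop above: a stripe with many blocks at
 * fine checksum granularity may overflow BKEY_VAL_U64s_MAX. E.g. 1024-sector
 * blocks checksummed every 8 sectors need 128 crc32c entries (4 bytes each)
 * per block; doubling the granularity to 16 sectors halves that to 64, and
 * the loop keeps doubling until the whole value fits. (Numbers illustrative.)
 */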
static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned csums_per_device = stripe_csums_per_device(v);
	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
	unsigned i, j;

	if (!csum_bytes)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++) {
		for (j = 0; j < csums_per_device; j++) {
			unsigned offset = j << v->csum_granularity_bits;
			unsigned len = min(csum_granularity, buf->size - offset);

			struct bch_csum csum =
				bch2_checksum(NULL, v->csum_type,
					      null_nonce(),
					      buf->data[i] + (offset << 9),
					      len << 9);

			memcpy(stripe_csum(v, i, j), &csum, csum_bytes);
		}
	}
}
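/*
 * Checksum layout sketch: each block carries csums_per_device checksums, one
 * per csum_granularity-sized run of sectors, stored inline in the stripe key
 * via stripe_csum(). The final run may be short, hence the min() on len above.
 */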
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
	unsigned i;

	if (!csum_bytes)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum csum;

			BUG_ON(offset & (csum_granularity - 1));
			BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
			       ((offset + len) & (csum_granularity - 1)));

			csum = bch2_checksum(NULL, v->csum_type,
					     null_nonce(),
					     buf->data[i] + ((offset - buf->offset) << 9),
					     len << 9);

			if (memcmp(stripe_csum(v, i, j), &csum, csum_bytes)) {
				__bcache_io_error(c,
					"checksum error while doing reconstruct read (%u:%u)",
					i, j);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}
/* Erasure coding: */
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}
static unsigned __ec_nr_failed(struct ec_stripe_buf *buf, unsigned nr)
{
	return nr - bitmap_weight(buf->valid, nr);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	return __ec_nr_failed(buf, buf->key.v.nr_blocks);
}
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, failed[EC_STRIPE_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		__bcache_io_error(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);

	return 0;
}
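/*
 * Only failed *data* blocks are collected for raid_rec() above: this is the
 * reconstruct-read path, so a failed parity block doesn't need to be
 * regenerated, merely tolerated (within nr_redundant total failures).
 */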
static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding"))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			unsigned rw, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca	= ca;
		ec_bio->buf	= buf;
		ec_bio->idx	= idx;

		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
		bio_set_op_attrs(&ec_bio->bio, rw, 0);

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
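/*
 * The loop above splits the block into BIO_MAX_PAGES-sized bios. Each bio
 * holds a ref on the caller's closure and on the device; both are dropped in
 * ec_block_endio(), so a closure_sync() in the caller waits for every piece.
 */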
/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bkey_s_c k;
	struct bch_stripe *v;
	u64 stripe_idx;
	unsigned offset, end;
	unsigned i, nr_data, csum_granularity;
	int ret = 0, idx;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.idx ||
	       rbio->pick.idx - 1 >= rbio->pick.ec_nr);

	stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx;

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC,
				   POS(0, stripe_idx),
				   BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k) || k.k->type != KEY_TYPE_stripe) {
		__bcache_io_error(c,
			"error doing reconstruct read: stripe not found");
		kfree(buf);
		return bch2_trans_exit(&trans) ?: -EIO;
	}

	bkey_reassemble(&buf->key.k_i, k);
	bch2_trans_exit(&trans);

	v = &buf->key.v;

	nr_data = v->nr_blocks - v->nr_redundant;

	idx = ptr_matches_stripe(c, v, &rbio->pick.ptr);
	BUG_ON(idx < 0);

	csum_granularity = 1U << v->csum_granularity_bits;

	offset	= rbio->bio.bi_iter.bi_sector - v->ptrs[idx].offset;
	end	= offset + bio_sectors(&rbio->bio);

	BUG_ON(end > le16_to_cpu(v->sectors));

	buf->offset	= round_down(offset, csum_granularity);
	buf->size	= min_t(unsigned, le16_to_cpu(v->sectors),
				round_up(end, csum_granularity)) - buf->offset;

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kmalloc(buf->size << 9, GFP_NOIO);
		if (!buf->data[i]) {
			ret = -ENOMEM;
			goto err;
		}
	}

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		struct bch_extent_ptr *ptr = v->ptrs + i;
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr_stale(ca, ptr)) {
			__bcache_io_error(c,
				"error doing reconstruct read: stale pointer");
			clear_bit(i, buf->valid);
			continue;
		}

		ec_block_io(c, buf, REQ_OP_READ, i, &cl);
	}

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		__bcache_io_error(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[idx] + ((offset - buf->offset) << 9));
err:
	for (i = 0; i < v->nr_blocks; i++)
		kfree(buf->data[i]);
	kfree(buf);
	return ret;
}
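/*
 * Reconstruct read, in outline: look up the stripe key the extent pointer
 * references, read every block of the stripe over the checksum-aligned range,
 * drop blocks that are stale or fail their checksums, XOR/RS-recover the
 * missing data, then copy the requested range into the original read bio.
 */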
/* stripe bucket accounting: */
static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -ENOMEM;

		spin_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		spin_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes[0], idx, gfp))
		return -ENOMEM;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->stripes[1], idx, gfp))
		return -ENOMEM;

	return 0;
}
static int ec_stripe_mem_alloc(struct bch_fs *c,
			       struct btree_iter *iter)
{
	size_t idx = iter->pos.offset;
	int ret = 0;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT))
		return ret;

	/* couldn't allocate atomically: drop locks, retry blocking */
	bch2_trans_unlock(iter->trans);
	ret = -EINTR;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
		return ret;

	return -ENOMEM;
}
static ssize_t stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	return h->used && h->data[0].blocks_nonempty == 0
		? h->data[0].idx : -1;
}
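/*
 * The heap is ordered by blocks_nonempty, so the root (h->data[0]) is always
 * the stripe with the fewest non-empty blocks: if that count is zero, no live
 * data references the stripe and it can be deleted.
 */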
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}
static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes[0], h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes[0], idx);

	BUG_ON(!m->alive);
	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	size_t i;

	if (m->alive) {
		heap_verify_backpointer(c, idx);

		h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

		i = m->heap_idx;
		heap_sift_up(h,   i, ec_stripes_heap_cmp,
			     ec_stripes_heap_set_backpointer);
		heap_sift_down(h, i, ec_stripes_heap_cmp,
			       ec_stripes_heap_set_backpointer);

		heap_verify_backpointer(c, idx);
	} else {
		bch2_stripes_heap_insert(c, m, idx);
	}

	if (stripe_idx_to_delete(c) >= 0 &&
	    !percpu_ref_is_dying(&c->writes))
		schedule_work(&c->ec_stripe_delete_work);
}
void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	heap_verify_backpointer(c, idx);

	m->alive = false;
	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	BUG_ON(heap_full(&c->ec_stripes_heap));

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
	m->alive = true;

	heap_verify_backpointer(c, idx);
}
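/*
 * Invariant maintained by the three helpers above: for every alive stripe m,
 * h->data[m->heap_idx].idx points back at m's slot in the stripes radix tree.
 * Every heap operation is passed ec_stripes_heap_set_backpointer() so the
 * backpointer follows an entry whenever it moves, and
 * heap_verify_backpointer() asserts the invariant before and after updates.
 */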
/* stripe deletion */
static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
	return bch2_btree_delete_range(c, BTREE_ID_EC,
				       POS(0, idx),
				       POS(0, idx + 1),
				       NULL);
}
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	ssize_t idx;

	down_read(&c->gc_lock);
	mutex_lock(&c->ec_stripe_create_lock);

	while (1) {
		spin_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		spin_unlock(&c->ec_stripes_heap_lock);

		if (idx < 0)
			break;

		if (ec_stripe_delete(c, idx))
			break;
	}

	mutex_unlock(&c->ec_stripe_create_lock);
	up_read(&c->gc_lock);
}
/* stripe creation: */
static int ec_stripe_bkey_insert(struct bch_fs *c,
				 struct bkey_i_stripe *stripe)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bpos start_pos = POS(0, c->ec_stripe_hint);
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	/* scan for an empty slot, starting from the allocation hint: */
	for_each_btree_key(&trans, iter, BTREE_ID_EC, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
			if (start_pos.offset) {
				start_pos = POS_MIN;
				bch2_btree_iter_set_pos(iter, start_pos);
				continue;
			}

			ret = -ENOSPC;
			break;
		}

		if (bkey_deleted(k.k))
			goto found_slot;
	}

	goto err;
found_slot:
	start_pos = iter->pos;

	ret = ec_stripe_mem_alloc(c, iter);
	if (ret)
		goto err;

	stripe->k.p = iter->pos;

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));

	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL);
err:
	if (ret == -EINTR)
		goto retry;

	c->ec_stripe_hint = ret ? start_pos.offset : start_pos.offset + 1;
	bch2_trans_exit(&trans);

	return ret;
}
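/*
 * Stripe indices are allocated from a rotating hint (c->ec_stripe_hint): the
 * search starts at the hint, wraps back to 0 once past the U32_MAX ceiling,
 * and on success the hint advances past the slot just consumed.
 */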
static void extent_stripe_ptr_add(struct bkey_s_extent e,
				  struct ec_stripe_buf *s,
				  struct bch_extent_ptr *ptr,
				  unsigned block)
{
	struct bch_extent_stripe_ptr *dst = (void *) ptr;
	union bch_extent_entry *end = extent_entry_last(e);

	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
	e.k->u64s += sizeof(*dst) / sizeof(u64);

	*dst = (struct bch_extent_stripe_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block	= block,
		.idx	= s->key.k.p.offset,
	};
}
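/*
 * Entry layout before/after the insertion above (entries are u64-aligned):
 *
 *	before:	... [ptr] [remaining entries]
 *	after:	... [stripe_ptr] [ptr] [remaining entries]
 *
 * i.e. the tail is shifted up by one entry and the new stripe pointer is
 * written where the device pointer used to start, so it precedes the pointer
 * it annotates.
 */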
static int ec_stripe_update_ptrs(struct bch_fs *c,
				 struct ec_stripe_buf *s,
				 struct bkey *pos)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	BKEY_PADDED(k) tmp;
	int ret = 0, dev, idx;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(pos),
				   BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
			bch2_btree_iter_next(iter);
			continue;
		}

		idx = extent_matches_stripe(c, &s->key.v, k);
		if (idx < 0) {
			bch2_btree_iter_next(iter);
			continue;
		}

		bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));

		dev = s->key.v.ptrs[idx].dev;

		bkey_reassemble(&tmp.k, k);
		e = bkey_i_to_s_extent(&tmp.k);

		extent_for_each_ptr(e, ptr)
			if (ptr->dev != dev)
				ptr->cached = true;

		ptr = (void *) bch2_extent_has_device(e.c, dev);
		BUG_ON(!ptr);

		extent_stripe_ptr_add(e, s, ptr, idx);

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_USE_RESERVE);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}

	bch2_trans_exit(&trans);

	return ret;
}
/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bkey_i *k;
	struct bch_stripe *v = &s->stripe.key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	struct closure cl;
	int ret;

	BUG_ON(s->h->s == s);

	closure_init_stack(&cl);

	if (s->err) {
		bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	if (!percpu_ref_tryget(&c->writes))
		goto err;

	BUG_ON(bitmap_weight(s->blocks_allocated,
			     s->blocks.nr) != s->blocks.nr);

	ec_generate_ec(&s->stripe);

	ec_generate_checksums(&s->stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->stripe, REQ_OP_WRITE, i, &cl);

	closure_sync(&cl);

	for (i = nr_data; i < v->nr_blocks; i++)
		if (!test_bit(i, s->stripe.valid)) {
			bch_err(c, "error creating stripe: error writing redundancy buckets");
			goto err_put_writes;
		}

	mutex_lock(&c->ec_stripe_create_lock);

	ret = ec_stripe_bkey_insert(c, &s->stripe.key);
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err_unlock;
	}

	for_each_keylist_key(&s->keys, k) {
		ret = ec_stripe_update_ptrs(c, &s->stripe, &k->k);
		if (ret)
			break;
	}

err_unlock:
	mutex_unlock(&c->ec_stripe_create_lock);
err_put_writes:
	percpu_ref_put(&c->writes);
err:
	open_bucket_for_each(c, &s->blocks, ob, i) {
		ob->ec = NULL;
		__bch2_open_bucket_put(c, ob);
	}

	bch2_open_buckets_put(c, &s->parity);

	bch2_keylist_free(&s->keys, s->inline_keys);

	mutex_lock(&s->h->lock);
	list_del(&s->list);
	mutex_unlock(&s->h->lock);

	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
		kvpfree(s->stripe.data[i], s->stripe.size << 9);
	kfree(s);
}
static struct ec_stripe_new *ec_stripe_set_pending(struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	list_add(&s->list, &h->stripes);
	h->s = NULL;

	return s;
}
static void ec_stripe_new_put(struct ec_stripe_new *s)
{
	BUG_ON(atomic_read(&s->pin) <= 0);
	if (atomic_dec_and_test(&s->pin))
		ec_stripe_create(s);
}
/* have a full bucket - hand it off to be erasure coded: */
void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	if (ob->sectors_free)
		s->err = -1;

	ec_stripe_new_put(s);
}
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	ca	= bch_dev_bkey_exists(c, ob->ptr.dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->stripe.data[ob->ec_idx] + (offset << 9);
}
void bch2_ec_add_backpointer(struct bch_fs *c, struct write_point *wp,
			     struct bpos pos, unsigned sectors)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct ec_stripe_new *ec;

	if (!ob)
		return;

	ec = ob->ec;
	mutex_lock(&ec->lock);

	if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
				 ARRAY_SIZE(ec->inline_keys),
				 BKEY_U64s)) {
		BUG();
	}

	bkey_init(&ec->keys.top->k);
	ec->keys.top->k.p = pos;
	bch2_key_resize(&ec->keys.top->k, sectors);
	bch2_keylist_push(&ec->keys);

	mutex_unlock(&ec->lock);
}
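/*
 * The keys recorded here are just (pos, size) placeholders for extents
 * written into the new stripe; once the stripe key exists,
 * ec_stripe_update_ptrs() walks this keylist and rewrites those extents to
 * carry a stripe pointer.
 */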
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}
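/*
 * E.g. bucket sizes [128, 256, 128, 512, 128] sort to
 * [128, 128, 128, 256, 512]; the longest run is three 128s, so 128 becomes
 * the stripe blocksize, and only devices with 128-sector buckets count as
 * active for stripe allocation below.
 */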
int bch2_ec_stripe_new_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;
	unsigned i;

	BUG_ON(h->parity.nr != h->redundancy);
	BUG_ON(!h->blocks.nr);
	BUG_ON(h->parity.nr + h->blocks.nr > EC_STRIPE_MAX);
	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	mutex_init(&s->lock);
	atomic_set(&s->pin, 1);
	s->c		= c;
	s->h		= h;
	s->blocks	= h->blocks;
	s->parity	= h->parity;

	memset(&h->blocks, 0, sizeof(h->blocks));
	memset(&h->parity, 0, sizeof(h->parity));

	bch2_keylist_init(&s->keys, s->inline_keys);

	s->stripe.offset	= 0;
	s->stripe.size		= h->blocksize;
	memset(s->stripe.valid, 0xFF, sizeof(s->stripe.valid));

	ec_stripe_key_init(c, &s->stripe.key,
			   &s->blocks, &s->parity,
			   h->blocksize);

	for (i = 0; i < s->stripe.key.v.nr_blocks; i++) {
		s->stripe.data[i] = kvpmalloc(s->stripe.size << 9, GFP_KERNEL);
		if (!s->stripe.data[i])
			goto err;
	}

	h->s = s;

	return 0;
err:
	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
		kvpfree(s->stripe.data[i], s->stripe.size << 9);
	kfree(s);
	return -ENOMEM;
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	mutex_lock(&h->lock);
	INIT_LIST_HEAD(&h->stripes);

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_USER, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_new_stripe_list);
	return h;
}
void bch2_ec_stripe_head_put(struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = NULL;

	if (h->s &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->blocks.nr) == h->s->blocks.nr)
		s = ec_stripe_set_pending(h);

	mutex_unlock(&h->lock);

	if (s)
		ec_stripe_new_put(s);
}
struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy)
{
	struct ec_stripe_head *h;

	if (!redundancy)
		return NULL;

	mutex_lock(&c->ec_new_stripe_lock);
	list_for_each_entry(h, &c->ec_new_stripe_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy) {
			mutex_lock(&h->lock);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy);
found:
	mutex_unlock(&c->ec_new_stripe_lock);
	return h;
}
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_new_stripe_lock);
	list_for_each_entry(h, &c->ec_new_stripe_list, list) {
		struct ec_stripe_new *s = NULL;

		mutex_lock(&h->lock);
		bch2_open_buckets_stop_dev(c, ca, &h->blocks);
		bch2_open_buckets_stop_dev(c, ca, &h->parity);

		if (!h->s)
			goto unlock;

		open_bucket_for_each(c, &h->s->blocks, ob, i)
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		open_bucket_for_each(c, &h->s->parity, ob, i)
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		goto unlock;
found:
		h->s->err = -EIO;
		s = ec_stripe_set_pending(h);
unlock:
		mutex_unlock(&h->lock);

		if (s)
			ec_stripe_new_put(s);
	}
	mutex_unlock(&c->ec_new_stripe_lock);
}
static int __bch2_stripe_write_key(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct stripe *m,
				   size_t idx,
				   struct bkey_i_stripe *new_key,
				   unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	unsigned i;
	int ret;

	bch2_btree_iter_set_pos(iter, POS(0, idx));

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_stripe)
		return -EIO;

	bkey_reassemble(&new_key->k_i, k);

	spin_lock(&c->ec_stripes_heap_lock);

	for (i = 0; i < new_key->v.nr_blocks; i++)
		stripe_blockcount_set(&new_key->v, i,
				      m->block_sectors[i]);
	m->dirty = false;

	spin_unlock(&c->ec_stripes_heap_lock);

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));

	return bch2_trans_commit(trans, NULL, NULL,
				 BTREE_INSERT_NOFAIL|flags);
}
int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct genradix_iter giter;
	struct bkey_i_stripe *new_key;
	struct stripe *m;
	int ret = 0;

	/* 255 u64s: the maximum size of a bkey value */
	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
	BUG_ON(!new_key);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	genradix_for_each(&c->stripes[0], giter, m) {
		if (!m->dirty)
			continue;

		ret = __bch2_stripe_write_key(&trans, iter, m, giter.pos,
					      new_key, flags);
		if (ret)
			break;

		*wrote = true;
	}

	bch2_trans_exit(&trans);

	kfree(new_key);

	return ret;
}
int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
	struct btree_trans trans;
	struct btree_iter *btree_iter;
	struct journal_iter journal_iter;
	struct bkey_s_c btree_k, journal_k, k;
	int ret;

	ret = bch2_fs_ec_start(c);
	if (ret)
		return ret;

	bch2_trans_init(&trans, c, 0, 0);

	btree_iter	= bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN, 0);
	journal_iter	= bch2_journal_iter_init(journal_keys, BTREE_ID_EC);

	btree_k		= bch2_btree_iter_peek(btree_iter);
	journal_k	= bch2_journal_iter_peek(&journal_iter);

	/* merge btree and journal keys; on ties the journal key wins: */
	while (1) {
		if (btree_k.k && journal_k.k) {
			int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);

			if (cmp < 0) {
				k = btree_k;
				btree_k = bch2_btree_iter_next(btree_iter);
			} else if (cmp == 0) {
				btree_k = bch2_btree_iter_next(btree_iter);
				k = journal_k;
				journal_k = bch2_journal_iter_next(&journal_iter);
			} else {
				k = journal_k;
				journal_k = bch2_journal_iter_next(&journal_iter);
			}
		} else if (btree_k.k) {
			k = btree_k;
			btree_k = bch2_btree_iter_next(btree_iter);
		} else if (journal_k.k) {
			k = journal_k;
			journal_k = bch2_journal_iter_next(&journal_iter);
		} else {
			break;
		}

		bch2_mark_key(c, k, 0, 0, NULL, 0,
			      BCH_BUCKET_MARK_ALLOC_READ|
			      BCH_BUCKET_MARK_NOATOMIC);
	}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret) {
		bch_err(c, "error reading stripes: %i", ret);
		return ret;
	}

	return 0;
}
int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	size_t i, idx = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);

	/* find the highest allocated stripe index: */
	k = bch2_btree_iter_prev(iter);
	if (!IS_ERR_OR_NULL(k.k))
		idx = k.k->p.offset + 1;
	ret = bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (!gc &&
	    !init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
		       GFP_KERNEL))
		return -ENOMEM;
#if 0
	ret = genradix_prealloc(&c->stripes[gc], idx, GFP_KERNEL);
#else
	for (i = 0; i < idx; i++)
		if (!genradix_ptr_alloc(&c->stripes[gc], i, GFP_KERNEL))
			return -ENOMEM;
#endif
	return 0;
}
int bch2_fs_ec_start(struct bch_fs *c)
{
	return bch2_ec_mem_alloc(c, false);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;

	while (1) {
		mutex_lock(&c->ec_new_stripe_lock);
		h = list_first_entry_or_null(&c->ec_new_stripe_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_new_stripe_lock);

		if (!h)
			break;

		BUG_ON(h->s);
		BUG_ON(!list_empty(&h->stripes));
		kfree(h);
	}

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes[0]);
	bioset_exit(&c->ec_bioset);
}
int bch2_fs_ec_init(struct bch_fs *c)
{
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);

	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}