// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "io.h"
#include "keylist.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
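/*
 * Illustrative sketch of the math: RAID5 parity is the XOR of all data
 * blocks, so with data blocks d0, d1 and parity p:
 *
 *	p  = d0 ^ d1
 *	d0 = p  ^ d1		(recovering a lost d0)
 *
 * raid5_recov() swaps the failed block into slot 0, seeds it from data[1],
 * then XORs in the remaining blocks in MAX_XOR_BLOCKS-sized batches.
 */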
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np > 2);
}
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data + data failure */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			/* p + q failure: just regenerate both */
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
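/*
 * Note: at most two failures are supported (np <= 2), and ir[] must be
 * sorted ascending - ec_do_recov() below builds it that way. Recovering
 * data + q reuses the RAID5 path to rebuild the data block from p, then
 * regenerates q from the now-complete data.
 */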
#else

#include <raid/raid.h>

#endif
struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};
/* Stripes btree keys: */
const char *bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

	if (k.k->p.inode)
		return "invalid stripe key";

	if (bkey_val_bytes(k.k) < sizeof(*s) ||
	    bkey_val_u64s(k.k) < stripe_val_u64s(s))
		return "incorrect value size";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned i;

	pr_buf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
	       s->algorithm,
	       le16_to_cpu(s->sectors),
	       s->nr_blocks - s->nr_redundant,
	       s->nr_redundant,
	       s->csum_type,
	       1U << s->csum_granularity_bits);

	for (i = 0; i < s->nr_blocks; i++)
		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
		       (u64) s->ptrs[i].offset,
		       stripe_blockcount_get(s, i));
}
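/*
 * Example of the rendered form, with hypothetical values - a 2+1 stripe of
 * 128 sectors might print as:
 *
 *	algo 0 sectors 128 blocks 2:1 csum 5 gran 8 0:256:64 1:512:64 2:768:0
 *
 * one dev:offset:blockcount triple per stripe block, parity last; csum is
 * the raw bch_csum_type enum value.
 */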
static int ptr_matches_stripe(struct bch_fs *c,
			      struct bch_stripe *v,
			      const struct bch_extent_ptr *ptr)
{
	unsigned i;

	for (i = 0; i < v->nr_blocks - v->nr_redundant; i++) {
		const struct bch_extent_ptr *ptr2 = v->ptrs + i;

		if (ptr->dev == ptr2->dev &&
		    ptr->gen == ptr2->gen &&
		    ptr->offset >= ptr2->offset &&
		    ptr->offset <  ptr2->offset + le16_to_cpu(v->sectors))
			return i;
	}

	return -1;
}
static int extent_matches_stripe(struct bch_fs *c,
				 struct bch_stripe *v,
				 struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const struct bch_extent_ptr *ptr;
		int idx;

		extent_for_each_ptr(e, ptr) {
			idx = ptr_matches_stripe(c, v, ptr);
			if (idx >= 0)
				return idx;
		}
		break;
	}
	}

	return -1;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;
		break;
	}
	}

	return false;
}
/* Checksumming: */

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned csums_per_device = stripe_csums_per_device(v);
	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
	unsigned i, j;

	if (!csum_bytes)
		return;

	BUG_ON(buf->offset);
	BUG_ON(buf->size != le16_to_cpu(v->sectors));

	for (i = 0; i < v->nr_blocks; i++) {
		for (j = 0; j < csums_per_device; j++) {
			unsigned offset = j << v->csum_granularity_bits;
			unsigned len = min(csum_granularity, buf->size - offset);

			struct bch_csum csum =
				bch2_checksum(NULL, v->csum_type,
					      null_nonce(),
					      buf->data[i] + (offset << 9),
					      len << 9);

			memcpy(stripe_csum(v, i, j), &csum, csum_bytes);
		}
	}
}
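/*
 * Layout note: checksums are stored per block, per csum_granularity-sized
 * chunk. With sectors = 128 and granularity = 8, each block carries
 * 128 / 8 = 16 checksums, so a partial reconstruct read only has to verify
 * the chunks it actually touches.
 */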
static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned csum_granularity = 1 << v->csum_granularity_bits;
	unsigned csum_bytes = bch_crc_bytes[v->csum_type];
	unsigned i;

	if (!csum_bytes)
		return;

	for (i = 0; i < v->nr_blocks; i++) {
		unsigned offset = buf->offset;
		unsigned end = buf->offset + buf->size;

		if (!test_bit(i, buf->valid))
			continue;

		while (offset < end) {
			unsigned j = offset >> v->csum_granularity_bits;
			unsigned len = min(csum_granularity, end - offset);
			struct bch_csum csum;

			BUG_ON(offset & (csum_granularity - 1));
			BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
			       ((offset + len) & (csum_granularity - 1)));

			csum = bch2_checksum(NULL, v->csum_type,
					     null_nonce(),
					     buf->data[i] + ((offset - buf->offset) << 9),
					     len << 9);

			if (memcmp(stripe_csum(v, i, j), &csum, csum_bytes)) {
				__bcache_io_error(c,
					"checksum error while doing reconstruct read (%u:%u)",
					i, j);
				clear_bit(i, buf->valid);
				break;
			}

			offset += len;
		}
	}
}
/* Erasure coding: */

static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}
static unsigned __ec_nr_failed(struct ec_stripe_buf *buf, unsigned nr)
{
	return nr - bitmap_weight(buf->valid, nr);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	return __ec_nr_failed(buf, buf->key.v.nr_blocks);
}
static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned i, failed[EC_STRIPE_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		__bcache_io_error(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);

	return 0;
}
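/*
 * Note: failed[] lists missing data blocks in ascending index order, which
 * is the order raid_rec() expects in ir[]; the caller has already checked
 * that no more than nr_redundant blocks failed in total.
 */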
/* IO: */

static void ec_block_endio(struct bio *bio)
{
	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
	struct bch_dev *ca = ec_bio->ca;
	struct closure *cl = bio->bi_private;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s: %s",
			       bio_data_dir(bio) ? "write" : "read",
			       bch2_blk_status_to_str(bio->bi_status)))
		clear_bit(ec_bio->idx, ec_bio->buf->valid);

	bio_put(&ec_bio->bio);
	percpu_ref_put(&ca->io_ref);
	closure_put(cl);
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
			unsigned rw, unsigned idx, struct closure *cl)
{
	struct bch_stripe *v = &buf->key.v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;

	if (!bch2_dev_get_ioref(ca, rw)) {
		clear_bit(idx, buf->valid);
		return;
	}

	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

	while (offset < bytes) {
		unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
					   DIV_ROUND_UP(bytes, PAGE_SIZE));
		unsigned b = min_t(size_t, bytes - offset,
				   nr_iovecs << PAGE_SHIFT);
		struct ec_bio *ec_bio;

		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
						       &c->ec_bioset),
				      struct ec_bio, bio);

		ec_bio->ca	= ca;
		ec_bio->buf	= buf;
		ec_bio->idx	= idx;

		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
		bio_set_op_attrs(&ec_bio->bio, rw, 0);

		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
		ec_bio->bio.bi_end_io		= ec_block_endio;
		ec_bio->bio.bi_private		= cl;

		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

		closure_get(cl);
		percpu_ref_get(&ca->io_ref);

		submit_bio(&ec_bio->bio);

		offset += b;
	}

	percpu_ref_put(&ca->io_ref);
}
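/*
 * Refcounting note: bch2_dev_get_ioref() pins the device for the duration of
 * the call; each sub-bio then takes its own ref on both the closure and
 * ca->io_ref, which ec_block_endio() drops on completion, so callers wait
 * for all outstanding blocks with a single closure_sync().
 */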
/* recovery read path: */
int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct ec_stripe_buf *buf;
	struct closure cl;
	struct bkey_s_c k;
	struct bch_stripe *v;
	unsigned stripe_idx;
	unsigned offset, end;
	unsigned i, nr_data, csum_granularity;
	int ret = 0, idx;

	closure_init_stack(&cl);

	BUG_ON(!rbio->pick.has_ec);

	stripe_idx = rbio->pick.ec.idx;

	buf = kzalloc(sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC,
				   POS(0, stripe_idx),
				   BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k) || k.k->type != KEY_TYPE_stripe) {
		__bcache_io_error(c,
			"error doing reconstruct read: stripe not found");
		kfree(buf);
		return bch2_trans_exit(&trans) ?: -EIO;
	}

	bkey_reassemble(&buf->key.k_i, k);
	bch2_trans_exit(&trans);

	v = &buf->key.v;

	nr_data = v->nr_blocks - v->nr_redundant;

	idx = ptr_matches_stripe(c, v, &rbio->pick.ptr);
	BUG_ON(idx < 0);

	csum_granularity = 1U << v->csum_granularity_bits;

	offset	= rbio->bio.bi_iter.bi_sector - v->ptrs[idx].offset;
	end	= offset + bio_sectors(&rbio->bio);

	BUG_ON(end > le16_to_cpu(v->sectors));

	buf->offset	= round_down(offset, csum_granularity);
	buf->size	= min_t(unsigned, le16_to_cpu(v->sectors),
				round_up(end, csum_granularity)) - buf->offset;

	for (i = 0; i < v->nr_blocks; i++) {
		buf->data[i] = kmalloc(buf->size << 9, GFP_NOIO);
		if (!buf->data[i]) {
			ret = -ENOMEM;
			goto err;
		}
	}

	memset(buf->valid, 0xFF, sizeof(buf->valid));

	for (i = 0; i < v->nr_blocks; i++) {
		struct bch_extent_ptr *ptr = v->ptrs + i;
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr_stale(ca, ptr)) {
			__bcache_io_error(c,
				"error doing reconstruct read: stale pointer");
			clear_bit(i, buf->valid);
			continue;
		}

		ec_block_io(c, buf, REQ_OP_READ, i, &cl);
	}

	closure_sync(&cl);

	if (ec_nr_failed(buf) > v->nr_redundant) {
		__bcache_io_error(c,
			"error doing reconstruct read: unable to read enough blocks");
		ret = -EIO;
		goto err;
	}

	ec_validate_checksums(c, buf);

	ret = ec_do_recov(c, buf);
	if (ret)
		goto err;

	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
		      buf->data[idx] + ((offset - buf->offset) << 9));
err:
	for (i = 0; i < v->nr_blocks; i++)
		kfree(buf->data[i]);
	kfree(buf);
	return ret;
}
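/*
 * Flow summary: a reconstruct read looks up the stripe key, reads every
 * block over the checksum-aligned window around the requested range, drops
 * blocks that fail IO, staleness or checksum checks, rebuilds missing data
 * from parity, then copies the requested range into the original bio.
 */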
/* stripe bucket accounting: */

static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
	ec_stripes_heap n, *h = &c->ec_stripes_heap;

	if (idx >= h->size) {
		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
			return -ENOMEM;

		spin_lock(&c->ec_stripes_heap_lock);
		if (n.size > h->size) {
			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
			n.used = h->used;
			swap(*h, n);
		}
		spin_unlock(&c->ec_stripes_heap_lock);

		free_heap(&n);
	}

	if (!genradix_ptr_alloc(&c->stripes[0], idx, gfp))
		return -ENOMEM;

	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
	    !genradix_ptr_alloc(&c->stripes[1], idx, gfp))
		return -ENOMEM;

	return 0;
}
static int ec_stripe_mem_alloc(struct bch_fs *c,
			       struct btree_iter *iter)
{
	size_t idx = iter->pos.offset;
	int ret = 0;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
		return ret;

	bch2_trans_unlock(iter->trans);
	ret = -EINTR;

	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
		return ret;

	return -ENOMEM;
}
static ssize_t stripe_idx_to_delete(struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;

	return h->used && h->data[0].blocks_nonempty == 0
		? h->data[0].idx : -1;
}
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}
static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes[0], h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes[0], idx);

	BUG_ON(!m->alive);
	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
void bch2_stripes_heap_del(struct bch_fs *c,
			   struct stripe *m, size_t idx)
{
	if (!m->on_heap)
		return;

	m->on_heap = false;

	heap_verify_backpointer(c, idx);

	heap_del(&c->ec_stripes_heap, m->heap_idx,
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);
}
void bch2_stripes_heap_insert(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	if (m->on_heap)
		return;

	BUG_ON(heap_full(&c->ec_stripes_heap));

	m->on_heap = true;

	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
			.idx = idx,
			.blocks_nonempty = m->blocks_nonempty,
		}),
		 ec_stripes_heap_cmp,
		 ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);
}
void bch2_stripes_heap_update(struct bch_fs *c,
			      struct stripe *m, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	size_t i;

	if (!m->on_heap)
		return;

	heap_verify_backpointer(c, idx);

	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

	i = m->heap_idx;
	heap_sift_up(h,   i, ec_stripes_heap_cmp,
		     ec_stripes_heap_set_backpointer);
	heap_sift_down(h, i, ec_stripes_heap_cmp,
		       ec_stripes_heap_set_backpointer);

	heap_verify_backpointer(c, idx);

	if (stripe_idx_to_delete(c) >= 0 &&
	    !percpu_ref_is_dying(&c->writes))
		schedule_work(&c->ec_stripe_delete_work);
}
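/*
 * Invariant: the heap is ordered by blocks_nonempty, so h->data[0] is always
 * the emptiest stripe - which is why stripe_idx_to_delete() only inspects
 * the root, and why an update that drops a stripe's count to zero can
 * immediately kick off the deletion work scheduled above.
 */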
/* stripe deletion */

static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
	//pr_info("deleting stripe %zu", idx);
	return bch2_btree_delete_range(c, BTREE_ID_EC,
				       POS(0, idx),
				       POS(0, idx + 1),
				       NULL);
}
static void ec_stripe_delete_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, ec_stripe_delete_work);
	ssize_t idx;

	while (1) {
		spin_lock(&c->ec_stripes_heap_lock);
		idx = stripe_idx_to_delete(c);
		if (idx < 0) {
			spin_unlock(&c->ec_stripes_heap_lock);
			break;
		}

		bch2_stripes_heap_del(c, genradix_ptr(&c->stripes[0], idx), idx);
		spin_unlock(&c->ec_stripes_heap_lock);

		if (ec_stripe_delete(c, idx))
			break;
	}
}
/* stripe creation: */

static int ec_stripe_bkey_insert(struct bch_fs *c,
				 struct ec_stripe_new *s,
				 struct bkey_i_stripe *stripe)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bpos start_pos = POS(0, c->ec_stripe_hint);
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	for_each_btree_key(&trans, iter, BTREE_ID_EC, start_pos,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
			if (start_pos.offset) {
				start_pos = POS_MIN;
				bch2_btree_iter_set_pos(iter, start_pos);
				continue;
			}

			ret = -ENOSPC;
			break;
		}

		if (bkey_deleted(k.k))
			goto found_slot;
	}

	goto err;
found_slot:
	start_pos = iter->pos;

	ret = ec_stripe_mem_alloc(c, iter);
	if (ret)
		goto err;

	stripe->k.p = iter->pos;

	bch2_trans_update(&trans, iter, &stripe->k_i, 0);

	ret = bch2_trans_commit(&trans, &s->res, NULL,
				BTREE_INSERT_NOFAIL);
err:
	bch2_trans_iter_put(&trans, iter);

	if (ret == -EINTR)
		goto retry;

	c->ec_stripe_hint = ret ? start_pos.offset : start_pos.offset + 1;
	bch2_trans_exit(&trans);

	return ret;
}
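/*
 * Allocation strategy: the search for a free slot starts at
 * c->ec_stripe_hint, wraps around to POS_MIN once if nothing is free above
 * the hint, and only gives up with -ENOSPC after a full pass; on success the
 * hint is advanced past the slot just used, so back-to-back stripe creations
 * don't rescan the same range.
 */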
static void extent_stripe_ptr_add(struct bkey_s_extent e,
				  struct ec_stripe_buf *s,
				  struct bch_extent_ptr *ptr,
				  unsigned block)
{
	struct bch_extent_stripe_ptr *dst = (void *) ptr;
	union bch_extent_entry *end = extent_entry_last(e);

	memmove_u64s_up(dst + 1, dst, (u64 *) end - (u64 *) dst);
	e.k->u64s += sizeof(*dst) / sizeof(u64);

	*dst = (struct bch_extent_stripe_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_stripe_ptr,
		.block	= block,
		.idx	= s->key.k.p.offset,
	};
}
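/*
 * Entry layout note: memmove_u64s_up() opens a gap immediately after the
 * extent's remaining device pointer, and the stripe_ptr entry written into
 * that gap records both the stripe key's index and which block of the stripe
 * the extent lives in - enough to find parity on a reconstruct read.
 */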
static int ec_stripe_update_ptrs(struct bch_fs *c,
				 struct ec_stripe_buf *s,
				 struct bkey *pos)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_s_extent e;
	struct bkey_on_stack sk;
	int ret = 0, dev, idx;

	bkey_on_stack_init(&sk);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	/* XXX this doesn't support the reflink btree */

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(pos),
				   BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
		struct bch_extent_ptr *ptr, *ec_ptr = NULL;

		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
			bch2_btree_iter_next(iter);
			continue;
		}

		idx = extent_matches_stripe(c, &s->key.v, k);
		if (idx < 0) {
			bch2_btree_iter_next(iter);
			continue;
		}

		dev = s->key.v.ptrs[idx].dev;

		bkey_on_stack_reassemble(&sk, c, k);
		e = bkey_i_to_s_extent(sk.k);

		bch2_bkey_drop_ptrs(e.s, ptr, ptr->dev != dev);
		ec_ptr = (void *) bch2_bkey_has_device(e.s_c, dev);
		BUG_ON(!ec_ptr);

		extent_stripe_ptr_add(e, s, ec_ptr, idx);

		bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
		bch2_trans_update(&trans, iter, sk.k, 0);

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_USE_RESERVE);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}

	bch2_trans_exit(&trans);
	bkey_on_stack_exit(&sk, c);

	return ret;
}
/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
	struct bch_fs *c = s->c;
	struct open_bucket *ob;
	struct bkey_i *k;
	struct stripe *m;
	struct bch_stripe *v = &s->stripe.key.v;
	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
	struct closure cl;
	int ret;

	BUG_ON(s->h->s == s);

	closure_init_stack(&cl);

	if (s->err) {
		if (s->err != -EROFS)
			bch_err(c, "error creating stripe: error writing data buckets");
		goto err;
	}

	BUG_ON(!s->allocated);

	if (!percpu_ref_tryget(&c->writes))
		goto err;

	BUG_ON(bitmap_weight(s->blocks_allocated,
			     s->blocks.nr) != s->blocks.nr);

	ec_generate_ec(&s->stripe);

	ec_generate_checksums(&s->stripe);

	/* write p/q: */
	for (i = nr_data; i < v->nr_blocks; i++)
		ec_block_io(c, &s->stripe, REQ_OP_WRITE, i, &cl);

	closure_sync(&cl);

	for (i = nr_data; i < v->nr_blocks; i++)
		if (!test_bit(i, s->stripe.valid)) {
			bch_err(c, "error creating stripe: error writing redundancy buckets");
			goto err_put_writes;
		}

	ret = s->existing_stripe
		? bch2_btree_insert(c, BTREE_ID_EC, &s->stripe.key.k_i,
				    &s->res, NULL, BTREE_INSERT_NOFAIL)
		: ec_stripe_bkey_insert(c, s, &s->stripe.key);
	if (ret) {
		bch_err(c, "error creating stripe: error creating stripe key");
		goto err_put_writes;
	}

	for_each_keylist_key(&s->keys, k) {
		ret = ec_stripe_update_ptrs(c, &s->stripe, &k->k);
		if (ret) {
			bch_err(c, "error creating stripe: error updating pointers");
			break;
		}
	}

	spin_lock(&c->ec_stripes_heap_lock);
	m = genradix_ptr(&c->stripes[0], s->stripe.key.k.p.offset);
#if 0
	pr_info("created a %s stripe %llu",
		s->existing_stripe ? "existing" : "new",
		s->stripe.key.k.p.offset);
#endif
	bch2_stripes_heap_insert(c, m, s->stripe.key.k.p.offset);
	spin_unlock(&c->ec_stripes_heap_lock);
err_put_writes:
	percpu_ref_put(&c->writes);
err:
	bch2_disk_reservation_put(c, &s->res);

	open_bucket_for_each(c, &s->blocks, ob, i) {
		ob->ec = NULL;
		__bch2_open_bucket_put(c, ob);
	}

	bch2_open_buckets_put(c, &s->parity);

	bch2_keylist_free(&s->keys, s->inline_keys);

	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
		kvpfree(s->stripe.data[i], s->stripe.size << 9);

	kfree(s);
}
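/*
 * Lifecycle summary: once every data bucket of the new stripe has been
 * written, ec_stripe_create() computes p/q, checksums the blocks, writes the
 * parity buckets, inserts (or, for a reused stripe, overwrites) the stripe
 * key, and finally rewrites the extents recorded in s->keys to point at it.
 */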
static void ec_stripe_create_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work,
		struct bch_fs, ec_stripe_create_work);
	struct ec_stripe_new *s, *n;
restart:
	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
		if (!atomic_read(&s->pin)) {
			list_del(&s->list);
			mutex_unlock(&c->ec_stripe_new_lock);
			ec_stripe_create(s);
			goto restart;
		}
	mutex_unlock(&c->ec_stripe_new_lock);
}
static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
{
	BUG_ON(atomic_read(&s->pin) <= 0);

	if (atomic_dec_and_test(&s->pin)) {
		BUG_ON(!s->pending);
		queue_work(system_long_wq, &c->ec_stripe_create_work);
	}
}
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s = h->s;

	BUG_ON(!s->allocated && !s->err);

	h->s		= NULL;
	s->pending	= true;

	mutex_lock(&c->ec_stripe_new_lock);
	list_add(&s->list, &c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);

	ec_stripe_new_put(c, s);
}
/* have a full bucket - hand it off to be erasure coded: */
void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	if (ob->sectors_free)
		s->err = -1;

	ec_stripe_new_put(c, s);
}

void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
	struct ec_stripe_new *s = ob->ec;

	s->err = -EIO;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct bch_dev *ca;
	unsigned offset;

	if (!ob)
		return NULL;

	ca	= bch_dev_bkey_exists(c, ob->ptr.dev);
	offset	= ca->mi.bucket_size - ob->sectors_free;

	return ob->ec->stripe.data[ob->ec_idx] + (offset << 9);
}
void bch2_ec_add_backpointer(struct bch_fs *c, struct write_point *wp,
			     struct bpos pos, unsigned sectors)
{
	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
	struct ec_stripe_new *ec;

	if (!ob)
		return;

	//pr_info("adding backpointer at %llu:%llu", pos.inode, pos.offset);

	ec = ob->ec;
	mutex_lock(&ec->lock);

	if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
				 ARRAY_SIZE(ec->inline_keys),
				 BKEY_U64s)) {
		BUG();
	}

	bkey_init(&ec->keys.top->k);
	ec->keys.top->k.p = pos;
	bch2_key_resize(&ec->keys.top->k, sectors);
	bch2_keylist_push(&ec->keys);

	mutex_unlock(&ec->lock);
}
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
/* pick most common bucket size: */
static unsigned pick_blocksize(struct bch_fs *c,
			       struct bch_devs_mask *devs)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
	struct {
		unsigned nr, size;
	} cur = { 0, 0 }, best = { 0, 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		sizes[nr++] = ca->mi.bucket_size;

	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

	for (i = 0; i < nr; i++) {
		if (sizes[i] != cur.size) {
			if (cur.nr > best.nr)
				best = cur;

			cur.nr = 0;
			cur.size = sizes[i];
		}

		cur.nr++;
	}

	if (cur.nr > best.nr)
		best = cur;

	return best.size;
}

static bool may_create_new_stripe(struct bch_fs *c)
{
	return false;
}
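/*
 * Worked example: with member bucket sizes { 512, 512, 1024 } the sorted
 * run-length scan above picks 512, the most common size; devices with a
 * different bucket size are later excluded so every stripe block is the same
 * length. may_create_new_stripe() returning false means existing
 * partially-empty stripes are currently always reused in preference.
 */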
static void ec_stripe_key_init(struct bch_fs *c,
			       struct bkey_i_stripe *s,
			       unsigned nr_data,
			       unsigned nr_parity,
			       unsigned stripe_size)
{
	unsigned u64s;

	bkey_stripe_init(&s->k_i);
	s->v.sectors			= cpu_to_le16(stripe_size);
	s->v.algorithm			= 0;
	s->v.nr_blocks			= nr_data + nr_parity;
	s->v.nr_redundant		= nr_parity;
	s->v.csum_granularity_bits	= ilog2(c->sb.encoded_extent_max);
	s->v.csum_type			= BCH_CSUM_CRC32C;
	s->v.pad			= 0;

	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
		BUG_ON(1 << s->v.csum_granularity_bits >=
		       le16_to_cpu(s->v.sectors) ||
		       s->v.csum_granularity_bits == U8_MAX);
		s->v.csum_granularity_bits++;
	}

	set_bkey_val_u64s(&s->k, u64s);
}
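/*
 * Sizing note: the loop above trades checksum granularity for key size -
 * each increment of csum_granularity_bits halves the number of stored
 * checksums - and repeats until the stripe value fits in BKEY_VAL_U64s_MAX
 * (e.g. if 8 sector granularity overflows the key, 16, then 32, are tried).
 */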
static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct ec_stripe_new *s;
	unsigned i;

	lockdep_assert_held(&h->lock);

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	mutex_init(&s->lock);
	atomic_set(&s->pin, 1);
	s->c		= c;
	s->h		= h;
	s->nr_data	= min_t(unsigned, h->nr_active_devs,
				EC_STRIPE_MAX) - h->redundancy;
	s->nr_parity	= h->redundancy;

	bch2_keylist_init(&s->keys, s->inline_keys);

	s->stripe.offset	= 0;
	s->stripe.size		= h->blocksize;
	memset(s->stripe.valid, 0xFF, sizeof(s->stripe.valid));

	ec_stripe_key_init(c, &s->stripe.key, s->nr_data,
			   s->nr_parity, h->blocksize);

	for (i = 0; i < s->stripe.key.v.nr_blocks; i++) {
		s->stripe.data[i] = kvpmalloc(s->stripe.size << 9, GFP_KERNEL);
		if (!s->stripe.data[i])
			goto err;
	}

	h->s = s;

	return 0;
err:
	for (i = 0; i < s->stripe.key.v.nr_blocks; i++)
		kvpfree(s->stripe.data[i], s->stripe.size << 9);
	kfree(s);
	return -ENOMEM;
}
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
	unsigned i;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	mutex_init(&h->lock);
	mutex_lock(&h->lock);

	h->target	= target;
	h->algo		= algo;
	h->redundancy	= redundancy;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (!ca->mi.durability)
			__clear_bit(i, h->devs.d);

	h->blocksize = pick_blocksize(c, &h->devs);

	for_each_member_device_rcu(ca, c, i, &h->devs)
		if (ca->mi.bucket_size == h->blocksize)
			h->nr_active_devs++;

	rcu_read_unlock();
	list_add(&h->list, &c->ec_stripe_head_list);
	return h;
}
void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
{
	if (h->s &&
	    h->s->allocated &&
	    bitmap_weight(h->s->blocks_allocated,
			  h->s->blocks.nr) == h->s->blocks.nr)
		ec_stripe_set_pending(c, h);

	mutex_unlock(&h->lock);
}
struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
						 unsigned target,
						 unsigned algo,
						 unsigned redundancy)
{
	struct ec_stripe_head *h;

	if (!redundancy)
		return NULL;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list)
		if (h->target		== target &&
		    h->algo		== algo &&
		    h->redundancy	== redundancy) {
			mutex_lock(&h->lock);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}
/*
 * XXX: use a higher watermark for allocating open buckets here:
 */
static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	unsigned i, nr_have, nr_data =
		min_t(unsigned, h->nr_active_devs,
		      EC_STRIPE_MAX) - h->redundancy;
	bool have_cache = true;
	int ret = 0;

	devs = h->devs;

	for_each_set_bit(i, h->s->blocks_allocated, EC_STRIPE_MAX) {
		__clear_bit(h->s->stripe.key.v.ptrs[i].dev, devs.d);
		--nr_data;
	}

	BUG_ON(h->s->blocks.nr > nr_data);
	BUG_ON(h->s->parity.nr > h->redundancy);

	open_bucket_for_each(c, &h->s->parity, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);
	open_bucket_for_each(c, &h->s->blocks, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

	if (h->s->parity.nr < h->redundancy) {
		nr_have = h->s->parity.nr;

		ret = bch2_bucket_alloc_set(c, &h->s->parity,
					    &h->parity_stripe, &devs,
					    h->redundancy, &nr_have,
					    &have_cache, RESERVE_NONE,
					    0, NULL);
		if (ret)
			goto err;
	}

	if (h->s->blocks.nr < nr_data) {
		nr_have = h->s->blocks.nr;

		ret = bch2_bucket_alloc_set(c, &h->s->blocks,
					    &h->block_stripe, &devs,
					    nr_data, &nr_have,
					    &have_cache, RESERVE_NONE,
					    0, NULL);
		if (ret)
			goto err;
	}
err:
	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);
	return ret;
}
/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
			       unsigned target,
			       unsigned algo,
			       unsigned redundancy)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t heap_idx;
	u64 stripe_idx;

	if (may_create_new_stripe(c))
		return -1;

	spin_lock(&c->ec_stripes_heap_lock);
	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
		if (!h->data[heap_idx].blocks_nonempty)
			continue;

		stripe_idx = h->data[heap_idx].idx;
		m = genradix_ptr(&c->stripes[0], stripe_idx);

		if (m->algorithm	== algo &&
		    m->nr_redundant	== redundancy &&
		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant) {
			bch2_stripes_heap_del(c, m, stripe_idx);
			spin_unlock(&c->ec_stripes_heap_lock);
			return stripe_idx;
		}
	}

	spin_unlock(&c->ec_stripes_heap_lock);
	return -1;
}
static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, idx), BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (!ret)
		bkey_reassemble(&stripe->key.k_i, k);
	bch2_trans_exit(&trans);

	return ret;
}
struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
					       unsigned target,
					       unsigned algo,
					       unsigned redundancy)
{
	struct closure cl;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i, data_idx = 0;
	s64 idx;
	int ret;

	closure_init_stack(&cl);

	h = __bch2_ec_stripe_head_get(c, target, algo, redundancy);
	if (!h)
		return NULL;

	if (!h->s && ec_new_stripe_alloc(c, h)) {
		bch2_ec_stripe_head_put(c, h);
		return NULL;
	}

	if (!h->s->allocated) {
		if (!h->s->existing_stripe &&
		    (idx = get_existing_stripe(c, target, algo, redundancy)) >= 0) {
			//pr_info("got existing stripe %llu", idx);

			h->s->existing_stripe = true;
			h->s->existing_stripe_idx = idx;
			if (get_stripe_key(c, idx, &h->s->stripe)) {
				/* btree error */
				BUG();
			}

			for (i = 0; i < h->s->stripe.key.v.nr_blocks; i++)
				if (stripe_blockcount_get(&h->s->stripe.key.v, i)) {
					__set_bit(i, h->s->blocks_allocated);
					ec_block_io(c, &h->s->stripe, READ, i, &cl);
				}
		}

		if (!h->s->existing_stripe &&
		    !h->s->res.sectors) {
			ret = bch2_disk_reservation_get(c, &h->s->res,
							h->blocksize,
							h->s->nr_parity, 0);
			if (ret) {
				/* What should we do here? */
				bch_err(c, "unable to create new stripe: %i", ret);
				bch2_ec_stripe_head_put(c, h);
				h = NULL;
				goto out;
			}
		}

		if (new_stripe_alloc_buckets(c, h)) {
			bch2_ec_stripe_head_put(c, h);
			h = NULL;
			goto out;
		}

		open_bucket_for_each(c, &h->s->blocks, ob, i) {
			data_idx = find_next_zero_bit(h->s->blocks_allocated,
						      h->s->nr_data, data_idx);
			BUG_ON(data_idx >= h->s->nr_data);

			h->s->stripe.key.v.ptrs[data_idx] = ob->ptr;
			h->s->data_block_idx[i] = data_idx;
			data_idx++;
		}

		open_bucket_for_each(c, &h->s->parity, ob, i)
			h->s->stripe.key.v.ptrs[h->s->nr_data + i] = ob->ptr;

		//pr_info("new stripe, blocks_allocated %lx", h->s->blocks_allocated[0]);
		h->s->allocated = true;
	}
out:
	closure_sync(&cl);
	return h;
}
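/*
 * Flow summary: a write point asking for a stripe first tries to adopt an
 * existing, partially-empty stripe - reusing its still-live blocks, which
 * are read back in so parity can later be regenerated - and only otherwise
 * reserves space and allocates fresh buckets; either way the stripe isn't
 * marked allocated until every block has a bucket backing it.
 */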
void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
{
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		mutex_lock(&h->lock);
		if (!h->s)
			goto unlock;

		open_bucket_for_each(c, &h->s->blocks, ob, i)
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		open_bucket_for_each(c, &h->s->parity, ob, i)
			if (ob->ptr.dev == ca->dev_idx)
				goto found;
		goto unlock;
found:
		h->s->err = -EROFS;
		ec_stripe_set_pending(c, h);
unlock:
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_stripe_head_lock);
}
static int __bch2_stripe_write_key(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct stripe *m,
				   size_t idx,
				   struct bkey_i_stripe *new_key)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	unsigned i;
	int ret;

	bch2_btree_iter_set_pos(iter, POS(0, idx));

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_stripe)
		return -EIO;

	bkey_reassemble(&new_key->k_i, k);

	spin_lock(&c->ec_stripes_heap_lock);

	for (i = 0; i < new_key->v.nr_blocks; i++)
		stripe_blockcount_set(&new_key->v, i,
				      m->block_sectors[i]);
	m->dirty = false;

	spin_unlock(&c->ec_stripes_heap_lock);

	bch2_trans_update(trans, iter, &new_key->k_i, 0);
	return 0;
}
int bch2_stripes_write(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct genradix_iter giter;
	struct bkey_i_stripe *new_key;
	struct stripe *m;
	int ret = 0;

	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
	BUG_ON(!new_key);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	genradix_for_each(&c->stripes[0], giter, m) {
		if (!m->dirty)
			continue;

		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_NOFAIL|flags,
			__bch2_stripe_write_key(&trans, iter, m,
					giter.pos, new_key));
		if (ret)
			break;
	}

	bch2_trans_exit(&trans);

	kfree(new_key);

	return ret;
}
static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id,
				unsigned level, struct bkey_s_c k)
{
	int ret = 0;

	if (k.k->type == KEY_TYPE_stripe) {
		struct stripe *m;

		ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
			bch2_mark_key(c, k, 0, 0, NULL, 0,
				      BTREE_TRIGGER_NOATOMIC);
		if (ret)
			return ret;

		spin_lock(&c->ec_stripes_heap_lock);
		m = genradix_ptr(&c->stripes[0], k.k->p.offset);
		bch2_stripes_heap_insert(c, m, k.k->p.offset);
		spin_unlock(&c->ec_stripes_heap_lock);
	}

	return ret;
}
int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
	int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_EC,
					      NULL, bch2_stripes_read_fn);
	if (ret)
		bch_err(c, "error reading stripes: %i", ret);

	return ret;
}
int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	size_t i, idx = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);

	k = bch2_btree_iter_prev(iter);
	if (!IS_ERR_OR_NULL(k.k))
		idx = k.k->p.offset + 1;
	ret = bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (!idx)
		return 0;

	if (!gc &&
	    !init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
		       GFP_KERNEL))
		return -ENOMEM;
#if 0
	ret = genradix_prealloc(&c->stripes[gc], idx, GFP_KERNEL);
#else
	for (i = 0; i < idx; i++)
		if (!genradix_ptr_alloc(&c->stripes[gc], i, GFP_KERNEL))
			return -ENOMEM;
#endif
	return 0;
}
void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m;
	size_t i;

	spin_lock(&c->ec_stripes_heap_lock);
	for (i = 0; i < min_t(size_t, h->used, 20); i++) {
		m = genradix_ptr(&c->stripes[0], h->data[i].idx);

		pr_buf(out, "%zu %u/%u+%u\n", h->data[i].idx,
		       h->data[i].blocks_nonempty,
		       m->nr_blocks - m->nr_redundant,
		       m->nr_redundant);
	}
	spin_unlock(&c->ec_stripes_heap_lock);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_stripe_head_lock);
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		pr_buf(out, "target %u algo %u redundancy %u:\n",
		       h->target, h->algo, h->redundancy);

		if (h->s)
			pr_buf(out, "\tpending: blocks %u allocated %u\n",
			       h->s->blocks.nr,
			       bitmap_weight(h->s->blocks_allocated,
					     h->s->blocks.nr));
	}
	mutex_unlock(&c->ec_stripe_head_lock);

	mutex_lock(&c->ec_stripe_new_lock);
	list_for_each_entry(s, &c->ec_stripe_new_list, list) {
		pr_buf(out, "\tin flight: blocks %u allocated %u pin %u\n",
		       s->blocks.nr,
		       bitmap_weight(s->blocks_allocated,
				     s->blocks.nr),
		       atomic_read(&s->pin));
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}
void bch2_fs_ec_exit(struct bch_fs *c)
{
	struct ec_stripe_head *h;

	while (1) {
		mutex_lock(&c->ec_stripe_head_lock);
		h = list_first_entry_or_null(&c->ec_stripe_head_list,
					     struct ec_stripe_head, list);
		if (h)
			list_del(&h->list);
		mutex_unlock(&c->ec_stripe_head_lock);
		if (!h)
			break;

		BUG_ON(h->s);
		kfree(h);
	}

	BUG_ON(!list_empty(&c->ec_stripe_new_list));

	free_heap(&c->ec_stripes_heap);
	genradix_free(&c->stripes[0]);
	bioset_exit(&c->ec_bioset);
}
int bch2_fs_ec_init(struct bch_fs *c)
{
	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);

	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}