// SPDX-License-Identifier: GPL-2.0

/* erasure coding */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include "util.h"

#include <linux/sort.h>

#ifdef __KERNEL__

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>

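/*
 * Reconstruct a single failed block as the xor of all the others; also used
 * by raid_gen() to generate the xor parity block:
 */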
static void raid5_recov(unsigned disks, unsigned failed_idx,
                        size_t size, void **data)
{
        unsigned i = 2, nr;

        BUG_ON(failed_idx >= disks);

        swap(data[0], data[failed_idx]);
        memcpy(data[0], data[1], size);

        while (i < disks) {
                nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
                xor_blocks(nr, size, data[0], data + i);
                i += nr;
        }

        swap(data[0], data[failed_idx]);
}

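/*
 * Generate the redundancy blocks: the xor parity for np == 1; for np == 2,
 * gen_syndrome() computes both p and q:
 */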
static void raid_gen(int nd, int np, size_t size, void **v)
{
        if (np >= 1)
                raid5_recov(nd + np, nd, size, v);
        if (np >= 2)
                raid6_call.gen_syndrome(nd + np, size, v);
        BUG_ON(np > 2);
}

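/*
 * Reconstruct up to two failed blocks; @ir holds the failed block indices,
 * in ascending order:
 */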
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
        switch (nr) {
        case 0:
                break;
        case 1:
                if (ir[0] < nd + 1)
                        raid5_recov(nd + 1, ir[0], size, v);
                else
                        raid6_call.gen_syndrome(nd + np, size, v);
                break;
        case 2:
                if (ir[1] < nd) {
                        /* data+data failure. */
                        raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
                } else if (ir[0] < nd) {
                        /* data + p/q failure */

                        if (ir[1] == nd) /* data + p failure */
                                raid6_datap_recov(nd + np, size, ir[0], v);
                        else { /* data + q failure */
                                raid5_recov(nd + 1, ir[0], size, v);
                                raid6_call.gen_syndrome(nd + np, size, v);
                        }
                } else {
                        raid_gen(nd, np, size, v);
                }
                break;
        default:
                BUG();
        }
}

#else

#include <raid/raid.h>

#endif

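/* State for one block's bio, for reading or writing a stripe: */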
struct ec_bio {
        struct bch_dev          *ca;
        struct ec_stripe_buf    *buf;
        size_t                  idx;
        struct bio              bio;
};

/* Stripes btree keys: */

int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
                        enum bkey_invalid_flags flags,
                        struct printbuf *err)
{
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
        int ret = 0;

        bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
                         bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
                         stripe_pos_bad,
                         "stripe at bad pos");

        bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
                         stripe_val_size_bad,
                         "incorrect value size (%zu < %u)",
                         bkey_val_u64s(k.k), stripe_val_u64s(s));

        ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
        return ret;
}

void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
                         struct bkey_s_c k)
{
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
        unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

        prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
               s->algorithm,
               le16_to_cpu(s->sectors),
               nr_data,
               s->nr_redundant,
               s->csum_type,
               1U << s->csum_granularity_bits);

        for (i = 0; i < s->nr_blocks; i++) {
                const struct bch_extent_ptr *ptr = s->ptrs + i;
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                u32 offset;
                u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

                prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
                if (i < nr_data)
                        prt_printf(out, "#%u", stripe_blockcount_get(s, i));
                prt_printf(out, " gen %u", ptr->gen);
                if (ptr_stale(ca, ptr))
                        prt_printf(out, " stale");
        }
}

/* Triggers: */

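/*
 * Update the alloc key for one of the stripe's buckets: set or clear its
 * back reference to the stripe, and account parity sectors:
 */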
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
                                         struct bkey_s_c_stripe s,
                                         unsigned idx, bool deleting)
{
        struct bch_fs *c = trans->c;
        const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
        struct btree_iter iter;
        struct bkey_i_alloc_v4 *a;
        enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
                ? BCH_DATA_parity : 0;
        s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
        int ret = 0;

        if (deleting)
                sectors = -sectors;

        a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
        if (IS_ERR(a))
                return PTR_ERR(a);

        ret = bch2_check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
                                    a->v.gen, a->v.data_type,
                                    a->v.dirty_sectors);
        if (ret)
                goto err;

        if (!deleting) {
                if (bch2_trans_inconsistent_on(a->v.stripe ||
                                               a->v.stripe_redundancy, trans,
                                "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
                                iter.pos.inode, iter.pos.offset, a->v.gen,
                                bch2_data_types[a->v.data_type],
                                a->v.dirty_sectors,
                                a->v.stripe, s.k->p.offset)) {
                        ret = -EIO;
                        goto err;
                }

                if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
                                "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
                                iter.pos.inode, iter.pos.offset, a->v.gen,
                                bch2_data_types[a->v.data_type],
                                a->v.dirty_sectors,
                                s.k->p.offset)) {
                        ret = -EIO;
                        goto err;
                }

                a->v.stripe             = s.k->p.offset;
                a->v.stripe_redundancy  = s.v->nr_redundant;
                a->v.data_type          = BCH_DATA_stripe;
        } else {
                if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
                                               a->v.stripe_redundancy != s.v->nr_redundant, trans,
                                "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
                                iter.pos.inode, iter.pos.offset, a->v.gen,
                                s.k->p.offset, a->v.stripe)) {
                        ret = -EIO;
                        goto err;
                }

                a->v.stripe             = 0;
                a->v.stripe_redundancy  = 0;
                a->v.data_type          = alloc_data_type(a->v, BCH_DATA_user);
        }

        a->v.dirty_sectors += sectors;
        if (data_type)
                a->v.data_type = !deleting ? data_type : 0;

        ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
        if (ret)
                goto err;
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

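/*
 * GC counterpart of the above: marks the bucket in the in-memory gc bucket
 * array instead of the alloc btree:
 */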
static int mark_stripe_bucket(struct btree_trans *trans,
                              struct bkey_s_c k,
                              unsigned ptr_idx,
                              unsigned flags)
{
        struct bch_fs *c = trans->c;
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
        unsigned nr_data = s->nr_blocks - s->nr_redundant;
        bool parity = ptr_idx >= nr_data;
        enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
        s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
        const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
        struct bucket old, new, *g;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        BUG_ON(!(flags & BTREE_TRIGGER_GC));

        /* XXX doesn't handle deletion */

        percpu_down_read(&c->mark_lock);
        g = PTR_GC_BUCKET(ca, ptr);

        if (g->dirty_sectors ||
            (g->stripe && g->stripe != k.k->p.offset)) {
                bch2_fs_inconsistent(c,
                              "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
                              ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
                              (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EINVAL;
                goto err;
        }

        bucket_lock(g);
        old = *g;

        ret = bch2_check_bucket_ref(trans, k, ptr, sectors, data_type,
                                    g->gen, g->data_type,
                                    g->dirty_sectors);
        if (ret)
                goto err_unlock;

        g->data_type = data_type;
        g->dirty_sectors += sectors;

        g->stripe               = k.k->p.offset;
        g->stripe_redundancy    = s->nr_redundant;
        new = *g;
err_unlock:
        bucket_unlock(g);
        if (!ret)
                bch2_dev_usage_update_m(c, ca, &old, &new);
err:
        percpu_up_read(&c->mark_lock);
        printbuf_exit(&buf);
        return ret;
}

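/*
 * Stripe trigger: handles the transactional path (alloc keys and replicas
 * accounting), the in-memory stripes heap, and the gc path:
 */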
int bch2_trigger_stripe(struct btree_trans *trans,
                        enum btree_id btree_id, unsigned level,
                        struct bkey_s_c old, struct bkey_s _new,
                        unsigned flags)
{
        struct bkey_s_c new = _new.s_c;
        struct bch_fs *c = trans->c;
        u64 idx = new.k->p.offset;
        const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
                ? bkey_s_c_to_stripe(old).v : NULL;
        const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
                ? bkey_s_c_to_stripe(new).v : NULL;

        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                /*
                 * If the pointers aren't changing, we don't need to do anything:
                 */
                if (new_s && old_s &&
                    new_s->nr_blocks    == old_s->nr_blocks &&
                    new_s->nr_redundant == old_s->nr_redundant &&
                    !memcmp(old_s->ptrs, new_s->ptrs,
                            new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
                        return 0;

                BUG_ON(new_s && old_s &&
                       (new_s->nr_blocks        != old_s->nr_blocks ||
                        new_s->nr_redundant     != old_s->nr_redundant));

                if (new_s) {
                        s64 sectors = le16_to_cpu(new_s->sectors);

                        struct bch_replicas_padded r;
                        bch2_bkey_to_replicas(&r.e, new);
                        int ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
                        if (ret)
                                return ret;
                }

                if (old_s) {
                        s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

                        struct bch_replicas_padded r;
                        bch2_bkey_to_replicas(&r.e, old);
                        int ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
                        if (ret)
                                return ret;
                }

                unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
                for (unsigned i = 0; i < nr_blocks; i++) {
                        if (new_s && old_s &&
                            !memcmp(&new_s->ptrs[i],
                                    &old_s->ptrs[i],
                                    sizeof(new_s->ptrs[i])))
                                continue;

                        if (new_s) {
                                int ret = bch2_trans_mark_stripe_bucket(trans,
                                                bkey_s_c_to_stripe(new), i, false);
                                if (ret)
                                        return ret;
                        }

                        if (old_s) {
                                int ret = bch2_trans_mark_stripe_bucket(trans,
                                                bkey_s_c_to_stripe(old), i, true);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (!(flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))) {
                struct stripe *m = genradix_ptr(&c->stripes, idx);

                if (!m) {
                        struct printbuf buf1 = PRINTBUF;
                        struct printbuf buf2 = PRINTBUF;

                        bch2_bkey_val_to_text(&buf1, c, old);
                        bch2_bkey_val_to_text(&buf2, c, new);
                        bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
                                            "old %s\n"
                                            "new %s", idx, buf1.buf, buf2.buf);
                        printbuf_exit(&buf2);
                        printbuf_exit(&buf1);
                        bch2_inconsistent_error(c);
                        return -1;
                }

                if (!new_s) {
                        bch2_stripes_heap_del(c, m, idx);

                        memset(m, 0, sizeof(*m));
                } else {
                        m->sectors      = le16_to_cpu(new_s->sectors);
                        m->algorithm    = new_s->algorithm;
                        m->nr_blocks    = new_s->nr_blocks;
                        m->nr_redundant = new_s->nr_redundant;
                        m->blocks_nonempty = 0;

                        for (unsigned i = 0; i < new_s->nr_blocks; i++)
                                m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

                        if (!old_s)
                                bch2_stripes_heap_insert(c, m, idx);
                        else
                                bch2_stripes_heap_update(c, m, idx);
                }
        }

        if (flags & BTREE_TRIGGER_GC) {
                struct gc_stripe *m =
                        genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

                if (!m) {
                        bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                                idx);
                        return -BCH_ERR_ENOMEM_mark_stripe;
                }
                /*
                 * This will be wrong when we bring back runtime gc: we should
                 * be unmarking the old key and then marking the new key
                 */
                m->alive        = true;
                m->sectors      = le16_to_cpu(new_s->sectors);
                m->nr_blocks    = new_s->nr_blocks;
                m->nr_redundant = new_s->nr_redundant;

                for (unsigned i = 0; i < new_s->nr_blocks; i++)
                        m->ptrs[i] = new_s->ptrs[i];

                bch2_bkey_to_replicas(&m->r.e, new);

                /*
                 * gc recalculates this field from stripe ptr
                 * references:
                 */
                memset(m->block_sectors, 0, sizeof(m->block_sectors));

                for (unsigned i = 0; i < new_s->nr_blocks; i++) {
                        int ret = mark_stripe_bucket(trans, new, i, flags);
                        if (ret)
                                return ret;
                }

                int ret = bch2_update_replicas(c, new, &m->r.e,
                                      ((s64) m->sectors * m->nr_redundant),
                                      0, true);
                if (ret) {
                        struct printbuf buf = PRINTBUF;

                        bch2_bkey_val_to_text(&buf, c, new);
                        bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
                        printbuf_exit(&buf);
                        return ret;
                }
        }

        return 0;
}

/*
 * Returns the extent pointer that matched one of the stripe's data blocks,
 * and sets *block to the block it matched:
 */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
                                                struct bkey_s_c k, unsigned *block)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

        bkey_for_each_ptr(ptrs, ptr)
                for (i = 0; i < nr_data; i++)
                        if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
                                                      le16_to_cpu(s->sectors))) {
                                *block = i;
                                return ptr;
                        }

        return NULL;
}

static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
        switch (k.k->type) {
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const union bch_extent_entry *entry;

                extent_for_each_entry(e, entry)
                        if (extent_entry_type(entry) ==
                            BCH_EXTENT_ENTRY_stripe_ptr &&
                            entry->stripe_ptr.idx == idx)
                                return true;

                break;
        }
        }

        return false;
}

/* Stripe bufs: */

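/* Free a stripe buf's per-block data buffers: */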
static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
        if (buf->key.k.type == KEY_TYPE_stripe) {
                struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
                unsigned i;

                for (i = 0; i < s->v.nr_blocks; i++) {
                        kvpfree(buf->data[i], buf->size << 9);
                        buf->data[i] = NULL;
                }
        }
}

/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
                              unsigned offset, unsigned size)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned csum_granularity = 1U << v->csum_granularity_bits;
        unsigned end = offset + size;
        unsigned i;

        BUG_ON(end > le16_to_cpu(v->sectors));

        offset  = round_down(offset, csum_granularity);
        end     = min_t(unsigned, le16_to_cpu(v->sectors),
                        round_up(end, csum_granularity));

        buf->offset     = offset;
        buf->size       = end - offset;

        memset(buf->valid, 0xFF, sizeof(buf->valid));

        for (i = 0; i < v->nr_blocks; i++) {
                buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
                if (!buf->data[i])
                        goto err;
        }

        return 0;
err:
        ec_stripe_buf_exit(buf);
        return -BCH_ERR_ENOMEM_stripe_buf;
}

/* Checksumming: */

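/*
 * Checksum one csum granule of one block; @offset is in sectors from the
 * start of the stripe:
 */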
static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
                                         unsigned block, unsigned offset)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned csum_granularity = 1 << v->csum_granularity_bits;
        unsigned end = buf->offset + buf->size;
        unsigned len = min(csum_granularity, end - offset);

        BUG_ON(offset >= end);
        BUG_ON(offset <  buf->offset);
        BUG_ON(offset & (csum_granularity - 1));
        BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
               (len & (csum_granularity - 1)));

        return bch2_checksum(NULL, v->csum_type,
                             null_nonce(),
                             buf->data[block] + ((offset - buf->offset) << 9),
                             len << 9);
}

static void ec_generate_checksums(struct ec_stripe_buf *buf)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned i, j, csums_per_device = stripe_csums_per_device(v);

        if (!v->csum_type)
                return;

        BUG_ON(buf->offset);
        BUG_ON(buf->size != le16_to_cpu(v->sectors));

        for (i = 0; i < v->nr_blocks; i++)
                for (j = 0; j < csums_per_device; j++)
                        stripe_csum_set(v, i, j,
                                ec_block_checksum(buf, i, j << v->csum_granularity_bits));
}

static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned csum_granularity = 1 << v->csum_granularity_bits;
        unsigned i;

        if (!v->csum_type)
                return;

        for (i = 0; i < v->nr_blocks; i++) {
                unsigned offset = buf->offset;
                unsigned end = buf->offset + buf->size;

                if (!test_bit(i, buf->valid))
                        continue;

                while (offset < end) {
                        unsigned j = offset >> v->csum_granularity_bits;
                        unsigned len = min(csum_granularity, end - offset);
                        struct bch_csum want = stripe_csum_get(v, i, j);
                        struct bch_csum got = ec_block_checksum(buf, i, offset);

                        if (bch2_crc_cmp(want, got)) {
                                struct printbuf err = PRINTBUF;
                                struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);

                                prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
                                           want.hi, want.lo,
                                           got.hi, got.lo,
                                           bch2_csum_types[v->csum_type]);
                                prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
                                bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
                                bch_err_ratelimited(ca, "%s", err.buf);
                                printbuf_exit(&err);

                                clear_bit(i, buf->valid);

                                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
                                break;
                        }

                        offset += len;
                }
        }
}

/* Erasure coding: */

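/* Generate the parity blocks from the data blocks: */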
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned nr_data = v->nr_blocks - v->nr_redundant;
        unsigned bytes = le16_to_cpu(v->sectors) << 9;

        raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

        return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned i, nr_failed = 0;
        int failed[BCH_BKEY_PTRS_MAX];
        unsigned nr_data = v->nr_blocks - v->nr_redundant;
        unsigned bytes = buf->size << 9;

        if (ec_nr_failed(buf) > v->nr_redundant) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: unable to read enough blocks");
                return -1;
        }

        for (i = 0; i < nr_data; i++)
                if (!test_bit(i, buf->valid))
                        failed[nr_failed++] = i;

        raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
        return 0;
}

/* IO: */

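/*
 * On io error, or if the pointer went stale while the io was in flight, clear
 * the block's bit in the valid bitmap so it gets reconstructed:
 */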
static void ec_block_endio(struct bio *bio)
{
        struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
        struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
        struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
        struct bch_dev *ca = ec_bio->ca;
        struct closure *cl = bio->bi_private;

        if (bch2_dev_io_err_on(bio->bi_status, ca,
                               bio_data_dir(bio)
                               ? BCH_MEMBER_ERROR_write
                               : BCH_MEMBER_ERROR_read,
                               "erasure coding %s error: %s",
                               bio_data_dir(bio) ? "write" : "read",
                               bch2_blk_status_to_str(bio->bi_status)))
                clear_bit(ec_bio->idx, ec_bio->buf->valid);

        if (ptr_stale(ca, ptr)) {
                bch_err_ratelimited(ca->fs,
                                    "error %s stripe: stale pointer after io",
                                    bio_data_dir(bio) == READ ? "reading from" : "writing to");
                clear_bit(ec_bio->idx, ec_bio->buf->valid);
        }

        bio_put(&ec_bio->bio);
        percpu_ref_put(&ca->io_ref);
        closure_put(cl);
}

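/*
 * Read or write a single stripe block, splitting the io into bios of at most
 * BIO_MAX_VECS pages each:
 */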
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                        blk_opf_t opf, unsigned idx, struct closure *cl)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
        unsigned offset = 0, bytes = buf->size << 9;
        struct bch_extent_ptr *ptr = &v->ptrs[idx];
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
        enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
                ? BCH_DATA_user
                : BCH_DATA_parity;
        int rw = op_is_write(opf);

        if (ptr_stale(ca, ptr)) {
                bch_err_ratelimited(c,
                                    "error %s stripe: stale pointer",
                                    rw == READ ? "reading from" : "writing to");
                clear_bit(idx, buf->valid);
                return;
        }

        if (!bch2_dev_get_ioref(ca, rw)) {
                clear_bit(idx, buf->valid);
                return;
        }

        this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);

        while (offset < bytes) {
                unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
                                           DIV_ROUND_UP(bytes, PAGE_SIZE));
                unsigned b = min_t(size_t, bytes - offset,
                                   nr_iovecs << PAGE_SHIFT);
                struct ec_bio *ec_bio;

                ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
                                                       nr_iovecs,
                                                       opf,
                                                       GFP_KERNEL,
                                                       &c->ec_bioset),
                                      struct ec_bio, bio);

                ec_bio->ca                      = ca;
                ec_bio->buf                     = buf;
                ec_bio->idx                     = idx;

                ec_bio->bio.bi_iter.bi_sector   = ptr->offset + buf->offset + (offset >> 9);
                ec_bio->bio.bi_end_io           = ec_block_endio;
                ec_bio->bio.bi_private          = cl;

                bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);

                closure_get(cl);
                percpu_ref_get(&ca->io_ref);

                submit_bio(&ec_bio->bio);

                offset += b;
        }

        percpu_ref_put(&ca->io_ref);
}

static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
                                struct ec_stripe_buf *stripe)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
                               POS(0, idx), BTREE_ITER_SLOTS);
        ret = bkey_err(k);
        if (ret)
                goto err;
        if (k.k->type != KEY_TYPE_stripe) {
                ret = -ENOENT;
                goto err;
        }
        bkey_reassemble(&stripe->key, k);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

/* recovery read path: */
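/*
 * Read the entire stripe, reconstruct the missing data, then copy the range
 * the caller asked for into @rbio:
 */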
int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
{
        struct bch_fs *c = trans->c;
        struct ec_stripe_buf *buf;
        struct closure cl;
        struct bch_stripe *v;
        unsigned i, offset;
        int ret = 0;

        closure_init_stack(&cl);

        BUG_ON(!rbio->pick.has_ec);

        buf = kzalloc(sizeof(*buf), GFP_NOFS);
        if (!buf)
                return -BCH_ERR_ENOMEM_ec_read_extent;

        ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
        if (ret) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: error %i looking up stripe", ret);
                kfree(buf);
                return -EIO;
        }

        v = &bkey_i_to_stripe(&buf->key)->v;

        if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: pointer doesn't match stripe");
                ret = -EIO;
                goto err;
        }

        offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
        if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: read is bigger than stripe");
                ret = -EIO;
                goto err;
        }

        ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
        if (ret)
                goto err;

        for (i = 0; i < v->nr_blocks; i++)
                ec_block_io(c, buf, REQ_OP_READ, i, &cl);

        closure_sync(&cl);

        if (ec_nr_failed(buf) > v->nr_redundant) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: unable to read enough blocks");
                ret = -EIO;
                goto err;
        }

        ec_validate_checksums(c, buf);

        ret = ec_do_recov(c, buf);
        if (ret)
                goto err;

        memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
                      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
err:
        ec_stripe_buf_exit(buf);
        kfree(buf);
        return ret;
}

/* stripe bucket accounting: */

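/*
 * Ensure the stripes heap and radix trees can index stripe @idx; the
 * replacement heap is allocated before taking the lock:
 */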
static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
        ec_stripes_heap n, *h = &c->ec_stripes_heap;

        if (idx >= h->size) {
                if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
                        return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

                mutex_lock(&c->ec_stripes_heap_lock);
                if (n.size > h->size) {
                        memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
                        n.used = h->used;
                        swap(*h, n);
                }
                mutex_unlock(&c->ec_stripes_heap_lock);

                free_heap(&n);
        }

        if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
                return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

        if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
            !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
                return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;

        return 0;
}

static int ec_stripe_mem_alloc(struct btree_trans *trans,
                               struct btree_iter *iter)
{
        return allocate_dropping_locks_errcode(trans,
                        __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
}

/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */

static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
        unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
        struct ec_stripe_new *s;

        hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
                if (s->idx == idx)
                        return true;
        return false;
}

static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
{
        bool ret = false;

        spin_lock(&c->ec_stripes_new_lock);
        ret = __bch2_stripe_is_open(c, idx);
        spin_unlock(&c->ec_stripes_new_lock);

        return ret;
}

static bool bch2_try_open_stripe(struct bch_fs *c,
                                 struct ec_stripe_new *s,
                                 u64 idx)
{
        bool ret;

        spin_lock(&c->ec_stripes_new_lock);
        ret = !__bch2_stripe_is_open(c, idx);
        if (ret) {
                unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));

                s->idx = idx;
                hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
        }
        spin_unlock(&c->ec_stripes_new_lock);

        return ret;
}

static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
{
        BUG_ON(!s->idx);

        spin_lock(&c->ec_stripes_new_lock);
        hlist_del_init(&s->hash);
        spin_unlock(&c->ec_stripes_new_lock);

        s->idx = 0;
}

/* Heap of all existing stripes, ordered by blocks_nonempty */

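/*
 * An empty stripe at the top of the heap can be deleted, unless it's
 * currently open:
 */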
static u64 stripe_idx_to_delete(struct bch_fs *c)
{
        ec_stripes_heap *h = &c->ec_stripes_heap;

        lockdep_assert_held(&c->ec_stripes_heap_lock);

        if (h->used &&
            h->data[0].blocks_nonempty == 0 &&
            !bch2_stripe_is_open(c, h->data[0].idx))
                return h->data[0].idx;

        return 0;
}

static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
                                      struct ec_stripe_heap_entry l,
                                      struct ec_stripe_heap_entry r)
{
        return ((l.blocks_nonempty > r.blocks_nonempty) -
                (l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
                                                   size_t i)
{
        struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

        genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
        ec_stripes_heap *h = &c->ec_stripes_heap;
        struct stripe *m = genradix_ptr(&c->stripes, idx);

        BUG_ON(m->heap_idx >= h->used);
        BUG_ON(h->data[m->heap_idx].idx != idx);
}

void bch2_stripes_heap_del(struct bch_fs *c,
                           struct stripe *m, size_t idx)
{
        mutex_lock(&c->ec_stripes_heap_lock);
        heap_verify_backpointer(c, idx);

        heap_del(&c->ec_stripes_heap, m->heap_idx,
                 ec_stripes_heap_cmp,
                 ec_stripes_heap_set_backpointer);
        mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_insert(struct bch_fs *c,
                              struct stripe *m, size_t idx)
{
        mutex_lock(&c->ec_stripes_heap_lock);
        BUG_ON(heap_full(&c->ec_stripes_heap));

        heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
                        .idx = idx,
                        .blocks_nonempty = m->blocks_nonempty,
                }),
                 ec_stripes_heap_cmp,
                 ec_stripes_heap_set_backpointer);

        heap_verify_backpointer(c, idx);
        mutex_unlock(&c->ec_stripes_heap_lock);
}

void bch2_stripes_heap_update(struct bch_fs *c,
                              struct stripe *m, size_t idx)
{
        ec_stripes_heap *h = &c->ec_stripes_heap;
        bool do_deletes;
        size_t i;

        mutex_lock(&c->ec_stripes_heap_lock);
        heap_verify_backpointer(c, idx);

        h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;

        i = m->heap_idx;
        heap_sift_up(h,   i, ec_stripes_heap_cmp,
                     ec_stripes_heap_set_backpointer);
        heap_sift_down(h, i, ec_stripes_heap_cmp,
                       ec_stripes_heap_set_backpointer);

        heap_verify_backpointer(c, idx);

        do_deletes = stripe_idx_to_delete(c) != 0;
        mutex_unlock(&c->ec_stripes_heap_lock);

        if (do_deletes)
                bch2_do_stripe_deletes(c);
}

/* stripe deletion */

static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_s_c_stripe s;
        int ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
                               BTREE_ITER_INTENT);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != KEY_TYPE_stripe) {
                bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
                ret = -EINVAL;
                goto err;
        }

        s = bkey_s_c_to_stripe(k);
        for (unsigned i = 0; i < s.v->nr_blocks; i++)
                if (stripe_blockcount_get(s.v, i)) {
                        struct printbuf buf = PRINTBUF;

                        bch2_bkey_val_to_text(&buf, c, k);
                        bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
                        printbuf_exit(&buf);
                        ret = -EINVAL;
                        goto err;
                }

        ret = bch2_btree_delete_at(trans, &iter, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static void ec_stripe_delete_work(struct work_struct *work)
{
        struct bch_fs *c =
                container_of(work, struct bch_fs, ec_stripe_delete_work);

        while (1) {
                mutex_lock(&c->ec_stripes_heap_lock);
                u64 idx = stripe_idx_to_delete(c);
                mutex_unlock(&c->ec_stripes_heap_lock);

                if (!idx)
                        break;

                int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                                        ec_stripe_delete(trans, idx));
                bch_err_fn(c, ret);
                if (ret)
                        break;
        }

        bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

void bch2_do_stripe_deletes(struct bch_fs *c)
{
        if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
            !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
                bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}

/* stripe creation: */

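/*
 * Create or update a stripe key; on update, block counts are carried over
 * from the existing key, whose pointers must not have moved:
 */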
static int ec_stripe_key_update(struct btree_trans *trans,
                                struct bkey_i_stripe *new,
                                bool create)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
                               new->k.p, BTREE_ITER_INTENT);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
                bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
                                     create ? "creating" : "updating",
                                     bch2_bkey_types[k.k->type]);
                ret = -EINVAL;
                goto err;
        }

        if (k.k->type == KEY_TYPE_stripe) {
                const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
                unsigned i;

                if (old->nr_blocks != new->v.nr_blocks) {
                        bch_err(c, "error updating stripe: nr_blocks does not match");
                        ret = -EINVAL;
                        goto err;
                }

                for (i = 0; i < new->v.nr_blocks; i++) {
                        unsigned v = stripe_blockcount_get(old, i);

                        BUG_ON(v &&
                               (old->ptrs[i].dev != new->v.ptrs[i].dev ||
                                old->ptrs[i].gen != new->v.ptrs[i].gen ||
                                old->ptrs[i].offset != new->v.ptrs[i].offset));

                        stripe_blockcount_set(&new->v, i, v);
                }
        }

        ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

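/*
 * Walk backpointers to find the next extent in one of the stripe's buckets,
 * and rewrite it to point into the stripe:
 */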
static int ec_stripe_update_extent(struct btree_trans *trans,
                                   struct bpos bucket, u8 gen,
                                   struct ec_stripe_buf *s,
                                   struct bpos *bp_pos)
{
        struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
        struct bch_fs *c = trans->c;
        struct bch_backpointer bp;
        struct btree_iter iter;
        struct bkey_s_c k;
        const struct bch_extent_ptr *ptr_c;
        struct bch_extent_ptr *ptr, *ec_ptr = NULL;
        struct bch_extent_stripe_ptr stripe_ptr;
        struct bkey_i *n;
        unsigned block;
        int ret, dev;

        ret = bch2_get_next_backpointer(trans, bucket, gen,
                                bp_pos, &bp, BTREE_ITER_CACHED);
        if (ret)
                return ret;
        if (bpos_eq(*bp_pos, SPOS_MAX))
                return 0;

        if (bp.level) {
                struct printbuf buf = PRINTBUF;
                struct btree_iter node_iter;
                struct btree *b;

                b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
                bch2_trans_iter_exit(trans, &node_iter);

                if (!b)
                        return 0;

                prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
                bch2_backpointer_to_text(&buf, &bp);

                bch2_fs_inconsistent(c, "%s", buf.buf);
                printbuf_exit(&buf);
                return -EIO;
        }

        k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
        ret = bkey_err(k);
        if (ret)
                return ret;
        if (!k.k) {
                /*
                 * extent no longer exists - we could flush the btree
                 * write buffer and retry to verify, but no need:
                 */
                return 0;
        }

        if (extent_has_stripe_ptr(k, s->key.k.p.offset))
                goto out;

        ptr_c = bkey_matches_stripe(v, k, &block);
        /*
         * It doesn't generally make sense to erasure code cached ptrs:
         * XXX: should we be incrementing a counter?
         */
        if (!ptr_c || ptr_c->cached)
                goto out;

        dev = v->ptrs[block].dev;

        n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
        ret = PTR_ERR_OR_ZERO(n);
        if (ret)
                goto out;

        bkey_reassemble(n, k);

        bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
        ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
        BUG_ON(!ec_ptr);

        stripe_ptr = (struct bch_extent_stripe_ptr) {
                .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
                .block          = block,
                .redundancy     = v->nr_redundant,
                .idx            = s->key.k.p.offset,
        };

        __extent_entry_insert(n,
                        (union bch_extent_entry *) ec_ptr,
                        (union bch_extent_entry *) &stripe_ptr);

        ret = bch2_trans_update(trans, &iter, n, 0);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
                                   unsigned block)
{
        struct bch_fs *c = trans->c;
        struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
        struct bch_extent_ptr bucket = v->ptrs[block];
        struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
        struct bpos bp_pos = POS_MIN;
        int ret = 0;

        while (1) {
                ret = commit_do(trans, NULL, NULL,
                                BCH_TRANS_COMMIT_no_check_rw|
                                BCH_TRANS_COMMIT_no_enospc,
                        ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
                                                s, &bp_pos));
                if (ret)
                        break;
                if (bkey_eq(bp_pos, POS_MAX))
                        break;

                bp_pos = bpos_nosnap_successor(bp_pos);
        }

        return ret;
}

static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
        unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
        int ret = 0;

        ret = bch2_btree_write_buffer_flush_sync(trans);
        if (ret)
                goto err;

        for (i = 0; i < nr_data; i++) {
                ret = ec_stripe_update_bucket(trans, s, i);
                if (ret)
                        break;
        }
err:
        bch2_trans_put(trans);

        return ret;
}

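/*
 * Parity is computed over whole buckets, so the unwritten tail of each data
 * bucket must be zeroed, both in memory and on disk:
 */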
static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
                                       struct ec_stripe_new *s,
                                       unsigned block,
                                       struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
        unsigned offset = ca->mi.bucket_size - ob->sectors_free;
        int ret;

        if (!bch2_dev_get_ioref(ca, WRITE)) {
                s->err = -BCH_ERR_erofs_no_writes;
                return;
        }

        memset(s->new_stripe.data[block] + (offset << 9),
               0,
               ob->sectors_free << 9);

        ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
                        ob->bucket * ca->mi.bucket_size + offset,
                        ob->sectors_free,
                        GFP_KERNEL, 0);

        percpu_ref_put(&ca->io_ref);

        if (ret)
                s->err = ret;
}

void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
{
        if (s->idx)
                bch2_stripe_close(c, s);
        kfree(s);
}

/*
 * data buckets of new stripe all written: create the stripe
 */
static void ec_stripe_create(struct ec_stripe_new *s)
{
        struct bch_fs *c = s->c;
        struct open_bucket *ob;
        struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
        unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
        int ret;

        BUG_ON(s->h->s == s);

        closure_sync(&s->iodone);

        if (!s->err) {
                for (i = 0; i < nr_data; i++)
                        if (s->blocks[i]) {
                                ob = c->open_buckets + s->blocks[i];

                                if (ob->sectors_free)
                                        zero_out_rest_of_ec_bucket(c, s, i, ob);
                        }
        }

        if (s->err) {
                if (!bch2_err_matches(s->err, EROFS))
                        bch_err(c, "error creating stripe: error writing data buckets");
                goto err;
        }

        if (s->have_existing_stripe) {
                ec_validate_checksums(c, &s->existing_stripe);

                if (ec_do_recov(c, &s->existing_stripe)) {
                        bch_err(c, "error creating stripe: error reading existing stripe");
                        goto err;
                }

                for (i = 0; i < nr_data; i++)
                        if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
                                swap(s->new_stripe.data[i],
                                     s->existing_stripe.data[i]);

                ec_stripe_buf_exit(&s->existing_stripe);
        }

        BUG_ON(!s->allocated);
        BUG_ON(!s->idx);

        ec_generate_ec(&s->new_stripe);

        ec_generate_checksums(&s->new_stripe);

        /* write p/q: */
        for (i = nr_data; i < v->nr_blocks; i++)
                ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
        closure_sync(&s->iodone);

        if (ec_nr_failed(&s->new_stripe)) {
                bch_err(c, "error creating stripe: error writing redundancy buckets");
                goto err;
        }

        ret = bch2_trans_do(c, &s->res, NULL,
                            BCH_TRANS_COMMIT_no_check_rw|
                            BCH_TRANS_COMMIT_no_enospc,
                            ec_stripe_key_update(trans,
                                        bkey_i_to_stripe(&s->new_stripe.key),
                                        !s->have_existing_stripe));
        bch_err_msg(c, ret, "creating stripe key");
        if (ret)
                goto err;

        ret = ec_stripe_update_extents(c, &s->new_stripe);
        bch_err_msg(c, ret, "error updating extents");
        if (ret)
                goto err;
err:
        bch2_disk_reservation_put(c, &s->res);

        for (i = 0; i < v->nr_blocks; i++)
                if (s->blocks[i]) {
                        ob = c->open_buckets + s->blocks[i];

                        if (i < nr_data) {
                                ob->ec = NULL;
                                __bch2_open_bucket_put(c, ob);
                        } else {
                                bch2_open_bucket_put(c, ob);
                        }
                }

        mutex_lock(&c->ec_stripe_new_lock);
        list_del(&s->list);
        mutex_unlock(&c->ec_stripe_new_lock);
        wake_up(&c->ec_stripe_new_wait);

        ec_stripe_buf_exit(&s->existing_stripe);
        ec_stripe_buf_exit(&s->new_stripe);
        closure_debug_destroy(&s->iodone);

        ec_stripe_new_put(c, s, STRIPE_REF_stripe);
}
1459 static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
1460 {
1461         struct ec_stripe_new *s;
1462
1463         mutex_lock(&c->ec_stripe_new_lock);
1464         list_for_each_entry(s, &c->ec_stripe_new_list, list)
1465                 if (!atomic_read(&s->ref[STRIPE_REF_io]))
1466                         goto out;
1467         s = NULL;
1468 out:
1469         mutex_unlock(&c->ec_stripe_new_lock);
1470
1471         return s;
1472 }
1473
1474 static void ec_stripe_create_work(struct work_struct *work)
1475 {
1476         struct bch_fs *c = container_of(work,
1477                 struct bch_fs, ec_stripe_create_work);
1478         struct ec_stripe_new *s;
1479
1480         while ((s = get_pending_stripe(c)))
1481                 ec_stripe_create(s);
1482
1483         bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1484 }
1485
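/*
 * Kick off stripe creation from the workqueue; the write ref keeps the
 * filesystem writeable until the work item finishes:
 */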
1486 void bch2_ec_do_stripe_creates(struct bch_fs *c)
1487 {
1488         bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
1489
1490         if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
1491                 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1492 }
1493
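/*
 * Detach the stripe from its head and move it to the pending-creation list,
 * dropping the io ref taken at allocation time:
 */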
1494 static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
1495 {
1496         struct ec_stripe_new *s = h->s;
1497
1498         BUG_ON(!s->allocated && !s->err);
1499
1500         h->s            = NULL;
1501         s->pending      = true;
1502
1503         mutex_lock(&c->ec_stripe_new_lock);
1504         list_add(&s->list, &c->ec_stripe_new_list);
1505         mutex_unlock(&c->ec_stripe_new_lock);
1506
1507         ec_stripe_new_put(c, s, STRIPE_REF_io);
1508 }
1509
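/*
 * A write to one of the stripe's buckets failed - flag the stripe so its
 * creation will be aborted:
 */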
1510 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
1511 {
1512         struct ec_stripe_new *s = ob->ec;
1513
1514         s->err = -EIO;
1515 }
1516
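/*
 * Return a pointer into the new stripe's data buffer at the write point's
 * current offset within the ec bucket, so data being written can also be
 * buffered for parity computation:
 */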
1517 void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
1518 {
1519         struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
1520         struct bch_dev *ca;
1521         unsigned offset;
1522
1523         if (!ob)
1524                 return NULL;
1525
1526         BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
1527
1528         ca      = bch_dev_bkey_exists(c, ob->dev);
1529         offset  = ca->mi.bucket_size - ob->sectors_free;
1530
1531         return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
1532 }
1533
1534 static int unsigned_cmp(const void *_l, const void *_r)
1535 {
1536         unsigned l = *((const unsigned *) _l);
1537         unsigned r = *((const unsigned *) _r);
1538
1539         return cmp_int(l, r);
1540 }
1541
1542 /* pick most common bucket size: */
1543 static unsigned pick_blocksize(struct bch_fs *c,
1544                                struct bch_devs_mask *devs)
1545 {
1546         unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
1547         struct {
1548                 unsigned nr, size;
1549         } cur = { 0, 0 }, best = { 0, 0 };
1550
1551         for_each_member_device_rcu(c, ca, devs)
1552                 sizes[nr++] = ca->mi.bucket_size;
1553
1554         sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
1555
1556         for (unsigned i = 0; i < nr; i++) {
1557                 if (sizes[i] != cur.size) {
1558                         if (cur.nr > best.nr)
1559                                 best = cur;
1560
1561                         cur.nr = 0;
1562                         cur.size = sizes[i];
1563                 }
1564
1565                 cur.nr++;
1566         }
1567
1568         if (cur.nr > best.nr)
1569                 best = cur;
1570
1571         return best.size;
1572 }
1573
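/*
 * Allocating brand new stripes (rather than reusing the empty blocks of
 * existing ones) is currently disabled - see get_existing_stripe():
 */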
1574 static bool may_create_new_stripe(struct bch_fs *c)
1575 {
1576         return false;
1577 }
1578
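/*
 * Initialize a stripe key, widening the checksum granularity as needed so the
 * value fits within the maximum bkey size:
 */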
1579 static void ec_stripe_key_init(struct bch_fs *c,
1580                                struct bkey_i *k,
1581                                unsigned nr_data,
1582                                unsigned nr_parity,
1583                                unsigned stripe_size)
1584 {
1585         struct bkey_i_stripe *s = bkey_stripe_init(k);
1586         unsigned u64s;
1587
1588         s->v.sectors                    = cpu_to_le16(stripe_size);
1589         s->v.algorithm                  = 0;
1590         s->v.nr_blocks                  = nr_data + nr_parity;
1591         s->v.nr_redundant               = nr_parity;
1592         s->v.csum_granularity_bits      = ilog2(c->opts.encoded_extent_max >> 9);
1593         s->v.csum_type                  = BCH_CSUM_crc32c;
1594         s->v.pad                        = 0;
1595
1596         while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
1597                 BUG_ON(1 << s->v.csum_granularity_bits >=
1598                        le16_to_cpu(s->v.sectors) ||
1599                        s->v.csum_granularity_bits == U8_MAX);
1600                 s->v.csum_granularity_bits++;
1601         }
1602
1603         set_bkey_val_u64s(&s->k, u64s);
1604 }
1605
1606 static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
1607 {
1608         struct ec_stripe_new *s;
1609
1610         lockdep_assert_held(&h->lock);
1611
1612         s = kzalloc(sizeof(*s), GFP_KERNEL);
1613         if (!s)
1614                 return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
1615
1616         mutex_init(&s->lock);
1617         closure_init(&s->iodone, NULL);
1618         atomic_set(&s->ref[STRIPE_REF_stripe], 1);
1619         atomic_set(&s->ref[STRIPE_REF_io], 1);
1620         s->c            = c;
1621         s->h            = h;
1622         s->nr_data      = min_t(unsigned, h->nr_active_devs,
1623                                 BCH_BKEY_PTRS_MAX) - h->redundancy;
1624         s->nr_parity    = h->redundancy;
1625
1626         ec_stripe_key_init(c, &s->new_stripe.key,
1627                            s->nr_data, s->nr_parity, h->blocksize);
1628
1629         h->s = s;
1630         return 0;
1631 }
1632
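/*
 * Allocate a stripe head: the candidate device set is restricted to rw
 * devices in the target with nonzero durability, and to the most common
 * bucket size among them:
 */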
1633 static struct ec_stripe_head *
1634 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
1635                          unsigned algo, unsigned redundancy,
1636                          enum bch_watermark watermark)
1637 {
1638         struct ec_stripe_head *h;
1639
1640         h = kzalloc(sizeof(*h), GFP_KERNEL);
1641         if (!h)
1642                 return NULL;
1643
1644         mutex_init(&h->lock);
1645         BUG_ON(!mutex_trylock(&h->lock));
1646
1647         h->target       = target;
1648         h->algo         = algo;
1649         h->redundancy   = redundancy;
1650         h->watermark    = watermark;
1651
1652         rcu_read_lock();
1653         h->devs = target_rw_devs(c, BCH_DATA_user, target);
1654
1655         for_each_member_device_rcu(c, ca, &h->devs)
1656                 if (!ca->mi.durability)
1657                         __clear_bit(ca->dev_idx, h->devs.d);
1658
1659         h->blocksize = pick_blocksize(c, &h->devs);
1660
1661         for_each_member_device_rcu(c, ca, &h->devs)
1662                 if (ca->mi.bucket_size == h->blocksize)
1663                         h->nr_active_devs++;
1664
1665         rcu_read_unlock();
1666
1667         /*
1668          * If we only have redundancy + 1 devices, we're better off with just
1669          * replication:
1670          */
1671         if (h->nr_active_devs < h->redundancy + 2)
1672                 bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
1673                         h->nr_active_devs, h->redundancy + 2);
1674
1675         list_add(&h->list, &c->ec_stripe_head_list);
1676         return h;
1677 }
1678
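/* If the head's stripe has all of its data blocks allocated, hand it off for creation: */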
1679 void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
1680 {
1681         if (h->s &&
1682             h->s->allocated &&
1683             bitmap_weight(h->s->blocks_allocated,
1684                           h->s->nr_data) == h->s->nr_data)
1685                 ec_stripe_set_pending(c, h);
1686
1687         mutex_unlock(&h->lock);
1688 }
1689
1690 static struct ec_stripe_head *
1691 __bch2_ec_stripe_head_get(struct btree_trans *trans,
1692                           unsigned target,
1693                           unsigned algo,
1694                           unsigned redundancy,
1695                           enum bch_watermark watermark)
1696 {
1697         struct bch_fs *c = trans->c;
1698         struct ec_stripe_head *h;
1699         int ret;
1700
1701         if (!redundancy)
1702                 return NULL;
1703
1704         ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
1705         if (ret)
1706                 return ERR_PTR(ret);
1707
1708         if (test_bit(BCH_FS_going_ro, &c->flags)) {
1709                 h = ERR_PTR(-BCH_ERR_erofs_no_writes);
1710                 goto found;
1711         }
1712
1713         list_for_each_entry(h, &c->ec_stripe_head_list, list)
1714                 if (h->target           == target &&
1715                     h->algo             == algo &&
1716                     h->redundancy       == redundancy &&
1717                     h->watermark        == watermark) {
1718                         ret = bch2_trans_mutex_lock(trans, &h->lock);
1719                         if (ret)
1720                                 h = ERR_PTR(ret);
1721                         goto found;
1722                 }
1723
1724         h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
1725 found:
1726         if (!IS_ERR_OR_NULL(h) &&
1727             h->nr_active_devs < h->redundancy + 2) {
1728                 mutex_unlock(&h->lock);
1729                 h = NULL;
1730         }
1731         mutex_unlock(&c->ec_stripe_head_lock);
1732         return h;
1733 }
1734
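/*
 * Allocate the remaining data and parity buckets for the stripe under
 * construction, excluding devices that already hold one of its blocks:
 */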
1735 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
1736                                     enum bch_watermark watermark, struct closure *cl)
1737 {
1738         struct bch_fs *c = trans->c;
1739         struct bch_devs_mask devs = h->devs;
1740         struct open_bucket *ob;
1741         struct open_buckets buckets;
1742         struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
1743         unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
1744         bool have_cache = true;
1745         int ret = 0;
1746
1747         BUG_ON(v->nr_blocks     != h->s->nr_data + h->s->nr_parity);
1748         BUG_ON(v->nr_redundant  != h->s->nr_parity);
1749
1750         for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
1751                 __clear_bit(v->ptrs[i].dev, devs.d);
1752                 if (i < h->s->nr_data)
1753                         nr_have_data++;
1754                 else
1755                         nr_have_parity++;
1756         }
1757
1758         BUG_ON(nr_have_data     > h->s->nr_data);
1759         BUG_ON(nr_have_parity   > h->s->nr_parity);
1760
1761         buckets.nr = 0;
1762         if (nr_have_parity < h->s->nr_parity) {
1763                 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1764                                             &h->parity_stripe,
1765                                             &devs,
1766                                             h->s->nr_parity,
1767                                             &nr_have_parity,
1768                                             &have_cache, 0,
1769                                             BCH_DATA_parity,
1770                                             watermark,
1771                                             cl);
1772
1773                 open_bucket_for_each(c, &buckets, ob, i) {
1774                         j = find_next_zero_bit(h->s->blocks_gotten,
1775                                                h->s->nr_data + h->s->nr_parity,
1776                                                h->s->nr_data);
1777                         BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
1778
1779                         h->s->blocks[j] = buckets.v[i];
1780                         v->ptrs[j] = bch2_ob_ptr(c, ob);
1781                         __set_bit(j, h->s->blocks_gotten);
1782                 }
1783
1784                 if (ret)
1785                         return ret;
1786         }
1787
1788         buckets.nr = 0;
1789         if (nr_have_data < h->s->nr_data) {
1790                 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1791                                             &h->block_stripe,
1792                                             &devs,
1793                                             h->s->nr_data,
1794                                             &nr_have_data,
1795                                             &have_cache, 0,
1796                                             BCH_DATA_user,
1797                                             watermark,
1798                                             cl);
1799
1800                 open_bucket_for_each(c, &buckets, ob, i) {
1801                         j = find_next_zero_bit(h->s->blocks_gotten,
1802                                                h->s->nr_data, 0);
1803                         BUG_ON(j >= h->s->nr_data);
1804
1805                         h->s->blocks[j] = buckets.v[i];
1806                         v->ptrs[j] = bch2_ob_ptr(c, ob);
1807                         __set_bit(j, h->s->blocks_gotten);
1808                 }
1809
1810                 if (ret)
1811                         return ret;
1812         }
1813
1814         return 0;
1815 }
1816
1817 /* XXX: doesn't obey target: */
1818 static s64 get_existing_stripe(struct bch_fs *c,
1819                                struct ec_stripe_head *head)
1820 {
1821         ec_stripes_heap *h = &c->ec_stripes_heap;
1822         struct stripe *m;
1823         size_t heap_idx;
1824         u64 stripe_idx;
1825         s64 ret = -1;
1826
1827         if (may_create_new_stripe(c))
1828                 return -1;
1829
1830         mutex_lock(&c->ec_stripes_heap_lock);
1831         for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
1832                 /* No blocks worth reusing, stripe will just be deleted: */
1833                 if (!h->data[heap_idx].blocks_nonempty)
1834                         continue;
1835
1836                 stripe_idx = h->data[heap_idx].idx;
1837
1838                 m = genradix_ptr(&c->stripes, stripe_idx);
1839
1840                 if (m->algorithm        == head->algo &&
1841                     m->nr_redundant     == head->redundancy &&
1842                     m->sectors          == head->blocksize &&
1843                     m->blocks_nonempty  < m->nr_blocks - m->nr_redundant &&
1844                     bch2_try_open_stripe(c, head->s, stripe_idx)) {
1845                         ret = stripe_idx;
1846                         break;
1847                 }
1848         }
1849         mutex_unlock(&c->ec_stripes_heap_lock);
1850         return ret;
1851 }
1852
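/*
 * Reuse an existing stripe's empty blocks: blocks that still contain data are
 * marked allocated, the existing stripe is read in so parity can be
 * regenerated, and the buckets we'd already allocated are released since they
 * may conflict with the reused stripe's blocks:
 */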
1853 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
1854 {
1855         struct bch_fs *c = trans->c;
1856         struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
1857         struct bch_stripe *existing_v;
1858         unsigned i;
1859         s64 idx;
1860         int ret;
1861
1862         /*
1863          * If we can't allocate a new stripe, and there are no stripes with
1864          * empty blocks for us to reuse, we have to wait on copygc:
1865          */
1866         idx = get_existing_stripe(c, h);
1867         if (idx < 0)
1868                 return -BCH_ERR_stripe_alloc_blocked;
1869
1870         ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
1871         if (ret) {
1872                 bch2_stripe_close(c, h->s);
1873                 if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
1874                         bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
1875                 return ret;
1876         }
1877
1878         existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
1879
1880         BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
1881         h->s->nr_data = existing_v->nr_blocks -
1882                 existing_v->nr_redundant;
1883
1884         ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
1885         if (ret) {
1886                 bch2_stripe_close(c, h->s);
1887                 return ret;
1888         }
1889
1890         BUG_ON(h->s->existing_stripe.size != h->blocksize);
1891         BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
1892
1893         /*
1894          * Free buckets we initially allocated - they might conflict with
1895          * blocks from the stripe we're reusing:
1896          */
1897         for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
1898                 bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
1899                 h->s->blocks[i] = 0;
1900         }
1901         memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
1902         memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
1903
1904         for (i = 0; i < existing_v->nr_blocks; i++) {
1905                 if (stripe_blockcount_get(existing_v, i)) {
1906                         __set_bit(i, h->s->blocks_gotten);
1907                         __set_bit(i, h->s->blocks_allocated);
1908                 }
1909
1910                 ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
1911         }
1912
1913         bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
1914         h->s->have_existing_stripe = true;
1915
1916         return 0;
1917 }
1918
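/*
 * Reserve disk space for the new stripe and find an empty slot in the stripes
 * btree, searching from the hint left by the previous allocation and wrapping
 * around to the start once:
 */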
1919 static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
1920 {
1921         struct bch_fs *c = trans->c;
1922         struct btree_iter iter;
1923         struct bkey_s_c k;
1924         struct bpos min_pos = POS(0, 1);
1925         struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
1926         int ret;
1927
1928         if (!h->s->res.sectors) {
1929                 ret = bch2_disk_reservation_get(c, &h->s->res,
1930                                         h->blocksize,
1931                                         h->s->nr_parity,
1932                                         BCH_DISK_RESERVATION_NOFAIL);
1933                 if (ret)
1934                         return ret;
1935         }
1936
1937         for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
1938                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
1939                 if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
1940                         if (start_pos.offset) {
1941                                 start_pos = min_pos;
1942                                 bch2_btree_iter_set_pos(&iter, start_pos);
1943                                 continue;
1944                         }
1945
1946                         ret = -BCH_ERR_ENOSPC_stripe_create;
1947                         break;
1948                 }
1949
1950                 if (bkey_deleted(k.k) &&
1951                     bch2_try_open_stripe(c, h->s, k.k->p.offset))
1952                         break;
1953         }
1954
1955         c->ec_stripe_hint = iter.pos.offset;
1956
1957         if (ret)
1958                 goto err;
1959
1960         ret = ec_stripe_mem_alloc(trans, &iter);
1961         if (ret) {
1962                 bch2_stripe_close(c, h->s);
1963                 goto err;
1964         }
1965
1966         h->s->new_stripe.key.k.p = iter.pos;
1967 out:
1968         bch2_trans_iter_exit(trans, &iter);
1969         return ret;
1970 err:
1971         bch2_disk_reservation_put(c, &h->s->res);
1972         goto out;
1973 }
1974
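/*
 * Return a stripe head with a fully allocated new stripe: first try to
 * allocate buckets for an entirely new stripe, and failing that, reuse the
 * empty blocks of an existing stripe:
 */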
1975 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
1976                                                unsigned target,
1977                                                unsigned algo,
1978                                                unsigned redundancy,
1979                                                enum bch_watermark watermark,
1980                                                struct closure *cl)
1981 {
1982         struct bch_fs *c = trans->c;
1983         struct ec_stripe_head *h;
1984         bool waiting = false;
1985         int ret;
1986
1987         h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
1988         if (IS_ERR_OR_NULL(h))
1989                 return h;
1990
1991         if (!h->s) {
1992                 ret = ec_new_stripe_alloc(c, h);
1993                 if (ret) {
1994                         bch_err(c, "failed to allocate new stripe");
1995                         goto err;
1996                 }
1997         }
1998
1999         if (h->s->allocated)
2000                 goto allocated;
2001
2002         if (h->s->have_existing_stripe)
2003                 goto alloc_existing;
2004
2005         /* First, try to allocate a full stripe: */
2006         ret =   new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
2007                 __bch2_ec_stripe_head_reserve(trans, h);
2008         if (!ret)
2009                 goto allocate_buf;
2010         if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
2011             bch2_err_matches(ret, ENOMEM))
2012                 goto err;
2013
2014         /*
2015          * Not enough buckets available for a full stripe: we must reuse an
2016          * existing stripe:
2017          */
2018         while (1) {
2019                 ret = __bch2_ec_stripe_head_reuse(trans, h);
2020                 if (!ret)
2021                         break;
2022                 if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
2023                         goto err;
2024
2025                 if (watermark == BCH_WATERMARK_copygc) {
2026                         ret =   new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
2027                                 __bch2_ec_stripe_head_reserve(trans, h);
2028                         if (ret)
2029                                 goto err;
2030                         goto allocate_buf;
2031                 }
2032
2033                 /* XXX freelist_wait? */
2034                 closure_wait(&c->freelist_wait, cl);
2035                 waiting = true;
2036         }
2037
2038         if (waiting)
2039                 closure_wake_up(&c->freelist_wait);
2040 alloc_existing:
2041         /*
2042          * Retry allocating buckets, with the watermark for this
2043          * particular write:
2044          */
2045         ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
2046         if (ret)
2047                 goto err;
2048
2049 allocate_buf:
2050         ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
2051         if (ret)
2052                 goto err;
2053
2054         h->s->allocated = true;
2055 allocated:
2056         BUG_ON(!h->s->idx);
2057         BUG_ON(!h->s->new_stripe.data[0]);
2058         BUG_ON(trans->restarted);
2059         return h;
2060 err:
2061         bch2_ec_stripe_head_put(c, h);
2062         return ERR_PTR(ret);
2063 }
2064
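/*
 * Cancel in-progress stripes that have open buckets on @ca, or all
 * in-progress stripes if @ca is NULL:
 */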
2065 static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
2066 {
2067         struct ec_stripe_head *h;
2068         struct open_bucket *ob;
2069         unsigned i;
2070
2071         mutex_lock(&c->ec_stripe_head_lock);
2072         list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2073                 mutex_lock(&h->lock);
2074                 if (!h->s)
2075                         goto unlock;
2076
2077                 if (!ca)
2078                         goto found;
2079
2080                 for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
2081                         if (!h->s->blocks[i])
2082                                 continue;
2083
2084                         ob = c->open_buckets + h->s->blocks[i];
2085                         if (ob->dev == ca->dev_idx)
2086                                 goto found;
2087                 }
2088                 goto unlock;
2089 found:
2090                 h->s->err = -BCH_ERR_erofs_no_writes;
2091                 ec_stripe_set_pending(c, h);
2092 unlock:
2093                 mutex_unlock(&h->lock);
2094         }
2095         mutex_unlock(&c->ec_stripe_head_lock);
2096 }
2097
2098 void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
2099 {
2100         __bch2_ec_stop(c, ca);
2101 }
2102
2103 void bch2_fs_ec_stop(struct bch_fs *c)
2104 {
2105         __bch2_ec_stop(c, NULL);
2106 }
2107
2108 static bool bch2_fs_ec_flush_done(struct bch_fs *c)
2109 {
2110         bool ret;
2111
2112         mutex_lock(&c->ec_stripe_new_lock);
2113         ret = list_empty(&c->ec_stripe_new_list);
2114         mutex_unlock(&c->ec_stripe_new_lock);
2115
2116         return ret;
2117 }
2118
2119 void bch2_fs_ec_flush(struct bch_fs *c)
2120 {
2121         wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
2122 }
2123
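/*
 * Walk the stripes btree at startup to rebuild the in-memory stripes radix
 * tree and heap:
 */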
2124 int bch2_stripes_read(struct bch_fs *c)
2125 {
2126         int ret = bch2_trans_run(c,
2127                 for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
2128                                    BTREE_ITER_PREFETCH, k, ({
2129                         if (k.k->type != KEY_TYPE_stripe)
2130                                 continue;
2131
2132                         ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
2133                         if (ret)
2134                                 break;
2135
2136                         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
2137
2138                         struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
2139                         m->sectors      = le16_to_cpu(s->sectors);
2140                         m->algorithm    = s->algorithm;
2141                         m->nr_blocks    = s->nr_blocks;
2142                         m->nr_redundant = s->nr_redundant;
2143                         m->blocks_nonempty = 0;
2144
2145                         for (unsigned i = 0; i < s->nr_blocks; i++)
2146                                 m->blocks_nonempty += !!stripe_blockcount_get(s, i);
2147
2148                         bch2_stripes_heap_insert(c, m, k.k->p.offset);
2149                         0;      /* value of the iterator body for this key - success */
2150                 })));
2151         bch_err_fn(c, ret);
2152         return ret;
2153 }
2154
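/* Debug: print the first 50 entries of the stripes heap: */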
2155 void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
2156 {
2157         ec_stripes_heap *h = &c->ec_stripes_heap;
2158         struct stripe *m;
2159         size_t i;
2160
2161         mutex_lock(&c->ec_stripes_heap_lock);
2162         for (i = 0; i < min_t(size_t, h->used, 50); i++) {
2163                 m = genradix_ptr(&c->stripes, h->data[i].idx);
2164
2165                 prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
2166                        h->data[i].blocks_nonempty,
2167                        m->nr_blocks - m->nr_redundant,
2168                        m->nr_redundant);
2169                 if (bch2_stripe_is_open(c, h->data[i].idx))
2170                         prt_str(out, " open");
2171                 prt_newline(out);
2172         }
2173         mutex_unlock(&c->ec_stripes_heap_lock);
2174 }
2175
2176 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
2177 {
2178         struct ec_stripe_head *h;
2179         struct ec_stripe_new *s;
2180
2181         mutex_lock(&c->ec_stripe_head_lock);
2182         list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2183                 prt_printf(out, "target %u algo %u redundancy %u %s:\n",
2184                        h->target, h->algo, h->redundancy,
2185                        bch2_watermarks[h->watermark]);
2186
2187                 if (h->s)
2188                         prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
2189                                h->s->idx, h->s->nr_data, h->s->nr_parity,
2190                                bitmap_weight(h->s->blocks_allocated,
2191                                              h->s->nr_data));
2192         }
2193         mutex_unlock(&c->ec_stripe_head_lock);
2194
2195         prt_printf(out, "in flight:\n");
2196
2197         mutex_lock(&c->ec_stripe_new_lock);
2198         list_for_each_entry(s, &c->ec_stripe_new_list, list) {
2199                 prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
2200                            s->idx, s->nr_data, s->nr_parity,
2201                            atomic_read(&s->ref[STRIPE_REF_io]),
2202                            atomic_read(&s->ref[STRIPE_REF_stripe]),
2203                            bch2_watermarks[s->h->watermark]);
2204         }
2205         mutex_unlock(&c->ec_stripe_new_lock);
2206 }
2207
2208 void bch2_fs_ec_exit(struct bch_fs *c)
2209 {
2210         struct ec_stripe_head *h;
2211         unsigned i;
2212
2213         while (1) {
2214                 mutex_lock(&c->ec_stripe_head_lock);
2215                 h = list_first_entry_or_null(&c->ec_stripe_head_list,
2216                                              struct ec_stripe_head, list);
2217                 if (h)
2218                         list_del(&h->list);
2219                 mutex_unlock(&c->ec_stripe_head_lock);
2220                 if (!h)
2221                         break;
2222
2223                 if (h->s) {
2224                         for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
2225                                 BUG_ON(h->s->blocks[i]);
2226
2227                         kfree(h->s);
2228                 }
2229                 kfree(h);
2230         }
2231
2232         BUG_ON(!list_empty(&c->ec_stripe_new_list));
2233
2234         free_heap(&c->ec_stripes_heap);
2235         genradix_free(&c->stripes);
2236         bioset_exit(&c->ec_bioset);
2237 }
2238
2239 void bch2_fs_ec_init_early(struct bch_fs *c)
2240 {
2241         spin_lock_init(&c->ec_stripes_new_lock);
2242         mutex_init(&c->ec_stripes_heap_lock);
2243
2244         INIT_LIST_HEAD(&c->ec_stripe_head_list);
2245         mutex_init(&c->ec_stripe_head_lock);
2246
2247         INIT_LIST_HEAD(&c->ec_stripe_new_list);
2248         mutex_init(&c->ec_stripe_new_lock);
2249         init_waitqueue_head(&c->ec_stripe_new_wait);
2250
2251         INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
2252         INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
2253 }
2254
2255 int bch2_fs_ec_init(struct bch_fs *c)
2256 {
2257         return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
2258                            BIOSET_NEED_BVECS);
2259 }