libbcachefs/io.c (bcachefs-tools-debian, commit e045eb20d686a1e3d0953169562457919c027a98)
1 /*
2  * Some low level IO code, and hacks for various block layer limitations
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "compress.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "error.h"
18 #include "extents.h"
19 #include "io.h"
20 #include "journal.h"
21 #include "keylist.h"
22 #include "move.h"
23 #include "super.h"
24 #include "super-io.h"
25
26 #include <linux/blkdev.h>
27 #include <linux/random.h>
28
29 #include <trace/events/bcachefs.h>
30
31 /* Allocate, free from mempool: */
32
33 void bch2_latency_acct(struct bch_dev *ca, unsigned submit_time_us, int rw)
34 {
35         u64 now = local_clock();
36         unsigned io_latency = (now >> 10) - submit_time_us;
37         atomic_t *latency = &ca->latency[rw];
38         unsigned old, new, v = atomic_read(latency);
39
40         do {
41                 old = v;
42
43                 /*
44                  * If the io latency was reasonably close to the current
45                  * latency, skip doing the update and atomic operation - most of
46                  * the time:
47                  */
48                 if (abs((int) (old - io_latency)) < (old >> 1) &&
49                     now & ~(~0 << 5))
50                         break;
51
52                 new = ewma_add((u64) old, io_latency, 6);
53         } while ((v = atomic_cmpxchg(latency, old, new)) != old);
54 }
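/*
 * Rough sketch of the update above, assuming ewma_add(old, val, w) is the
 * usual (old * ((1 << w) - 1) + val) >> w: both submit_time_us and
 * (now >> 10) are approximately microseconds (local_clock() is in ns), and
 * with w = 6 each sample moves the estimate 1/64th of the way towards the
 * observed latency, e.g. old = 1000us, io_latency = 1640us gives
 * new = (1000 * 63 + 1640) / 64 = 1010us.  The early break skips the
 * cmpxchg when a sample is within ~50% of the current estimate, except
 * about once every 32 calls (low 5 bits of the clock all zero), so the
 * estimate keeps converging even when samples stay "close".
 */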
55
56 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
57 {
58         struct bio_vec *bv;
59         unsigned i;
60
61         bio_for_each_segment_all(bv, bio, i)
62                 if (bv->bv_page != ZERO_PAGE(0))
63                         mempool_free(bv->bv_page, &c->bio_bounce_pages);
64         bio->bi_vcnt = 0;
65 }
66
67 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
68                                     bool *using_mempool)
69 {
70         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
71
72         if (likely(!*using_mempool)) {
73                 bv->bv_page = alloc_page(GFP_NOIO);
74                 if (unlikely(!bv->bv_page)) {
75                         mutex_lock(&c->bio_bounce_pages_lock);
76                         *using_mempool = true;
77                         goto pool_alloc;
78
79                 }
80         } else {
81 pool_alloc:
82                 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
83         }
84
85         bv->bv_len = PAGE_SIZE;
86         bv->bv_offset = 0;
87 }
88
89 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
90                                size_t bytes)
91 {
92         bool using_mempool = false;
93
94         BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
95
96         bio->bi_iter.bi_size = bytes;
97
98         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
99                 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
100
101         if (using_mempool)
102                 mutex_unlock(&c->bio_bounce_pages_lock);
103 }
104
105 void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
106                                     size_t bytes)
107 {
108         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
109                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
110
111                 BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
112
113                 bv->bv_page = alloc_page(GFP_NOIO);
114                 if (!bv->bv_page) {
115                         /*
116                          * We already allocated from the mempool; we can't allocate from it
117                          * again without freeing the pages we already allocated, or else we
118                          * could deadlock:
119                          */
120                         bch2_bio_free_pages_pool(c, bio);
121                         bch2_bio_alloc_pages_pool(c, bio, bytes);
122                         return;
123                 }
124
125                 bv->bv_len = PAGE_SIZE;
126                 bv->bv_offset = 0;
127                 bio->bi_vcnt++;
128         }
129
130         bio->bi_iter.bi_size = bytes;
131 }
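/*
 * Taken together, the helpers above avoid the deadlock the comment in the
 * loop describes: a bio that may already own bounce-pool pages never goes
 * back to the pool for more without releasing what it holds first, and the
 * pool fallback itself runs under bio_bounce_pages_lock, so at most one bio
 * at a time is drawing on the pool (which presumably is sized to satisfy a
 * single bounce bio on its own).
 */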
132
133 /* Writes */
134
135 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
136                                enum bch_data_type type,
137                                const struct bkey_i *k)
138 {
139         struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
140         const struct bch_extent_ptr *ptr;
141         struct bch_write_bio *n;
142         struct bch_dev *ca;
143
144         BUG_ON(c->opts.nochanges);
145
146         extent_for_each_ptr(e, ptr) {
147                 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
148                        !c->devs[ptr->dev]);
149
150                 ca = bch_dev_bkey_exists(c, ptr->dev);
151
152                 if (ptr + 1 < &extent_entry_last(e)->ptr) {
153                         n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
154                                                    &ca->replica_set));
155
156                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
157                         n->bio.bi_private       = wbio->bio.bi_private;
158                         n->parent               = wbio;
159                         n->split                = true;
160                         n->bounce               = false;
161                         n->put_bio              = true;
162                         n->bio.bi_opf           = wbio->bio.bi_opf;
163                         bio_inc_remaining(&wbio->bio);
164                 } else {
165                         n = wbio;
166                         n->split                = false;
167                 }
168
169                 n->c                    = c;
170                 n->ca                   = ca;
171                 n->submit_time_us       = local_clock_us();
172                 n->bio.bi_iter.bi_sector = ptr->offset;
173
174                 if (!journal_flushes_device(ca))
175                         n->bio.bi_opf |= REQ_FUA;
176
177                 if (likely(percpu_ref_tryget(&ca->io_ref))) {
178                         this_cpu_add(ca->io_done->sectors[WRITE][type],
179                                      bio_sectors(&n->bio));
180
181                         n->have_io_ref          = true;
182                         n->bio.bi_bdev          = ca->disk_sb.bdev;
183                         submit_bio(&n->bio);
184                 } else {
185                         n->have_io_ref          = false;
186                         n->bio.bi_status        = BLK_STS_REMOVED;
187                         bio_endio(&n->bio);
188                 }
189         }
190 }
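/*
 * Fan-out sketch: for an extent with N pointers, the first N - 1 replicas
 * are clones of the original bio (n->split = true, with bio_inc_remaining()
 * bumping the parent's remaining count), and the last pointer reuses the
 * original wbio itself.  bch2_write_endio() calls bio_endio() on the parent
 * for each completed split, so the caller's completion runs only once every
 * replica has finished (or failed and been recorded in op->failed).
 */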
191
192 static void __bch2_write(struct closure *);
193
194 static void bch2_write_done(struct closure *cl)
195 {
196         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
197
198         BUG_ON(!(op->flags & BCH_WRITE_DONE));
199
200         if (!op->error && (op->flags & BCH_WRITE_FLUSH))
201                 op->error = bch2_journal_error(&op->c->journal);
202
203         if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
204                 bch2_disk_reservation_put(op->c, &op->res);
205         percpu_ref_put(&op->c->writes);
206         bch2_keylist_free(&op->insert_keys, op->inline_keys);
207         op->flags &= ~(BCH_WRITE_DONE|BCH_WRITE_LOOPED);
208
209         closure_return(cl);
210 }
211
212 static u64 keylist_sectors(struct keylist *keys)
213 {
214         struct bkey_i *k;
215         u64 ret = 0;
216
217         for_each_keylist_key(keys, k)
218                 ret += k->k.size;
219
220         return ret;
221 }
222
223 int bch2_write_index_default(struct bch_write_op *op)
224 {
225         struct keylist *keys = &op->insert_keys;
226         struct btree_iter iter;
227         int ret;
228
229         bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
230                              bkey_start_pos(&bch2_keylist_front(keys)->k),
231                              BTREE_ITER_INTENT);
232
233         ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
234                                        NULL, op_journal_seq(op),
235                                        BTREE_INSERT_NOFAIL);
236         bch2_btree_iter_unlock(&iter);
237
238         return ret;
239 }
240
241 /**
242  * bch2_write_index - after a write, update index to point to new data
243  */
244 static void bch2_write_index(struct closure *cl)
245 {
246         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
247         struct bch_fs *c = op->c;
248         struct keylist *keys = &op->insert_keys;
249         struct bkey_s_extent e;
250         struct bch_extent_ptr *ptr;
251         struct bkey_i *src, *dst = keys->keys, *n;
252         int ret;
253
254         op->flags |= BCH_WRITE_LOOPED;
255
256         for (src = keys->keys; src != keys->top; src = n) {
257                 n = bkey_next(src);
258                 bkey_copy(dst, src);
259
260                 e = bkey_i_to_s_extent(dst);
261                 extent_for_each_ptr_backwards(e, ptr)
262                         if (test_bit(ptr->dev, op->failed.d))
263                                 bch2_extent_drop_ptr(e, ptr);
264
265                 if (!bch2_extent_nr_ptrs(e.c)) {
266                         ret = -EIO;
267                         goto err;
268                 }
269
270                 if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
271                         ret = bch2_check_mark_super(c, e.c, BCH_DATA_USER);
272                         if (ret)
273                                 goto err;
274                 }
275
276                 dst = bkey_next(dst);
277         }
278
279         keys->top = dst;
280
281         if (!bch2_keylist_empty(keys)) {
282                 u64 sectors_start = keylist_sectors(keys);
283                 int ret = op->index_update_fn(op);
284
285                 BUG_ON(keylist_sectors(keys) && !ret);
286
287                 op->written += sectors_start - keylist_sectors(keys);
288
289                 if (ret) {
290                         __bcache_io_error(c, "btree IO error %i", ret);
291                         op->error = ret;
292                 }
293         }
294 out:
295         bch2_open_bucket_put_refs(c, &op->open_buckets_nr, op->open_buckets);
296
297         if (!(op->flags & BCH_WRITE_DONE))
298                 continue_at(cl, __bch2_write, op->io_wq);
299
300         if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
301                 bch2_journal_flush_seq_async(&c->journal,
302                                              *op_journal_seq(op),
303                                              cl);
304                 continue_at(cl, bch2_write_done, index_update_wq(op));
305         } else {
306                 continue_at_nobarrier(cl, bch2_write_done, NULL);
307         }
308         return;
309 err:
310         keys->top = keys->keys;
311         op->error = ret;
312         op->flags |= BCH_WRITE_DONE;
313         goto out;
314 }
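/*
 * The src/dst walk above compacts the keylist in place: pointers to devices
 * that failed the write are dropped from each extent, keys shrink and slide
 * down accordingly, and keys->top is pulled back to the new end.  An extent
 * left with no pointers at all fails the whole index update with -EIO.
 */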
315
316 static void bch2_write_endio(struct bio *bio)
317 {
318         struct closure *cl              = bio->bi_private;
319         struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
320         struct bch_write_bio *wbio      = to_wbio(bio);
321         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
322         struct bch_fs *c                = wbio->c;
323         struct bch_dev *ca              = wbio->ca;
324
325         bch2_latency_acct(ca, wbio->submit_time_us, WRITE);
326
327         if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
328                 set_bit(ca->dev_idx, op->failed.d);
329
330         if (wbio->have_io_ref)
331                 percpu_ref_put(&ca->io_ref);
332
333         if (wbio->bounce)
334                 bch2_bio_free_pages_pool(c, bio);
335
336         if (wbio->put_bio)
337                 bio_put(bio);
338
339         if (parent)
340                 bio_endio(&parent->bio);
341         else
342                 closure_put(cl);
343 }
344
345 static void init_append_extent(struct bch_write_op *op,
346                                struct write_point *wp,
347                                struct bversion version,
348                                struct bch_extent_crc_unpacked crc)
349 {
350         struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
351
352         op->pos.offset += crc.uncompressed_size;
353         e->k.p = op->pos;
354         e->k.size = crc.uncompressed_size;
355         e->k.version = version;
356         bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
357
358         bch2_extent_crc_append(e, crc);
359         bch2_alloc_sectors_append_ptrs(op->c, wp, e, crc.compressed_size);
360
361         bch2_keylist_push(&op->insert_keys);
362 }
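/*
 * Note that op->pos is advanced before being assigned to e->k.p: bcachefs
 * extent keys are keyed by their *end* position, with k.size extending
 * backwards from it, so each appended key picks up where the previous one
 * left off.
 */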
363
364 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
365                                         struct write_point *wp,
366                                         struct bio *src,
367                                         bool *page_alloc_failed)
368 {
369         struct bch_write_bio *wbio;
370         struct bio *bio;
371         unsigned output_available =
372                 min(wp->sectors_free << 9, src->bi_iter.bi_size);
373         unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);
374
375         bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
376         wbio                    = wbio_init(bio);
377         wbio->bounce            = true;
378         wbio->put_bio           = true;
379         /* copy WRITE_SYNC flag */
380         wbio->bio.bi_opf        = src->bi_opf;
381
382         /*
383          * We can't use mempool for more than c->sb.encoded_extent_max
384          * worth of pages, but we'd like to allocate more if we can:
385          */
386         while (bio->bi_iter.bi_size < output_available) {
387                 unsigned len = min_t(unsigned, PAGE_SIZE,
388                                      output_available - bio->bi_iter.bi_size);
389                 struct page *p;
390
391                 p = alloc_page(GFP_NOIO);
392                 if (!p) {
393                         unsigned pool_max =
394                                 min_t(unsigned, output_available,
395                                       c->sb.encoded_extent_max << 9);
396
397                         if (bio_sectors(bio) < pool_max)
398                                 bch2_bio_alloc_pages_pool(c, bio, pool_max);
399                         break;
400                 }
401
402                 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
403                         .bv_page        = p,
404                         .bv_len         = len,
405                         .bv_offset      = 0,
406                 };
407                 bio->bi_iter.bi_size += len;
408         }
409
410         *page_alloc_failed = bio->bi_vcnt < pages;
411         return bio;
412 }
413
414 static int bch2_write_rechecksum(struct bch_fs *c,
415                                  struct bch_write_op *op,
416                                  unsigned new_csum_type)
417 {
418         struct bio *bio = &op->wbio.bio;
419         struct bch_extent_crc_unpacked new_crc;
420         int ret;
421
422         /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
423
424         if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
425             bch2_csum_type_is_encryption(new_csum_type))
426                 new_csum_type = op->crc.csum_type;
427
428         ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
429                                   NULL, &new_crc,
430                                   op->crc.offset, op->crc.live_size,
431                                   new_csum_type);
432         if (ret)
433                 return ret;
434
435         bio_advance(bio, op->crc.offset << 9);
436         bio->bi_iter.bi_size = op->crc.live_size << 9;
437         op->crc = new_crc;
438         return 0;
439 }
440
441 static int bch2_write_decrypt(struct bch_write_op *op)
442 {
443         struct bch_fs *c = op->c;
444         struct nonce nonce = extent_nonce(op->version, op->crc);
445         struct bch_csum csum;
446
447         if (!bch2_csum_type_is_encryption(op->crc.csum_type))
448                 return 0;
449
450         /*
451          * If we need to decrypt data in the write path, we'll no longer be able
452          * to verify the existing checksum (poly1305 mac, in this case) after
453          * it's decrypted - this is the last point we'll be able to reverify the
454          * checksum:
455          */
456         csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
457         if (bch2_crc_cmp(op->crc.csum, csum))
458                 return -EIO;
459
460         bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
461         op->crc.csum_type = 0;
462         op->crc.csum = (struct bch_csum) { 0, 0 };
463         return 0;
464 }
465
466 static enum prep_encoded_ret {
467         PREP_ENCODED_OK,
468         PREP_ENCODED_ERR,
469         PREP_ENCODED_CHECKSUM_ERR,
470         PREP_ENCODED_DO_WRITE,
471 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
472 {
473         struct bch_fs *c = op->c;
474         struct bio *bio = &op->wbio.bio;
475
476         if (!(op->flags & BCH_WRITE_DATA_ENCODED))
477                 return PREP_ENCODED_OK;
478
479         BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
480
481         /* Can we just write the entire extent as is? */
482         if (op->crc.uncompressed_size == op->crc.live_size &&
483             op->crc.compressed_size <= wp->sectors_free &&
484             op->crc.compression_type == op->compression_type) {
485                 if (!op->crc.compression_type &&
486                     op->csum_type != op->crc.csum_type &&
487                     bch2_write_rechecksum(c, op, op->csum_type))
488                         return PREP_ENCODED_CHECKSUM_ERR;
489
490                 return PREP_ENCODED_DO_WRITE;
491         }
492
493         /*
494          * If the data is compressed and we couldn't write the entire extent as
495          * is, we have to decompress it:
496          */
497         if (op->crc.compression_type) {
498                 struct bch_csum csum;
499
500                 if (bch2_write_decrypt(op))
501                         return PREP_ENCODED_CHECKSUM_ERR;
502
503                 /* Last point we can still verify checksum: */
504                 csum = bch2_checksum_bio(c, op->crc.csum_type,
505                                          extent_nonce(op->version, op->crc),
506                                          bio);
507                 if (bch2_crc_cmp(op->crc.csum, csum))
508                         return PREP_ENCODED_CHECKSUM_ERR;
509
510                 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
511                         return PREP_ENCODED_ERR;
512         }
513
514         /*
515          * No longer have compressed data after this point - data might be
516          * encrypted:
517          */
518
519         /*
520          * If the data is checksummed and we're only writing a subset,
521          * rechecksum and adjust bio to point to currently live data:
522          */
523         if ((op->crc.live_size != op->crc.uncompressed_size ||
524              op->crc.csum_type != op->csum_type) &&
525             bch2_write_rechecksum(c, op, op->csum_type))
526                 return PREP_ENCODED_CHECKSUM_ERR;
527
528         /*
529          * If we want to compress the data, it has to be decrypted:
530          */
531         if ((op->compression_type ||
532              bch2_csum_type_is_encryption(op->crc.csum_type) !=
533              bch2_csum_type_is_encryption(op->csum_type)) &&
534             bch2_write_decrypt(op))
535                 return PREP_ENCODED_CHECKSUM_ERR;
536
537         return PREP_ENCODED_OK;
538 }
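/*
 * Summary of the cases above, for data that came in already encoded
 * (BCH_WRITE_DATA_ENCODED): if the extent is fully live, fits in the write
 * point and already has the desired compression type, it is written out as
 * is (PREP_ENCODED_DO_WRITE), possibly after re-checksumming; if it is
 * compressed but can't be reused, it has to be decrypted, verified and
 * decompressed in place first; after that, checksum/live-size mismatches
 * are fixed up with bch2_write_rechecksum(), and the data is decrypted if
 * the write path is going to compress it or the old and new checksum types
 * don't agree on encryption.
 */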
539
540 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
541 {
542         struct bch_fs *c = op->c;
543         struct bio *src = &op->wbio.bio, *dst = src;
544         struct bvec_iter saved_iter;
545         struct bkey_i *key_to_write;
546         unsigned key_to_write_offset = op->insert_keys.top_p -
547                 op->insert_keys.keys_p;
548         unsigned total_output = 0;
549         bool bounce = false, page_alloc_failed = false;
550         int ret, more = 0;
551
552         BUG_ON(!bio_sectors(src));
553
554         switch (bch2_write_prep_encoded_data(op, wp)) {
555         case PREP_ENCODED_OK:
556                 break;
557         case PREP_ENCODED_ERR:
558                 ret = -EIO;
559                 goto err;
560         case PREP_ENCODED_CHECKSUM_ERR:
561                 goto csum_err;
562         case PREP_ENCODED_DO_WRITE:
563                 init_append_extent(op, wp, op->version, op->crc);
564                 goto do_write;
565         }
566
567         if (op->compression_type ||
568             (op->csum_type &&
569              !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
570             (bch2_csum_type_is_encryption(op->csum_type) &&
571              !(op->flags & BCH_WRITE_PAGES_OWNED))) {
572                 dst = bch2_write_bio_alloc(c, wp, src, &page_alloc_failed);
573                 bounce = true;
574         }
575
576         saved_iter = dst->bi_iter;
577
578         do {
579                 struct bch_extent_crc_unpacked crc =
580                         (struct bch_extent_crc_unpacked) { 0 };
581                 struct bversion version = op->version;
582                 size_t dst_len, src_len;
583
584                 if (page_alloc_failed &&
585                     bio_sectors(dst) < wp->sectors_free &&
586                     bio_sectors(dst) < c->sb.encoded_extent_max)
587                         break;
588
589                 BUG_ON(op->compression_type &&
590                        (op->flags & BCH_WRITE_DATA_ENCODED) &&
591                        bch2_csum_type_is_encryption(op->crc.csum_type));
592                 BUG_ON(op->compression_type && !bounce);
593
594                 crc.compression_type = op->compression_type
595                         ?  bch2_bio_compress(c, dst, &dst_len, src, &src_len,
596                                              op->compression_type)
597                         : 0;
598                 if (!crc.compression_type) {
599                         dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
600                         dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
601
602                         if (op->csum_type)
603                                 dst_len = min_t(unsigned, dst_len,
604                                                 c->sb.encoded_extent_max << 9);
605
606                         if (bounce) {
607                                 swap(dst->bi_iter.bi_size, dst_len);
608                                 bio_copy_data(dst, src);
609                                 swap(dst->bi_iter.bi_size, dst_len);
610                         }
611
612                         src_len = dst_len;
613                 }
614
615                 BUG_ON(!src_len || !dst_len);
616
617                 if (bch2_csum_type_is_encryption(op->csum_type)) {
618                         if (bversion_zero(version)) {
619                                 version.lo = atomic64_inc_return(&c->key_version) + 1;
620                         } else {
621                                 crc.nonce = op->nonce;
622                                 op->nonce += src_len >> 9;
623                         }
624                 }
625
626                 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
627                     !crc.compression_type &&
628                     bch2_csum_type_is_encryption(op->crc.csum_type) ==
629                     bch2_csum_type_is_encryption(op->csum_type)) {
630                         /*
631                          * Note: when we're using rechecksum(), we need to be
632                          * checksumming @src because it has all the data our
633                          * existing checksum covers - if we bounced (because we
634                          * were trying to compress), @dst will only have the
635                          * part of the data the new checksum will cover.
636                          *
637                          * But normally we want to be checksumming post bounce,
638                          * because part of the reason for bouncing is so the
639                          * data can't be modified (by userspace) while it's in
640                          * flight.
641                          */
642                         if (bch2_rechecksum_bio(c, src, version, op->crc,
643                                         &crc, &op->crc,
644                                         src_len >> 9,
645                                         bio_sectors(src) - (src_len >> 9),
646                                         op->csum_type))
647                                 goto csum_err;
648                 } else {
649                         if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
650                             bch2_rechecksum_bio(c, src, version, op->crc,
651                                         NULL, &op->crc,
652                                         src_len >> 9,
653                                         bio_sectors(src) - (src_len >> 9),
654                                         op->crc.csum_type))
655                                 goto csum_err;
656
657                         crc.compressed_size     = dst_len >> 9;
658                         crc.uncompressed_size   = src_len >> 9;
659                         crc.live_size           = src_len >> 9;
660
661                         swap(dst->bi_iter.bi_size, dst_len);
662                         bch2_encrypt_bio(c, op->csum_type,
663                                          extent_nonce(version, crc), dst);
664                         crc.csum = bch2_checksum_bio(c, op->csum_type,
665                                          extent_nonce(version, crc), dst);
666                         crc.csum_type = op->csum_type;
667                         swap(dst->bi_iter.bi_size, dst_len);
668                 }
669
670                 init_append_extent(op, wp, version, crc);
671
672                 if (dst != src)
673                         bio_advance(dst, dst_len);
674                 bio_advance(src, src_len);
675                 total_output += dst_len;
676         } while (dst->bi_iter.bi_size &&
677                  src->bi_iter.bi_size &&
678                  wp->sectors_free &&
679                  !bch2_keylist_realloc(&op->insert_keys,
680                                       op->inline_keys,
681                                       ARRAY_SIZE(op->inline_keys),
682                                       BKEY_EXTENT_U64s_MAX));
683
684         more = src->bi_iter.bi_size != 0;
685
686         dst->bi_iter = saved_iter;
687
688         if (!bounce && more) {
689                 dst = bio_split(src, total_output >> 9,
690                                 GFP_NOIO, &c->bio_write);
691                 wbio_init(dst)->put_bio = true;
692         }
693
694         dst->bi_iter.bi_size = total_output;
695
696         /* Free unneeded pages after compressing: */
697         if (bounce)
698                 while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
699                         mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
700                                      &c->bio_bounce_pages);
701 do_write:
702         /* might have done a realloc... */
703
704         key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
705
706         dst->bi_end_io  = bch2_write_endio;
707         dst->bi_private = &op->cl;
708         bio_set_op_attrs(dst, REQ_OP_WRITE, 0);
709
710         closure_get(dst->bi_private);
711
712         bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
713                                   key_to_write);
714         return more;
715 csum_err:
716         bch_err(c, "error verifying existing checksum while "
717                 "rewriting existing data (memory corruption?)");
718         ret = -EIO;
719 err:
720         if (bounce) {
721                 bch2_bio_free_pages_pool(c, dst);
722                 bio_put(dst);
723         }
724
725         return ret;
726 }
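/*
 * The loop above carves the source bio into extents: each chunk is bounded
 * by the space left at the write point, by c->sb.encoded_extent_max when a
 * checksum is being computed, and by room for another key in
 * op->insert_keys; it is then optionally compressed into the bounce bio,
 * encrypted, checksummed and appended as a key via init_append_extent().
 * A nonzero return ("more") means the source wasn't fully consumed and
 * __bch2_write() will come back with a new write point.
 */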
727
728 static void __bch2_write(struct closure *cl)
729 {
730         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
731         struct bch_fs *c = op->c;
732         struct write_point *wp;
733         int ret;
734
735         do {
736                 if (op->open_buckets_nr + op->nr_replicas >
737                     ARRAY_SIZE(op->open_buckets))
738                         continue_at(cl, bch2_write_index, index_update_wq(op));
739
740                 /* for the device pointers and 1 for the checksum */
741                 if (bch2_keylist_realloc(&op->insert_keys,
742                                         op->inline_keys,
743                                         ARRAY_SIZE(op->inline_keys),
744                                         BKEY_EXTENT_U64s_MAX))
745                         continue_at(cl, bch2_write_index, index_update_wq(op));
746
747                 wp = bch2_alloc_sectors_start(c,
748                         op->devs,
749                         op->write_point,
750                         &op->devs_have,
751                         op->nr_replicas,
752                         op->nr_replicas_required,
753                         op->alloc_reserve,
754                         op->flags,
755                         (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
756                 EBUG_ON(!wp);
757
758                 if (unlikely(IS_ERR(wp))) {
759                         if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
760                                 ret = PTR_ERR(wp);
761                                 goto err;
762                         }
763
764                         /*
765                          * If we already have some keys, must insert them first
766                          * before allocating another open bucket. We only hit
767                          * this case if open_buckets_nr > 1.
768                          */
769                         if (!bch2_keylist_empty(&op->insert_keys))
770                                 continue_at(cl, bch2_write_index,
771                                             index_update_wq(op));
772
773                         /*
774                          * If we've looped, we're running from a workqueue -
775                          * not the bch2_write() caller's context - and we don't
776                          * want to block the workqueue:
777                          */
778                         if (op->flags & BCH_WRITE_LOOPED)
779                                 continue_at(cl, __bch2_write, op->io_wq);
780
781                         /*
782                          * Otherwise, we do want to block the caller on alloc
783                          * failure instead of letting it queue up more and more
784                          * writes:
785                          * XXX: this technically needs a try_to_freeze() -
786                          * except that that's not safe because caller may have
787                          * issued other IO... hmm..
788                          */
789                         closure_sync(cl);
790                         continue;
791                 }
792
793                 ret = bch2_write_extent(op, wp);
794
795                 BUG_ON(op->open_buckets_nr + wp->nr_ptrs_can_use >
796                        ARRAY_SIZE(op->open_buckets));
797                 bch2_open_bucket_get(c, wp,
798                                      &op->open_buckets_nr,
799                                      op->open_buckets);
800                 bch2_alloc_sectors_done(c, wp);
801
802                 if (ret < 0)
803                         goto err;
804         } while (ret);
805
806         op->flags |= BCH_WRITE_DONE;
807         continue_at(cl, bch2_write_index, index_update_wq(op));
808 err:
809         /*
810          * Right now we can only error here if we went RO - the
811          * allocation failed, but we already checked for -ENOSPC when we
812          * got our reservation.
813          *
814          * XXX capacity might have changed, but we don't check for that
815          * yet:
816          */
817         op->error = ret;
818         op->flags |= BCH_WRITE_DONE;
819
820         /*
821          * No reason not to insert keys for whatever data was successfully
822          * written (especially for a cmpxchg operation that's moving data
823          * around)
824          */
825         continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
826                     ? bch2_write_index
827                     : bch2_write_done, index_update_wq(op));
828 }
829
830 /**
831  * bch2_write - handle a write to a cache device or flash only volume
832  *
833  * This is the starting point for any data to end up in a cache device; it could
834  * be from a normal write, or a writeback write, or a write to a flash only
835  * volume - it's also used by the moving garbage collector to compact data in
836  * mostly empty buckets.
837  *
838  * It first writes the data to the cache, creating a list of keys to be inserted
839  * (if the data won't fit in a single open bucket, there will be multiple keys);
840  * after the data is written the keys are handed to the journal, and after they've been
841  * added to the next journal write they're inserted into the btree.
842  *
843  * If op->discard is true, instead of inserting the data it invalidates the
844  * region of the cache represented by op->bio and op->inode.
845  */
846 void bch2_write(struct closure *cl)
847 {
848         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
849         struct bch_fs *c = op->c;
850
851         BUG_ON(!op->nr_replicas);
852         BUG_ON(!op->write_point.v);
853         BUG_ON(!bkey_cmp(op->pos, POS_MAX));
854         BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);
855
856         memset(&op->failed, 0, sizeof(op->failed));
857
858         bch2_keylist_init(&op->insert_keys, op->inline_keys);
859         wbio_init(&op->wbio.bio)->put_bio = false;
860
861         if (c->opts.nochanges ||
862             !percpu_ref_tryget(&c->writes)) {
863                 __bcache_io_error(c, "read only");
864                 op->error = -EROFS;
865                 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
866                         bch2_disk_reservation_put(c, &op->res);
867                 closure_return(cl);
868         }
869
870         bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);
871
872         continue_at_nobarrier(cl, __bch2_write, NULL);
873 }
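/*
 * A minimal caller sketch, modeled on promote_start() further down (not a
 * complete example - op->pos and the data in op->wbio.bio also have to be
 * set up, there done via bch2_migrate_write_init()):
 *
 *	bch2_write_op_init(&op->write.op, c);
 *	op->write.op.devs        = c->fastest_devs;
 *	op->write.op.write_point = writepoint_hashed((unsigned long) current);
 *	...
 *	closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
 *
 * bch2_write() takes over the closure; the chain of continue_at() calls
 * above eventually lands in bch2_write_done(), which drops the disk
 * reservation and the c->writes ref.
 */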
874
875 /* Cache promotion on read */
876
877 struct promote_op {
878         struct closure          cl;
879         struct migrate_write    write;
880         struct bio_vec          bi_inline_vecs[0]; /* must be last */
881 };
882
883 static void promote_done(struct closure *cl)
884 {
885         struct promote_op *op =
886                 container_of(cl, struct promote_op, cl);
887         struct bch_fs *c = op->write.op.c;
888
889         percpu_ref_put(&c->writes);
890         bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
891         kfree(op);
892 }
893
894 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
895 {
896         struct bch_fs *c = rbio->c;
897         struct closure *cl = &op->cl;
898         struct bio *bio = &op->write.op.wbio.bio;
899
900         BUG_ON(!rbio->split || !rbio->bounce);
901
902         if (!percpu_ref_tryget(&c->writes))
903                 return;
904
905         trace_promote(&rbio->bio);
906
907         /* we now own pages: */
908         BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
909         swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
910         rbio->promote = NULL;
911
912         bch2_write_op_init(&op->write.op, c);
913         op->write.op.csum_type = bch2_data_checksum_type(c, rbio->opts.data_checksum);
914         op->write.op.compression_type =
915                 bch2_compression_opt_to_type(rbio->opts.compression);
916
917         op->write.move_dev      = -1;
918         op->write.op.devs       = c->fastest_devs;
919         op->write.op.write_point = writepoint_hashed((unsigned long) current);
920         op->write.op.flags      |= BCH_WRITE_ALLOC_NOWAIT;
921         op->write.op.flags      |= BCH_WRITE_CACHED;
922
923         bch2_migrate_write_init(&op->write, rbio);
924
925         closure_init(cl, NULL);
926         closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
927         closure_return_with_destructor(cl, promote_done);
928 }
929
930 /*
931  * XXX: multiple promotes can race with each other, wastefully. Keep a list of
932  * outstanding promotes?
933  */
934 static struct promote_op *promote_alloc(struct bch_read_bio *rbio)
935 {
936         struct promote_op *op;
937         struct bio *bio;
938         /* data might have to be decompressed in the write path: */
939         unsigned pages = DIV_ROUND_UP(rbio->pick.crc.uncompressed_size,
940                                       PAGE_SECTORS);
941
942         BUG_ON(!rbio->bounce);
943         BUG_ON(pages < rbio->bio.bi_vcnt);
944
945         op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages,
946                      GFP_NOIO);
947         if (!op)
948                 return NULL;
949
950         bio = &op->write.op.wbio.bio;
951         bio_init(bio, bio->bi_inline_vecs, pages);
952
953         memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
954                sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
955
956         return op;
957 }
958
959 /* only promote if we're not reading from the fastest tier: */
960 static bool should_promote(struct bch_fs *c,
961                            struct extent_pick_ptr *pick, unsigned flags)
962 {
963         if (!(flags & BCH_READ_MAY_PROMOTE))
964                 return false;
965
966         if (percpu_ref_is_dying(&c->writes))
967                 return false;
968
969         return c->fastest_tier &&
970                 c->fastest_tier < c->tiers + pick->ca->mi.tier;
971 }
972
973 /* Read */
974
975 static void bch2_read_nodecode_retry(struct bch_fs *, struct bch_read_bio *,
976                                      struct bvec_iter, u64,
977                                      struct bch_devs_mask *, unsigned);
978
979 #define READ_RETRY_AVOID        1
980 #define READ_RETRY              2
981 #define READ_ERR                3
982
983 enum rbio_context {
984         RBIO_CONTEXT_NULL,
985         RBIO_CONTEXT_HIGHPRI,
986         RBIO_CONTEXT_UNBOUND,
987 };
988
989 static inline struct bch_read_bio *
990 bch2_rbio_parent(struct bch_read_bio *rbio)
991 {
992         return rbio->split ? rbio->parent : rbio;
993 }
994
995 __always_inline
996 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
997                            enum rbio_context context,
998                            struct workqueue_struct *wq)
999 {
1000         if (context <= rbio->context) {
1001                 fn(&rbio->work);
1002         } else {
1003                 rbio->work.func         = fn;
1004                 rbio->context           = context;
1005                 queue_work(wq, &rbio->work);
1006         }
1007 }
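/*
 * rbio->context records the context this rbio is already running in
 * (NULL < HIGHPRI < UNBOUND); bch2_rbio_punt() runs @fn inline when the
 * current context is already sufficient, and otherwise bounces it to @wq,
 * recording the new context so a later punt to the same or a lower context
 * doesn't requeue again.
 */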
1008
1009 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1010 {
1011         struct bch_read_bio *parent = rbio->parent;
1012
1013         BUG_ON(!rbio->split);
1014
1015         if (rbio->promote)
1016                 kfree(rbio->promote);
1017         if (rbio->bounce)
1018                 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1019         bio_put(&rbio->bio);
1020
1021         return parent;
1022 }
1023
1024 static void bch2_rbio_done(struct bch_read_bio *rbio)
1025 {
1026         if (rbio->promote)
1027                 kfree(rbio->promote);
1028         rbio->promote = NULL;
1029
1030         if (rbio->split)
1031                 rbio = bch2_rbio_free(rbio);
1032         bio_endio(&rbio->bio);
1033 }
1034
1035 static void bch2_rbio_retry(struct work_struct *work)
1036 {
1037         struct bch_read_bio *rbio =
1038                 container_of(work, struct bch_read_bio, work);
1039         struct bch_fs *c                = rbio->c;
1040         struct bvec_iter iter           = rbio->bvec_iter;
1041         unsigned flags                  = rbio->flags;
1042         u64 inode                       = rbio->pos.inode;
1043         struct bch_devs_mask avoid;
1044
1045         trace_read_retry(&rbio->bio);
1046
1047         memset(&avoid, 0, sizeof(avoid));
1048
1049         if (rbio->retry == READ_RETRY_AVOID)
1050                 __set_bit(rbio->pick.ca->dev_idx, avoid.d);
1051
1052         if (rbio->promote)
1053                 kfree(rbio->promote);
1054         rbio->promote = NULL;
1055
1056         if (rbio->split)
1057                 rbio = bch2_rbio_free(rbio);
1058         else
1059                 rbio->bio.bi_status = 0;
1060
1061         if (!(flags & BCH_READ_NODECODE))
1062                 flags |= BCH_READ_MUST_CLONE;
1063         flags |= BCH_READ_IN_RETRY;
1064         flags &= ~BCH_READ_MAY_PROMOTE;
1065
1066         if (flags & BCH_READ_NODECODE)
1067                 bch2_read_nodecode_retry(c, rbio, iter, inode, &avoid, flags);
1068         else
1069                 __bch2_read(c, rbio, iter, inode, &avoid, flags);
1070 }
1071
1072 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1073                             blk_status_t error)
1074 {
1075         rbio->retry = retry;
1076
1077         if (rbio->flags & BCH_READ_IN_RETRY)
1078                 return;
1079
1080         if (retry == READ_ERR) {
1081                 bch2_rbio_parent(rbio)->bio.bi_status = error;
1082                 bch2_rbio_done(rbio);
1083         } else {
1084                 bch2_rbio_punt(rbio, bch2_rbio_retry,
1085                                RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1086         }
1087 }
1088
1089 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1090 {
1091         struct bch_fs *c = rbio->c;
1092         struct btree_iter iter;
1093         struct bkey_s_c k;
1094         struct bkey_i_extent *e;
1095         BKEY_PADDED(k) new;
1096         struct bch_extent_crc_unpacked new_crc;
1097         unsigned offset;
1098         int ret;
1099
1100         if (rbio->pick.crc.compression_type)
1101                 return;
1102
1103         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
1104                              BTREE_ITER_INTENT);
1105 retry:
1106         k = bch2_btree_iter_peek(&iter);
1107         if (IS_ERR_OR_NULL(k.k))
1108                 goto out;
1109
1110         if (!bkey_extent_is_data(k.k))
1111                 goto out;
1112
1113         bkey_reassemble(&new.k, k);
1114         e = bkey_i_to_extent(&new.k);
1115
1116         if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
1117                                      rbio->pick.ptr,
1118                                      rbio->pos.offset -
1119                                      rbio->pick.crc.offset) ||
1120             bversion_cmp(e->k.version, rbio->version))
1121                 goto out;
1122
1123         /* Extent was merged? */
1124         if (bkey_start_offset(&e->k) < rbio->pos.offset ||
1125             e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
1126                 goto out;
1127
1128         /* The extent might have been partially overwritten since we read it: */
1129         offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);
1130
1131         if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1132                                 rbio->pick.crc, NULL, &new_crc,
1133                                 offset, e->k.size,
1134                                 rbio->pick.crc.csum_type)) {
1135                 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1136                 goto out;
1137         }
1138
1139         if (!bch2_extent_narrow_crcs(e, new_crc))
1140                 goto out;
1141
1142         ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
1143                                    BTREE_INSERT_ATOMIC|
1144                                    BTREE_INSERT_NOFAIL|
1145                                    BTREE_INSERT_NOWAIT,
1146                                    BTREE_INSERT_ENTRY(&iter, &e->k_i));
1147         if (ret == -EINTR)
1148                 goto retry;
1149 out:
1150         bch2_btree_iter_unlock(&iter);
1151 }
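/*
 * "Narrowing" here means replacing a checksum that covers more data than is
 * still live (because the extent was partially overwritten) with one that
 * covers only the live range, computed from data we just read and verified -
 * so future reads of this extent no longer have to read and checksum the
 * dead portions.  It's strictly an optimization; any failure above just
 * leaves the old checksum in place.
 */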
1152
1153 static bool should_narrow_crcs(struct bkey_s_c_extent e,
1154                                struct extent_pick_ptr *pick,
1155                                unsigned flags)
1156 {
1157         return !(flags & BCH_READ_IN_RETRY) &&
1158                 bch2_can_narrow_extent_crcs(e, pick->crc);
1159 }
1160
1161 /* Inner part that may run in process context */
1162 static void __bch2_read_endio(struct work_struct *work)
1163 {
1164         struct bch_read_bio *rbio =
1165                 container_of(work, struct bch_read_bio, work);
1166         struct bch_fs *c = rbio->c;
1167         struct bio *src = &rbio->bio, *dst = &bch2_rbio_parent(rbio)->bio;
1168         struct bvec_iter dst_iter = rbio->bvec_iter;
1169         struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1170         struct nonce nonce = extent_nonce(rbio->version, crc);
1171         struct bch_csum csum;
1172
1173         /* Reset iterator for checksumming and copying bounced data: */
1174         if (rbio->bounce) {
1175                 src->bi_iter.bi_size            = crc.compressed_size << 9;
1176                 src->bi_iter.bi_idx             = 0;
1177                 src->bi_iter.bi_bvec_done       = 0;
1178         } else {
1179                 src->bi_iter                    = rbio->bvec_iter;
1180         }
1181
1182         csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1183         if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1184                 goto csum_err;
1185
1186         if (unlikely(rbio->narrow_crcs))
1187                 bch2_rbio_narrow_crcs(rbio);
1188
1189         if (rbio->flags & BCH_READ_NODECODE)
1190                 goto nodecode;
1191
1192         /* Adjust crc to point to subset of data we want: */
1193         crc.offset     += rbio->bvec_iter.bi_sector - rbio->pos.offset;
1194         crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);
1195
1196         if (crc.compression_type != BCH_COMPRESSION_NONE) {
1197                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1198                 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1199                         goto decompression_err;
1200         } else {
1201                 /* don't need to decrypt the entire bio: */
1202                 nonce = nonce_add(nonce, crc.offset << 9);
1203                 bio_advance(src, crc.offset << 9);
1204
1205                 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1206                 src->bi_iter.bi_size = dst_iter.bi_size;
1207
1208                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1209
1210                 if (rbio->bounce) {
1211                         struct bvec_iter src_iter = src->bi_iter;
1212                         bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1213                 }
1214         }
1215
1216         if (rbio->promote) {
1217                 /*
1218                  * Re-encrypt data we decrypted, so it's consistent with
1219                  * rbio->crc:
1220                  */
1221                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1222                 promote_start(rbio->promote, rbio);
1223         }
1224 nodecode:
1225         if (likely(!(rbio->flags & BCH_READ_IN_RETRY)))
1226                 bch2_rbio_done(rbio);
1227         return;
1228 csum_err:
1229         /*
1230          * Checksum error: if the bio wasn't bounced, we may have been
1231          * reading into buffers owned by userspace (that userspace can
1232          * scribble over) - retry the read, bouncing it this time:
1233          */
1234         if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1235                 rbio->flags |= BCH_READ_MUST_BOUNCE;
1236                 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1237                 return;
1238         }
1239
1240         bch2_dev_io_error(rbio->pick.ca,
1241                 "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
1242                 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1243                 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1244                 csum.hi, csum.lo, crc.csum_type);
1245         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1246         return;
1247 decompression_err:
1248         __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1249                           rbio->pos.inode,
1250                           (u64) rbio->bvec_iter.bi_sector);
1251         bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1252         return;
1253 }
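/*
 * Note the ordering above: when the extent carries a checksum it is always
 * verified over the extent's full (possibly still compressed) data first,
 * and only afterwards is crc.offset/crc.live_size narrowed down to the
 * subset the caller asked for - so partial reads of checksummed extents are
 * still verified against the checksum that was originally written.
 */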
1254
1255 static void bch2_read_endio(struct bio *bio)
1256 {
1257         struct bch_read_bio *rbio =
1258                 container_of(bio, struct bch_read_bio, bio);
1259         struct bch_fs *c = rbio->c;
1260         struct workqueue_struct *wq = NULL;
1261         enum rbio_context context = RBIO_CONTEXT_NULL;
1262
1263         bch2_latency_acct(rbio->pick.ca, rbio->submit_time_us, READ);
1264
1265         percpu_ref_put(&rbio->pick.ca->io_ref);
1266
1267         if (!rbio->split)
1268                 rbio->bio.bi_end_io = rbio->end_io;
1269
1270         if (bch2_dev_io_err_on(bio->bi_status, rbio->pick.ca, "data read")) {
1271                 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1272                 return;
1273         }
1274
1275         if (rbio->pick.ptr.cached &&
1276             (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1277              ptr_stale(rbio->pick.ca, &rbio->pick.ptr))) {
1278                 atomic_long_inc(&c->read_realloc_races);
1279
1280                 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1281                         bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1282                 else
1283                         bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1284                 return;
1285         }
1286
1287         if (rbio->narrow_crcs ||
1288             rbio->pick.crc.compression_type ||
1289             bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1290                 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1291         else if (rbio->pick.crc.csum_type)
1292                 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1293
1294         bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1295 }
1296
1297 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1298                        struct bvec_iter iter, struct bkey_s_c_extent e,
1299                        struct extent_pick_ptr *pick, unsigned flags)
1300 {
1301         struct bch_read_bio *rbio;
1302         bool split = false, bounce = false, read_full = false;
1303         bool promote = false, narrow_crcs = false;
1304         struct bpos pos = bkey_start_pos(e.k);
1305         int ret = 0;
1306
1307         lg_local_lock(&c->usage_lock);
1308         bucket_io_clock_reset(c, pick->ca,
1309                         PTR_BUCKET_NR(pick->ca, &pick->ptr), READ);
1310         lg_local_unlock(&c->usage_lock);
1311
1312         narrow_crcs = should_narrow_crcs(e, pick, flags);
1313
1314         if (flags & BCH_READ_NODECODE) {
1315                 BUG_ON(iter.bi_size < pick->crc.compressed_size << 9);
1316                 iter.bi_size = pick->crc.compressed_size << 9;
1317                 goto noclone;
1318         }
1319
1320         if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1321                 flags |= BCH_READ_MUST_BOUNCE;
1322
1323         EBUG_ON(bkey_start_offset(e.k) > iter.bi_sector ||
1324                 e.k->p.offset < bvec_iter_end_sector(iter));
1325
1326         if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
1327             (pick->crc.csum_type != BCH_CSUM_NONE &&
1328              (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1329               (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
1330                (flags & BCH_READ_USER_MAPPED)) ||
1331               (flags & BCH_READ_MUST_BOUNCE)))) {
1332                 read_full = true;
1333                 bounce = true;
1334         }
1335
1336         promote = should_promote(c, pick, flags);
1337         /* could also set read_full */
1338         if (promote)
1339                 bounce = true;
1340
1341         if (!read_full) {
1342                 EBUG_ON(pick->crc.compression_type);
1343                 EBUG_ON(pick->crc.csum_type &&
1344                         (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1345                          bvec_iter_sectors(iter) != pick->crc.live_size ||
1346                          pick->crc.offset ||
1347                          iter.bi_sector != pos.offset));
1348
1349                 pick->ptr.offset += pick->crc.offset +
1350                         (iter.bi_sector - pos.offset);
1351                 pick->crc.compressed_size       = bvec_iter_sectors(iter);
1352                 pick->crc.uncompressed_size     = bvec_iter_sectors(iter);
1353                 pick->crc.offset                = 0;
1354                 pick->crc.live_size             = bvec_iter_sectors(iter);
1355                 pos.offset                      = iter.bi_sector;
1356         }
1357
1358         if (bounce) {
1359                 unsigned sectors = pick->crc.compressed_size;
1360
1361                 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
1362                                         DIV_ROUND_UP(sectors, PAGE_SECTORS),
1363                                         &c->bio_read_split),
1364                                  orig->opts);
1365
1366                 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1367                 split = true;
1368         } else if (flags & BCH_READ_MUST_CLONE) {
1369                 /*
1370                  * Have to clone if there were any splits, due to error
1371                  * reporting issues: if a split errored and retrying didn't
1372                  * work, then when it reports the error to its parent (us) we
1373                  * don't know whether the error was from our part of the bio
1374                  * (in which case we should retry) or from the whole bio (in
1375                  * which case retrying would just lose the error).
1376                  */
1377                 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
1378                                                 &c->bio_read_split),
1379                                  orig->opts);
1380                 rbio->bio.bi_iter = iter;
1381                 split = true;
1382         } else {
1383 noclone:
1384                 rbio = orig;
1385                 rbio->bio.bi_iter = iter;
1386                 split = false;
1387                 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1388         }
1389
1390         BUG_ON(bio_sectors(&rbio->bio) != pick->crc.compressed_size);
1391
1392         rbio->c                 = c;
1393         if (split)
1394                 rbio->parent    = orig;
1395         else
1396                 rbio->end_io    = orig->bio.bi_end_io;
1397         rbio->bvec_iter         = iter;
1398         rbio->submit_time_us    = local_clock_us();
1399         rbio->flags             = flags;
1400         rbio->bounce            = bounce;
1401         rbio->split             = split;
1402         rbio->narrow_crcs       = narrow_crcs;
1403         rbio->retry             = 0;
1404         rbio->context           = 0;
1405         rbio->devs_have         = bch2_extent_devs(e);
1406         rbio->pick              = *pick;
1407         rbio->pos               = pos;
1408         rbio->version           = e.k->version;
1409         rbio->promote           = promote ? promote_alloc(rbio) : NULL;
1410         INIT_WORK(&rbio->work, NULL);
1411
1412         rbio->bio.bi_bdev       = pick->ca->disk_sb.bdev;
1413         rbio->bio.bi_opf        = orig->bio.bi_opf;
1414         rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
1415         rbio->bio.bi_end_io     = bch2_read_endio;
1416
1417         if (bounce)
1418                 trace_read_bounce(&rbio->bio);
1419
1420         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1421         this_cpu_add(pick->ca->io_done->sectors[READ][BCH_DATA_USER],
1422                      bio_sectors(&rbio->bio));
1423
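        /*
         * Normal reads are submitted asynchronously and complete via
         * bch2_read_endio(); in the retry path we submit synchronously so we
         * can hand the result (rbio->retry: 0, READ_RETRY, READ_RETRY_AVOID
         * or READ_ERR) straight back to the caller.
         */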
1424         if (likely(!(flags & BCH_READ_IN_RETRY))) {
1425                 submit_bio(&rbio->bio);
1426         } else {
1427                 submit_bio_wait(&rbio->bio);
1428
1429                 rbio->context = RBIO_CONTEXT_UNBOUND;
1430                 bch2_read_endio(&rbio->bio);
1431
1432                 ret = rbio->retry;
1433                 if (rbio->split)
1434                         rbio = bch2_rbio_free(rbio);
1435                 if (!ret)
1436                         bch2_rbio_done(rbio);
1437         }
1438
1439         return ret;
1440 }
1441
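/*
 * Retry path for BCH_READ_NODECODE reads (reads that want the extent data as
 * it is on disk, without decoding it): re-look up the extent and only retry if
 * it still matches the pointer we originally read from; otherwise fail with
 * BLK_STS_AGAIN so the caller can restart at a higher level.
 */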
1442 static void bch2_read_nodecode_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1443                                      struct bvec_iter bvec_iter, u64 inode,
1444                                      struct bch_devs_mask *avoid, unsigned flags)
1445 {
1446         struct extent_pick_ptr pick;
1447         struct btree_iter iter;
1448         BKEY_PADDED(k) tmp;
1449         struct bkey_s_c k;
1450         int ret;
1451
1452         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
1453                              POS(inode, bvec_iter.bi_sector),
1454                              BTREE_ITER_WITH_HOLES);
1455 retry:
1456         k = bch2_btree_iter_peek_with_holes(&iter);
1457         if (btree_iter_err(k)) {
1458                 bch2_btree_iter_unlock(&iter);
1459                 goto err;
1460         }
1461
1462         bkey_reassemble(&tmp.k, k);
1463         k = bkey_i_to_s_c(&tmp.k);
1464         bch2_btree_iter_unlock(&iter);
1465
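        /*
         * A nodecode retry only makes sense if we'd be re-reading the same
         * data: check that the key is still a data extent, still contains the
         * pointer we originally picked at the same offset, and still starts
         * where our buffer starts.
         */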
1466         if (!bkey_extent_is_data(k.k) ||
1467             !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
1468                                      rbio->pick.ptr,
1469                                      rbio->pos.offset -
1470                                      rbio->pick.crc.offset) ||
1471             bkey_start_offset(k.k) != bvec_iter.bi_sector)
1472                 goto err;
1473
1474         bch2_extent_pick_ptr(c, k, avoid, &pick);
1475         if (IS_ERR(pick.ca)) {
1476                 bcache_io_error(c, &rbio->bio, "no device to read from");
1477                 bio_endio(&rbio->bio);
1478                 return;
1479         }
1480
1481         if (!pick.ca)
1482                 goto err;
1483
1484         if (pick.crc.compressed_size > bvec_iter_sectors(bvec_iter)) {
1485                 percpu_ref_put(&pick.ca->io_ref);
1486                 goto err;
1487         }
1489
1490         ret = __bch2_read_extent(c, rbio, bvec_iter, bkey_s_c_to_extent(k),
1491                                  &pick, flags);
1492         switch (ret) {
1493         case READ_RETRY_AVOID:
1494                 __set_bit(pick.ca->dev_idx, avoid->d); /* fall through */
1495         case READ_RETRY:
1496                 goto retry;
1497         case READ_ERR:
1498                 bio_endio(&rbio->bio);
1499                 return;
1500         }
1501
1502         return;
1503 err:
1504         /*
1505          * The extent we wanted to read no longer exists, or was merged or
1506          * partially overwritten (and thus may now be bigger than the
1507          * memory that was originally allocated), so we can't retry the
1508          * read here:
1509          */
1510         rbio->bio.bi_status = BLK_STS_AGAIN;
1511         bio_endio(&rbio->bio);
1512         return;
1513 }
1514
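/*
 * Main read path: walk the extents btree over the range this bio covers, issue
 * a read for each extent fragment (splitting the bio as needed) and zero-fill
 * any holes.
 */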
1515 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
1516                  struct bvec_iter bvec_iter, u64 inode,
1517                  struct bch_devs_mask *avoid, unsigned flags)
1518 {
1519         struct btree_iter iter;
1520         struct bkey_s_c k;
1521         int ret;
1522
1523         EBUG_ON(flags & BCH_READ_NODECODE);
1524 retry:
1525         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1526                            POS(inode, bvec_iter.bi_sector),
1527                            BTREE_ITER_WITH_HOLES, k) {
1528                 BKEY_PADDED(k) tmp;
1529                 struct extent_pick_ptr pick;
1530                 struct bvec_iter fragment;
1531
1532                 /*
1533                  * Unlock the iterator while the btree node's lock is still in
1534                  * cache, before doing the IO:
1535                  */
1536                 bkey_reassemble(&tmp.k, k);
1537                 k = bkey_i_to_s_c(&tmp.k);
1538                 bch2_btree_iter_unlock(&iter);
1539
1540                 bch2_extent_pick_ptr(c, k, avoid, &pick);
1541                 if (IS_ERR(pick.ca)) {
1542                         bcache_io_error(c, &rbio->bio, "no device to read from");
1543                         bio_endio(&rbio->bio);
1544                         return;
1545                 }
1546
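                /*
                 * The fragment is the part of the request this extent covers:
                 * from the current sector up to the extent's end or the end
                 * of the request, whichever comes first. E.g. (hypothetical
                 * numbers) an extent ending at sector 120 against a request
                 * for [100, 200) gives a 20 sector (20 << 9 byte) fragment.
                 */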
1547                 fragment = bvec_iter;
1548                 fragment.bi_size = (min_t(u64, k.k->p.offset,
1549                                           bvec_iter_end_sector(bvec_iter)) -
1550                                     bvec_iter.bi_sector) << 9;
1551
1552                 if (pick.ca) {
1553                         if (fragment.bi_size != bvec_iter.bi_size) {
1554                                 bio_inc_remaining(&rbio->bio);
1555                                 flags |= BCH_READ_MUST_CLONE;
1556                                 trace_read_split(&rbio->bio);
1557                         }
1558
1559                         ret = __bch2_read_extent(c, rbio, fragment,
1560                                                  bkey_s_c_to_extent(k),
1561                                                  &pick, flags);
1562                         switch (ret) {
1563                         case READ_RETRY_AVOID:
1564                                 __set_bit(pick.ca->dev_idx, avoid->d); /* fall through */
1565                         case READ_RETRY:
1566                                 goto retry;
1567                         case READ_ERR:
1568                                 rbio->bio.bi_status = BLK_STS_IOERR;
1569                                 bio_endio(&rbio->bio);
1570                                 return;
1571                         }
1572                 } else {
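                        /*
                         * No pointer means this part of the range is a hole:
                         * return zeroes for it.
                         */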
1573                         zero_fill_bio_iter(&rbio->bio, fragment);
1574
1575                         if (fragment.bi_size == bvec_iter.bi_size)
1576                                 bio_endio(&rbio->bio);
1577                 }
1578
1579                 if (fragment.bi_size == bvec_iter.bi_size)
1580                         return;
1581
1582                 bio_advance_iter(&rbio->bio, &bvec_iter, fragment.bi_size);
1583         }
1584
1585         /*
1586          * If we get here, it better have been because there was an error
1587          * reading a btree node
1588          */
1589         ret = bch2_btree_iter_unlock(&iter);
1590         BUG_ON(!ret);
1591         bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
1592         bio_endio(&rbio->bio);
1593 }