1 /*
2  * Some low level IO code, and hacks for various block layer limitations
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "compress.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "error.h"
18 #include "extents.h"
19 #include "io.h"
20 #include "journal.h"
21 #include "keylist.h"
22 #include "move.h"
23 #include "super.h"
24 #include "super-io.h"
25
26 #include <linux/blkdev.h>
27 #include <linux/random.h>
28
29 #include <trace/events/bcachefs.h>
30
31 /* Allocate, free from mempool: */
32
33 void bch2_latency_acct(struct bch_dev *ca, unsigned submit_time_us, int rw)
34 {
35         u64 now = local_clock();
36         unsigned io_latency = (now >> 10) - submit_time_us;
37         atomic_t *latency = &ca->latency[rw];
38         unsigned old, new, v = atomic_read(latency);
39
40         do {
41                 old = v;
42
43                 /*
44                  * If the io latency was reasonably close to the current
45                  * average, skip the update and the atomic operation, most of
46                  * the time:
47                  */
48                 if (abs((int) (old - io_latency)) < (old >> 1) &&
49                     now & ~(~0 << 5))
50                         break;
51
52                 new = ewma_add((u64) old, io_latency, 6);
53         } while ((v = atomic_cmpxchg(latency, old, new)) != old);
54 }
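
/*
 * A rough worked example of the accounting above, assuming ewma_add(old,
 * val, w) computes approximately old + (val - old) / 2^w (i.e. an
 * exponentially weighted moving average with weight 2^-w):
 *
 *	current average		200 us
 *	new sample		1000 us
 *	|200 - 1000| = 800	>= 200/2, so the update isn't skipped
 *	new average		~200 + 800/64 = ~212 us
 *
 * Samples within half the current average only trigger an update on
 * roughly 1 call in 32 (when the low 5 bits of the clock happen to be
 * zero), which keeps the cmpxchg off the fast path.
 */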
55
56 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
57 {
58         struct bio_vec *bv;
59         unsigned i;
60
61         bio_for_each_segment_all(bv, bio, i)
62                 if (bv->bv_page != ZERO_PAGE(0))
63                         mempool_free(bv->bv_page, &c->bio_bounce_pages);
64         bio->bi_vcnt = 0;
65 }
66
67 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
68                                     bool *using_mempool)
69 {
70         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
71
72         if (likely(!*using_mempool)) {
73                 bv->bv_page = alloc_page(GFP_NOIO);
74                 if (unlikely(!bv->bv_page)) {
75                         mutex_lock(&c->bio_bounce_pages_lock);
76                         *using_mempool = true;
77                         goto pool_alloc;
78
79                 }
80         } else {
81 pool_alloc:
82                 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
83         }
84
85         bv->bv_len = PAGE_SIZE;
86         bv->bv_offset = 0;
87 }
88
89 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
90                                size_t bytes)
91 {
92         bool using_mempool = false;
93
94         BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
95
96         bio->bi_iter.bi_size = bytes;
97
98         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
99                 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
100
101         if (using_mempool)
102                 mutex_unlock(&c->bio_bounce_pages_lock);
103 }
104
105 void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
106                                     size_t bytes)
107 {
108         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
109                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
110
111                 BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
112
113                 bv->bv_page = alloc_page(GFP_NOIO);
114                 if (!bv->bv_page) {
115                         /*
116                          * We already allocated from the mempool; we can't allocate from it
117                          * again without freeing the pages we already allocated, or else we
118                          * could deadlock:
119                          */
120                         bch2_bio_free_pages_pool(c, bio);
121                         bch2_bio_alloc_pages_pool(c, bio, bytes);
122                         return;
123                 }
124
125                 bv->bv_len = PAGE_SIZE;
126                 bv->bv_offset = 0;
127                 bio->bi_vcnt++;
128         }
129
130         bio->bi_iter.bi_size = bytes;
131 }
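
/*
 * Contract for the bounce-page helpers above, as implemented here: pages
 * handed out by bch2_bio_alloc_pages_pool() and
 * bch2_bio_alloc_more_pages_pool() are eventually released with
 * bch2_bio_free_pages_pool(); bvecs pointing at ZERO_PAGE(0) are skipped
 * when freeing, so the zero page can safely appear in these bios.
 */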
132
133 /* Writes */
134
135 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
136                                enum bch_data_type type,
137                                const struct bkey_i *k)
138 {
139         struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
140         const struct bch_extent_ptr *ptr;
141         struct bch_write_bio *n;
142         struct bch_dev *ca;
143
144         BUG_ON(c->opts.nochanges);
145
146         extent_for_each_ptr(e, ptr) {
147                 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
148                        !c->devs[ptr->dev]);
149
150                 ca = bch_dev_bkey_exists(c, ptr->dev);
151
152                 if (ptr + 1 < &extent_entry_last(e)->ptr) {
153                         n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
154                                                    &ca->replica_set));
155
156                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
157                         n->bio.bi_private       = wbio->bio.bi_private;
158                         n->parent               = wbio;
159                         n->split                = true;
160                         n->bounce               = false;
161                         n->put_bio              = true;
162                         n->bio.bi_opf           = wbio->bio.bi_opf;
163                         bio_inc_remaining(&wbio->bio);
164                 } else {
165                         n = wbio;
166                         n->split                = false;
167                 }
168
169                 n->c                    = c;
170                 n->ca                   = ca;
171                 n->submit_time_us       = local_clock_us();
172                 n->bio.bi_iter.bi_sector = ptr->offset;
173
174                 if (!journal_flushes_device(ca))
175                         n->bio.bi_opf |= REQ_FUA;
176
177                 if (likely(percpu_ref_tryget(&ca->io_ref))) {
178                         this_cpu_add(ca->io_done->sectors[WRITE][type],
179                                      bio_sectors(&n->bio));
180
181                         n->have_io_ref          = true;
182                         bio_set_dev(&n->bio, ca->disk_sb.bdev);
183                         submit_bio(&n->bio);
184                 } else {
185                         n->have_io_ref          = false;
186                         n->bio.bi_status        = BLK_STS_REMOVED;
187                         bio_endio(&n->bio);
188                 }
189         }
190 }
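
/*
 * Fan-out sketch: for an extent with N pointers the write is submitted N
 * times - one bio_clone_fast() per pointer except the last, which reuses
 * the original wbio.  Each clone bumps the parent's remaining count via
 * bio_inc_remaining(), so the parent's end_io (bch2_write_endio()) only
 * completes the write once every replica has finished.  REQ_FUA is added
 * per device when the journal won't otherwise flush writes to that device.
 */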
191
192 static void __bch2_write(struct closure *);
193
194 static void bch2_write_done(struct closure *cl)
195 {
196         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
197
198         BUG_ON(!(op->flags & BCH_WRITE_DONE));
199
200         if (!op->error && (op->flags & BCH_WRITE_FLUSH))
201                 op->error = bch2_journal_error(&op->c->journal);
202
203         if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
204                 bch2_disk_reservation_put(op->c, &op->res);
205         percpu_ref_put(&op->c->writes);
206         bch2_keylist_free(&op->insert_keys, op->inline_keys);
207         op->flags &= ~(BCH_WRITE_DONE|BCH_WRITE_LOOPED);
208
209         closure_return(cl);
210 }
211
212 int bch2_write_index_default(struct bch_write_op *op)
213 {
214         struct keylist *keys = &op->insert_keys;
215         struct btree_iter iter;
216         int ret;
217
218         bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
219                              bkey_start_pos(&bch2_keylist_front(keys)->k),
220                              BTREE_ITER_INTENT);
221
222         ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
223                                        NULL, op_journal_seq(op),
224                                        BTREE_INSERT_NOFAIL|
225                                        BTREE_INSERT_USE_RESERVE);
226         bch2_btree_iter_unlock(&iter);
227
228         return ret;
229 }
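
/*
 * Default index update hook: insert the completed extent keys into the
 * extents btree.  Callers that need different index-update behaviour
 * (e.g. the data move path) point op->index_update_fn elsewhere.
 */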
230
231 /**
232  * bch2_write_index - after a write, update the index to point to the new data
233  */
234 static void bch2_write_index(struct closure *cl)
235 {
236         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
237         struct bch_fs *c = op->c;
238         struct keylist *keys = &op->insert_keys;
239         struct bkey_s_extent e;
240         struct bch_extent_ptr *ptr;
241         struct bkey_i *src, *dst = keys->keys, *n;
242         int ret;
243
244         op->flags |= BCH_WRITE_LOOPED;
245
246         for (src = keys->keys; src != keys->top; src = n) {
247                 n = bkey_next(src);
248                 bkey_copy(dst, src);
249
250                 e = bkey_i_to_s_extent(dst);
251                 extent_for_each_ptr_backwards(e, ptr)
252                         if (test_bit(ptr->dev, op->failed.d))
253                                 bch2_extent_drop_ptr(e, ptr);
254
255                 if (!bch2_extent_nr_ptrs(e.c)) {
256                         ret = -EIO;
257                         goto err;
258                 }
259
260                 if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
261                         ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, e.s_c);
262                         if (ret)
263                                 goto err;
264                 }
265
266                 dst = bkey_next(dst);
267         }
268
269         keys->top = dst;
270
271         if (!bch2_keylist_empty(keys)) {
272                 u64 sectors_start = keylist_sectors(keys);
273                 int ret = op->index_update_fn(op);
274
275                 BUG_ON(keylist_sectors(keys) && !ret);
276
277                 op->written += sectors_start - keylist_sectors(keys);
278
279                 if (ret) {
280                         __bcache_io_error(c, "btree IO error %i", ret);
281                         op->error = ret;
282                 }
283         }
284 out:
285         bch2_open_bucket_put_refs(c, &op->open_buckets_nr, op->open_buckets);
286
287         if (!(op->flags & BCH_WRITE_DONE))
288                 continue_at(cl, __bch2_write, op->io_wq);
289
290         if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
291                 bch2_journal_flush_seq_async(&c->journal,
292                                              *op_journal_seq(op),
293                                              cl);
294                 continue_at(cl, bch2_write_done, index_update_wq(op));
295         } else {
296                 continue_at_nobarrier(cl, bch2_write_done, NULL);
297         }
298         return;
299 err:
300         keys->top = keys->keys;
301         op->error = ret;
302         op->flags |= BCH_WRITE_DONE;
303         goto out;
304 }
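
/*
 * In short, bch2_write_index() drops extent pointers to devices that
 * reported write errors, fails the write with -EIO if no pointers remain,
 * marks the new replicas (unless BCH_WRITE_NOMARK_REPLICAS), runs
 * op->index_update_fn() to insert the keys, and then either loops back
 * into __bch2_write() for the next chunk, waits for a journal flush
 * (BCH_WRITE_FLUSH), or completes the write.
 */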
305
306 static void bch2_write_endio(struct bio *bio)
307 {
308         struct closure *cl              = bio->bi_private;
309         struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
310         struct bch_write_bio *wbio      = to_wbio(bio);
311         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
312         struct bch_fs *c                = wbio->c;
313         struct bch_dev *ca              = wbio->ca;
314
315         bch2_latency_acct(ca, wbio->submit_time_us, WRITE);
316
317         if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
318                 set_bit(ca->dev_idx, op->failed.d);
319
320         if (wbio->have_io_ref)
321                 percpu_ref_put(&ca->io_ref);
322
323         if (wbio->bounce)
324                 bch2_bio_free_pages_pool(c, bio);
325
326         if (wbio->put_bio)
327                 bio_put(bio);
328
329         if (parent)
330                 bio_endio(&parent->bio);
331         else
332                 closure_put(cl);
333 }
334
335 static void init_append_extent(struct bch_write_op *op,
336                                struct write_point *wp,
337                                struct bversion version,
338                                struct bch_extent_crc_unpacked crc)
339 {
340         struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
341
342         op->pos.offset += crc.uncompressed_size;
343         e->k.p = op->pos;
344         e->k.size = crc.uncompressed_size;
345         e->k.version = version;
346         bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
347
348         bch2_extent_crc_append(e, crc);
349         bch2_alloc_sectors_append_ptrs(op->c, wp, e, crc.compressed_size);
350
351         bch2_keylist_push(&op->insert_keys);
352 }
353
354 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
355                                         struct write_point *wp,
356                                         struct bio *src,
357                                         bool *page_alloc_failed)
358 {
359         struct bch_write_bio *wbio;
360         struct bio *bio;
361         unsigned output_available =
362                 min(wp->sectors_free << 9, src->bi_iter.bi_size);
363         unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);
364
365         bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
366         wbio                    = wbio_init(bio);
367         wbio->bounce            = true;
368         wbio->put_bio           = true;
369         /* copy REQ_SYNC (formerly WRITE_SYNC) flag: */
370         wbio->bio.bi_opf        = src->bi_opf;
371
372         /*
373          * We can't use mempool for more than c->sb.encoded_extent_max
374          * worth of pages, but we'd like to allocate more if we can:
375          */
376         while (bio->bi_iter.bi_size < output_available) {
377                 unsigned len = min_t(unsigned, PAGE_SIZE,
378                                      output_available - bio->bi_iter.bi_size);
379                 struct page *p;
380
381                 p = alloc_page(GFP_NOIO);
382                 if (!p) {
383                         unsigned pool_max =
384                                 min_t(unsigned, output_available,
385                                       c->sb.encoded_extent_max << 9);
386
387                         if (bio_sectors(bio) < pool_max)
388                                 bch2_bio_alloc_pages_pool(c, bio, pool_max);
389                         break;
390                 }
391
392                 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
393                         .bv_page        = p,
394                         .bv_len         = len,
395                         .bv_offset      = 0,
396                 };
397                 bio->bi_iter.bi_size += len;
398         }
399
400         *page_alloc_failed = bio->bi_vcnt < pages;
401         return bio;
402 }
403
404 static int bch2_write_rechecksum(struct bch_fs *c,
405                                  struct bch_write_op *op,
406                                  unsigned new_csum_type)
407 {
408         struct bio *bio = &op->wbio.bio;
409         struct bch_extent_crc_unpacked new_crc;
410         int ret;
411
412         /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
413
414         if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
415             bch2_csum_type_is_encryption(new_csum_type))
416                 new_csum_type = op->crc.csum_type;
417
418         ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
419                                   NULL, &new_crc,
420                                   op->crc.offset, op->crc.live_size,
421                                   new_csum_type);
422         if (ret)
423                 return ret;
424
425         bio_advance(bio, op->crc.offset << 9);
426         bio->bi_iter.bi_size = op->crc.live_size << 9;
427         op->crc = new_crc;
428         return 0;
429 }
430
431 static int bch2_write_decrypt(struct bch_write_op *op)
432 {
433         struct bch_fs *c = op->c;
434         struct nonce nonce = extent_nonce(op->version, op->crc);
435         struct bch_csum csum;
436
437         if (!bch2_csum_type_is_encryption(op->crc.csum_type))
438                 return 0;
439
440         /*
441          * If we need to decrypt data in the write path, we'll no longer be able
442          * to verify the existing checksum (poly1305 mac, in this case) after
443          * it's decrypted - this is the last point we'll be able to reverify the
444          * checksum:
445          */
446         csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
447         if (bch2_crc_cmp(op->crc.csum, csum))
448                 return -EIO;
449
450         bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
451         op->crc.csum_type = 0;
452         op->crc.csum = (struct bch_csum) { 0, 0 };
453         return 0;
454 }
455
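/*
 * Preparing already-encoded data (BCH_WRITE_DATA_ENCODED) for writing:
 *
 *   PREP_ENCODED_OK:           fall through to the normal checksum/compress
 *                              path below
 *   PREP_ENCODED_ERR:          hard error (e.g. in-place decompression failed)
 *   PREP_ENCODED_CHECKSUM_ERR: the existing checksum didn't verify, or
 *                              rechecksumming failed
 *   PREP_ENCODED_DO_WRITE:     the extent can be written out exactly as it
 *                              came in
 */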
456 static enum prep_encoded_ret {
457         PREP_ENCODED_OK,
458         PREP_ENCODED_ERR,
459         PREP_ENCODED_CHECKSUM_ERR,
460         PREP_ENCODED_DO_WRITE,
461 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
462 {
463         struct bch_fs *c = op->c;
464         struct bio *bio = &op->wbio.bio;
465
466         if (!(op->flags & BCH_WRITE_DATA_ENCODED))
467                 return PREP_ENCODED_OK;
468
469         BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
470
471         /* Can we just write the entire extent as is? */
472         if (op->crc.uncompressed_size == op->crc.live_size &&
473             op->crc.compressed_size <= wp->sectors_free &&
474             op->crc.compression_type == op->compression_type) {
475                 if (!op->crc.compression_type &&
476                     op->csum_type != op->crc.csum_type &&
477                     bch2_write_rechecksum(c, op, op->csum_type))
478                         return PREP_ENCODED_CHECKSUM_ERR;
479
480                 return PREP_ENCODED_DO_WRITE;
481         }
482
483         /*
484          * If the data is compressed and we couldn't write the entire extent as
485          * is, we have to decompress it:
486          */
487         if (op->crc.compression_type) {
488                 struct bch_csum csum;
489
490                 if (bch2_write_decrypt(op))
491                         return PREP_ENCODED_CHECKSUM_ERR;
492
493                 /* Last point we can still verify checksum: */
494                 csum = bch2_checksum_bio(c, op->crc.csum_type,
495                                          extent_nonce(op->version, op->crc),
496                                          bio);
497                 if (bch2_crc_cmp(op->crc.csum, csum))
498                         return PREP_ENCODED_CHECKSUM_ERR;
499
500                 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
501                         return PREP_ENCODED_ERR;
502         }
503
504         /*
505          * No longer have compressed data after this point - data might be
506          * encrypted:
507          */
508
509         /*
510          * If the data is checksummed and we're only writing a subset,
511          * rechecksum and adjust bio to point to currently live data:
512          */
513         if ((op->crc.live_size != op->crc.uncompressed_size ||
514              op->crc.csum_type != op->csum_type) &&
515             bch2_write_rechecksum(c, op, op->csum_type))
516                 return PREP_ENCODED_CHECKSUM_ERR;
517
518         /*
519          * If we want to compress the data, it has to be decrypted:
520          */
521         if ((op->compression_type ||
522              bch2_csum_type_is_encryption(op->crc.csum_type) !=
523              bch2_csum_type_is_encryption(op->csum_type)) &&
524             bch2_write_decrypt(op))
525                 return PREP_ENCODED_CHECKSUM_ERR;
526
527         return PREP_ENCODED_OK;
528 }
529
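/*
 * bch2_write_extent() carves off as much of the source bio as will fit in
 * @wp, bouncing/compressing/checksumming/encrypting it as required, and
 * appends one extent key per chunk to op->insert_keys before submitting
 * the replicas.  It returns > 0 if the source bio still has data left (the
 * caller loops), 0 when everything has been consumed, or a negative error.
 */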
530 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
531 {
532         struct bch_fs *c = op->c;
533         struct bio *src = &op->wbio.bio, *dst = src;
534         struct bvec_iter saved_iter;
535         struct bkey_i *key_to_write;
536         unsigned key_to_write_offset = op->insert_keys.top_p -
537                 op->insert_keys.keys_p;
538         unsigned total_output = 0;
539         bool bounce = false, page_alloc_failed = false;
540         int ret, more = 0;
541
542         BUG_ON(!bio_sectors(src));
543
544         switch (bch2_write_prep_encoded_data(op, wp)) {
545         case PREP_ENCODED_OK:
546                 break;
547         case PREP_ENCODED_ERR:
548                 ret = -EIO;
549                 goto err;
550         case PREP_ENCODED_CHECKSUM_ERR:
551                 goto csum_err;
552         case PREP_ENCODED_DO_WRITE:
553                 init_append_extent(op, wp, op->version, op->crc);
554                 goto do_write;
555         }
556
557         if (op->compression_type ||
558             (op->csum_type &&
559              !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
560             (bch2_csum_type_is_encryption(op->csum_type) &&
561              !(op->flags & BCH_WRITE_PAGES_OWNED))) {
562                 dst = bch2_write_bio_alloc(c, wp, src, &page_alloc_failed);
563                 bounce = true;
564         }
565
566         saved_iter = dst->bi_iter;
567
568         do {
569                 struct bch_extent_crc_unpacked crc =
570                         (struct bch_extent_crc_unpacked) { 0 };
571                 struct bversion version = op->version;
572                 size_t dst_len, src_len;
573
574                 if (page_alloc_failed &&
575                     bio_sectors(dst) < wp->sectors_free &&
576                     bio_sectors(dst) < c->sb.encoded_extent_max)
577                         break;
578
579                 BUG_ON(op->compression_type &&
580                        (op->flags & BCH_WRITE_DATA_ENCODED) &&
581                        bch2_csum_type_is_encryption(op->crc.csum_type));
582                 BUG_ON(op->compression_type && !bounce);
583
584                 crc.compression_type = op->compression_type
585                         ?  bch2_bio_compress(c, dst, &dst_len, src, &src_len,
586                                              op->compression_type)
587                         : 0;
588                 if (!crc.compression_type) {
589                         dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
590                         dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
591
592                         if (op->csum_type)
593                                 dst_len = min_t(unsigned, dst_len,
594                                                 c->sb.encoded_extent_max << 9);
595
596                         if (bounce) {
597                                 swap(dst->bi_iter.bi_size, dst_len);
598                                 bio_copy_data(dst, src);
599                                 swap(dst->bi_iter.bi_size, dst_len);
600                         }
601
602                         src_len = dst_len;
603                 }
604
605                 BUG_ON(!src_len || !dst_len);
606
607                 if (bch2_csum_type_is_encryption(op->csum_type)) {
608                         if (bversion_zero(version)) {
609                                 version.lo = atomic64_inc_return(&c->key_version) + 1;
610                         } else {
611                                 crc.nonce = op->nonce;
612                                 op->nonce += src_len >> 9;
613                         }
614                 }
615
616                 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
617                     !crc.compression_type &&
618                     bch2_csum_type_is_encryption(op->crc.csum_type) ==
619                     bch2_csum_type_is_encryption(op->csum_type)) {
620                         /*
621                          * Note: when we're using rechecksum(), we need to be
622                          * checksumming @src because it has all the data our
623                          * existing checksum covers - if we bounced (because we
624                          * were trying to compress), @dst will only have the
625                          * part of the data the new checksum will cover.
626                          *
627                          * But normally we want to be checksumming post bounce,
628                          * because part of the reason for bouncing is so the
629                          * data can't be modified (by userspace) while it's in
630                          * flight.
631                          */
632                         if (bch2_rechecksum_bio(c, src, version, op->crc,
633                                         &crc, &op->crc,
634                                         src_len >> 9,
635                                         bio_sectors(src) - (src_len >> 9),
636                                         op->csum_type))
637                                 goto csum_err;
638                 } else {
639                         if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
640                             bch2_rechecksum_bio(c, src, version, op->crc,
641                                         NULL, &op->crc,
642                                         src_len >> 9,
643                                         bio_sectors(src) - (src_len >> 9),
644                                         op->crc.csum_type))
645                                 goto csum_err;
646
647                         crc.compressed_size     = dst_len >> 9;
648                         crc.uncompressed_size   = src_len >> 9;
649                         crc.live_size           = src_len >> 9;
650
651                         swap(dst->bi_iter.bi_size, dst_len);
652                         bch2_encrypt_bio(c, op->csum_type,
653                                          extent_nonce(version, crc), dst);
654                         crc.csum = bch2_checksum_bio(c, op->csum_type,
655                                          extent_nonce(version, crc), dst);
656                         crc.csum_type = op->csum_type;
657                         swap(dst->bi_iter.bi_size, dst_len);
658                 }
659
660                 init_append_extent(op, wp, version, crc);
661
662                 if (dst != src)
663                         bio_advance(dst, dst_len);
664                 bio_advance(src, src_len);
665                 total_output += dst_len;
666         } while (dst->bi_iter.bi_size &&
667                  src->bi_iter.bi_size &&
668                  wp->sectors_free &&
669                  !bch2_keylist_realloc(&op->insert_keys,
670                                       op->inline_keys,
671                                       ARRAY_SIZE(op->inline_keys),
672                                       BKEY_EXTENT_U64s_MAX));
673
674         more = src->bi_iter.bi_size != 0;
675
676         dst->bi_iter = saved_iter;
677
678         if (!bounce && more) {
679                 dst = bio_split(src, total_output >> 9,
680                                 GFP_NOIO, &c->bio_write);
681                 wbio_init(dst)->put_bio = true;
682         }
683
684         dst->bi_iter.bi_size = total_output;
685
686         /* Free unneeded pages after compressing: */
687         if (bounce)
688                 while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
689                         mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
690                                      &c->bio_bounce_pages);
691 do_write:
692         /* might have done a realloc... */
693
694         key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
695
696         dst->bi_end_io  = bch2_write_endio;
697         dst->bi_private = &op->cl;
698         bio_set_op_attrs(dst, REQ_OP_WRITE, 0);
699
700         closure_get(dst->bi_private);
701
702         bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
703                                   key_to_write);
704         return more;
705 csum_err:
706         bch_err(c, "error verifying existing checksum while "
707                 "rewriting existing data (memory corruption?)");
708         ret = -EIO;
709 err:
710         if (bounce) {
711                 bch2_bio_free_pages_pool(c, dst);
712                 bio_put(dst);
713         }
714
715         return ret;
716 }
717
718 static void __bch2_write(struct closure *cl)
719 {
720         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
721         struct bch_fs *c = op->c;
722         struct write_point *wp;
723         int ret;
724
725         do {
726                 if (op->open_buckets_nr + op->nr_replicas >
727                     ARRAY_SIZE(op->open_buckets))
728                         continue_at(cl, bch2_write_index, index_update_wq(op));
729
730                 /* for the device pointers and 1 for the checksum */
731                 if (bch2_keylist_realloc(&op->insert_keys,
732                                         op->inline_keys,
733                                         ARRAY_SIZE(op->inline_keys),
734                                         BKEY_EXTENT_U64s_MAX))
735                         continue_at(cl, bch2_write_index, index_update_wq(op));
736
737                 wp = bch2_alloc_sectors_start(c,
738                         op->devs,
739                         op->write_point,
740                         &op->devs_have,
741                         op->nr_replicas,
742                         op->nr_replicas_required,
743                         op->alloc_reserve,
744                         op->flags,
745                         (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
746                 EBUG_ON(!wp);
747
748                 if (unlikely(IS_ERR(wp))) {
749                         if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
750                                 ret = PTR_ERR(wp);
751                                 goto err;
752                         }
753
754                         /*
755                          * If we already have some keys, we must insert them first
756                          * before allocating another open bucket. We only hit
757                          * this case if open_buckets_nr > 1.
758                          */
759                         if (!bch2_keylist_empty(&op->insert_keys))
760                                 continue_at(cl, bch2_write_index,
761                                             index_update_wq(op));
762
763                         /*
764                          * If we've looped, we're running out of a workqueue -
765                          * not the bch2_write() caller's context - and we don't
766                          * want to block the workqueue:
767                          */
768                         if (op->flags & BCH_WRITE_LOOPED)
769                                 continue_at(cl, __bch2_write, op->io_wq);
770
771                         /*
772                          * Otherwise, we do want to block the caller on alloc
773                          * failure instead of letting it queue up more and more
774                          * writes:
775                          * XXX: this technically needs a try_to_freeze() -
776                          * except that's not safe, because the caller may have
777                          * already issued other IO...
778                          */
779                         closure_sync(cl);
780                         continue;
781                 }
782
783                 ret = bch2_write_extent(op, wp);
784
785                 BUG_ON(op->open_buckets_nr + wp->nr_ptrs_can_use >
786                        ARRAY_SIZE(op->open_buckets));
787                 bch2_open_bucket_get(c, wp,
788                                      &op->open_buckets_nr,
789                                      op->open_buckets);
790                 bch2_alloc_sectors_done(c, wp);
791
792                 if (ret < 0)
793                         goto err;
794         } while (ret);
795
796         op->flags |= BCH_WRITE_DONE;
797         continue_at(cl, bch2_write_index, index_update_wq(op));
798 err:
799         /*
800          * Right now we can only error here if we went RO - the
801          * allocation failed, but we already checked for -ENOSPC when we
802          * got our reservation.
803          *
804          * XXX capacity might have changed, but we don't check for that
805          * yet:
806          */
807         op->error = ret;
808         op->flags |= BCH_WRITE_DONE;
809
810         /*
811          * No reason not to insert keys for whatever data was successfully
812          * written (especially for a cmpxchg operation that's moving data
813          * around)
814          */
815         continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
816                     ? bch2_write_index
817                     : bch2_write_done, index_update_wq(op));
818 }
819
820 /**
821  * bch2_write - handle a write to a cache device or flash only volume
822  *
823  * This is the starting point for any data to end up in a cache device; it could
824  * be from a normal write, or a writeback write, or a write to a flash only
825  * volume - it's also used by the moving garbage collector to compact data in
826  * mostly empty buckets.
827  *
828  * It first writes the data to the cache, creating a list of keys to be inserted
829  * (if the data won't fit in a single open bucket, there will be multiple keys);
830  * after the data is written the keys are journalled, and once they have been
831  * added to the next journal write they're inserted into the btree.
832  *
833  * If op->discard is true, instead of inserting the data it invalidates the
834  * region of the cache represented by op->bio and op->inode.
835  */
836 void bch2_write(struct closure *cl)
837 {
838         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
839         struct bch_fs *c = op->c;
840
841         BUG_ON(!op->nr_replicas);
842         BUG_ON(!op->write_point.v);
843         BUG_ON(!bkey_cmp(op->pos, POS_MAX));
844         BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);
845
846         memset(&op->failed, 0, sizeof(op->failed));
847
848         bch2_keylist_init(&op->insert_keys, op->inline_keys);
849         wbio_init(&op->wbio.bio)->put_bio = false;
850
851         if (c->opts.nochanges ||
852             !percpu_ref_tryget(&c->writes)) {
853                 __bcache_io_error(c, "read only");
854                 op->error = -EROFS;
855                 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
856                         bch2_disk_reservation_put(c, &op->res);
857                 closure_return(cl);
858         }
859
860         bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);
861
862         continue_at_nobarrier(cl, __bch2_write, NULL);
863 }
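
/*
 * Rough usage sketch (see promote_start() below for an in-tree caller; the
 * init helper named here is an assumption - exact setup depends on the
 * caller):
 *
 *	struct bch_write_op op;
 *
 *	bch2_write_op_init(&op, c);		// assumed helper
 *	op.pos		= POS(inum, sector);	// where the data goes
 *	op.wbio.bio	= ...;			// bio describing the payload
 *
 *	closure_call(&op.cl, bch2_write, c->wq, &parent_cl);
 *
 * The parent closure is signalled once the keys have been inserted (or on
 * error, with op.error set).
 */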
864
865 /* Cache promotion on read */
866
867 struct promote_op {
868         struct closure          cl;
869         struct migrate_write    write;
870         struct bio_vec          bi_inline_vecs[0]; /* must be last */
871 };
872
873 static void promote_done(struct closure *cl)
874 {
875         struct promote_op *op =
876                 container_of(cl, struct promote_op, cl);
877         struct bch_fs *c = op->write.op.c;
878
879         percpu_ref_put(&c->writes);
880         bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
881         kfree(op);
882 }
883
884 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
885 {
886         struct bch_fs *c = rbio->c;
887         struct closure *cl = &op->cl;
888         struct bio *bio = &op->write.op.wbio.bio;
889
890         BUG_ON(!rbio->split || !rbio->bounce);
891
892         if (!percpu_ref_tryget(&c->writes))
893                 return;
894
895         trace_promote(&rbio->bio);
896
897         /* we now own pages: */
898         BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
899         swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
900         rbio->promote = NULL;
901
902         bch2_migrate_read_done(&op->write, rbio);
903
904         closure_init(cl, NULL);
905         closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
906         closure_return_with_destructor(cl, promote_done);
907 }
908
909 /*
910  * XXX: multiple promotes can race with each other, wastefully. Keep a list of
911  * outstanding promotes?
912  */
913 static struct promote_op *promote_alloc(struct bch_read_bio *rbio,
914                                         struct bkey_s_c k)
915 {
916         struct bch_fs *c = rbio->c;
917         struct promote_op *op;
918         struct bio *bio;
919         /* data might have to be decompressed in the write path: */
920         unsigned pages = DIV_ROUND_UP(rbio->pick.crc.uncompressed_size,
921                                       PAGE_SECTORS);
922         int ret;
923
924         BUG_ON(!rbio->bounce);
925         BUG_ON(pages < rbio->bio.bi_vcnt);
926
927         op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages,
928                      GFP_NOIO);
929         if (!op)
930                 return NULL;
931
932         bio = &op->write.op.wbio.bio;
933         bio_init(bio, bio->bi_inline_vecs, pages);
934
935         memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
936                sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
937
938         ret = bch2_migrate_write_init(c, &op->write, c->fastest_devs,
939                                       writepoint_hashed((unsigned long) current),
940                                       rbio->opts,
941                                       DATA_PROMOTE,
942                                       (struct data_opts) { 0 },
943                                       k);
944         BUG_ON(ret);
945
946         return op;
947 }
948
949 /* only promote if we're not reading from the fastest tier: */
950 static bool should_promote(struct bch_fs *c,
951                            struct extent_pick_ptr *pick, unsigned flags)
952 {
953         if (!(flags & BCH_READ_MAY_PROMOTE))
954                 return false;
955
956         if (percpu_ref_is_dying(&c->writes))
957                 return false;
958
959         return c->fastest_tier &&
960                 c->fastest_tier < c->tiers + pick->ca->mi.tier;
961 }
962
963 /* Read */
964
965 static void bch2_read_nodecode_retry(struct bch_fs *, struct bch_read_bio *,
966                                      struct bvec_iter, u64,
967                                      struct bch_devs_mask *, unsigned);
968
969 #define READ_RETRY_AVOID        1
970 #define READ_RETRY              2
971 #define READ_ERR                3
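
/*
 * Retry codes, returned by __bch2_read_extent() when BCH_READ_IN_RETRY is
 * set (0 means success):
 *
 *   READ_RETRY_AVOID: retry, but add the device that failed to the avoid mask
 *   READ_RETRY:       retry the read as-is
 *   READ_ERR:         give up and complete the bio with an error
 */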
972
973 enum rbio_context {
974         RBIO_CONTEXT_NULL,
975         RBIO_CONTEXT_HIGHPRI,
976         RBIO_CONTEXT_UNBOUND,
977 };
978
979 static inline struct bch_read_bio *
980 bch2_rbio_parent(struct bch_read_bio *rbio)
981 {
982         return rbio->split ? rbio->parent : rbio;
983 }
984
985 __always_inline
986 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
987                            enum rbio_context context,
988                            struct workqueue_struct *wq)
989 {
990         if (context <= rbio->context) {
991                 fn(&rbio->work);
992         } else {
993                 rbio->work.func         = fn;
994                 rbio->context           = context;
995                 queue_work(wq, &rbio->work);
996         }
997 }
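
/*
 * Note: rbio contexts are ordered - NULL < HIGHPRI < UNBOUND.  If the work
 * doesn't need a "deeper" context than the one we're already running in,
 * it's called synchronously; otherwise it's punted to @wq and the rbio
 * remembers the new context so it isn't bounced again.
 */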
998
999 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1000 {
1001         struct bch_read_bio *parent = rbio->parent;
1002
1003         BUG_ON(!rbio->split);
1004
1005         if (rbio->promote)
1006                 kfree(rbio->promote);
1007         if (rbio->bounce)
1008                 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1009         bio_put(&rbio->bio);
1010
1011         return parent;
1012 }
1013
1014 static void bch2_rbio_done(struct bch_read_bio *rbio)
1015 {
1016         if (rbio->promote)
1017                 kfree(rbio->promote);
1018         rbio->promote = NULL;
1019
1020         if (rbio->split)
1021                 rbio = bch2_rbio_free(rbio);
1022         bio_endio(&rbio->bio);
1023 }
1024
1025 static void bch2_rbio_retry(struct work_struct *work)
1026 {
1027         struct bch_read_bio *rbio =
1028                 container_of(work, struct bch_read_bio, work);
1029         struct bch_fs *c                = rbio->c;
1030         struct bvec_iter iter           = rbio->bvec_iter;
1031         unsigned flags                  = rbio->flags;
1032         u64 inode                       = rbio->pos.inode;
1033         struct bch_devs_mask avoid;
1034
1035         trace_read_retry(&rbio->bio);
1036
1037         memset(&avoid, 0, sizeof(avoid));
1038
1039         if (rbio->retry == READ_RETRY_AVOID)
1040                 __set_bit(rbio->pick.ca->dev_idx, avoid.d);
1041
1042         if (rbio->promote)
1043                 kfree(rbio->promote);
1044         rbio->promote = NULL;
1045
1046         if (rbio->split)
1047                 rbio = bch2_rbio_free(rbio);
1048         else
1049                 rbio->bio.bi_status = 0;
1050
1051         if (!(flags & BCH_READ_NODECODE))
1052                 flags |= BCH_READ_MUST_CLONE;
1053         flags |= BCH_READ_IN_RETRY;
1054         flags &= ~BCH_READ_MAY_PROMOTE;
1055
1056         if (flags & BCH_READ_NODECODE)
1057                 bch2_read_nodecode_retry(c, rbio, iter, inode, &avoid, flags);
1058         else
1059                 __bch2_read(c, rbio, iter, inode, &avoid, flags);
1060 }
1061
1062 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1063                             blk_status_t error)
1064 {
1065         rbio->retry = retry;
1066
1067         if (rbio->flags & BCH_READ_IN_RETRY)
1068                 return;
1069
1070         if (retry == READ_ERR) {
1071                 bch2_rbio_parent(rbio)->bio.bi_status = error;
1072                 bch2_rbio_done(rbio);
1073         } else {
1074                 bch2_rbio_punt(rbio, bch2_rbio_retry,
1075                                RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1076         }
1077 }
1078
1079 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1080 {
1081         struct bch_fs *c = rbio->c;
1082         struct btree_iter iter;
1083         struct bkey_s_c k;
1084         struct bkey_i_extent *e;
1085         BKEY_PADDED(k) new;
1086         struct bch_extent_crc_unpacked new_crc;
1087         unsigned offset;
1088         int ret;
1089
1090         if (rbio->pick.crc.compression_type)
1091                 return;
1092
1093         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
1094                              BTREE_ITER_INTENT);
1095 retry:
1096         k = bch2_btree_iter_peek(&iter);
1097         if (IS_ERR_OR_NULL(k.k))
1098                 goto out;
1099
1100         if (!bkey_extent_is_data(k.k))
1101                 goto out;
1102
1103         bkey_reassemble(&new.k, k);
1104         e = bkey_i_to_extent(&new.k);
1105
1106         if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
1107                                      rbio->pick.ptr,
1108                                      rbio->pos.offset -
1109                                      rbio->pick.crc.offset) ||
1110             bversion_cmp(e->k.version, rbio->version))
1111                 goto out;
1112
1113         /* Extent was merged? */
1114         if (bkey_start_offset(&e->k) < rbio->pos.offset ||
1115             e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
1116                 goto out;
1117
1118         /* The extent might have been partially overwritten since we read it: */
1119         offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);
1120
1121         if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1122                                 rbio->pick.crc, NULL, &new_crc,
1123                                 offset, e->k.size,
1124                                 rbio->pick.crc.csum_type)) {
1125                 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1126                 goto out;
1127         }
1128
1129         if (!bch2_extent_narrow_crcs(e, new_crc))
1130                 goto out;
1131
1132         ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
1133                                    BTREE_INSERT_ATOMIC|
1134                                    BTREE_INSERT_NOFAIL|
1135                                    BTREE_INSERT_NOWAIT,
1136                                    BTREE_INSERT_ENTRY(&iter, &e->k_i));
1137         if (ret == -EINTR)
1138                 goto retry;
1139 out:
1140         bch2_btree_iter_unlock(&iter);
1141 }
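
/*
 * Checksum narrowing (roughly): if the extent's checksum covers more data
 * than is currently live - e.g. the extent was trimmed or partially
 * overwritten - and we just read and verified the whole checksummed
 * region, we can compute a new checksum over only the live portion and
 * update the key, so future reads don't have to read and checksum the
 * dead parts.  The update above is best effort: if the extent changed
 * under us or the btree update fails, we simply skip it.
 */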
1142
1143 static bool should_narrow_crcs(struct bkey_s_c_extent e,
1144                                struct extent_pick_ptr *pick,
1145                                unsigned flags)
1146 {
1147         return !(flags & BCH_READ_IN_RETRY) &&
1148                 bch2_can_narrow_extent_crcs(e, pick->crc);
1149 }
1150
1151 /* Inner part that may run in process context */
1152 static void __bch2_read_endio(struct work_struct *work)
1153 {
1154         struct bch_read_bio *rbio =
1155                 container_of(work, struct bch_read_bio, work);
1156         struct bch_fs *c = rbio->c;
1157         struct bio *src = &rbio->bio, *dst = &bch2_rbio_parent(rbio)->bio;
1158         struct bvec_iter dst_iter = rbio->bvec_iter;
1159         struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1160         struct nonce nonce = extent_nonce(rbio->version, crc);
1161         struct bch_csum csum;
1162
1163         /* Reset iterator for checksumming and copying bounced data: */
1164         if (rbio->bounce) {
1165                 src->bi_iter.bi_size            = crc.compressed_size << 9;
1166                 src->bi_iter.bi_idx             = 0;
1167                 src->bi_iter.bi_bvec_done       = 0;
1168         } else {
1169                 src->bi_iter                    = rbio->bvec_iter;
1170         }
1171
1172         csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1173         if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1174                 goto csum_err;
1175
1176         if (unlikely(rbio->narrow_crcs))
1177                 bch2_rbio_narrow_crcs(rbio);
1178
1179         if (rbio->flags & BCH_READ_NODECODE)
1180                 goto nodecode;
1181
1182         /* Adjust crc to point to subset of data we want: */
1183         crc.offset     += rbio->bvec_iter.bi_sector - rbio->pos.offset;
1184         crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);
1185
1186         if (crc.compression_type != BCH_COMPRESSION_NONE) {
1187                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1188                 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1189                         goto decompression_err;
1190         } else {
1191                 /* don't need to decrypt the entire bio: */
1192                 nonce = nonce_add(nonce, crc.offset << 9);
1193                 bio_advance(src, crc.offset << 9);
1194
1195                 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1196                 src->bi_iter.bi_size = dst_iter.bi_size;
1197
1198                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1199
1200                 if (rbio->bounce) {
1201                         struct bvec_iter src_iter = src->bi_iter;
1202                         bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1203                 }
1204         }
1205
1206         if (rbio->promote) {
1207                 /*
1208                  * Re encrypt data we decrypted, so it's consistent with
1209                  * rbio->crc:
1210                  */
1211                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1212                 promote_start(rbio->promote, rbio);
1213         }
1214 nodecode:
1215         if (likely(!(rbio->flags & BCH_READ_IN_RETRY)))
1216                 bch2_rbio_done(rbio);
1217         return;
1218 csum_err:
1219         /*
1220          * Checksum error: if the bio wasn't bounced, we may have been
1221          * reading into buffers owned by userspace (that userspace can
1222          * scribble over) - retry the read, bouncing it this time:
1223          */
1224         if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1225                 rbio->flags |= BCH_READ_MUST_BOUNCE;
1226                 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1227                 return;
1228         }
1229
1230         bch2_dev_io_error(rbio->pick.ca,
1231                 "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
1232                 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1233                 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1234                 csum.hi, csum.lo, crc.csum_type);
1235         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1236         return;
1237 decompression_err:
1238         __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1239                           rbio->pos.inode,
1240                           (u64) rbio->bvec_iter.bi_sector);
1241         bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1242         return;
1243 }
1244
1245 static void bch2_read_endio(struct bio *bio)
1246 {
1247         struct bch_read_bio *rbio =
1248                 container_of(bio, struct bch_read_bio, bio);
1249         struct bch_fs *c = rbio->c;
1250         struct workqueue_struct *wq = NULL;
1251         enum rbio_context context = RBIO_CONTEXT_NULL;
1252
1253         bch2_latency_acct(rbio->pick.ca, rbio->submit_time_us, READ);
1254
1255         percpu_ref_put(&rbio->pick.ca->io_ref);
1256
1257         if (!rbio->split)
1258                 rbio->bio.bi_end_io = rbio->end_io;
1259
1260         if (bch2_dev_io_err_on(bio->bi_status, rbio->pick.ca, "data read")) {
1261                 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1262                 return;
1263         }
1264
1265         if (rbio->pick.ptr.cached &&
1266             (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1267              ptr_stale(rbio->pick.ca, &rbio->pick.ptr))) {
1268                 atomic_long_inc(&c->read_realloc_races);
1269
1270                 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1271                         bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1272                 else
1273                         bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1274                 return;
1275         }
1276
1277         if (rbio->narrow_crcs ||
1278             rbio->pick.crc.compression_type ||
1279             bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1280                 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1281         else if (rbio->pick.crc.csum_type)
1282                 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1283
1284         bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1285 }
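
/*
 * Context selection above: reads that need crc narrowing, decompression or
 * decryption are punted to the unbound workqueue; reads that only need a
 * checksum verified go to the highpri workqueue; everything else completes
 * directly from the bio end_io path.
 */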
1286
1287 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1288                        struct bvec_iter iter, struct bkey_s_c_extent e,
1289                        struct extent_pick_ptr *pick, unsigned flags)
1290 {
1291         struct bch_read_bio *rbio;
1292         bool split = false, bounce = false, read_full = false;
1293         bool promote = false, narrow_crcs = false;
1294         struct bpos pos = bkey_start_pos(e.k);
1295         int ret = 0;
1296
1297         lg_local_lock(&c->usage_lock);
1298         bucket_io_clock_reset(c, pick->ca,
1299                         PTR_BUCKET_NR(pick->ca, &pick->ptr), READ);
1300         lg_local_unlock(&c->usage_lock);
1301
1302         narrow_crcs = should_narrow_crcs(e, pick, flags);
1303
1304         if (flags & BCH_READ_NODECODE) {
1305                 BUG_ON(iter.bi_size < pick->crc.compressed_size << 9);
1306                 iter.bi_size = pick->crc.compressed_size << 9;
1307                 goto noclone;
1308         }
1309
1310         if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1311                 flags |= BCH_READ_MUST_BOUNCE;
1312
1313         EBUG_ON(bkey_start_offset(e.k) > iter.bi_sector ||
1314                 e.k->p.offset < bvec_iter_end_sector(iter));
1315
1316         if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
1317             (pick->crc.csum_type != BCH_CSUM_NONE &&
1318              (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1319               (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
1320                (flags & BCH_READ_USER_MAPPED)) ||
1321               (flags & BCH_READ_MUST_BOUNCE)))) {
1322                 read_full = true;
1323                 bounce = true;
1324         }
1325
1326         promote = should_promote(c, pick, flags);
1327         /* could also set read_full */
1328         if (promote)
1329                 bounce = true;
1330
1331         if (!read_full) {
1332                 EBUG_ON(pick->crc.compression_type);
1333                 EBUG_ON(pick->crc.csum_type &&
1334                         (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1335                          bvec_iter_sectors(iter) != pick->crc.live_size ||
1336                          pick->crc.offset ||
1337                          iter.bi_sector != pos.offset));
1338
1339                 pick->ptr.offset += pick->crc.offset +
1340                         (iter.bi_sector - pos.offset);
1341                 pick->crc.compressed_size       = bvec_iter_sectors(iter);
1342                 pick->crc.uncompressed_size     = bvec_iter_sectors(iter);
1343                 pick->crc.offset                = 0;
1344                 pick->crc.live_size             = bvec_iter_sectors(iter);
1345                 pos.offset                      = iter.bi_sector;
1346         }
1347
1348         if (bounce) {
1349                 unsigned sectors = pick->crc.compressed_size;
1350
1351                 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
1352                                         DIV_ROUND_UP(sectors, PAGE_SECTORS),
1353                                         &c->bio_read_split),
1354                                  orig->opts);
1355
1356                 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1357                 split = true;
1358         } else if (flags & BCH_READ_MUST_CLONE) {
1359                 /*
1360                  * reporting issues: if a split errored and retrying didn't
1361                  * work, then when it reports the error to its parent (us) we
1362                  * don't know whether the error came from our bio - in which
1363                  * case we should retry - or from the whole bio, in which case
1364                  * we don't want to retry and lose the error.
1365                  * lose the error)
1366                  */
1367                 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
1368                                                 &c->bio_read_split),
1369                                  orig->opts);
1370                 rbio->bio.bi_iter = iter;
1371                 split = true;
1372         } else {
1373 noclone:
1374                 rbio = orig;
1375                 rbio->bio.bi_iter = iter;
1376                 split = false;
1377                 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1378         }
1379
1380         BUG_ON(bio_sectors(&rbio->bio) != pick->crc.compressed_size);
1381
1382         rbio->c                 = c;
1383         if (split)
1384                 rbio->parent    = orig;
1385         else
1386                 rbio->end_io    = orig->bio.bi_end_io;
1387         rbio->bvec_iter         = iter;
1388         rbio->submit_time_us    = local_clock_us();
1389         rbio->flags             = flags;
1390         rbio->bounce            = bounce;
1391         rbio->split             = split;
1392         rbio->narrow_crcs       = narrow_crcs;
1393         rbio->retry             = 0;
1394         rbio->context           = 0;
1395         rbio->devs_have         = bch2_extent_devs(e);
1396         rbio->pick              = *pick;
1397         rbio->pos               = pos;
1398         rbio->version           = e.k->version;
1399         rbio->promote           = promote ? promote_alloc(rbio, e.s_c) : NULL;
1400         INIT_WORK(&rbio->work, NULL);
1401
1402         bio_set_dev(&rbio->bio, pick->ca->disk_sb.bdev);
1403         rbio->bio.bi_opf        = orig->bio.bi_opf;
1404         rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
1405         rbio->bio.bi_end_io     = bch2_read_endio;
1406
1407         if (bounce)
1408                 trace_read_bounce(&rbio->bio);
1409
1410         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1411         this_cpu_add(pick->ca->io_done->sectors[READ][BCH_DATA_USER],
1412                      bio_sectors(&rbio->bio));
1413
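             /*
              * Normal reads are submitted asynchronously; in the retry path
              * the bio is submitted synchronously, the endio handler is
              * called directly, and the retry status is returned to the
              * caller.
              */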
1414         if (likely(!(flags & BCH_READ_IN_RETRY))) {
1415                 submit_bio(&rbio->bio);
1416         } else {
1417                 submit_bio_wait(&rbio->bio);
1418
1419                 rbio->context = RBIO_CONTEXT_UNBOUND;
1420                 bch2_read_endio(&rbio->bio);
1421
1422                 ret = rbio->retry;
1423                 if (rbio->split)
1424                         rbio = bch2_rbio_free(rbio);
1425                 if (!ret)
1426                         bch2_rbio_done(rbio);
1427         }
1428
1429         return ret;
1430 }
1431
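     /*
      * Retry a read that was issued with BCH_READ_NODECODE: re-look up the
      * extent and only reissue the read if it still matches the pointer we
      * originally read from and still fits in the buffer; otherwise fail
      * the bio with BLK_STS_AGAIN.
      */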
1432 static void bch2_read_nodecode_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1433                                      struct bvec_iter bvec_iter, u64 inode,
1434                                      struct bch_devs_mask *avoid, unsigned flags)
1435 {
1436         struct extent_pick_ptr pick;
1437         struct btree_iter iter;
1438         BKEY_PADDED(k) tmp;
1439         struct bkey_s_c k;
1440         int ret;
1441
1442         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
1443                              POS(inode, bvec_iter.bi_sector),
1444                              BTREE_ITER_SLOTS);
1445 retry:
1446         k = bch2_btree_iter_peek_slot(&iter);
1447         if (btree_iter_err(k)) {
1448                 bch2_btree_iter_unlock(&iter);
1449                 goto err;
1450         }
1451
1452         bkey_reassemble(&tmp.k, k);
1453         k = bkey_i_to_s_c(&tmp.k);
1454         bch2_btree_iter_unlock(&iter);
1455
1456         if (!bkey_extent_is_data(k.k) ||
1457             !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
1458                                      rbio->pick.ptr,
1459                                      rbio->pos.offset -
1460                                      rbio->pick.crc.offset) ||
1461             bkey_start_offset(k.k) != bvec_iter.bi_sector)
1462                 goto err;
1463
1464         bch2_extent_pick_ptr(c, k, avoid, &pick);
1465         if (IS_ERR(pick.ca)) {
1466                 bcache_io_error(c, &rbio->bio, "no device to read from");
1467                 bio_endio(&rbio->bio);
1468                 return;
1469         }
1470
1471         if (!pick.ca)
1472                 goto err;
1473
1474         if (pick.crc.compressed_size > bvec_iter_sectors(bvec_iter)) {
1475                 percpu_ref_put(&pick.ca->io_ref);
1476                 goto err;
1478         }
1479
1480         ret = __bch2_read_extent(c, rbio, bvec_iter, bkey_s_c_to_extent(k),
1481                                  &pick, flags);
1482         switch (ret) {
1483         case READ_RETRY_AVOID:
1484                 __set_bit(pick.ca->dev_idx, avoid->d); /* fall through */
1485         case READ_RETRY:
1486                 goto retry;
1487         case READ_ERR:
1488                 bio_endio(&rbio->bio);
1489                 return;
1490         }
1491
1492         return;
1493 err:
1494         /*
1495          * The extent we wanted to read no longer exists, or it was
1496          * merged or partially overwritten (and is thus possibly bigger
1497          * than the memory that was originally allocated); fail the bio
1498          * with BLK_STS_AGAIN.
1499          */
1500         rbio->bio.bi_status = BLK_STS_AGAIN;
1501         bio_endio(&rbio->bio);
1502         return;
1503 }
1504
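     /*
      * Walk the extents btree over the range covered by the bio, issuing a
      * read for each extent fragment and zero-filling holes; READ_RETRY and
      * READ_RETRY_AVOID restart from the current position in bvec_iter,
      * with READ_RETRY_AVOID also marking the failed device in the avoid
      * mask.
      */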
1505 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
1506                  struct bvec_iter bvec_iter, u64 inode,
1507                  struct bch_devs_mask *avoid, unsigned flags)
1508 {
1509         struct btree_iter iter;
1510         struct bkey_s_c k;
1511         int ret;
1512
1513         EBUG_ON(flags & BCH_READ_NODECODE);
1514 retry:
1515         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1516                            POS(inode, bvec_iter.bi_sector),
1517                            BTREE_ITER_SLOTS, k) {
1518                 BKEY_PADDED(k) tmp;
1519                 struct extent_pick_ptr pick;
1520                 struct bvec_iter fragment;
1521
1522                 /*
1523                  * Unlock the iterator while the btree node's lock is still in
1524                  * cache, before doing the IO:
1525                  */
1526                 bkey_reassemble(&tmp.k, k);
1527                 k = bkey_i_to_s_c(&tmp.k);
1528                 bch2_btree_iter_unlock(&iter);
1529
1530                 bch2_extent_pick_ptr(c, k, avoid, &pick);
1531                 if (IS_ERR(pick.ca)) {
1532                         bcache_io_error(c, &rbio->bio, "no device to read from");
1533                         bio_endio(&rbio->bio);
1534                         return;
1535                 }
1536
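                     /*
                      * fragment covers the part of the bio mapped by this
                      * extent: from the current sector up to min(end of
                      * extent, end of the requested range), in bytes.
                      */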
1537                 fragment = bvec_iter;
1538                 fragment.bi_size = (min_t(u64, k.k->p.offset,
1539                                           bvec_iter_end_sector(bvec_iter)) -
1540                                     bvec_iter.bi_sector) << 9;
1541
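                     /*
                      * If there's nothing to read from (a hole), zero-fill;
                      * otherwise issue the read, bumping the remaining count
                      * and forcing a clone when this extent doesn't cover
                      * the whole remaining range.
                      */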
1542                 if (pick.ca) {
1543                         if (fragment.bi_size != bvec_iter.bi_size) {
1544                                 bio_inc_remaining(&rbio->bio);
1545                                 flags |= BCH_READ_MUST_CLONE;
1546                                 trace_read_split(&rbio->bio);
1547                         }
1548
1549                         ret = __bch2_read_extent(c, rbio, fragment,
1550                                                  bkey_s_c_to_extent(k),
1551                                                  &pick, flags);
1552                         switch (ret) {
1553                         case READ_RETRY_AVOID:
1554                                 __set_bit(pick.ca->dev_idx, avoid->d); /* fall through */
1555                         case READ_RETRY:
1556                                 goto retry;
1557                         case READ_ERR:
1558                                 rbio->bio.bi_status = BLK_STS_IOERR;
1559                                 bio_endio(&rbio->bio);
1560                                 return;
1561                         }
1562                 } else {
1563                         zero_fill_bio_iter(&rbio->bio, fragment);
1564
1565                         if (fragment.bi_size == bvec_iter.bi_size)
1566                                 bio_endio(&rbio->bio);
1567                 }
1568
1569                 if (fragment.bi_size == bvec_iter.bi_size)
1570                         return;
1571
1572                 bio_advance_iter(&rbio->bio, &bvec_iter, fragment.bi_size);
1573         }
1574
1575         /*
1576          * If we get here, it must have been because there was an error
1577          * reading a btree node
1578          */
1579         ret = bch2_btree_iter_unlock(&iter);
1580         BUG_ON(!ret);
1581         bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
1582         bio_endio(&rbio->bio);
1583 }