1 /*
2  * Some low level IO code, and hacks for various block layer limitations
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "compress.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "disk_groups.h"
18 #include "error.h"
19 #include "extents.h"
20 #include "io.h"
21 #include "journal.h"
22 #include "keylist.h"
23 #include "move.h"
24 #include "rebalance.h"
25 #include "replicas.h"
26 #include "super.h"
27 #include "super-io.h"
28
29 #include <linux/blkdev.h>
30 #include <linux/random.h>
31
32 #include <trace/events/bcachefs.h>
33
34 static bool bch2_target_congested(struct bch_fs *c, u16 target)
35 {
36         const struct bch_devs_mask *devs;
37         unsigned d, nr = 0, total = 0;
38         u64 now = local_clock(), last;
39         s64 congested;
40         struct bch_dev *ca;
41
42         if (!target)
43                 return false;
44
45         rcu_read_lock();
46         devs = bch2_target_to_mask(c, target);
47         for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
48                 ca = rcu_dereference(c->devs[d]);
49                 if (!ca)
50                         continue;
51
52                 congested = atomic_read(&ca->congested);
53                 last = READ_ONCE(ca->congested_last);
54                 if (time_after64(now, last))
55                         congested -= (now - last) >> 12;
56
57                 total += max(congested, 0LL);
58                 nr++;
59         }
60         rcu_read_unlock();
61
62         return bch2_rand_range(nr * CONGESTED_MAX) < total;
63 }
64
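/*
 * Worked example (editorial, assuming bch2_rand_range() returns a uniform
 * value in [0, n)): each device's congested count decays by one per 2^12 ns
 * (~4us) since congested_last, and the read is treated as congested with
 * probability total / (nr * CONGESTED_MAX) - e.g. two devices in the target
 * with a summed count of CONGESTED_MAX / 2 make roughly one read in four
 * return true here.
 */
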
65 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
66                                        u64 now, int rw)
67 {
68         u64 latency_capable =
69                 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
70         /* ideally we'd be taking into account the device's variance here: */
71         u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
72         s64 latency_over = io_latency - latency_threshold;
73
74         if (latency_threshold && latency_over > 0) {
75                 /*
76                  * bump up congested by approximately latency_over * 4 /
77                  * latency_threshold - we don't need much accuracy here so don't
78                  * bother with the divide:
79                  */
80                 if (atomic_read(&ca->congested) < CONGESTED_MAX)
81                         atomic_add(latency_over >>
82                                    max_t(int, ilog2(latency_threshold) - 2, 0),
83                                    &ca->congested);
84
85                 ca->congested_last = now;
86         } else if (atomic_read(&ca->congested) > 0) {
87                 atomic_dec(&ca->congested);
88         }
89 }
90
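/*
 * Worked example of the shift approximation above (editorial): with
 * latency_threshold = 2^20 ns (~1ms), ilog2(latency_threshold) - 2 = 18, so a
 * write that finishes 4 * 2^20 ns (~4.2ms) over threshold adds
 * (4 << 20) >> 18 = 16 to ca->congested - the same as the exact
 * latency_over * 4 / latency_threshold = 16, without the divide.
 */
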
91 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
92 {
93         atomic64_t *latency = &ca->cur_latency[rw];
94         u64 now = local_clock();
95         u64 io_latency = time_after64(now, submit_time)
96                 ? now - submit_time
97                 : 0;
98         u64 old, new, v = atomic64_read(latency);
99
100         do {
101                 old = v;
102
103                 /*
104                  * If the io latency was reasonably close to the current
105                  * latency, skip doing the update and atomic operation - most of
106                  * the time:
107                  */
108                 if (abs((s64) (old - io_latency)) < (old >> 1) &&
109                     now & ~(~0U << 5))
110                         break;
111
112                 new = ewma_add(old, io_latency, 5);
113         } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
114
115         bch2_congested_acct(ca, io_latency, now, rw);
116
117         __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
118 }
119
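/*
 * Editorial note on the update policy above: the cmpxchg loop is skipped
 * entirely when the sample is within 50% of the current average and the low
 * five bits of the clock are nonzero, so in steady state only roughly one IO
 * in 32 pays for the atomic update; samples that are off by 50% or more are
 * always folded in. ewma_add(old, io_latency, 5) (defined elsewhere in this
 * tree) maintains an exponentially weighted moving average, with each new
 * sample contributing on the order of 1/32 of the result.
 */
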
120 /* Allocate, free from mempool: */
121
122 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
123 {
124         struct bio_vec *bv;
125         unsigned i;
126
127         bio_for_each_segment_all(bv, bio, i)
128                 if (bv->bv_page != ZERO_PAGE(0))
129                         mempool_free(bv->bv_page, &c->bio_bounce_pages);
130         bio->bi_vcnt = 0;
131 }
132
133 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
134                                     bool *using_mempool)
135 {
136         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
137
138         if (likely(!*using_mempool)) {
139                 bv->bv_page = alloc_page(GFP_NOIO);
140                 if (unlikely(!bv->bv_page)) {
141                         mutex_lock(&c->bio_bounce_pages_lock);
142                         *using_mempool = true;
143                         goto pool_alloc;
144
145                 }
146         } else {
147 pool_alloc:
148                 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
149         }
150
151         bv->bv_len = PAGE_SIZE;
152         bv->bv_offset = 0;
153 }
154
155 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
156                                size_t bytes)
157 {
158         bool using_mempool = false;
159
160         BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
161
162         bio->bi_iter.bi_size = bytes;
163
164         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
165                 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
166
167         if (using_mempool)
168                 mutex_unlock(&c->bio_bounce_pages_lock);
169 }
170
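/*
 * Usage sketch (editorial): callers size mempool allocations so they never
 * exceed what the bounce pool is guaranteed to cover - see
 * bch2_write_bio_alloc() below, which falls back to
 *
 *        bch2_bio_alloc_pages_pool(c, bio,
 *                        min_t(unsigned, output_available,
 *                              c->sb.encoded_extent_max << 9));
 *
 * when alloc_page() fails, since the pool only guarantees
 * encoded_extent_max worth of pages.
 */
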
171 void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
172                                     size_t bytes)
173 {
174         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
175                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
176
177                 BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
178
179                 bv->bv_page = alloc_page(GFP_NOIO);
180                 if (!bv->bv_page) {
181                         /*
182                          * We already allocated from mempool, we can't allocate from it again
183                          * without freeing the pages we already allocated or else we could
184                          * deadlock:
185                          */
186                         bch2_bio_free_pages_pool(c, bio);
187                         bch2_bio_alloc_pages_pool(c, bio, bytes);
188                         return;
189                 }
190
191                 bv->bv_len = PAGE_SIZE;
192                 bv->bv_offset = 0;
193                 bio->bi_vcnt++;
194         }
195
196         bio->bi_iter.bi_size = bytes;
197 }
198
199 /* Writes */
200
201 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
202                                enum bch_data_type type,
203                                const struct bkey_i *k)
204 {
205         struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
206         const struct bch_extent_ptr *ptr;
207         struct bch_write_bio *n;
208         struct bch_dev *ca;
209
210         BUG_ON(c->opts.nochanges);
211
212         extent_for_each_ptr(e, ptr) {
213                 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
214                        !c->devs[ptr->dev]);
215
216                 ca = bch_dev_bkey_exists(c, ptr->dev);
217
218                 if (ptr + 1 < &extent_entry_last(e)->ptr) {
219                         n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
220                                                    &ca->replica_set));
221
222                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
223                         n->bio.bi_private       = wbio->bio.bi_private;
224                         n->parent               = wbio;
225                         n->split                = true;
226                         n->bounce               = false;
227                         n->put_bio              = true;
228                         n->bio.bi_opf           = wbio->bio.bi_opf;
229                         bio_inc_remaining(&wbio->bio);
230                 } else {
231                         n = wbio;
232                         n->split                = false;
233                 }
234
235                 n->c                    = c;
236                 n->dev                  = ptr->dev;
237                 n->have_ioref           = bch2_dev_get_ioref(ca, WRITE);
238                 n->submit_time          = local_clock();
239                 n->bio.bi_iter.bi_sector = ptr->offset;
240
241                 if (!journal_flushes_device(ca))
242                         n->bio.bi_opf |= REQ_FUA;
243
244                 if (likely(n->have_ioref)) {
245                         this_cpu_add(ca->io_done->sectors[WRITE][type],
246                                      bio_sectors(&n->bio));
247
248                         bio_set_dev(&n->bio, ca->disk_sb.bdev);
249                         submit_bio(&n->bio);
250                 } else {
251                         n->bio.bi_status        = BLK_STS_REMOVED;
252                         bio_endio(&n->bio);
253                 }
254         }
255 }
256
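/*
 * Editorial note: bch2_submit_wbio_replicas() fans one write out to every
 * pointer in the extent - each pointer but the last gets a clone of the
 * original bio (with bio_inc_remaining() on the parent), and the last reuses
 * the original, so the caller's endio runs only once all replicas have
 * completed. The data write path invokes it as
 *
 *        bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER, key_to_write);
 *
 * (see bch2_write_extent() below).
 */
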
257 static void __bch2_write(struct closure *);
258
259 static void bch2_write_done(struct closure *cl)
260 {
261         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
262         struct bch_fs *c = op->c;
263
264         if (!op->error && (op->flags & BCH_WRITE_FLUSH))
265                 op->error = bch2_journal_error(&c->journal);
266
267         if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
268                 bch2_disk_reservation_put(c, &op->res);
269         percpu_ref_put(&c->writes);
270         bch2_keylist_free(&op->insert_keys, op->inline_keys);
271
272         bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
273
274         closure_return(cl);
275 }
276
277 int bch2_write_index_default(struct bch_write_op *op)
278 {
279         struct keylist *keys = &op->insert_keys;
280         struct btree_iter iter;
281         int ret;
282
283         bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
284                              bkey_start_pos(&bch2_keylist_front(keys)->k),
285                              BTREE_ITER_INTENT);
286
287         ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
288                                         NULL, op_journal_seq(op),
289                                         BTREE_INSERT_NOFAIL|
290                                         BTREE_INSERT_USE_RESERVE);
291         bch2_btree_iter_unlock(&iter);
292
293         return ret;
294 }
295
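/*
 * Editorial note (assumption - the op init code isn't in this file):
 * bch2_write_index_default() is presumably the default op->index_update_fn
 * for plain data writes; callers such as the migrate path can substitute
 * their own index update hook while reusing the rest of the write path.
 */
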
296 /**
297  * bch2_write_index - after a write, update index to point to new data
298  */
299 static void __bch2_write_index(struct bch_write_op *op)
300 {
301         struct bch_fs *c = op->c;
302         struct keylist *keys = &op->insert_keys;
303         struct bkey_s_extent e;
304         struct bch_extent_ptr *ptr;
305         struct bkey_i *src, *dst = keys->keys, *n, *k;
306         int ret;
307
308         for (src = keys->keys; src != keys->top; src = n) {
309                 n = bkey_next(src);
310                 bkey_copy(dst, src);
311
312                 e = bkey_i_to_s_extent(dst);
313                 extent_for_each_ptr_backwards(e, ptr)
314                         if (test_bit(ptr->dev, op->failed.d))
315                                 bch2_extent_drop_ptr(e, ptr);
316
317                 if (!bch2_extent_nr_ptrs(e.c)) {
318                         ret = -EIO;
319                         goto err;
320                 }
321
322                 if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
323                         ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, e.s_c);
324                         if (ret)
325                                 goto err;
326                 }
327
328                 dst = bkey_next(dst);
329         }
330
331         keys->top = dst;
332
333         /*
334          * probably not the ideal place to hook this in, but I don't
335          * particularly want to plumb io_opts all the way through the btree
336          * update stack right now
337          */
338         for_each_keylist_key(keys, k)
339                 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
340
341         if (!bch2_keylist_empty(keys)) {
342                 u64 sectors_start = keylist_sectors(keys);
343                 int ret = op->index_update_fn(op);
344
345                 BUG_ON(keylist_sectors(keys) && !ret);
346
347                 op->written += sectors_start - keylist_sectors(keys);
348
349                 if (ret) {
350                         __bcache_io_error(c, "btree IO error %i", ret);
351                         op->error = ret;
352                 }
353         }
354 out:
355         bch2_open_bucket_put_refs(c, &op->open_buckets_nr, op->open_buckets);
356         return;
357 err:
358         keys->top = keys->keys;
359         op->error = ret;
360         goto out;
361 }
362
363 static void bch2_write_index(struct closure *cl)
364 {
365         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
366         struct bch_fs *c = op->c;
367
368         __bch2_write_index(op);
369
370         if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
371                 bch2_journal_flush_seq_async(&c->journal,
372                                              *op_journal_seq(op),
373                                              cl);
374                 continue_at(cl, bch2_write_done, index_update_wq(op));
375         } else {
376                 continue_at_nobarrier(cl, bch2_write_done, NULL);
377         }
378 }
379
380 static void bch2_write_endio(struct bio *bio)
381 {
382         struct closure *cl              = bio->bi_private;
383         struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
384         struct bch_write_bio *wbio      = to_wbio(bio);
385         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
386         struct bch_fs *c                = wbio->c;
387         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
388
389         if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
390                 set_bit(wbio->dev, op->failed.d);
391
392         if (wbio->have_ioref) {
393                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
394                 percpu_ref_put(&ca->io_ref);
395         }
396
397         if (wbio->bounce)
398                 bch2_bio_free_pages_pool(c, bio);
399
400         if (wbio->put_bio)
401                 bio_put(bio);
402
403         if (parent)
404                 bio_endio(&parent->bio);
405         else
406                 closure_put(cl);
407 }
408
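/*
 * Editorial note: bch2_write_endio() runs once per replica bio. Clones
 * complete into their parent via bio_endio(&parent->bio), pairing with the
 * bio_inc_remaining() in bch2_submit_wbio_replicas(); only the original,
 * non-split wbio drops the closure ref taken in bch2_write_extent(), so
 * bch2_write_index() doesn't run until every replica has finished.
 */
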
409 static void init_append_extent(struct bch_write_op *op,
410                                struct write_point *wp,
411                                struct bversion version,
412                                struct bch_extent_crc_unpacked crc)
413 {
414         struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
415
416         op->pos.offset += crc.uncompressed_size;
417         e->k.p = op->pos;
418         e->k.size = crc.uncompressed_size;
419         e->k.version = version;
420         bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
421
422         bch2_extent_crc_append(e, crc);
423         bch2_alloc_sectors_append_ptrs(op->c, wp, e, crc.compressed_size);
424
425         bch2_keylist_push(&op->insert_keys);
426 }
427
428 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
429                                         struct write_point *wp,
430                                         struct bio *src,
431                                         bool *page_alloc_failed)
432 {
433         struct bch_write_bio *wbio;
434         struct bio *bio;
435         unsigned output_available =
436                 min(wp->sectors_free << 9, src->bi_iter.bi_size);
437         unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);
438
439         bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
440         wbio                    = wbio_init(bio);
441         wbio->bounce            = true;
442         wbio->put_bio           = true;
443         /* copy WRITE_SYNC flag */
444         wbio->bio.bi_opf        = src->bi_opf;
445
446         /*
447          * We can't use mempool for more than c->sb.encoded_extent_max
448          * worth of pages, but we'd like to allocate more if we can:
449          */
450         while (bio->bi_iter.bi_size < output_available) {
451                 unsigned len = min_t(unsigned, PAGE_SIZE,
452                                      output_available - bio->bi_iter.bi_size);
453                 struct page *p;
454
455                 p = alloc_page(GFP_NOIO);
456                 if (!p) {
457                         unsigned pool_max =
458                                 min_t(unsigned, output_available,
459                                       c->sb.encoded_extent_max << 9);
460
461                         if (bio_sectors(bio) < pool_max)
462                                 bch2_bio_alloc_pages_pool(c, bio, pool_max);
463                         break;
464                 }
465
466                 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
467                         .bv_page        = p,
468                         .bv_len         = len,
469                         .bv_offset      = 0,
470                 };
471                 bio->bi_iter.bi_size += len;
472         }
473
474         *page_alloc_failed = bio->bi_vcnt < pages;
475         return bio;
476 }
477
478 static int bch2_write_rechecksum(struct bch_fs *c,
479                                  struct bch_write_op *op,
480                                  unsigned new_csum_type)
481 {
482         struct bio *bio = &op->wbio.bio;
483         struct bch_extent_crc_unpacked new_crc;
484         int ret;
485
486         /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
487
488         if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
489             bch2_csum_type_is_encryption(new_csum_type))
490                 new_csum_type = op->crc.csum_type;
491
492         ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
493                                   NULL, &new_crc,
494                                   op->crc.offset, op->crc.live_size,
495                                   new_csum_type);
496         if (ret)
497                 return ret;
498
499         bio_advance(bio, op->crc.offset << 9);
500         bio->bi_iter.bi_size = op->crc.live_size << 9;
501         op->crc = new_crc;
502         return 0;
503 }
504
505 static int bch2_write_decrypt(struct bch_write_op *op)
506 {
507         struct bch_fs *c = op->c;
508         struct nonce nonce = extent_nonce(op->version, op->crc);
509         struct bch_csum csum;
510
511         if (!bch2_csum_type_is_encryption(op->crc.csum_type))
512                 return 0;
513
514         /*
515          * If we need to decrypt data in the write path, we'll no longer be able
516          * to verify the existing checksum (poly1305 mac, in this case) after
517          * it's decrypted - this is the last point we'll be able to reverify the
518          * checksum:
519          */
520         csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
521         if (bch2_crc_cmp(op->crc.csum, csum))
522                 return -EIO;
523
524         bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
525         op->crc.csum_type = 0;
526         op->crc.csum = (struct bch_csum) { 0, 0 };
527         return 0;
528 }
529
530 static enum prep_encoded_ret {
531         PREP_ENCODED_OK,
532         PREP_ENCODED_ERR,
533         PREP_ENCODED_CHECKSUM_ERR,
534         PREP_ENCODED_DO_WRITE,
535 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
536 {
537         struct bch_fs *c = op->c;
538         struct bio *bio = &op->wbio.bio;
539
540         if (!(op->flags & BCH_WRITE_DATA_ENCODED))
541                 return PREP_ENCODED_OK;
542
543         BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
544
545         /* Can we just write the entire extent as is? */
546         if (op->crc.uncompressed_size == op->crc.live_size &&
547             op->crc.compressed_size <= wp->sectors_free &&
548             op->crc.compression_type == op->compression_type) {
549                 if (!op->crc.compression_type &&
550                     op->csum_type != op->crc.csum_type &&
551                     bch2_write_rechecksum(c, op, op->csum_type))
552                         return PREP_ENCODED_CHECKSUM_ERR;
553
554                 return PREP_ENCODED_DO_WRITE;
555         }
556
557         /*
558          * If the data is compressed and we couldn't write the entire extent as
559          * is, we have to decompress it:
560          */
561         if (op->crc.compression_type) {
562                 struct bch_csum csum;
563
564                 if (bch2_write_decrypt(op))
565                         return PREP_ENCODED_CHECKSUM_ERR;
566
567                 /* Last point we can still verify checksum: */
568                 csum = bch2_checksum_bio(c, op->crc.csum_type,
569                                          extent_nonce(op->version, op->crc),
570                                          bio);
571                 if (bch2_crc_cmp(op->crc.csum, csum))
572                         return PREP_ENCODED_CHECKSUM_ERR;
573
574                 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
575                         return PREP_ENCODED_ERR;
576         }
577
578         /*
579          * No longer have compressed data after this point - data might be
580          * encrypted:
581          */
582
583         /*
584          * If the data is checksummed and we're only writing a subset,
585          * rechecksum and adjust bio to point to currently live data:
586          */
587         if ((op->crc.live_size != op->crc.uncompressed_size ||
588              op->crc.csum_type != op->csum_type) &&
589             bch2_write_rechecksum(c, op, op->csum_type))
590                 return PREP_ENCODED_CHECKSUM_ERR;
591
592         /*
593          * If we want to compress the data, it has to be decrypted:
594          */
595         if ((op->compression_type ||
596              bch2_csum_type_is_encryption(op->crc.csum_type) !=
597              bch2_csum_type_is_encryption(op->csum_type)) &&
598             bch2_write_decrypt(op))
599                 return PREP_ENCODED_CHECKSUM_ERR;
600
601         return PREP_ENCODED_OK;
602 }
603
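/*
 * Editorial summary of the outcomes of bch2_write_prep_encoded_data() above:
 *
 *   PREP_ENCODED_DO_WRITE     - the extent can be written out as is
 *                               (possibly after rechecksumming)
 *   PREP_ENCODED_OK           - data was decompressed/decrypted as needed;
 *                               continue through the normal encode path
 *   PREP_ENCODED_CHECKSUM_ERR - the existing checksum failed to verify
 *   PREP_ENCODED_ERR          - decompression failed
 */
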
604 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
605 {
606         struct bch_fs *c = op->c;
607         struct bio *src = &op->wbio.bio, *dst = src;
608         struct bvec_iter saved_iter;
609         struct bkey_i *key_to_write;
610         unsigned key_to_write_offset = op->insert_keys.top_p -
611                 op->insert_keys.keys_p;
612         unsigned total_output = 0;
613         bool bounce = false, page_alloc_failed = false;
614         int ret, more = 0;
615
616         BUG_ON(!bio_sectors(src));
617
618         switch (bch2_write_prep_encoded_data(op, wp)) {
619         case PREP_ENCODED_OK:
620                 break;
621         case PREP_ENCODED_ERR:
622                 ret = -EIO;
623                 goto err;
624         case PREP_ENCODED_CHECKSUM_ERR:
625                 goto csum_err;
626         case PREP_ENCODED_DO_WRITE:
627                 init_append_extent(op, wp, op->version, op->crc);
628                 goto do_write;
629         }
630
631         if (op->compression_type ||
632             (op->csum_type &&
633              !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
634             (bch2_csum_type_is_encryption(op->csum_type) &&
635              !(op->flags & BCH_WRITE_PAGES_OWNED))) {
636                 dst = bch2_write_bio_alloc(c, wp, src, &page_alloc_failed);
637                 bounce = true;
638         }
639
640         saved_iter = dst->bi_iter;
641
642         do {
643                 struct bch_extent_crc_unpacked crc =
644                         (struct bch_extent_crc_unpacked) { 0 };
645                 struct bversion version = op->version;
646                 size_t dst_len, src_len;
647
648                 if (page_alloc_failed &&
649                     bio_sectors(dst) < wp->sectors_free &&
650                     bio_sectors(dst) < c->sb.encoded_extent_max)
651                         break;
652
653                 BUG_ON(op->compression_type &&
654                        (op->flags & BCH_WRITE_DATA_ENCODED) &&
655                        bch2_csum_type_is_encryption(op->crc.csum_type));
656                 BUG_ON(op->compression_type && !bounce);
657
658                 crc.compression_type = op->compression_type
659                         ?  bch2_bio_compress(c, dst, &dst_len, src, &src_len,
660                                              op->compression_type)
661                         : 0;
662                 if (!crc.compression_type) {
663                         dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
664                         dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
665
666                         if (op->csum_type)
667                                 dst_len = min_t(unsigned, dst_len,
668                                                 c->sb.encoded_extent_max << 9);
669
670                         if (bounce) {
671                                 swap(dst->bi_iter.bi_size, dst_len);
672                                 bio_copy_data(dst, src);
673                                 swap(dst->bi_iter.bi_size, dst_len);
674                         }
675
676                         src_len = dst_len;
677                 }
678
679                 BUG_ON(!src_len || !dst_len);
680
681                 if (bch2_csum_type_is_encryption(op->csum_type)) {
682                         if (bversion_zero(version)) {
683                                 version.lo = atomic64_inc_return(&c->key_version) + 1;
684                         } else {
685                                 crc.nonce = op->nonce;
686                                 op->nonce += src_len >> 9;
687                         }
688                 }
689
690                 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
691                     !crc.compression_type &&
692                     bch2_csum_type_is_encryption(op->crc.csum_type) ==
693                     bch2_csum_type_is_encryption(op->csum_type)) {
694                         /*
695                          * Note: when we're using rechecksum(), we need to be
696                          * checksumming @src because it has all the data our
697                          * existing checksum covers - if we bounced (because we
698                          * were trying to compress), @dst will only have the
699                          * part of the data the new checksum will cover.
700                          *
701                          * But normally we want to be checksumming post bounce,
702                          * because part of the reason for bouncing is so the
703                          * data can't be modified (by userspace) while it's in
704                          * flight.
705                          */
706                         if (bch2_rechecksum_bio(c, src, version, op->crc,
707                                         &crc, &op->crc,
708                                         src_len >> 9,
709                                         bio_sectors(src) - (src_len >> 9),
710                                         op->csum_type))
711                                 goto csum_err;
712                 } else {
713                         if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
714                             bch2_rechecksum_bio(c, src, version, op->crc,
715                                         NULL, &op->crc,
716                                         src_len >> 9,
717                                         bio_sectors(src) - (src_len >> 9),
718                                         op->crc.csum_type))
719                                 goto csum_err;
720
721                         crc.compressed_size     = dst_len >> 9;
722                         crc.uncompressed_size   = src_len >> 9;
723                         crc.live_size           = src_len >> 9;
724
725                         swap(dst->bi_iter.bi_size, dst_len);
726                         bch2_encrypt_bio(c, op->csum_type,
727                                          extent_nonce(version, crc), dst);
728                         crc.csum = bch2_checksum_bio(c, op->csum_type,
729                                          extent_nonce(version, crc), dst);
730                         crc.csum_type = op->csum_type;
731                         swap(dst->bi_iter.bi_size, dst_len);
732                 }
733
734                 init_append_extent(op, wp, version, crc);
735
736                 if (dst != src)
737                         bio_advance(dst, dst_len);
738                 bio_advance(src, src_len);
739                 total_output += dst_len;
740         } while (dst->bi_iter.bi_size &&
741                  src->bi_iter.bi_size &&
742                  wp->sectors_free &&
743                  !bch2_keylist_realloc(&op->insert_keys,
744                                       op->inline_keys,
745                                       ARRAY_SIZE(op->inline_keys),
746                                       BKEY_EXTENT_U64s_MAX));
747
748         more = src->bi_iter.bi_size != 0;
749
750         dst->bi_iter = saved_iter;
751
752         if (!bounce && more) {
753                 dst = bio_split(src, total_output >> 9,
754                                 GFP_NOIO, &c->bio_write);
755                 wbio_init(dst)->put_bio = true;
756         }
757
758         dst->bi_iter.bi_size = total_output;
759
760         /* Free unneeded pages after compressing: */
761         if (bounce)
762                 while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
763                         mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
764                                      &c->bio_bounce_pages);
765 do_write:
766         /* might have done a realloc... */
767
768         key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
769
770         dst->bi_end_io  = bch2_write_endio;
771         dst->bi_private = &op->cl;
772         bio_set_op_attrs(dst, REQ_OP_WRITE, 0);
773
774         closure_get(dst->bi_private);
775
776         bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
777                                   key_to_write);
778         return more;
779 csum_err:
780         bch_err(c, "error verifying existing checksum while "
781                 "rewriting existing data (memory corruption?)");
782         ret = -EIO;
783 err:
784         if (bounce) {
785                 bch2_bio_free_pages_pool(c, dst);
786                 bio_put(dst);
787         }
788
789         return ret;
790 }
791
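/*
 * Editorial note: bch2_write_extent() returns nonzero ("more") when src still
 * has data left after filling the current write point, 0 when everything was
 * consumed, and a negative error code on failure; __bch2_write() below loops
 * on the nonzero case.
 */
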
792 static void __bch2_write(struct closure *cl)
793 {
794         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
795         struct bch_fs *c = op->c;
796         struct write_point *wp;
797         int ret;
798 again:
799         do {
800                 /* +1 for possible cache device: */
801                 if (op->open_buckets_nr + op->nr_replicas + 1 >
802                     ARRAY_SIZE(op->open_buckets))
803                         goto flush_io;
804
805                 if (bch2_keylist_realloc(&op->insert_keys,
806                                         op->inline_keys,
807                                         ARRAY_SIZE(op->inline_keys),
808                                         BKEY_EXTENT_U64s_MAX))
809                         goto flush_io;
810
811                 wp = bch2_alloc_sectors_start(c,
812                         op->target,
813                         op->write_point,
814                         &op->devs_have,
815                         op->nr_replicas,
816                         op->nr_replicas_required,
817                         op->alloc_reserve,
818                         op->flags,
819                         (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
820                 EBUG_ON(!wp);
821
822                 if (unlikely(IS_ERR(wp))) {
823                         if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
824                                 ret = PTR_ERR(wp);
825                                 goto err;
826                         }
827
828                         goto flush_io;
829                 }
830
831                 ret = bch2_write_extent(op, wp);
832
833                 BUG_ON(op->open_buckets_nr + wp->nr_ptrs - wp->first_ptr >
834                        ARRAY_SIZE(op->open_buckets));
835                 bch2_open_bucket_get(c, wp,
836                                      &op->open_buckets_nr,
837                                      op->open_buckets);
838                 bch2_alloc_sectors_done(c, wp);
839
840                 if (ret < 0)
841                         goto err;
842         } while (ret);
843
844         continue_at(cl, bch2_write_index, index_update_wq(op));
845         return;
846 err:
847         op->error = ret;
848
849         continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
850                     ? bch2_write_index
851                     : bch2_write_done, index_update_wq(op));
852         return;
853 flush_io:
854         closure_sync(cl);
855
856         if (!bch2_keylist_empty(&op->insert_keys)) {
857                 __bch2_write_index(op);
858
859                 if (op->error) {
860                         continue_at_nobarrier(cl, bch2_write_done, NULL);
861                         return;
862                 }
863         }
864
865         goto again;
866 }
867
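/*
 * Editorial note on __bch2_write() above: the flush_io path handles running
 * out of room - not enough open_buckets slots for another replica set, a
 * failed keylist realloc, or the allocator returning -EAGAIN - by waiting for
 * outstanding IO with closure_sync() and doing the index update for what has
 * already been written before retrying.
 */
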
868 /**
869  * bch2_write - handle a write to a cache device or flash only volume
870  *
871  * This is the starting point for any data to end up in a cache device; it could
872  * be from a normal write, or a writeback write, or a write to a flash only
873  * volume - it's also used by the moving garbage collector to compact data in
874  * mostly empty buckets.
875  *
876  * It first writes the data to the cache, creating a list of keys to be inserted
877  * (if the data won't fit in a single open bucket, there will be multiple keys);
878  * after the data is written it calls bch_journal, and after the keys have been
879  * after the data is written the keys are journalled, and once they've been
880  * added to the next journal write they're inserted into the btree.
881  * If op->discard is true, instead of inserting the data it invalidates the
882  * region of the cache represented by op->bio and op->inode.
883  */
884 void bch2_write(struct closure *cl)
885 {
886         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
887         struct bch_fs *c = op->c;
888
889         BUG_ON(!op->nr_replicas);
890         BUG_ON(!op->write_point.v);
891         BUG_ON(!bkey_cmp(op->pos, POS_MAX));
892         BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);
893
894         op->start_time = local_clock();
895
896         memset(&op->failed, 0, sizeof(op->failed));
897
898         bch2_keylist_init(&op->insert_keys, op->inline_keys);
899         wbio_init(&op->wbio.bio)->put_bio = false;
900
901         if (c->opts.nochanges ||
902             !percpu_ref_tryget(&c->writes)) {
903                 __bcache_io_error(c, "read only");
904                 op->error = -EROFS;
905                 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
906                         bch2_disk_reservation_put(c, &op->res);
907                 closure_return(cl);
908                 return;
909         }
910
911         bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);
912
913         continue_at_nobarrier(cl, __bch2_write, NULL);
914 }
915
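/*
 * Usage sketch (editorial): bch2_write() is a closure function - the caller
 * fills in a struct bch_write_op (replicas, write point, pos, the data bio in
 * op->wbio) and hands it off; e.g. the promote path below does
 *
 *        closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
 *
 * with completion signalled through the op's closure.
 */
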
916 /* Cache promotion on read */
917
918 struct promote_op {
919         struct closure          cl;
920         u64                     start_time;
921
922         struct rhash_head       hash;
923         struct bpos             pos;
924
925         struct migrate_write    write;
926         struct bio_vec          bi_inline_vecs[0]; /* must be last */
927 };
928
929 static const struct rhashtable_params bch_promote_params = {
930         .head_offset    = offsetof(struct promote_op, hash),
931         .key_offset     = offsetof(struct promote_op, pos),
932         .key_len        = sizeof(struct bpos),
933 };
934
935 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
936                                   struct bpos pos,
937                                   struct bch_io_opts opts,
938                                   unsigned flags)
939 {
940         if (!opts.promote_target)
941                 return false;
942
943         if (!(flags & BCH_READ_MAY_PROMOTE))
944                 return false;
945
946         if (percpu_ref_is_dying(&c->writes))
947                 return false;
948
949         if (!bkey_extent_is_data(k.k))
950                 return false;
951
952         if (bch2_extent_has_target(c, bkey_s_c_to_extent(k), opts.promote_target))
953                 return false;
954
955         if (bch2_target_congested(c, opts.promote_target))
956                 return false;
957
958         if (rhashtable_lookup_fast(&c->promote_table, &pos,
959                                    bch_promote_params))
960                 return false;
961
962         return true;
963 }
964
965 static void promote_free(struct bch_fs *c, struct promote_op *op)
966 {
967         int ret;
968
969         ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
970                                      bch_promote_params);
971         BUG_ON(ret);
972         percpu_ref_put(&c->writes);
973         kfree(op);
974 }
975
976 static void promote_done(struct closure *cl)
977 {
978         struct promote_op *op =
979                 container_of(cl, struct promote_op, cl);
980         struct bch_fs *c = op->write.op.c;
981
982         bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
983                                op->start_time);
984
985         bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
986         promote_free(c, op);
987 }
988
989 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
990 {
991         struct bch_fs *c = rbio->c;
992         struct closure *cl = &op->cl;
993         struct bio *bio = &op->write.op.wbio.bio;
994
995         trace_promote(&rbio->bio);
996
997         /* we now own pages: */
998         BUG_ON(!rbio->bounce);
999         BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1000
1001         memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1002                sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1003         swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1004
1005         bch2_migrate_read_done(&op->write, rbio);
1006
1007         closure_init(cl, NULL);
1008         closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
1009         closure_return_with_destructor(cl, promote_done);
1010 }
1011
1012 noinline
1013 static struct promote_op *__promote_alloc(struct bch_fs *c,
1014                                           struct bpos pos,
1015                                           struct extent_pick_ptr *pick,
1016                                           struct bch_io_opts opts,
1017                                           unsigned rbio_sectors,
1018                                           struct bch_read_bio **rbio)
1019 {
1020         struct promote_op *op = NULL;
1021         struct bio *bio;
1022         unsigned rbio_pages = DIV_ROUND_UP(rbio_sectors, PAGE_SECTORS);
1023         /* data might have to be decompressed in the write path: */
1024         unsigned wbio_pages = DIV_ROUND_UP(pick->crc.uncompressed_size,
1025                                            PAGE_SECTORS);
1026         int ret;
1027
1028         if (!percpu_ref_tryget(&c->writes))
1029                 return NULL;
1030
1031         op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * wbio_pages,
1032                      GFP_NOIO);
1033         if (!op)
1034                 goto err;
1035
1036         op->start_time = local_clock();
1037         op->pos = pos;
1038
1039         /*
1040          * promotes require bouncing, but if the extent isn't
1041          * checksummed/compressed it might be too big for the mempool:
1042          */
1043         if (rbio_sectors > c->sb.encoded_extent_max) {
1044                 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1045                                 sizeof(struct bio_vec) * rbio_pages,
1046                                 GFP_NOIO);
1047                 if (!*rbio)
1048                         goto err;
1049
1050                 rbio_init(&(*rbio)->bio, opts);
1051                 bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs,
1052                          rbio_pages);
1053
1054                 (*rbio)->bio.bi_iter.bi_size = rbio_sectors << 9;
1055                 bch2_bio_map(&(*rbio)->bio, NULL);
1056
1057                 if (bch2_bio_alloc_pages(&(*rbio)->bio, GFP_NOIO))
1058                         goto err;
1059
1060                 (*rbio)->bounce         = true;
1061                 (*rbio)->split          = true;
1062                 (*rbio)->kmalloc        = true;
1063         }
1064
1065         if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1066                                           bch_promote_params))
1067                 goto err;
1068
1069         bio = &op->write.op.wbio.bio;
1070         bio_init(bio, bio->bi_inline_vecs, wbio_pages);
1071
1072         ret = bch2_migrate_write_init(c, &op->write,
1073                         writepoint_hashed((unsigned long) current),
1074                         opts,
1075                         DATA_PROMOTE,
1076                         (struct data_opts) {
1077                                 .target = opts.promote_target
1078                         },
1079                         bkey_s_c_null);
1080         BUG_ON(ret);
1081
1082         return op;
1083 err:
1084         if (*rbio)
1085                 bio_free_pages(&(*rbio)->bio);
1086         kfree(*rbio);
1087         *rbio = NULL;
1088         kfree(op);
1089         percpu_ref_put(&c->writes);
1090         return NULL;
1091 }
1092
1093 static inline struct promote_op *promote_alloc(struct bch_fs *c,
1094                                                struct bvec_iter iter,
1095                                                struct bkey_s_c k,
1096                                                struct extent_pick_ptr *pick,
1097                                                struct bch_io_opts opts,
1098                                                unsigned flags,
1099                                                struct bch_read_bio **rbio,
1100                                                bool *bounce,
1101                                                bool *read_full)
1102 {
1103         bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1104         unsigned sectors = promote_full
1105                 ? pick->crc.compressed_size
1106                 : bvec_iter_sectors(iter);
1107         struct bpos pos = promote_full
1108                 ? bkey_start_pos(k.k)
1109                 : POS(k.k->p.inode, iter.bi_sector);
1110         struct promote_op *promote;
1111
1112         if (!should_promote(c, k, pos, opts, flags))
1113                 return NULL;
1114
1115         promote = __promote_alloc(c, pos, pick, opts, sectors, rbio);
1116         if (!promote)
1117                 return NULL;
1118
1119         *bounce         = true;
1120         *read_full      = promote_full;
1121         return promote;
1122 }
1123
1124 /* Read */
1125
1126 #define READ_RETRY_AVOID        1
1127 #define READ_RETRY              2
1128 #define READ_ERR                3
1129
1130 enum rbio_context {
1131         RBIO_CONTEXT_NULL,
1132         RBIO_CONTEXT_HIGHPRI,
1133         RBIO_CONTEXT_UNBOUND,
1134 };
1135
1136 static inline struct bch_read_bio *
1137 bch2_rbio_parent(struct bch_read_bio *rbio)
1138 {
1139         return rbio->split ? rbio->parent : rbio;
1140 }
1141
1142 __always_inline
1143 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
1144                            enum rbio_context context,
1145                            struct workqueue_struct *wq)
1146 {
1147         if (context <= rbio->context) {
1148                 fn(&rbio->work);
1149         } else {
1150                 rbio->work.func         = fn;
1151                 rbio->context           = context;
1152                 queue_work(wq, &rbio->work);
1153         }
1154 }
1155
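/*
 * Editorial note: rbio_context orders where read completion work may run -
 * bch2_rbio_punt() calls fn inline if we're already in a context at least as
 * unrestricted as the one requested, and otherwise bounces it to the given
 * workqueue. Retries, for example, are punted as
 *
 *        bch2_rbio_punt(rbio, bch2_rbio_retry,
 *                       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
 *
 * (see bch2_rbio_error() below).
 */
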
1156 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1157 {
1158         BUG_ON(rbio->bounce && !rbio->split);
1159
1160         if (rbio->promote)
1161                 promote_free(rbio->c, rbio->promote);
1162         rbio->promote = NULL;
1163
1164         if (rbio->bounce)
1165                 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1166
1167         if (rbio->split) {
1168                 struct bch_read_bio *parent = rbio->parent;
1169
1170                 if (rbio->kmalloc)
1171                         kfree(rbio);
1172                 else
1173                         bio_put(&rbio->bio);
1174
1175                 rbio = parent;
1176         }
1177
1178         return rbio;
1179 }
1180
1181 static void bch2_rbio_done(struct bch_read_bio *rbio)
1182 {
1183         bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
1184                                rbio->start_time);
1185         bio_endio(&rbio->bio);
1186 }
1187
1188 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
1189                                      struct bvec_iter bvec_iter, u64 inode,
1190                                      struct bch_devs_mask *avoid, unsigned flags)
1191 {
1192         struct btree_iter iter;
1193         BKEY_PADDED(k) tmp;
1194         struct bkey_s_c k;
1195         int ret;
1196
1197         flags &= ~BCH_READ_LAST_FRAGMENT;
1198
1199         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
1200                              rbio->pos, BTREE_ITER_SLOTS);
1201 retry:
1202         rbio->bio.bi_status = 0;
1203
1204         k = bch2_btree_iter_peek_slot(&iter);
1205         if (btree_iter_err(k)) {
1206                 bch2_btree_iter_unlock(&iter);
1207                 goto err;
1208         }
1209
1210         bkey_reassemble(&tmp.k, k);
1211         k = bkey_i_to_s_c(&tmp.k);
1212         bch2_btree_iter_unlock(&iter);
1213
1214         if (!bkey_extent_is_data(k.k) ||
1215             !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
1216                                      rbio->pick.ptr,
1217                                      rbio->pos.offset -
1218                                      rbio->pick.crc.offset)) {
1219                 /* extent we wanted to read no longer exists: */
1220                 rbio->hole = true;
1221                 goto out;
1222         }
1223
1224         ret = __bch2_read_extent(c, rbio, bvec_iter, k, avoid, flags);
1225         if (ret == READ_RETRY)
1226                 goto retry;
1227         if (ret)
1228                 goto err;
1229         goto out;
1230 err:
1231         rbio->bio.bi_status = BLK_STS_IOERR;
1232 out:
1233         bch2_rbio_done(rbio);
1234 }
1235
1236 static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1237                             struct bvec_iter bvec_iter, u64 inode,
1238                             struct bch_devs_mask *avoid, unsigned flags)
1239 {
1240         struct btree_iter iter;
1241         struct bkey_s_c k;
1242         int ret;
1243
1244         flags &= ~BCH_READ_LAST_FRAGMENT;
1245         flags |= BCH_READ_MUST_CLONE;
1246 retry:
1247         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1248                            POS(inode, bvec_iter.bi_sector),
1249                            BTREE_ITER_SLOTS, k) {
1250                 BKEY_PADDED(k) tmp;
1251                 unsigned bytes;
1252
1253                 bkey_reassemble(&tmp.k, k);
1254                 k = bkey_i_to_s_c(&tmp.k);
1255                 bch2_btree_iter_unlock(&iter);
1256
1257                 bytes = min_t(unsigned, bvec_iter.bi_size,
1258                               (k.k->p.offset - bvec_iter.bi_sector) << 9);
1259                 swap(bvec_iter.bi_size, bytes);
1260
1261                 ret = __bch2_read_extent(c, rbio, bvec_iter, k, avoid, flags);
1262                 switch (ret) {
1263                 case READ_RETRY:
1264                         goto retry;
1265                 case READ_ERR:
1266                         goto err;
1267                 }
1268
1269                 if (bytes == bvec_iter.bi_size)
1270                         goto out;
1271
1272                 swap(bvec_iter.bi_size, bytes);
1273                 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
1274         }
1275
1276         /*
1277          * If we get here, it better have been because there was an error
1278          * reading a btree node
1279          */
1280         ret = bch2_btree_iter_unlock(&iter);
1281         BUG_ON(!ret);
1282         __bcache_io_error(c, "btree IO error %i", ret);
1283 err:
1284         rbio->bio.bi_status = BLK_STS_IOERR;
1285 out:
1286         bch2_rbio_done(rbio);
1287 }
1288
1289 static void bch2_rbio_retry(struct work_struct *work)
1290 {
1291         struct bch_read_bio *rbio =
1292                 container_of(work, struct bch_read_bio, work);
1293         struct bch_fs *c        = rbio->c;
1294         struct bvec_iter iter   = rbio->bvec_iter;
1295         unsigned flags          = rbio->flags;
1296         u64 inode               = rbio->pos.inode;
1297         struct bch_devs_mask avoid;
1298
1299         trace_read_retry(&rbio->bio);
1300
1301         memset(&avoid, 0, sizeof(avoid));
1302
1303         if (rbio->retry == READ_RETRY_AVOID)
1304                 __set_bit(rbio->pick.ptr.dev, avoid.d);
1305
1306         rbio->bio.bi_status = 0;
1307
1308         rbio = bch2_rbio_free(rbio);
1309
1310         flags |= BCH_READ_IN_RETRY;
1311         flags &= ~BCH_READ_MAY_PROMOTE;
1312
1313         if (flags & BCH_READ_NODECODE)
1314                 bch2_read_retry_nodecode(c, rbio, iter, inode, &avoid, flags);
1315         else
1316                 bch2_read_retry(c, rbio, iter, inode, &avoid, flags);
1317 }
1318
1319 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1320                             blk_status_t error)
1321 {
1322         rbio->retry = retry;
1323
1324         if (rbio->flags & BCH_READ_IN_RETRY)
1325                 return;
1326
1327         if (retry == READ_ERR) {
1328                 rbio = bch2_rbio_free(rbio);
1329
1330                 rbio->bio.bi_status = error;
1331                 bch2_rbio_done(rbio);
1332         } else {
1333                 bch2_rbio_punt(rbio, bch2_rbio_retry,
1334                                RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1335         }
1336 }
1337
1338 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1339 {
1340         struct bch_fs *c = rbio->c;
1341         struct btree_iter iter;
1342         struct bkey_s_c k;
1343         struct bkey_i_extent *e;
1344         BKEY_PADDED(k) new;
1345         struct bch_extent_crc_unpacked new_crc;
1346         unsigned offset;
1347         int ret;
1348
1349         if (rbio->pick.crc.compression_type)
1350                 return;
1351
1352         bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
1353                              BTREE_ITER_INTENT);
1354 retry:
1355         k = bch2_btree_iter_peek(&iter);
1356         if (IS_ERR_OR_NULL(k.k))
1357                 goto out;
1358
1359         if (!bkey_extent_is_data(k.k))
1360                 goto out;
1361
1362         bkey_reassemble(&new.k, k);
1363         e = bkey_i_to_extent(&new.k);
1364
1365         if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
1366                                      rbio->pick.ptr,
1367                                      rbio->pos.offset -
1368                                      rbio->pick.crc.offset) ||
1369             bversion_cmp(e->k.version, rbio->version))
1370                 goto out;
1371
1372         /* Extent was merged? */
1373         if (bkey_start_offset(&e->k) < rbio->pos.offset ||
1374             e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
1375                 goto out;
1376
1377         /* The extent might have been partially overwritten since we read it: */
1378         offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);
1379
1380         if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1381                                 rbio->pick.crc, NULL, &new_crc,
1382                                 offset, e->k.size,
1383                                 rbio->pick.crc.csum_type)) {
1384                 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1385                 goto out;
1386         }
1387
1388         if (!bch2_extent_narrow_crcs(e, new_crc))
1389                 goto out;
1390
1391         ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
1392                                    BTREE_INSERT_ATOMIC|
1393                                    BTREE_INSERT_NOFAIL|
1394                                    BTREE_INSERT_NOWAIT,
1395                                    BTREE_INSERT_ENTRY(&iter, &e->k_i));
1396         if (ret == -EINTR)
1397                 goto retry;
1398 out:
1399         bch2_btree_iter_unlock(&iter);
1400 }
1401
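/*
 * Editorial reading of the above: narrowing CRCs rewrites the extent's
 * checksum to cover only the currently live portion, using the data we just
 * read and verified, presumably so later reads of a partially overwritten
 * extent needn't fetch and checksum the whole original extent. It's strictly
 * best effort: BTREE_INSERT_NOWAIT means the update is dropped rather than
 * blocking.
 */
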
1402 static bool should_narrow_crcs(struct bkey_s_c k,
1403                                struct extent_pick_ptr *pick,
1404                                unsigned flags)
1405 {
1406         return !(flags & BCH_READ_IN_RETRY) &&
1407                 bkey_extent_is_data(k.k) &&
1408                 bch2_can_narrow_extent_crcs(bkey_s_c_to_extent(k), pick->crc);
1409 }
1410
1411 /* Inner part that may run in process context */
1412 static void __bch2_read_endio(struct work_struct *work)
1413 {
1414         struct bch_read_bio *rbio =
1415                 container_of(work, struct bch_read_bio, work);
1416         struct bch_fs *c        = rbio->c;
1417         struct bch_dev *ca      = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1418         struct bio *src         = &rbio->bio;
1419         struct bio *dst         = &bch2_rbio_parent(rbio)->bio;
1420         struct bvec_iter dst_iter = rbio->bvec_iter;
1421         struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1422         struct nonce nonce = extent_nonce(rbio->version, crc);
1423         struct bch_csum csum;
1424
1425         /* Reset iterator for checksumming and copying bounced data: */
1426         if (rbio->bounce) {
1427                 src->bi_iter.bi_size            = crc.compressed_size << 9;
1428                 src->bi_iter.bi_idx             = 0;
1429                 src->bi_iter.bi_bvec_done       = 0;
1430         } else {
1431                 src->bi_iter                    = rbio->bvec_iter;
1432         }
1433
1434         csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1435         if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1436                 goto csum_err;
1437
1438         if (unlikely(rbio->narrow_crcs))
1439                 bch2_rbio_narrow_crcs(rbio);
1440
1441         if (rbio->flags & BCH_READ_NODECODE)
1442                 goto nodecode;
1443
1444         /* Adjust crc to point to subset of data we want: */
1445         crc.offset     += rbio->bvec_iter.bi_sector - rbio->pos.offset;
1446         crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);
1447
1448         if (crc.compression_type != BCH_COMPRESSION_NONE) {
1449                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1450                 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1451                         goto decompression_err;
1452         } else {
1453                 /* don't need to decrypt the entire bio: */
1454                 nonce = nonce_add(nonce, crc.offset << 9);
1455                 bio_advance(src, crc.offset << 9);
1456
1457                 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1458                 src->bi_iter.bi_size = dst_iter.bi_size;
1459
1460                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1461
1462                 if (rbio->bounce) {
1463                         struct bvec_iter src_iter = src->bi_iter;
1464                         bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1465                 }
1466         }
1467
1468         if (rbio->promote) {
1469                 /*
1470                  * Re-encrypt the data we decrypted, so it's consistent with
1471                  * rbio->crc:
1472                  */
1473                 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1474                 promote_start(rbio->promote, rbio);
1475                 rbio->promote = NULL;
1476         }
1477 nodecode:
1478         if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
1479                 rbio = bch2_rbio_free(rbio);
1480                 bch2_rbio_done(rbio);
1481         }
1482         return;
1483 csum_err:
1484         /*
1485          * Checksum error: if the bio wasn't bounced, we may have been
1486          * reading into buffers owned by userspace (that userspace can
1487          * scribble over) - retry the read, bouncing it this time:
1488          */
1489         if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1490                 rbio->flags |= BCH_READ_MUST_BOUNCE;
1491                 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1492                 return;
1493         }
1494
1495         bch2_dev_io_error(ca,
1496                 "data checksum error, inode %llu offset %llu: expected %0llx:%0llx got %0llx:%0llx (type %u)",
1497                 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1498                 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1499                 csum.hi, csum.lo, crc.csum_type);
1500         bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1501         return;
1502 decompression_err:
1503         __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1504                           rbio->pos.inode,
1505                           (u64) rbio->bvec_iter.bi_sector);
1506         bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1507         return;
1508 }
1509
1510 static void bch2_read_endio(struct bio *bio)
1511 {
1512         struct bch_read_bio *rbio =
1513                 container_of(bio, struct bch_read_bio, bio);
1514         struct bch_fs *c        = rbio->c;
1515         struct bch_dev *ca      = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1516         struct workqueue_struct *wq = NULL;
1517         enum rbio_context context = RBIO_CONTEXT_NULL;
1518
1519         if (rbio->have_ioref) {
1520                 bch2_latency_acct(ca, rbio->submit_time, READ);
1521                 percpu_ref_put(&ca->io_ref);
1522         }
1523
1524         if (!rbio->split)
1525                 rbio->bio.bi_end_io = rbio->end_io;
1526
1527         if (bch2_dev_io_err_on(bio->bi_status, ca, "data read")) {
1528                 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1529                 return;
1530         }
1531
1532         if (rbio->pick.ptr.cached &&
1533             (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1534              ptr_stale(ca, &rbio->pick.ptr))) {
1535                 atomic_long_inc(&c->read_realloc_races);
1536
1537                 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1538                         bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1539                 else
1540                         bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1541                 return;
1542         }
1543
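        /*
         * Decide where the rest of the completion runs: work that has to
         * decompress, decrypt or narrow the extent's crc is punted to the
         * unbound workqueue; plain checksum verification goes to the highpri
         * workqueue; reads that need neither aren't punted at all.
         */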
1544         if (rbio->narrow_crcs ||
1545             rbio->pick.crc.compression_type ||
1546             bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1547                 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1548         else if (rbio->pick.crc.csum_type)
1549                 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1550
1551         bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1552 }
1553
1554 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1555                        struct bvec_iter iter, struct bkey_s_c k,
1556                        struct bch_devs_mask *avoid, unsigned flags)
1557 {
1558         struct extent_pick_ptr pick;
1559         struct bch_read_bio *rbio = NULL;
1560         struct bch_dev *ca;
1561         struct promote_op *promote = NULL;
1562         bool bounce = false, read_full = false, narrow_crcs = false;
1563         struct bpos pos = bkey_start_pos(k.k);
1564         int pick_ret;
1565
1566         pick_ret = bch2_extent_pick_ptr(c, k, avoid, &pick);
1567
1568         /* hole or reservation - just zero fill: */
1569         if (!pick_ret)
1570                 goto hole;
1571
1572         if (pick_ret < 0)
1573                 goto no_device;
1574
1575         if (pick_ret > 0)
1576                 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1577
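        /*
         * BCH_READ_NODECODE (the bch2_move_extent() path) reads the extent
         * exactly as it exists on disk - the checksum is still verified, but
         * nothing is decrypted or decompressed - so point the iterator at the
         * full compressed extent:
         */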
1578         if (flags & BCH_READ_NODECODE) {
1579                 /*
1580                  * This can happen if we retry and the extent we were going to
1581                  * read has been merged in the meantime:
1582                  */
1583                 if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
1584                         goto hole;
1585
1586                 iter.bi_sector  = pos.offset;
1587                 iter.bi_size    = pick.crc.compressed_size << 9;
1588                 goto noclone;
1589         }
1590
1591         if (!(flags & BCH_READ_LAST_FRAGMENT) ||
1592             bio_flagged(&orig->bio, BIO_CHAIN))
1593                 flags |= BCH_READ_MUST_CLONE;
1594
1595         narrow_crcs = should_narrow_crcs(k, &pick, flags);
1596
1597         if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1598                 flags |= BCH_READ_MUST_BOUNCE;
1599
1600         EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
1601                 k.k->p.offset < bvec_iter_end_sector(iter));
1602
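        /*
         * We have to read the full extent (and bounce it) if it's compressed,
         * or if it's checksummed and we're reading only part of it, decrypting
         * into user mapped pages, or were already told to bounce:
         */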
1603         if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
1604             (pick.crc.csum_type != BCH_CSUM_NONE &&
1605              (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
1606               (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
1607                (flags & BCH_READ_USER_MAPPED)) ||
1608               (flags & BCH_READ_MUST_BOUNCE)))) {
1609                 read_full = true;
1610                 bounce = true;
1611         }
1612
1613         promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
1614                                 &rbio, &bounce, &read_full);
1615
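        /*
         * If we're not reading the full extent, adjust the pointer to point
         * straight at the range we want and shrink the crc fields so they
         * describe exactly what we're reading:
         */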
1616         if (!read_full) {
1617                 EBUG_ON(pick.crc.compression_type);
1618                 EBUG_ON(pick.crc.csum_type &&
1619                         (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
1620                          bvec_iter_sectors(iter) != pick.crc.live_size ||
1621                          pick.crc.offset ||
1622                          iter.bi_sector != pos.offset));
1623
1624                 pick.ptr.offset += pick.crc.offset +
1625                         (iter.bi_sector - pos.offset);
1626                 pick.crc.compressed_size        = bvec_iter_sectors(iter);
1627                 pick.crc.uncompressed_size      = bvec_iter_sectors(iter);
1628                 pick.crc.offset                 = 0;
1629                 pick.crc.live_size              = bvec_iter_sectors(iter);
1630                 pos.offset                      = iter.bi_sector;
1631         }
1632
1633         if (rbio) {
1634                 /* promote already allocated bounce rbio */
1635         } else if (bounce) {
1636                 unsigned sectors = pick.crc.compressed_size;
1637
1638                 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
1639                                                   DIV_ROUND_UP(sectors, PAGE_SECTORS),
1640                                                   &c->bio_read_split),
1641                                  orig->opts);
1642
1643                 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1644                 rbio->bounce    = true;
1645                 rbio->split     = true;
1646         } else if (flags & BCH_READ_MUST_CLONE) {
1647                 /*
1648                  * We have to clone if there were any splits, because of error
1649                  * reporting: if a split errored and retrying didn't work, then
1650                  * when it reports the error to its parent (us) we can't tell
1651                  * whether the error came from our bio (in which case we should
1652                  * retry) or from the bio as a whole (in which case retrying
1653                  * would lose the error).
1654                  */
1655                 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
1656                                                 &c->bio_read_split),
1657                                  orig->opts);
1658                 rbio->bio.bi_iter = iter;
1659                 rbio->split     = true;
1660         } else {
1661 noclone:
1662                 rbio = orig;
1663                 rbio->bio.bi_iter = iter;
1664                 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1665         }
1666
1667         BUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
1668
1669         rbio->c                 = c;
1670         rbio->submit_time       = local_clock();
1671         if (rbio->split)
1672                 rbio->parent    = orig;
1673         else
1674                 rbio->end_io    = orig->bio.bi_end_io;
1675         rbio->bvec_iter         = iter;
1676         rbio->flags             = flags;
1677         rbio->have_ioref        = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
1678         rbio->narrow_crcs       = narrow_crcs;
1679         rbio->hole              = 0;
1680         rbio->retry             = 0;
1681         rbio->context           = 0;
1682         rbio->devs_have         = bch2_bkey_devs(k);
1683         rbio->pick              = pick;
1684         rbio->pos               = pos;
1685         rbio->version           = k.k->version;
1686         rbio->promote           = promote;
1687         INIT_WORK(&rbio->work, NULL);
1688
1689         rbio->bio.bi_opf        = orig->bio.bi_opf;
1690         rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
1691         rbio->bio.bi_end_io     = bch2_read_endio;
1692
1693         if (rbio->bounce)
1694                 trace_read_bounce(&rbio->bio);
1695
1696         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1697
1698         if (!rbio->have_ioref)
1699                 goto no_device_postclone;
1700
1701         lg_local_lock(&c->usage_lock);
1702         bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
1703         lg_local_unlock(&c->usage_lock);
1704
1705         this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
1706                      bio_sectors(&rbio->bio));
1707
1708         bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
1709
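        /*
         * Normal reads are submitted asynchronously and finished by
         * bch2_read_endio(); in the retry path we submit synchronously, run
         * the completion handler ourselves and return a retry disposition,
         * adding the failing device to the avoid mask if we were told to
         * steer away from it.
         */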
1710         if (likely(!(flags & BCH_READ_IN_RETRY))) {
1711                 if (!(flags & BCH_READ_LAST_FRAGMENT)) {
1712                         bio_inc_remaining(&orig->bio);
1713                         trace_read_split(&orig->bio);
1714                 }
1715
1716                 submit_bio(&rbio->bio);
1717                 return 0;
1718         } else {
1719                 int ret;
1720
1721                 submit_bio_wait(&rbio->bio);
1722
1723                 rbio->context = RBIO_CONTEXT_UNBOUND;
1724                 bch2_read_endio(&rbio->bio);
1725
1726                 ret = rbio->retry;
1727                 rbio = bch2_rbio_free(rbio);
1728
1729                 if (ret == READ_RETRY_AVOID) {
1730                         __set_bit(pick.ptr.dev, avoid->d);
1731                         ret = READ_RETRY;
1732                 }
1733
1734                 return ret;
1735         }
1736
1737 no_device_postclone:
1738         if (!rbio->split)
1739                 rbio->bio.bi_end_io = rbio->end_io;
1740         bch2_rbio_free(rbio);
1741 no_device:
1742         __bcache_io_error(c, "no device to read from");
1743
1744         if (likely(!(flags & BCH_READ_IN_RETRY))) {
1745                 orig->bio.bi_status = BLK_STS_IOERR;
1746
1747                 if (flags & BCH_READ_LAST_FRAGMENT)
1748                         bch2_rbio_done(orig);
1749                 return 0;
1750         } else {
1751                 return READ_ERR;
1752         }
1753
1754 hole:
1755         /*
1756          * A hole won't normally happen in the BCH_READ_NODECODE
1757          * (bch2_move_extent()) path, but if we retry and the extent we wanted
1758          * to read no longer exists, we have to signal that:
1759          */
1760         if (flags & BCH_READ_NODECODE)
1761                 orig->hole = true;
1762
1763         zero_fill_bio_iter(&orig->bio, iter);
1764
1765         if (flags & BCH_READ_LAST_FRAGMENT)
1766                 bch2_rbio_done(orig);
1767         return 0;
1768 }
1769
1770 void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
1771 {
1772         struct btree_iter iter;
1773         struct bkey_s_c k;
1774         unsigned flags = BCH_READ_RETRY_IF_STALE|
1775                 BCH_READ_MAY_PROMOTE|
1776                 BCH_READ_USER_MAPPED;
1777         int ret;
1778
1779         BUG_ON(rbio->_state);
1780         BUG_ON(flags & BCH_READ_NODECODE);
1781         BUG_ON(flags & BCH_READ_IN_RETRY);
1782
1783         rbio->c = c;
1784         rbio->start_time = local_clock();
1785
1786         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1787                            POS(inode, rbio->bio.bi_iter.bi_sector),
1788                            BTREE_ITER_SLOTS, k) {
1789                 BKEY_PADDED(k) tmp;
1790                 unsigned bytes;
1791
1792                 /*
1793                  * Unlock the iterator while the btree node's lock is still in
1794                  * cache, before doing the IO:
1795                  */
1796                 bkey_reassemble(&tmp.k, k);
1797                 k = bkey_i_to_s_c(&tmp.k);
1798                 bch2_btree_iter_unlock(&iter);
1799
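                /*
                 * Clamp the bio to just the fragment covered by this extent:
                 * swap in the smaller size, issue the read, and - unless this
                 * was the last fragment - swap the full size back and advance
                 * past what was just submitted.
                 */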
1800                 bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
1801                               (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
1802                 swap(rbio->bio.bi_iter.bi_size, bytes);
1803
1804                 if (rbio->bio.bi_iter.bi_size == bytes)
1805                         flags |= BCH_READ_LAST_FRAGMENT;
1806
1807                 bch2_read_extent(c, rbio, k, flags);
1808
1809                 if (flags & BCH_READ_LAST_FRAGMENT)
1810                         return;
1811
1812                 swap(rbio->bio.bi_iter.bi_size, bytes);
1813                 bio_advance(&rbio->bio, bytes);
1814         }
1815
1816         /*
1817          * If we get here, it had better have been because there was an error
1818          * reading a btree node
1819          */
1820         ret = bch2_btree_iter_unlock(&iter);
1821         BUG_ON(!ret);
1822         bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
1823         bch2_rbio_done(rbio);
1824 }
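
/*
 * Rough usage sketch (illustrative only; nr_pages, opts, sector and inode
 * stand in for whatever the caller is servicing): the bio handed to
 * bch2_read() must be embedded in a struct bch_read_bio, e.g. allocated
 * from c->bio_read and set up with rbio_init():
 *
 *	struct bch_read_bio *rbio =
 *		rbio_init(bio_alloc_bioset(GFP_NOIO, nr_pages, &c->bio_read),
 *			  opts);
 *
 *	(add the destination pages with bio_add_page(), then:)
 *	rbio->bio.bi_iter.bi_sector = sector;
 *	bch2_read(c, rbio, inode);
 *
 * Completion is signalled through the bio's end_io once the last fragment
 * finishes.
 */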
1825
1826 void bch2_fs_io_exit(struct bch_fs *c)
1827 {
1828         if (c->promote_table.tbl)
1829                 rhashtable_destroy(&c->promote_table);
1830         mempool_exit(&c->bio_bounce_pages);
1831         bioset_exit(&c->bio_write);
1832         bioset_exit(&c->bio_read_split);
1833         bioset_exit(&c->bio_read);
1834 }
1835
1836 int bch2_fs_io_init(struct bch_fs *c)
1837 {
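        /*
         * The bounce page pool is sized for the larger of a btree node and
         * the maximum encoded extent (both in units of sectors, hence the
         * division by PAGE_SECTORS):
         */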
1838         if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
1839                         BIOSET_NEED_BVECS) ||
1840             bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
1841                         BIOSET_NEED_BVECS) ||
1842             bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
1843                         BIOSET_NEED_BVECS) ||
1844             mempool_init_page_pool(&c->bio_bounce_pages,
1845                                    max_t(unsigned,
1846                                          c->opts.btree_node_size,
1847                                          c->sb.encoded_extent_max) /
1848                                    PAGE_SECTORS, 0) ||
1849             rhashtable_init(&c->promote_table, &bch_promote_params))
1850                 return -ENOMEM;
1851
1852         return 0;
1853 }