libbcachefs/io.c
1 /*
2  * Some low level IO code, and hacks for various block layer limitations
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc.h"
10 #include "bset.h"
11 #include "btree_update.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "compress.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "error.h"
18 #include "extents.h"
19 #include "io.h"
20 #include "journal.h"
21 #include "keylist.h"
22 #include "move.h"
23 #include "super-io.h"
24
25 #include <linux/blkdev.h>
26 #include <linux/random.h>
27
28 #include <trace/events/bcachefs.h>
29
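/*
 * Open-coded version of bio_inc_remaining(): take an extra reference on the
 * bio's remaining-completion count so that a cloned child must complete
 * before the parent's bi_end_io runs.
 */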
30 static inline void __bio_inc_remaining(struct bio *bio)
31 {
32         bio_set_flag(bio, BIO_CHAIN);
33         smp_mb__before_atomic();
34         atomic_inc(&bio->__bi_remaining);
35 }
36
37 /* Allocate, free from mempool: */
38
39 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
40 {
41         struct bio_vec *bv;
42         unsigned i;
43
44         bio_for_each_segment_all(bv, bio, i)
45                 if (bv->bv_page != ZERO_PAGE(0))
46                         mempool_free(bv->bv_page, &c->bio_bounce_pages);
47         bio->bi_vcnt = 0;
48 }
49
50 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
51                                     bool *using_mempool)
52 {
53         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
54
55         if (likely(!*using_mempool)) {
56                 bv->bv_page = alloc_page(GFP_NOIO);
57                 if (unlikely(!bv->bv_page)) {
58                         mutex_lock(&c->bio_bounce_pages_lock);
59                         *using_mempool = true;
60                         goto pool_alloc;
61
62                 }
63         } else {
64 pool_alloc:
65                 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
66         }
67
68         bv->bv_len = PAGE_SIZE;
69         bv->bv_offset = 0;
70 }
71
72 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
73                               size_t bytes)
74 {
75         bool using_mempool = false;
76
77         bio->bi_iter.bi_size = bytes;
78
79         while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
80                 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
81
82         if (using_mempool)
83                 mutex_unlock(&c->bio_bounce_pages_lock);
84 }
85
86 /* Bios with headers */
87
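/*
 * Submit @wbio once per device pointer in @k: the bio is cloned for every
 * pointer but the last, each clone completing back into the original via the
 * chained remaining count. REQ_FUA is added for devices the journal doesn't
 * flush.
 */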
88 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
89                                const struct bkey_i *k)
90 {
91         struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
92         const struct bch_extent_ptr *ptr;
93         struct bch_write_bio *n;
94         struct bch_dev *ca;
95
96         BUG_ON(c->opts.nochanges);
97
98         wbio->split = false;
99         wbio->c = c;
100
101         extent_for_each_ptr(e, ptr) {
102                 ca = c->devs[ptr->dev];
103
104                 if (ptr + 1 < &extent_entry_last(e)->ptr) {
105                         n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
106                                                    &ca->replica_set));
107
108                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
109                         n->bio.bi_private       = wbio->bio.bi_private;
110                         n->c                    = c;
111                         n->orig                 = &wbio->bio;
112                         n->bounce               = false;
113                         n->split                = true;
114                         n->put_bio              = true;
115                         n->bio.bi_opf           = wbio->bio.bi_opf;
116                         __bio_inc_remaining(n->orig);
117                 } else {
118                         n = wbio;
119                 }
120
121                 if (!journal_flushes_device(ca))
122                         n->bio.bi_opf |= REQ_FUA;
123
124                 n->ca                   = ca;
125                 n->submit_time_us       = local_clock_us();
126                 n->bio.bi_iter.bi_sector = ptr->offset;
127
128                 if (likely(percpu_ref_tryget(&ca->io_ref))) {
129                         n->have_io_ref          = true;
130                         n->bio.bi_bdev          = ca->disk_sb.bdev;
131                         generic_make_request(&n->bio);
132                 } else {
133                         n->have_io_ref          = false;
134                         bcache_io_error(c, &n->bio, "device has been removed");
135                         bio_endio(&n->bio);
136                 }
137         }
138 }
139
140 /* IO errors */
141
142 /* Writes */
143
144 static struct workqueue_struct *index_update_wq(struct bch_write_op *op)
145 {
146         return op->alloc_reserve == RESERVE_MOVINGGC
147                 ? op->c->copygc_wq
148                 : op->c->wq;
149 }
150
151 static void __bch2_write(struct closure *);
152
153 static void bch2_write_done(struct closure *cl)
154 {
155         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
156
157         BUG_ON(!(op->flags & BCH_WRITE_DONE));
158
159         if (!op->error && (op->flags & BCH_WRITE_FLUSH))
160                 op->error = bch2_journal_error(&op->c->journal);
161
162         bch2_disk_reservation_put(op->c, &op->res);
163         percpu_ref_put(&op->c->writes);
164         bch2_keylist_free(&op->insert_keys, op->inline_keys);
165         closure_return(cl);
166 }
167
168 static u64 keylist_sectors(struct keylist *keys)
169 {
170         struct bkey_i *k;
171         u64 ret = 0;
172
173         for_each_keylist_key(keys, k)
174                 ret += k->k.size;
175
176         return ret;
177 }
178
179 static int bch2_write_index_default(struct bch_write_op *op)
180 {
181         struct keylist *keys = &op->insert_keys;
182         struct btree_iter iter;
183         int ret;
184
185         bch2_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS,
186                 bkey_start_pos(&bch2_keylist_front(keys)->k));
187
188         ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
189                                        NULL, op_journal_seq(op),
190                                        BTREE_INSERT_NOFAIL);
191         bch2_btree_iter_unlock(&iter);
192
193         return ret;
194 }
195
196 /**
197  * bch2_write_index - after a write, update the index to point to the new data
198  */
199 static void bch2_write_index(struct closure *cl)
200 {
201         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
202         struct bch_fs *c = op->c;
203         struct keylist *keys = &op->insert_keys;
204         unsigned i;
205
206         op->flags |= BCH_WRITE_LOOPED;
207
208         if (!bch2_keylist_empty(keys)) {
209                 u64 sectors_start = keylist_sectors(keys);
210                 int ret = op->index_update_fn(op);
211
212                 BUG_ON(keylist_sectors(keys) && !ret);
213
214                 op->written += sectors_start - keylist_sectors(keys);
215
216                 if (ret) {
217                         __bcache_io_error(c, "btree IO error %i", ret);
218                         op->error = ret;
219                 }
220         }
221
222         for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++)
223                 if (op->open_buckets[i]) {
224                         bch2_open_bucket_put(c,
225                                              c->open_buckets +
226                                              op->open_buckets[i]);
227                         op->open_buckets[i] = 0;
228                 }
229
230         if (!(op->flags & BCH_WRITE_DONE))
231                 continue_at(cl, __bch2_write, op->io_wq);
232
233         if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
234                 bch2_journal_flush_seq_async(&c->journal,
235                                              *op_journal_seq(op),
236                                              cl);
237                 continue_at(cl, bch2_write_done, index_update_wq(op));
238         } else {
239                 continue_at_nobarrier(cl, bch2_write_done, NULL);
240         }
241 }
242
243 /**
244  * bch2_write_discard - discard a range of keys
245  *
246  * Used to implement discard, and to handle the case where a writethrough
247  * write hits a write error on the cache device.
248  */
249 static void bch2_write_discard(struct closure *cl)
250 {
251         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
252         struct bio *bio = &op->bio->bio;
253         struct bpos end = op->pos;
254
255         end.offset += bio_sectors(bio);
256
257         op->error = bch2_discard(op->c, op->pos, end, op->version,
258                                 &op->res, NULL, NULL);
259 }
260
261 /*
262  * Convert extents to be inserted to discards after an error:
263  */
264 static void bch2_write_io_error(struct closure *cl)
265 {
266         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
267
268         if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
269                 struct bkey_i *src = bch2_keylist_front(&op->insert_keys);
270                 struct bkey_i *dst = bch2_keylist_front(&op->insert_keys);
271
272                 /*
273                  * Our data write just errored, which means we've got a bunch
274                  * of keys to insert that point to data that wasn't
275                  * successfully written.
276                  *
277                  * We don't have to insert those keys but we still have to
278                  * invalidate that region of the cache - so, if we just strip
279                  * off all the pointers from the keys we'll accomplish just
280                  * that.
281                  */
282
283                 while (src != op->insert_keys.top) {
284                         struct bkey_i *n = bkey_next(src);
285
286                         set_bkey_val_u64s(&src->k, 0);
287                         src->k.type = KEY_TYPE_DISCARD;
288                         bkey_copy(dst, src);
289
290                         dst = bkey_next(dst);
291                         src = n;
292                 }
293
294                 op->insert_keys.top = dst;
295                 op->flags |= BCH_WRITE_DISCARD;
296         } else {
297                 /* TODO: We could try to recover from this. */
298                 while (!bch2_keylist_empty(&op->insert_keys))
299                         bch2_keylist_pop_front(&op->insert_keys);
300
301                 op->error = -EIO;
302                 op->flags |= BCH_WRITE_DONE;
303         }
304
305         bch2_write_index(cl);
306 }
307
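/*
 * Completion for data writes: propagate errors to the parent bio/closure and,
 * on a device error, reroute the closure through bch2_write_io_error() so the
 * pending index update doesn't end up pointing at bad data.
 */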
308 static void bch2_write_endio(struct bio *bio)
309 {
310         struct closure *cl = bio->bi_private;
311         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
312         struct bch_write_bio *wbio = to_wbio(bio);
313         struct bch_fs *c = wbio->c;
314         struct bio *orig = wbio->orig;
315         struct bch_dev *ca = wbio->ca;
316
317         if (bch2_dev_nonfatal_io_err_on(bio->bi_error, ca,
318                                        "data write"))
319                 set_closure_fn(cl, bch2_write_io_error, index_update_wq(op));
320
321         if (wbio->have_io_ref)
322                 percpu_ref_put(&ca->io_ref);
323
324         if (bio->bi_error && orig)
325                 orig->bi_error = bio->bi_error;
326
327         if (wbio->bounce)
328                 bch2_bio_free_pages_pool(c, bio);
329
330         if (wbio->put_bio)
331                 bio_put(bio);
332
333         if (orig)
334                 bio_endio(orig);
335         else
336                 closure_put(cl);
337 }
338
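/*
 * Build the per-extent nonce used for checksumming/encryption: word 0 packs
 * the crc nonce and uncompressed size, words 1-2 hold the low bits of the key
 * version, word 3 holds the high bits plus the compression type and is xored
 * with BCH_NONCE_EXTENT.
 */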
339 static struct nonce extent_nonce(struct bversion version,
340                                  unsigned nonce,
341                                  unsigned uncompressed_size,
342                                  unsigned compression_type)
343 {
344         return (struct nonce) {{
345                 [0] = cpu_to_le32((nonce                << 12) |
346                                   (uncompressed_size    << 22)),
347                 [1] = cpu_to_le32(version.lo),
348                 [2] = cpu_to_le32(version.lo >> 32),
349                 [3] = cpu_to_le32(version.hi |
350                                   (compression_type << 24)) ^ BCH_NONCE_EXTENT,
351         }};
352 }
353
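/*
 * Append an extent key for the fragment just written: advance op->pos,
 * record the crc/checksum entry and the pointers from the open bucket, then
 * push the key onto op->insert_keys.
 */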
354 static void init_append_extent(struct bch_write_op *op,
355                                unsigned compressed_size,
356                                unsigned uncompressed_size,
357                                unsigned compression_type,
358                                unsigned nonce,
359                                struct bch_csum csum, unsigned csum_type,
360                                struct open_bucket *ob)
361 {
362         struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
363
364         op->pos.offset += uncompressed_size;
365         e->k.p = op->pos;
366         e->k.size = uncompressed_size;
367         e->k.version = op->version;
368         bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
369
370         bch2_extent_crc_append(e, compressed_size,
371                               uncompressed_size,
372                               compression_type,
373                               nonce, csum, csum_type);
374
375         bch2_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
376                                       ob, compressed_size);
377
378         bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED));
379         bch2_keylist_push(&op->insert_keys);
380 }
381
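/*
 * Write as much of @orig as fits in the open bucket @ob, bouncing through a
 * separate bio when we need to checksum, compress or encrypt. Returns > 0 if
 * there's more data left to write, 0 if done, or a negative error code.
 */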
382 static int bch2_write_extent(struct bch_write_op *op,
383                             struct open_bucket *ob,
384                             struct bio *orig)
385 {
386         struct bch_fs *c = op->c;
387         struct bio *bio;
388         struct bch_write_bio *wbio;
389         unsigned key_to_write_offset = op->insert_keys.top_p -
390                 op->insert_keys.keys_p;
391         struct bkey_i *key_to_write;
392         unsigned csum_type = op->csum_type;
393         unsigned compression_type = op->compression_type;
394         int ret;
395
396         /* don't refetch csum type/compression type */
397         barrier();
398
399         /* Need to decompress data? */
400         if ((op->flags & BCH_WRITE_DATA_COMPRESSED) &&
401             (crc_uncompressed_size(NULL, &op->crc) != op->size ||
402              crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) {
403                 int ret;
404
405                 ret = bch2_bio_uncompress_inplace(c, orig, op->size, op->crc);
406                 if (ret)
407                         return ret;
408
409                 op->flags &= ~BCH_WRITE_DATA_COMPRESSED;
410         }
411
412         if (op->flags & BCH_WRITE_DATA_COMPRESSED) {
413                 init_append_extent(op,
414                                    crc_compressed_size(NULL, &op->crc),
415                                    crc_uncompressed_size(NULL, &op->crc),
416                                    op->crc.compression_type,
417                                    op->crc.nonce,
418                                    op->crc.csum,
419                                    op->crc.csum_type,
420                                    ob);
421
422                 bio                     = orig;
423                 wbio                    = to_wbio(bio);
424                 wbio->orig              = NULL;
425                 wbio->bounce            = false;
426                 wbio->put_bio           = false;
427                 ret                     = 0;
428         } else if (csum_type != BCH_CSUM_NONE ||
429                    compression_type != BCH_COMPRESSION_NONE) {
430                 /* all units here in bytes */
431                 unsigned total_output = 0, output_available =
432                         min(ob->sectors_free << 9, orig->bi_iter.bi_size);
433                 unsigned crc_nonce = bch2_csum_type_is_encryption(csum_type)
434                         ? op->nonce : 0;
435                 struct bch_csum csum;
436                 struct nonce nonce;
437
438                 bio = bio_alloc_bioset(GFP_NOIO,
439                                        DIV_ROUND_UP(output_available, PAGE_SIZE),
440                                        &c->bio_write);
441                 /*
442                  * XXX: can't use mempool for more than
443                  * BCH_COMPRESSED_EXTENT_MAX worth of pages
444                  */
445                 bch2_bio_alloc_pages_pool(c, bio, output_available);
446
447                 /* copy WRITE_SYNC flag */
448                 bio->bi_opf             = orig->bi_opf;
449                 wbio                    = to_wbio(bio);
450                 wbio->orig              = NULL;
451                 wbio->bounce            = true;
452                 wbio->put_bio           = true;
453
454                 do {
455                         unsigned fragment_compression_type = compression_type;
456                         size_t dst_len, src_len;
457
458                         bch2_bio_compress(c, bio, &dst_len,
459                                          orig, &src_len,
460                                          &fragment_compression_type);
461
462                         BUG_ON(!dst_len || dst_len > bio->bi_iter.bi_size);
463                         BUG_ON(!src_len || src_len > orig->bi_iter.bi_size);
464                         BUG_ON(dst_len & (block_bytes(c) - 1));
465                         BUG_ON(src_len & (block_bytes(c) - 1));
466
467                         swap(bio->bi_iter.bi_size, dst_len);
468                         nonce = extent_nonce(op->version,
469                                              crc_nonce,
470                                              src_len >> 9,
471                              fragment_compression_type);
472
473                         bch2_encrypt_bio(c, csum_type, nonce, bio);
474
475                         csum = bch2_checksum_bio(c, csum_type, nonce, bio);
476                         swap(bio->bi_iter.bi_size, dst_len);
477
478                         init_append_extent(op,
479                                            dst_len >> 9, src_len >> 9,
480                                            fragment_compression_type,
481                                            crc_nonce, csum, csum_type, ob);
482
483                         total_output += dst_len;
484                         bio_advance(bio, dst_len);
485                         bio_advance(orig, src_len);
486                 } while (bio->bi_iter.bi_size &&
487                          orig->bi_iter.bi_size &&
488                          !bch2_keylist_realloc(&op->insert_keys,
489                                               op->inline_keys,
490                                               ARRAY_SIZE(op->inline_keys),
491                                               BKEY_EXTENT_U64s_MAX));
492
493                 BUG_ON(total_output > output_available);
494
495                 memset(&bio->bi_iter, 0, sizeof(bio->bi_iter));
496                 bio->bi_iter.bi_size = total_output;
497
498                 /*
499                  * Free unneeded pages after compressing:
500                  */
501                 while (bio->bi_vcnt * PAGE_SIZE >
502                        round_up(bio->bi_iter.bi_size, PAGE_SIZE))
503                         mempool_free(bio->bi_io_vec[--bio->bi_vcnt].bv_page,
504                                      &c->bio_bounce_pages);
505
506                 ret = orig->bi_iter.bi_size != 0;
507         } else {
508                 bio = bio_next_split(orig, ob->sectors_free, GFP_NOIO,
509                                      &c->bio_write);
510
511                 wbio                    = to_wbio(bio);
512                 wbio->orig              = NULL;
513                 wbio->bounce            = false;
514                 wbio->put_bio           = bio != orig;
515
516                 init_append_extent(op, bio_sectors(bio), bio_sectors(bio),
517                                    compression_type, 0,
518                                    (struct bch_csum) { 0 }, csum_type, ob);
519
520                 ret = bio != orig;
521         }
522
523         bio->bi_end_io  = bch2_write_endio;
524         bio->bi_private = &op->cl;
525         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
526
527         closure_get(bio->bi_private);
528
529         /* might have done a realloc... */
530
531         key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
532
533         bch2_check_mark_super(c, key_to_write, false);
534
535         bch2_submit_wbio_replicas(to_wbio(bio), c, key_to_write);
536         return ret;
537 }
538
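/*
 * Main write loop: allocate an open bucket, write as much as fits and append
 * the resulting keys to op->insert_keys, repeating until the whole bio has
 * been written or we have to go flush keys via bch2_write_index().
 */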
539 static void __bch2_write(struct closure *cl)
540 {
541         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
542         struct bch_fs *c = op->c;
543         struct bio *bio = &op->bio->bio;
544         unsigned open_bucket_nr = 0;
545         struct open_bucket *b;
546         int ret;
547
548         memset(op->open_buckets, 0, sizeof(op->open_buckets));
549
550         if (op->flags & BCH_WRITE_DISCARD) {
551                 op->flags |= BCH_WRITE_DONE;
552                 bch2_write_discard(cl);
553                 bio_put(bio);
554                 continue_at(cl, bch2_write_done, index_update_wq(op));
555         }
556
557         /*
558          * Journal writes are marked REQ_PREFLUSH; if the original write was a
559          * flush, it'll wait on the journal write.
560          */
561         bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
562
563         do {
564                 EBUG_ON(bio->bi_iter.bi_sector != op->pos.offset);
565                 EBUG_ON(!bio_sectors(bio));
566
567                 if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
568                         continue_at(cl, bch2_write_index, index_update_wq(op));
569
570                 /* for the device pointers and 1 for the checksum */
571                 if (bch2_keylist_realloc(&op->insert_keys,
572                                         op->inline_keys,
573                                         ARRAY_SIZE(op->inline_keys),
574                                         BKEY_EXTENT_U64s_MAX))
575                         continue_at(cl, bch2_write_index, index_update_wq(op));
576
577                 b = bch2_alloc_sectors_start(c, op->wp,
578                         op->nr_replicas,
579                         c->opts.data_replicas_required,
580                         op->alloc_reserve,
581                         (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
582                 EBUG_ON(!b);
583
584                 if (unlikely(IS_ERR(b))) {
585                         if (unlikely(PTR_ERR(b) != -EAGAIN)) {
586                                 ret = PTR_ERR(b);
587                                 goto err;
588                         }
589
590                         /*
591                          * If we already have some keys, must insert them first
592                          * before allocating another open bucket. We only hit
593                          * this case if open_bucket_nr > 1.
594                          */
595                         if (!bch2_keylist_empty(&op->insert_keys))
596                                 continue_at(cl, bch2_write_index,
597                                             index_update_wq(op));
598
599                         /*
600                          * If we've looped, we're running out of a workqueue -
601                          * not the bch2_write() caller's context - and we don't
602                          * want to block the workqueue:
603                          */
604                         if (op->flags & BCH_WRITE_LOOPED)
605                                 continue_at(cl, __bch2_write, op->io_wq);
606
607                         /*
608                          * Otherwise, we do want to block the caller on alloc
609                          * failure instead of letting it queue up more and more
610                          * writes:
611                          * XXX: this technically needs a try_to_freeze() -
612                          * except that that's not safe because caller may have
613                          * issued other IO... hmm..
614                          */
615                         closure_sync(cl);
616                         continue;
617                 }
618
619                 BUG_ON(b - c->open_buckets == 0 ||
620                        b - c->open_buckets > U8_MAX);
621                 op->open_buckets[open_bucket_nr++] = b - c->open_buckets;
622
623                 ret = bch2_write_extent(op, b, bio);
624
625                 bch2_alloc_sectors_done(c, op->wp, b);
626
627                 if (ret < 0)
628                         goto err;
629         } while (ret);
630
631         op->flags |= BCH_WRITE_DONE;
632         continue_at(cl, bch2_write_index, index_update_wq(op));
633 err:
634         if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
635                 /*
636                  * If we were writing cached data, not doing the write is fine
637                  * so long as we discard whatever would have been overwritten -
638                  * then it's equivalent to doing the write and immediately
639                  * reclaiming it.
640                  */
641
642                 bch2_write_discard(cl);
643         } else {
644                 /*
645                  * Right now we can only error here if we went RO - the
646                  * allocation failed, but we already checked for -ENOSPC when we
647                  * got our reservation.
648                  *
649                  * XXX capacity might have changed, but we don't check for that
650                  * yet:
651                  */
652                 op->error = ret;
653         }
654
655         op->flags |= BCH_WRITE_DONE;
656
657         /*
658          * No reason not to insert keys for whatever data was successfully
659          * written (especially for a cmpxchg operation that's moving data
660          * around)
661          */
662         continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
663                     ? bch2_write_index
664                     : bch2_write_done, index_update_wq(op));
665 }
666
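/*
 * Timer callback for foreground_write_wakeup: release writes whose throttle
 * delay has expired (dropping the ref the wait list took on their closures),
 * re-arming the timer for the next waiter if there is one.
 */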
667 void bch2_wake_delayed_writes(unsigned long data)
668 {
669         struct bch_fs *c = (void *) data;
670         struct bch_write_op *op;
671         unsigned long flags;
672
673         spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
674
675         while ((op = c->write_wait_head)) {
676                 if (time_after(op->expires, jiffies)) {
677                         mod_timer(&c->foreground_write_wakeup, op->expires);
678                         break;
679                 }
680
681                 c->write_wait_head = op->next;
682                 if (!c->write_wait_head)
683                         c->write_wait_tail = NULL;
684
685                 closure_put(&op->cl);
686         }
687
688         spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
689 }
690
691 /**
692  * bch2_write - handle a write to a cache device or flash only volume
693  *
694  * This is the starting point for any data to end up in a cache device; it could
695  * be from a normal write, or a writeback write, or a write to a flash only
696  * volume - it's also used by the moving garbage collector to compact data in
697  * mostly empty buckets.
698  *
699  * It first writes the data to the cache, creating a list of keys to be inserted
700  * (if the data won't fit in a single open bucket, there will be multiple keys);
701  * after the data is written it calls bch_journal, and after the keys have been
702  * added to the next journal write they're inserted into the btree.
703  *
704  * It inserts the data in op->bio; bi_sector is used for the key offset, and
705  * op->inode is used for the key inode.
706  *
707  * If op->discard is true, instead of inserting the data it invalidates the
708  * region of the cache represented by op->bio and op->inode.
709  */
710 void bch2_write(struct closure *cl)
711 {
712         struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
713         struct bio *bio = &op->bio->bio;
714         struct bch_fs *c = op->c;
715         u64 inode = op->pos.inode;
716
717         if (c->opts.nochanges ||
718             !percpu_ref_tryget(&c->writes)) {
719                 __bcache_io_error(c, "read only");
720                 op->error = -EROFS;
721                 bch2_disk_reservation_put(c, &op->res);
722                 closure_return(cl);
723         }
724
725         if (bversion_zero(op->version) &&
726             bch2_csum_type_is_encryption(op->csum_type))
727                 op->version.lo =
728                         atomic64_inc_return(&c->key_version) + 1;
729
730         if (!(op->flags & BCH_WRITE_DISCARD))
731                 bch2_increment_clock(c, bio_sectors(bio), WRITE);
732
733         /* Don't call bch2_ratelimit_delay() if rate is >= 1 GB/sec */
734
735         if (c->foreground_write_ratelimit_enabled &&
736             c->foreground_write_pd.rate.rate < (1 << 30) &&
737             !(op->flags & BCH_WRITE_DISCARD) && op->wp->throttle) {
738                 unsigned long flags;
739                 u64 delay;
740
741                 spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
742                 bch2_ratelimit_increment(&c->foreground_write_pd.rate,
743                                         bio->bi_iter.bi_size);
744
745                 delay = bch2_ratelimit_delay(&c->foreground_write_pd.rate);
746
747                 if (delay >= HZ / 100) {
748                         trace_write_throttle(c, inode, bio, delay);
749
750                         closure_get(&op->cl); /* list takes a ref */
751
752                         op->expires = jiffies + delay;
753                         op->next = NULL;
754
755                         if (c->write_wait_tail)
756                                 c->write_wait_tail->next = op;
757                         else
758                                 c->write_wait_head = op;
759                         c->write_wait_tail = op;
760
761                         if (!timer_pending(&c->foreground_write_wakeup))
762                                 mod_timer(&c->foreground_write_wakeup,
763                                           op->expires);
764
765                         spin_unlock_irqrestore(&c->foreground_write_pd_lock,
766                                                flags);
767                         continue_at(cl, __bch2_write, index_update_wq(op));
768                 }
769
770                 spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
771         }
772
773         continue_at_nobarrier(cl, __bch2_write, NULL);
774 }
775
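/*
 * Initialize a write op with the filesystem's default checksum/compression
 * settings. Illustrative call sequence only (names here are placeholders,
 * actual callers live in the higher level IO paths):
 *
 *	bch2_write_op_init(&op, c, wbio, res, wp, pos, journal_seq, flags);
 *	closure_call(&op.cl, bch2_write, NULL, parent_cl);
 */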
776 void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
777                        struct bch_write_bio *bio, struct disk_reservation res,
778                        struct write_point *wp, struct bpos pos,
779                        u64 *journal_seq, unsigned flags)
780 {
781         EBUG_ON(res.sectors && !res.nr_replicas);
782
783         op->c           = c;
784         op->io_wq       = index_update_wq(op);
785         op->bio         = bio;
786         op->written     = 0;
787         op->error       = 0;
788         op->flags       = flags;
789         op->csum_type   = bch2_data_checksum_type(c);
790         op->compression_type = c->opts.compression;
791         op->nr_replicas = res.nr_replicas;
792         op->alloc_reserve = RESERVE_NONE;
793         op->nonce       = 0;
794         op->pos         = pos;
795         op->version     = ZERO_VERSION;
796         op->res         = res;
797         op->wp          = wp;
798
799         if (journal_seq) {
800                 op->journal_seq_p = journal_seq;
801                 op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
802         } else {
803                 op->journal_seq = 0;
804         }
805
806         op->index_update_fn = bch2_write_index_default;
807
808         bch2_keylist_init(&op->insert_keys,
809                           op->inline_keys,
810                           ARRAY_SIZE(op->inline_keys));
811
812         if (version_stress_test(c))
813                 get_random_bytes(&op->version, sizeof(op->version));
814 }
815
816 /* Discard */
817
818 /* bch2_discard - discard a range of keys from @start to @end.
819  * @c           filesystem
820  * @start       start position
821  *              NOTE: the discard starts at @start (inclusive)
822  * @end         end position
823  *              NOTE: the discard ends at @end (exclusive)
824  * @version     version of discard (0ULL if none)
825  *
826  * Returns:
827  *       0 on success
828  *      <0 on error
829  *
830  * XXX: this needs to be refactored with inode_truncate, or more
831  *      appropriately inode_truncate should call this
832  */
833 int bch2_discard(struct bch_fs *c, struct bpos start,
834                  struct bpos end, struct bversion version,
835                  struct disk_reservation *disk_res,
836                  struct extent_insert_hook *hook,
837                  u64 *journal_seq)
838 {
839         return bch2_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
840                                       disk_res, hook, journal_seq);
841 }
842
843 /* Cache promotion on read */
844
845 struct cache_promote_op {
846         struct closure          cl;
847         struct migrate_write    write;
848         struct bio_vec          bi_inline_vecs[0]; /* must be last */
849 };
850
851 /* Read */
852
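/*
 * Verify the checksum on the data we just read, then decrypt it and - if it
 * was compressed or bounced - decompress/copy it into the parent bio.
 */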
853 static int bio_checksum_uncompress(struct bch_fs *c,
854                                    struct bch_read_bio *rbio)
855 {
856         struct bio *src = &rbio->bio;
857         struct bio *dst = &bch2_rbio_parent(rbio)->bio;
858         struct bvec_iter dst_iter = rbio->parent_iter;
859         struct nonce nonce = extent_nonce(rbio->version,
860                                 rbio->crc.nonce,
861                                 crc_uncompressed_size(NULL, &rbio->crc),
862                                 rbio->crc.compression_type);
863         struct bch_csum csum;
864         int ret = 0;
865
866         /*
867          * reset iterator for checksumming and copying bounced data: here we've
868          * set rbio->compressed_size to the amount of data we actually read,
869          * which was not necessarily the full extent if we were only bouncing
870          * in order to promote
871          */
872         if (rbio->bounce) {
873                 src->bi_iter.bi_size    = crc_compressed_size(NULL, &rbio->crc) << 9;
874                 src->bi_iter.bi_idx     = 0;
875                 src->bi_iter.bi_bvec_done = 0;
876         } else {
877                 src->bi_iter = rbio->parent_iter;
878         }
879
880         csum = bch2_checksum_bio(c, rbio->crc.csum_type, nonce, src);
881         if (bch2_dev_nonfatal_io_err_on(bch2_crc_cmp(rbio->crc.csum, csum),
882                                         rbio->ca,
883                         "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
884                         rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
885                         rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
886                         rbio->crc.csum_type))
887                 ret = -EIO;
888
889         /*
890          * If there was a checksum error, still copy the data back - unless it
891          * was compressed, we don't want to decompress bad data:
892          */
893         if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
894                 if (!ret) {
895                         bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
896                         ret = bch2_bio_uncompress(c, src, dst,
897                                                  dst_iter, rbio->crc);
898                         if (ret)
899                                 __bcache_io_error(c, "decompression error");
900                 }
901         } else if (rbio->bounce) {
902                 bio_advance(src, rbio->crc.offset << 9);
903
904                 /* don't need to decrypt the entire bio: */
905                 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
906                 src->bi_iter.bi_size = dst_iter.bi_size;
907
908                 nonce = nonce_add(nonce, rbio->crc.offset << 9);
909
910                 bch2_encrypt_bio(c, rbio->crc.csum_type,
911                                 nonce, src);
912
913                 bio_copy_data_iter(dst, dst_iter,
914                                    src, src->bi_iter);
915         } else {
916                 bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
917         }
918
919         return ret;
920 }
921
922 static void bch2_rbio_free(struct bch_read_bio *rbio)
923 {
924         struct bch_fs *c = rbio->c;
925         struct bio *bio = &rbio->bio;
926
927         BUG_ON(rbio->ca);
928         BUG_ON(!rbio->split);
929
930         if (rbio->promote)
931                 kfree(rbio->promote);
932         if (rbio->bounce)
933                 bch2_bio_free_pages_pool(c, bio);
934
935         bio_put(bio);
936 }
937
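/*
 * Finish a read: drop the device ref, propagate any error to the parent bio
 * and complete it, freeing the rbio if it was a split.
 */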
938 static void bch2_rbio_done(struct bch_read_bio *rbio)
939 {
940         struct bio *orig = &bch2_rbio_parent(rbio)->bio;
941
942         percpu_ref_put(&rbio->ca->io_ref);
943         rbio->ca = NULL;
944
945         if (rbio->split) {
946                 if (rbio->bio.bi_error)
947                         orig->bi_error = rbio->bio.bi_error;
948
949                 bio_endio(orig);
950                 bch2_rbio_free(rbio);
951         } else {
952                 if (rbio->promote)
953                         kfree(rbio->promote);
954
955                 orig->bi_end_io = rbio->orig_bi_end_io;
956                 bio_endio_nodec(orig);
957         }
958 }
959
960 static void bch2_rbio_error(struct bch_read_bio *rbio, int error)
961 {
962         bch2_rbio_parent(rbio)->bio.bi_error = error;
963         bch2_rbio_done(rbio);
964 }
965
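/*
 * Drop the device ref and queue the rbio on the filesystem's retry list; it
 * will be resubmitted from bch2_read_retry_work().
 */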
966 static void bch2_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio)
967 {
968         unsigned long flags;
969
970         percpu_ref_put(&rbio->ca->io_ref);
971         rbio->ca = NULL;
972
973         spin_lock_irqsave(&c->read_retry_lock, flags);
974         bio_list_add(&c->read_retry_list, &rbio->bio);
975         spin_unlock_irqrestore(&c->read_retry_lock, flags);
976         queue_work(c->wq, &c->read_retry_work);
977 }
978
979 static void cache_promote_done(struct closure *cl)
980 {
981         struct cache_promote_op *op =
982                 container_of(cl, struct cache_promote_op, cl);
983
984         bch2_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
985         kfree(op);
986 }
987
988 /* Inner part that may run in process context */
989 static void __bch2_read_endio(struct work_struct *work)
990 {
991         struct bch_read_bio *rbio =
992                 container_of(work, struct bch_read_bio, work);
993         struct bch_fs *c = rbio->c;
994         int ret;
995
996         ret = bio_checksum_uncompress(c, rbio);
997         if (ret) {
998                 /*
999                  * Checksum error: if the bio wasn't bounced, we may have been
1000                  * reading into buffers owned by userspace (that userspace can
1001                  * scribble over) - retry the read, bouncing it this time:
1002                  */
1003                 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1004                         rbio->flags |= BCH_READ_FORCE_BOUNCE;
1005                         bch2_rbio_retry(c, rbio);
1006                 } else {
1007                         bch2_rbio_error(rbio, -EIO);
1008                 }
1009                 return;
1010         }
1011
1012         if (rbio->promote) {
1013                 struct cache_promote_op *promote = rbio->promote;
1014                 struct closure *cl = &promote->cl;
1015
1016                 BUG_ON(!rbio->split || !rbio->bounce);
1017
1018                 trace_promote(&rbio->bio);
1019
1020                 /* we now own pages: */
1021                 swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt);
1022                 rbio->promote = NULL;
1023
1024                 bch2_rbio_done(rbio);
1025
1026                 closure_init(cl, &c->cl);
1027                 closure_call(&promote->write.op.cl, bch2_write, c->wq, cl);
1028                 closure_return_with_destructor(cl, cache_promote_done);
1029         } else {
1030                 bch2_rbio_done(rbio);
1031         }
1032 }
1033
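/*
 * Read completion (may run in interrupt context): handle IO errors and stale
 * cached pointers here, then either finish inline or punt the checksum/
 * decompression work to a workqueue.
 */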
1034 static void bch2_read_endio(struct bio *bio)
1035 {
1036         struct bch_read_bio *rbio =
1037                 container_of(bio, struct bch_read_bio, bio);
1038         struct bch_fs *c = rbio->c;
1039
1040         if (bch2_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) {
1041                 /* XXX: retry IO errors when we have another replica */
1042                 bch2_rbio_error(rbio, bio->bi_error);
1043                 return;
1044         }
1045
1046         if (rbio->ptr.cached &&
1047             (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1048              ptr_stale(rbio->ca, &rbio->ptr))) {
1049                 atomic_long_inc(&c->read_realloc_races);
1050
1051                 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1052                         bch2_rbio_retry(c, rbio);
1053                 else
1054                         bch2_rbio_error(rbio, -EINTR);
1055                 return;
1056         }
1057
1058         if (rbio->crc.compression_type ||
1059             bch2_csum_type_is_encryption(rbio->crc.csum_type))
1060                 queue_work(system_unbound_wq, &rbio->work);
1061         else if (rbio->crc.csum_type)
1062                 queue_work(system_highpri_wq, &rbio->work);
1063         else
1064                 __bch2_read_endio(&rbio->work);
1065 }
1066
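/*
 * Decide whether read data should also be written back (promoted) to a faster
 * tier: only for BCH_READ_PROMOTE reads, only while the filesystem is
 * writable, and only if the data came from a slower tier than the fastest.
 */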
1067 static bool should_promote(struct bch_fs *c,
1068                            struct extent_pick_ptr *pick, unsigned flags)
1069 {
1070         if (!(flags & BCH_READ_PROMOTE))
1071                 return false;
1072
1073         if (percpu_ref_is_dying(&c->writes))
1074                 return false;
1075
1076         return c->fastest_tier &&
1077                 c->fastest_tier < c->tiers + pick->ca->mi.tier;
1078 }
1079
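/*
 * Issue the read for a single extent: decide whether the data needs to be
 * bounced (for checksum verification, decryption, decompression or
 * promotion), set up the rbio and optional promote write, and submit it.
 */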
1080 void bch2_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
1081                           struct bvec_iter iter, struct bkey_s_c k,
1082                           struct extent_pick_ptr *pick, unsigned flags)
1083 {
1084         struct bch_read_bio *rbio;
1085         struct cache_promote_op *promote_op = NULL;
1086         unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
1087         bool bounce = false, split, read_full = false;
1088
1089         bch2_increment_clock(c, bio_sectors(&orig->bio), READ);
1090
1091         EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
1092                 k.k->p.offset < bvec_iter_end_sector(iter));
1093
1094         /* only promote if we're not reading from the fastest tier: */
1095
1096         /*
1097          * XXX: multiple promotes can race with each other, wastefully. Keep a
1098          * list of outstanding promotes?
1099          */
1100         if (should_promote(c, pick, flags)) {
1101                 /*
1102                  * biovec needs to be big enough to hold decompressed data, if
1103                  * the bch2_write_extent() has to decompress/recompress it:
1104                  */
1105                 unsigned sectors =
1106                         max_t(unsigned, k.k->size,
1107                               crc_uncompressed_size(NULL, &pick->crc));
1108                 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1109
1110                 promote_op = kmalloc(sizeof(*promote_op) +
1111                                 sizeof(struct bio_vec) * pages, GFP_NOIO);
1112                 if (promote_op) {
1113                         struct bio *promote_bio = &promote_op->write.wbio.bio;
1114
1115                         bio_init(promote_bio);
1116                         promote_bio->bi_max_vecs = pages;
1117                         promote_bio->bi_io_vec  = promote_bio->bi_inline_vecs;
1118                         bounce = true;
1119                         /* could also set read_full */
1120                 }
1121         }
1122
1123         /*
1124          * note: if compression_type and csum_type are both none, then
1125          * compressed/uncompressed size is zero
1126          */
1127         if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
1128             (pick->crc.csum_type != BCH_CSUM_NONE &&
1129              (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) ||
1130               (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
1131                (flags & BCH_READ_USER_MAPPED)) ||
1132               (flags & BCH_READ_FORCE_BOUNCE)))) {
1133                 read_full = true;
1134                 bounce = true;
1135         }
1136
1137         if (bounce) {
1138                 unsigned sectors = read_full
1139                         ? (crc_compressed_size(NULL, &pick->crc) ?: k.k->size)
1140                         : bvec_iter_sectors(iter);
1141
1142                 rbio = container_of(bio_alloc_bioset(GFP_NOIO,
1143                                         DIV_ROUND_UP(sectors, PAGE_SECTORS),
1144                                         &c->bio_read_split),
1145                                     struct bch_read_bio, bio);
1146
1147                 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1148                 split = true;
1149         } else if (!(flags & BCH_READ_MAY_REUSE_BIO) ||
1150                    !(flags & BCH_READ_IS_LAST)) {
1151                 /*
1152                  * Have to clone if there were any splits, due to error
1153                  * reporting issues: if a split errored and retrying didn't
1154                  * work, then when it reports the error to its parent (us) we
1155                  * don't know whether the error was from our bio (and we should
1156                  * retry) or from the whole bio (in which case we don't want to
1157                  * retry and lose the error).
1158                  */
1159                 rbio = container_of(bio_clone_fast(&orig->bio,
1160                                         GFP_NOIO, &c->bio_read_split),
1161                                     struct bch_read_bio, bio);
1162                 rbio->bio.bi_iter = iter;
1163                 split = true;
1164         } else {
1165                 rbio = orig;
1166                 rbio->bio.bi_iter = iter;
1167                 split = false;
1168                 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1169         }
1170
1171         if (!(flags & BCH_READ_IS_LAST))
1172                 __bio_inc_remaining(&orig->bio);
1173
1174         if (split)
1175                 rbio->parent    = orig;
1176         else
1177                 rbio->orig_bi_end_io = orig->bio.bi_end_io;
1178         rbio->parent_iter       = iter;
1179
1180         rbio->flags             = flags;
1181         rbio->bounce            = bounce;
1182         rbio->split             = split;
1183         rbio->c                 = c;
1184         rbio->ca                = pick->ca;
1185         rbio->ptr               = pick->ptr;
1186         rbio->crc               = pick->crc;
1187         /*
1188          * crc.compressed_size will be 0 if there wasn't any checksum
1189          * information; we also need to stash the original size of the bio
1190          * if we bounced (which isn't necessarily the original key size, if
1191          * we bounced only for promoting)
1192          */
1193         rbio->crc._compressed_size = bio_sectors(&rbio->bio) - 1;
1194         rbio->version           = k.k->version;
1195         rbio->promote           = promote_op;
1196         rbio->inode             = k.k->p.inode;
1197         INIT_WORK(&rbio->work, __bch2_read_endio);
1198
1199         rbio->bio.bi_bdev       = pick->ca->disk_sb.bdev;
1200         rbio->bio.bi_opf        = orig->bio.bi_opf;
1201         rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
1202         rbio->bio.bi_end_io     = bch2_read_endio;
1203
1204         if (promote_op) {
1205                 struct bio *promote_bio = &promote_op->write.wbio.bio;
1206
1207                 promote_bio->bi_iter = rbio->bio.bi_iter;
1208                 memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
1209                        sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1210
1211                 bch2_migrate_write_init(c, &promote_op->write,
1212                                        &c->promote_write_point,
1213                                        k, NULL,
1214                                        BCH_WRITE_ALLOC_NOWAIT|
1215                                        BCH_WRITE_CACHED);
1216                 promote_op->write.promote = true;
1217
1218                 if (rbio->crc.compression_type) {
1219                         promote_op->write.op.flags |= BCH_WRITE_DATA_COMPRESSED;
1220                         promote_op->write.op.crc = rbio->crc;
1221                         promote_op->write.op.size = k.k->size;
1222                 } else if (read_full) {
1223                         /*
1224                          * Adjust bio to correspond to _live_ portion of @k -
1225                          * which might be less than what we're actually reading:
1226                          */
1227                         bio_advance(promote_bio, rbio->crc.offset << 9);
1228                         BUG_ON(bio_sectors(promote_bio) < k.k->size);
1229                         promote_bio->bi_iter.bi_size = k.k->size << 9;
1230                 } else {
1231                         /*
1232                          * Set insert pos to correspond to what we're actually
1233                          * reading:
1234                          */
1235                         promote_op->write.op.pos.offset = iter.bi_sector;
1236                 }
1237
1238                 promote_bio->bi_iter.bi_sector =
1239                         promote_op->write.op.pos.offset;
1240         }
1241
1242         /* _after_ promote code has looked at rbio->crc.offset */
1243         if (read_full)
1244                 rbio->crc.offset += skip;
1245         else
1246                 rbio->bio.bi_iter.bi_sector += skip;
1247
1248         rbio->submit_time_us = local_clock_us();
1249
1250         if (bounce)
1251                 trace_read_bounce(&rbio->bio);
1252
1253         if (!(flags & BCH_READ_IS_LAST))
1254                 trace_read_split(&rbio->bio);
1255
1256         generic_make_request(&rbio->bio);
1257 }
1258
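/*
 * Walk the extents btree over the range covered by @bvec_iter, issuing one
 * read per extent and zero-filling holes, until the request is complete.
 */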
1259 static void bch2_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
1260                           struct bvec_iter bvec_iter, u64 inode,
1261                           unsigned flags)
1262 {
1263         struct bio *bio = &rbio->bio;
1264         struct btree_iter iter;
1265         struct bkey_s_c k;
1266         int ret;
1267
1268         for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
1269                                       POS(inode, bvec_iter.bi_sector), k) {
1270                 BKEY_PADDED(k) tmp;
1271                 struct extent_pick_ptr pick;
1272                 unsigned bytes, sectors;
1273                 bool is_last;
1274
1275                 /*
1276                  * Unlock the iterator while the btree node's lock is still in
1277                  * cache, before doing the IO:
1278                  */
1279                 bkey_reassemble(&tmp.k, k);
1280                 k = bkey_i_to_s_c(&tmp.k);
1281                 bch2_btree_iter_unlock(&iter);
1282
1283                 bch2_extent_pick_ptr(c, k, &pick);
1284                 if (IS_ERR(pick.ca)) {
1285                         bcache_io_error(c, bio, "no device to read from");
1286                         bio_endio(bio);
1287                         return;
1288                 }
1289
1290                 sectors = min_t(u64, k.k->p.offset,
1291                                 bvec_iter_end_sector(bvec_iter)) -
1292                         bvec_iter.bi_sector;
1293                 bytes = sectors << 9;
1294                 is_last = bytes == bvec_iter.bi_size;
1295                 swap(bvec_iter.bi_size, bytes);
1296
1297                 if (is_last)
1298                         flags |= BCH_READ_IS_LAST;
1299
1300                 if (pick.ca) {
1301                         PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
1302                                 c->prio_clock[READ].hand;
1303
1304                         bch2_read_extent_iter(c, rbio, bvec_iter,
1305                                              k, &pick, flags);
1306
1307                         flags &= ~BCH_READ_MAY_REUSE_BIO;
1308                 } else {
1309                         zero_fill_bio_iter(bio, bvec_iter);
1310
1311                         if (is_last)
1312                                 bio_endio(bio);
1313                 }
1314
1315                 if (is_last)
1316                         return;
1317
1318                 swap(bvec_iter.bi_size, bytes);
1319                 bio_advance_iter(bio, &bvec_iter, bytes);
1320         }
1321
1322         /*
1323          * If we get here, it better have been because there was an error
1324          * reading a btree node
1325          */
1326         ret = bch2_btree_iter_unlock(&iter);
1327         BUG_ON(!ret);
1328         bcache_io_error(c, bio, "btree IO error %i", ret);
1329         bio_endio(bio);
1330 }
1331
1332 void bch2_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
1333 {
1334         bch2_read_iter(c, bio, bio->bio.bi_iter, inode,
1335                       BCH_READ_RETRY_IF_STALE|
1336                       BCH_READ_PROMOTE|
1337                       BCH_READ_MAY_REUSE_BIO|
1338                       BCH_READ_USER_MAPPED);
1339 }
1340
1341 /**
1342  * bch2_read_retry - re-submit a bio originally from bch2_read()
1343  */
1344 static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio)
1345 {
1346         struct bch_read_bio *parent = bch2_rbio_parent(rbio);
1347         struct bvec_iter iter = rbio->parent_iter;
1348         unsigned flags = rbio->flags;
1349         u64 inode = rbio->inode;
1350
1351         trace_read_retry(&rbio->bio);
1352
1353         if (rbio->split)
1354                 bch2_rbio_free(rbio);
1355         else
1356                 rbio->bio.bi_end_io = rbio->orig_bi_end_io;
1357
1358         bch2_read_iter(c, parent, iter, inode, flags);
1359 }
1360
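/*
 * Workqueue function that drains the read retry list, resubmitting each bio
 * via bch2_read_retry().
 */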
1361 void bch2_read_retry_work(struct work_struct *work)
1362 {
1363         struct bch_fs *c = container_of(work, struct bch_fs,
1364                                            read_retry_work);
1365         struct bch_read_bio *rbio;
1366         struct bio *bio;
1367         unsigned long flags;
1368
1369         while (1) {
1370                 spin_lock_irqsave(&c->read_retry_lock, flags);
1371                 bio = bio_list_pop(&c->read_retry_list);
1372                 spin_unlock_irqrestore(&c->read_retry_lock, flags);
1373
1374                 if (!bio)
1375                         break;
1376
1377                 rbio = container_of(bio, struct bch_read_bio, bio);
1378                 bch2_read_retry(c, rbio);
1379         }
1380 }