/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "clock.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "super-io.h"

#include <linux/blkdev.h>
#include <linux/random.h>

#include <trace/events/bcachefs.h>

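/*
 * Open-coded equivalent of bio_inc_remaining(): take an extra ref on @bio's
 * remaining count and mark it BIO_CHAIN, so completions of cloned child bios
 * don't complete the parent until the extra ref is dropped via bio_endio().
 */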
static inline void __bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
				     bool *using_mempool)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];

	if (likely(!*using_mempool)) {
		bv->bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bv->bv_page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
	}

	bv->bv_len = PAGE_SIZE;
	bv->bv_offset = 0;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t bytes)
{
	bool using_mempool = false;

	bio->bi_iter.bi_size = bytes;

	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
		bch2_bio_alloc_page_pool(c, bio, &using_mempool);

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Bios with headers */

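/*
 * Submit @wbio for @k to every device the extent points to: the bio is cloned
 * for all but the last pointer, REQ_FUA is set when the journal doesn't flush
 * the target device, and pointers to devices that have gone away fail with an
 * IO error.
 */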
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       const struct bkey_i *k)
{
	struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	wbio->split = false;
	wbio->c = c;

	extent_for_each_ptr(e, ptr) {
		ca = c->devs[ptr->dev];

		if (ptr + 1 < &extent_entry_last(e)->ptr) {
			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
						   &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->c			= c;
			n->orig			= &wbio->bio;
			n->bounce		= false;
			n->split		= true;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			__bio_inc_remaining(n->orig);
		} else {
			n = wbio;
		}

		if (!journal_flushes_device(ca))
			n->bio.bi_opf |= REQ_FUA;

		n->ca			= ca;
		n->submit_time_us	= local_clock_us();
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(percpu_ref_tryget(&ca->io_ref))) {
			n->have_io_ref		= true;
			n->bio.bi_bdev		= ca->disk_sb.bdev;
			generic_make_request(&n->bio);
		} else {
			n->have_io_ref		= false;
			bcache_io_error(c, &n->bio, "device has been removed");
			bio_endio(&n->bio);
		}
	}
}

/* IO errors */

/* Writes */

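/*
 * Pick the workqueue the index update for this write should run on: copygc
 * writes use the dedicated copygc workqueue, everything else uses the
 * filesystem's general workqueue.
 */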
static struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
	return op->alloc_reserve == RESERVE_MOVINGGC
		? op->c->copygc_wq
		: op->c->wq;
}

static void __bch2_write(struct closure *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	BUG_ON(!(op->flags & BCH_WRITE_DONE));

	if (!op->error && (op->flags & BCH_WRITE_FLUSH))
		op->error = bch2_journal_error(&op->c->journal);

	bch2_disk_reservation_put(op->c, &op->res);
	percpu_ref_put(&op->c->writes);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);
	closure_return(cl);
}

static u64 keylist_sectors(struct keylist *keys)
{
	struct bkey_i *k;
	u64 ret = 0;

	for_each_keylist_key(keys, k)
		ret += k->k.size;

	return ret;
}

static int bch2_write_index_default(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;
	int ret;

	bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_INTENT);

	ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
					NULL, op_journal_seq(op),
					BTREE_INSERT_NOFAIL);
	bch2_btree_iter_unlock(&iter);

	return ret;
}

/**
 * bch2_write_index - after a write, update index to point to new data
 */
static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned i;

	op->flags |= BCH_WRITE_LOOPED;

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);
		int ret = op->index_update_fn(op);

		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret) {
			__bcache_io_error(c, "btree IO error %i", ret);
			op->error = ret;
		}
	}

	for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++)
		if (op->open_buckets[i]) {
			bch2_open_bucket_put(c,
					     c->open_buckets +
					     op->open_buckets[i]);
			op->open_buckets[i] = 0;
		}

	if (!(op->flags & BCH_WRITE_DONE))
		continue_at(cl, __bch2_write, op->io_wq);

	if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
		bch2_journal_flush_seq_async(&c->journal,
					     *op_journal_seq(op),
					     cl);
		continue_at(cl, bch2_write_done, index_update_wq(op));
	} else {
		continue_at_nobarrier(cl, bch2_write_done, NULL);
	}
}

/**
 * bch2_write_discard - discard range of keys
 *
 * Used to implement discard, and to handle when writethrough write hits
 * a write error on the cache device.
 */
static void bch2_write_discard(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->bio->bio;
	struct bpos end = op->pos;

	end.offset += bio_sectors(bio);

	op->error = bch2_discard(op->c, op->pos, end, op->version,
				 &op->res, NULL, NULL);
}

/*
 * Convert extents to be inserted to discards after an error:
 */
static void bch2_write_io_error(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
		struct bkey_i *src = bch2_keylist_front(&op->insert_keys);
		struct bkey_i *dst = bch2_keylist_front(&op->insert_keys);

		/*
		 * Our data write just errored, which means we've got a bunch
		 * of keys to insert that point to data that wasn't
		 * successfully written.
		 *
		 * We don't have to insert those keys but we still have to
		 * invalidate that region of the cache - so, if we just strip
		 * off all the pointers from the keys we'll accomplish just
		 * that.
		 */

		while (src != op->insert_keys.top) {
			struct bkey_i *n = bkey_next(src);

			set_bkey_val_u64s(&src->k, 0);
			src->k.type = KEY_TYPE_DISCARD;
			bkey_copy(dst, src);

			dst = bkey_next(dst);
			src = n;
		}

		op->insert_keys.top = dst;
		op->flags |= BCH_WRITE_DISCARD;
	} else {
		/* TODO: We could try to recover from this. */
		while (!bch2_keylist_empty(&op->insert_keys))
			bch2_keylist_pop_front(&op->insert_keys);

		op->error = -EIO;
		op->flags |= BCH_WRITE_DONE;
	}

	bch2_write_index(cl);
}

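/*
 * Completion for data writes: record (nonfatal) device errors and reroute the
 * closure to the error path, drop the device io ref, propagate the error to
 * the parent bio, free bounce pages, and finally either end the parent bio or
 * drop our ref on the write op's closure.
 */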
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_fs *c = wbio->c;
	struct bio *orig = wbio->orig;
	struct bch_dev *ca = wbio->ca;

	if (bch2_dev_nonfatal_io_err_on(bio->bi_error, ca,
					"data write"))
		set_closure_fn(cl, bch2_write_io_error, index_update_wq(op));

	if (wbio->have_io_ref)
		percpu_ref_put(&ca->io_ref);

	if (bio->bi_error && orig)
		orig->bi_error = bio->bi_error;

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (orig)
		bio_endio(orig);
	else
		closure_put(cl);
}

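/*
 * Nonce layout for extents: word 0 packs the crc nonce (from bit 12) and the
 * uncompressed size (from bit 22), words 1-2 hold the 64 bit version, and
 * word 3 holds version.hi with the compression type in the top byte, xored
 * with BCH_NONCE_EXTENT.
 */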
static struct nonce extent_nonce(struct bversion version,
				 unsigned nonce,
				 unsigned uncompressed_size,
				 unsigned compression_type)
{
	return (struct nonce) {{
		[0] = cpu_to_le32((nonce		<< 12) |
				  (uncompressed_size	<< 22)),
		[1] = cpu_to_le32(version.lo),
		[2] = cpu_to_le32(version.lo >> 32),
		[3] = cpu_to_le32(version.hi |
				  (compression_type << 24)) ^ BCH_NONCE_EXTENT,
	}};
}

static void init_append_extent(struct bch_write_op *op,
			       unsigned compressed_size,
			       unsigned uncompressed_size,
			       unsigned compression_type,
			       unsigned nonce,
			       struct bch_csum csum, unsigned csum_type,
			       struct open_bucket *ob)
{
	struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);

	op->pos.offset += uncompressed_size;
	e->k.p = op->pos;
	e->k.size = uncompressed_size;
	e->k.version = op->version;
	bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);

	bch2_extent_crc_append(e, compressed_size,
			       uncompressed_size,
			       compression_type,
			       nonce, csum, csum_type);

	bch2_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
				       ob, compressed_size);

	bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED));
	bch2_keylist_push(&op->insert_keys);
}

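/*
 * Write out as much of @orig as will fit in open bucket @ob, appending the
 * resulting extent key(s) to op->insert_keys. There are three paths: data
 * that's already compressed is passed through unchanged; data that needs
 * checksumming/compression/encryption is bounced through a pool-allocated
 * bio; otherwise we just split off as much of @orig as fits. Returns a
 * positive value if some of @orig still remains to be written, 0 when done,
 * or a negative error code.
 */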
static int bch2_write_extent(struct bch_write_op *op,
			     struct open_bucket *ob,
			     struct bio *orig)
{
	struct bch_fs *c = op->c;
	struct bio *bio;
	struct bch_write_bio *wbio;
	unsigned key_to_write_offset = op->insert_keys.top_p -
		op->insert_keys.keys_p;
	struct bkey_i *key_to_write;
	unsigned csum_type = op->csum_type;
	unsigned compression_type = op->compression_type;
	int ret;

	/* don't refetch csum type/compression type */
	barrier();

	/* Need to decompress data? */
	if ((op->flags & BCH_WRITE_DATA_COMPRESSED) &&
	    (crc_uncompressed_size(NULL, &op->crc) != op->size ||
	     crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) {
		int ret;

		ret = bch2_bio_uncompress_inplace(c, orig, op->size, op->crc);
		if (ret)
			return ret;

		op->flags &= ~BCH_WRITE_DATA_COMPRESSED;
	}

	if (op->flags & BCH_WRITE_DATA_COMPRESSED) {
		init_append_extent(op,
				   crc_compressed_size(NULL, &op->crc),
				   crc_uncompressed_size(NULL, &op->crc),
				   op->crc.compression_type,
				   op->crc.nonce,
				   op->crc.csum,
				   op->crc.csum_type,
				   ob);

		bio			= orig;
		wbio			= to_wbio(bio);
		wbio->orig		= NULL;
		wbio->bounce		= false;
		wbio->put_bio		= false;
		ret			= 0;
	} else if (csum_type != BCH_CSUM_NONE ||
		   compression_type != BCH_COMPRESSION_NONE) {
		/* all units here in bytes */
		unsigned total_output = 0, output_available =
			min(ob->sectors_free << 9, orig->bi_iter.bi_size);
		unsigned crc_nonce = bch2_csum_type_is_encryption(csum_type)
			? op->nonce : 0;
		struct bch_csum csum;
		struct nonce nonce;

		bio = bio_alloc_bioset(GFP_NOIO,
				       DIV_ROUND_UP(output_available, PAGE_SIZE),
				       &c->bio_write);
		/*
		 * XXX: can't use mempool for more than
		 * BCH_COMPRESSED_EXTENT_MAX worth of pages
		 */
		bch2_bio_alloc_pages_pool(c, bio, output_available);

		/* copy WRITE_SYNC flag */
		bio->bi_opf		= orig->bi_opf;
		wbio			= to_wbio(bio);
		wbio->orig		= NULL;
		wbio->bounce		= true;
		wbio->put_bio		= true;

		do {
			unsigned fragment_compression_type = compression_type;
			size_t dst_len, src_len;

			bch2_bio_compress(c, bio, &dst_len,
					  orig, &src_len,
					  &fragment_compression_type);

			BUG_ON(!dst_len || dst_len > bio->bi_iter.bi_size);
			BUG_ON(!src_len || src_len > orig->bi_iter.bi_size);
			BUG_ON(dst_len & (block_bytes(c) - 1));
			BUG_ON(src_len & (block_bytes(c) - 1));

			swap(bio->bi_iter.bi_size, dst_len);
			nonce = extent_nonce(op->version,
					     crc_nonce,
					     src_len >> 9,
					     fragment_compression_type);

			bch2_encrypt_bio(c, csum_type, nonce, bio);

			csum = bch2_checksum_bio(c, csum_type, nonce, bio);
			swap(bio->bi_iter.bi_size, dst_len);

			init_append_extent(op,
					   dst_len >> 9, src_len >> 9,
					   fragment_compression_type,
					   crc_nonce, csum, csum_type, ob);

			total_output += dst_len;
			bio_advance(bio, dst_len);
			bio_advance(orig, src_len);
		} while (bio->bi_iter.bi_size &&
			 orig->bi_iter.bi_size &&
			 !bch2_keylist_realloc(&op->insert_keys,
					       op->inline_keys,
					       ARRAY_SIZE(op->inline_keys),
					       BKEY_EXTENT_U64s_MAX));

		BUG_ON(total_output > output_available);

		memset(&bio->bi_iter, 0, sizeof(bio->bi_iter));
		bio->bi_iter.bi_size = total_output;

		/*
		 * Free unneeded pages after compressing:
		 */
		while (bio->bi_vcnt * PAGE_SIZE >
		       round_up(bio->bi_iter.bi_size, PAGE_SIZE))
			mempool_free(bio->bi_io_vec[--bio->bi_vcnt].bv_page,
				     &c->bio_bounce_pages);

		ret = orig->bi_iter.bi_size != 0;
	} else {
		bio = bio_next_split(orig, ob->sectors_free, GFP_NOIO,
				     &c->bio_write);

		wbio			= to_wbio(bio);
		wbio->orig		= NULL;
		wbio->bounce		= false;
		wbio->put_bio		= bio != orig;

		init_append_extent(op, bio_sectors(bio), bio_sectors(bio),
				   compression_type, 0,
				   (struct bch_csum) { 0 }, csum_type, ob);

		ret = bio != orig;
	}

	bio->bi_end_io	= bch2_write_endio;
	bio->bi_private	= &op->cl;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	closure_get(bio->bi_private);

	/* might have done a realloc... */

	key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);

	bch2_check_mark_super(c, bkey_i_to_s_c_extent(key_to_write),
			      BCH_DATA_USER);

	bch2_submit_wbio_replicas(to_wbio(bio), c, key_to_write);
	return ret;
}

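/*
 * Core write loop: repeatedly allocate an open bucket, write as much as fits
 * into it with bch2_write_extent(), and hand the resulting keys off to
 * bch2_write_index(); the closure bounces between this function and the index
 * update until the whole bio has been written or we hit an error.
 */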
static void __bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct bio *bio = &op->bio->bio;
	unsigned open_bucket_nr = 0;
	struct open_bucket *b;
	int ret;

	memset(op->open_buckets, 0, sizeof(op->open_buckets));

	if (op->flags & BCH_WRITE_DISCARD) {
		op->flags |= BCH_WRITE_DONE;
		bch2_write_discard(cl);
		bio_put(bio);
		continue_at(cl, bch2_write_done, index_update_wq(op));
	}

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		EBUG_ON(bio->bi_iter.bi_sector != op->pos.offset);
		EBUG_ON(!bio_sectors(bio));

		if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		/* for the device pointers and 1 for the chksum */
		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 BKEY_EXTENT_U64s_MAX))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		b = bch2_alloc_sectors_start(c, op->wp,
			op->nr_replicas,
			c->opts.data_replicas_required,
			op->alloc_reserve,
			(op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
		EBUG_ON(!b);

		if (unlikely(IS_ERR(b))) {
			if (unlikely(PTR_ERR(b) != -EAGAIN)) {
				ret = PTR_ERR(b);
				goto err;
			}

			/*
			 * If we already have some keys, must insert them first
			 * before allocating another open bucket. We only hit
			 * this case if open_bucket_nr > 1.
			 */
			if (!bch2_keylist_empty(&op->insert_keys))
				continue_at(cl, bch2_write_index,
					    index_update_wq(op));

			/*
			 * If we've looped, we're running out of a workqueue -
			 * not the bch2_write() caller's context - and we don't
			 * want to block the workqueue:
			 */
			if (op->flags & BCH_WRITE_LOOPED)
				continue_at(cl, __bch2_write, op->io_wq);

			/*
			 * Otherwise, we do want to block the caller on alloc
			 * failure instead of letting it queue up more and more
			 * writes:
			 * XXX: this technically needs a try_to_freeze() -
			 * except that that's not safe because caller may have
			 * issued other IO... hmm..
			 */
			closure_sync(cl);
			continue;
		}

		BUG_ON(b - c->open_buckets == 0 ||
		       b - c->open_buckets > U8_MAX);
		op->open_buckets[open_bucket_nr++] = b - c->open_buckets;

		ret = bch2_write_extent(op, b, bio);

		bch2_alloc_sectors_done(c, op->wp, b);

		if (ret < 0)
			goto err;
	} while (ret);

	op->flags |= BCH_WRITE_DONE;
	continue_at(cl, bch2_write_index, index_update_wq(op));
err:
	if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
		/*
		 * If we were writing cached data, not doing the write is fine
		 * so long as we discard whatever would have been overwritten -
		 * then it's equivalent to doing the write and immediately
		 * reclaiming it.
		 */

		bch2_write_discard(cl);
	} else {
		/*
		 * Right now we can only error here if we went RO - the
		 * allocation failed, but we already checked for -ENOSPC when we
		 * got our reservation.
		 *
		 * XXX capacity might have changed, but we don't check for that
		 * yet:
		 */
		op->error = ret;
	}

	op->flags |= BCH_WRITE_DONE;

	/*
	 * No reason not to insert keys for whatever data was successfully
	 * written (especially for a cmpxchg operation that's moving data
	 * around)
	 */
	continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
		    ? bch2_write_index
		    : bch2_write_done, index_update_wq(op));
}

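/*
 * Timer callback for throttled foreground writes: wake up (i.e. drop the
 * closure ref on) every queued write whose delay has expired, and re-arm the
 * timer for the first one that hasn't.
 */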
void bch2_wake_delayed_writes(unsigned long data)
{
	struct bch_fs *c = (void *) data;
	struct bch_write_op *op;
	unsigned long flags;

	spin_lock_irqsave(&c->foreground_write_pd_lock, flags);

	while ((op = c->write_wait_head)) {
		if (time_after(op->expires, jiffies)) {
			mod_timer(&c->foreground_write_wakeup, op->expires);
			break;
		}

		c->write_wait_head = op->next;
		if (!c->write_wait_head)
			c->write_wait_tail = NULL;

		closure_put(&op->cl);
	}

	spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
}

/**
 * bch2_write - handle a write to a cache device or flash only volume
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->bio->bio;
	struct bch_fs *c = op->c;
	u64 inode = op->pos.inode;

	if (c->opts.nochanges ||
	    !percpu_ref_tryget(&c->writes)) {
		__bcache_io_error(c, "read only");
		op->error = -EROFS;
		bch2_disk_reservation_put(c, &op->res);
		closure_return(cl);
	}

	if (bversion_zero(op->version) &&
	    bch2_csum_type_is_encryption(op->csum_type))
		op->version.lo =
			atomic64_inc_return(&c->key_version) + 1;

	if (!(op->flags & BCH_WRITE_DISCARD))
		bch2_increment_clock(c, bio_sectors(bio), WRITE);

	/* Don't call bch2_next_delay() if rate is >= 1 GB/sec */

	if (c->foreground_write_ratelimit_enabled &&
	    c->foreground_write_pd.rate.rate < (1 << 30) &&
	    !(op->flags & BCH_WRITE_DISCARD) && op->wp->throttle) {
		unsigned long flags;
		u64 delay;

		spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
		bch2_ratelimit_increment(&c->foreground_write_pd.rate,
					 bio->bi_iter.bi_size);

		delay = bch2_ratelimit_delay(&c->foreground_write_pd.rate);

		if (delay >= HZ / 100) {
			trace_write_throttle(c, inode, bio, delay);

			closure_get(&op->cl); /* list takes a ref */

			op->expires = jiffies + delay;
			op->next = NULL;

			if (c->write_wait_tail)
				c->write_wait_tail->next = op;
			else
				c->write_wait_head = op;
			c->write_wait_tail = op;

			if (!timer_pending(&c->foreground_write_wakeup))
				mod_timer(&c->foreground_write_wakeup,
					  op->expires);

			spin_unlock_irqrestore(&c->foreground_write_pd_lock,
					       flags);
			continue_at(cl, __bch2_write, index_update_wq(op));
		}

		spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
	}

	continue_at_nobarrier(cl, __bch2_write, NULL);
}

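/*
 * Initialize a write op with the filesystem's default checksum/compression
 * settings. A rough usage sketch (the surrounding setup and names like
 * parent_cl are hypothetical, not taken from this file):
 *
 *	bch2_write_op_init(&op, c, wbio, res, wp, pos, journal_seq, flags);
 *	closure_call(&op.cl, bch2_write, NULL, &parent_cl);
 *
 * If @journal_seq is non-NULL, the journal sequence number of the index
 * update is reported through it.
 */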
void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
			struct bch_write_bio *bio, struct disk_reservation res,
			struct write_point *wp, struct bpos pos,
			u64 *journal_seq, unsigned flags)
{
	EBUG_ON(res.sectors && !res.nr_replicas);

	op->c		= c;
	op->io_wq	= index_update_wq(op);
	op->bio		= bio;
	op->written	= 0;
	op->error	= 0;
	op->flags	= flags;
	op->csum_type	= bch2_data_checksum_type(c);
	op->compression_type = c->opts.compression;
	op->nr_replicas	= res.nr_replicas;
	op->alloc_reserve = RESERVE_NONE;
	op->nonce	= 0;
	op->pos		= pos;
	op->version	= ZERO_VERSION;
	op->res		= res;
	op->wp		= wp;

	if (journal_seq) {
		op->journal_seq_p = journal_seq;
		op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
	} else {
		op->journal_seq = 0;
	}

	op->index_update_fn = bch2_write_index_default;

	bch2_keylist_init(&op->insert_keys,
			  op->inline_keys,
			  ARRAY_SIZE(op->inline_keys));

	if (version_stress_test(c))
		get_random_bytes(&op->version, sizeof(op->version));
}

/* Discard */

/* bch2_discard - discard a range of keys from start_key to end_key.
 * @c           filesystem
 * @start_key   pointer to start location
 *              NOTE: discard starts at bkey_start_offset(start_key)
 * @end_key     pointer to end location
 *              NOTE: discard ends at KEY_OFFSET(end_key)
 * @version     version of discard (0ULL if none)
 *
 * Returns:
 *       0 on success
 *      <0 on error
 *
 * XXX: this needs to be refactored with inode_truncate, or more
 *      appropriately inode_truncate should call this
 */
int bch2_discard(struct bch_fs *c, struct bpos start,
		 struct bpos end, struct bversion version,
		 struct disk_reservation *disk_res,
		 struct extent_insert_hook *hook,
		 u64 *journal_seq)
{
	return bch2_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
				       disk_res, hook, journal_seq);
}

/* Cache promotion on read */

struct cache_promote_op {
	struct closure		cl;
	struct migrate_write	write;
	struct bio_vec		bi_inline_vecs[0]; /* must be last */
};

/* Read */

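/*
 * Verify the checksum of the data that was read, then decrypt it and, if it
 * was compressed, decompress it into the parent bio; for bounced but
 * uncompressed reads, copy just the range the parent asked for out of the
 * bounce buffer.
 */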
static int bio_checksum_uncompress(struct bch_fs *c,
				   struct bch_read_bio *rbio)
{
	struct bio *src = &rbio->bio;
	struct bio *dst = &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->parent_iter;
	struct nonce nonce = extent_nonce(rbio->version,
				rbio->crc.nonce,
				crc_uncompressed_size(NULL, &rbio->crc),
				rbio->crc.compression_type);
	struct bch_csum csum;
	int ret = 0;

	/*
	 * reset iterator for checksumming and copying bounced data: here we've
	 * set rbio->compressed_size to the amount of data we actually read,
	 * which was not necessarily the full extent if we were only bouncing
	 * in order to promote
	 */
	if (rbio->bounce) {
		src->bi_iter.bi_size	= crc_compressed_size(NULL, &rbio->crc) << 9;
		src->bi_iter.bi_idx	= 0;
		src->bi_iter.bi_bvec_done = 0;
	} else {
		src->bi_iter = rbio->parent_iter;
	}

	csum = bch2_checksum_bio(c, rbio->crc.csum_type, nonce, src);
	if (bch2_dev_nonfatal_io_err_on(bch2_crc_cmp(rbio->crc.csum, csum),
					rbio->ca,
			"data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
			rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
			rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
			rbio->crc.csum_type))
		ret = -EIO;

	/*
	 * If there was a checksum error, still copy the data back - unless it
	 * was compressed, we don't want to decompress bad data:
	 */
	if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
		if (!ret) {
			bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
			ret = bch2_bio_uncompress(c, src, dst,
						  dst_iter, rbio->crc);
			if (ret)
				__bcache_io_error(c, "decompression error");
		}
	} else if (rbio->bounce) {
		bio_advance(src, rbio->crc.offset << 9);

		/* don't need to decrypt the entire bio: */
		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		nonce = nonce_add(nonce, rbio->crc.offset << 9);

		bch2_encrypt_bio(c, rbio->crc.csum_type,
				 nonce, src);

		bio_copy_data_iter(dst, &dst_iter,
				   src, &src->bi_iter);
	} else {
		bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
	}

	return ret;
}

static void bch2_rbio_free(struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct bio *bio = &rbio->bio;

	BUG_ON(rbio->ca);
	BUG_ON(!rbio->split);

	if (rbio->promote)
		kfree(rbio->promote);
	if (rbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	bio_put(bio);
}

static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	struct bio *orig = &bch2_rbio_parent(rbio)->bio;

	percpu_ref_put(&rbio->ca->io_ref);
	rbio->ca = NULL;

	if (rbio->split) {
		if (rbio->bio.bi_error)
			orig->bi_error = rbio->bio.bi_error;

		bio_endio(orig);
		bch2_rbio_free(rbio);
	} else {
		if (rbio->promote)
			kfree(rbio->promote);

		orig->bi_end_io = rbio->orig_bi_end_io;
		bio_endio_nodec(orig);
	}
}

static void bch2_rbio_error(struct bch_read_bio *rbio, int error)
{
	bch2_rbio_parent(rbio)->bio.bi_error = error;
	bch2_rbio_done(rbio);
}

static void bch2_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
	unsigned long flags;

	percpu_ref_put(&rbio->ca->io_ref);
	rbio->ca = NULL;

	spin_lock_irqsave(&c->read_retry_lock, flags);
	bio_list_add(&c->read_retry_list, &rbio->bio);
	spin_unlock_irqrestore(&c->read_retry_lock, flags);
	queue_work(c->wq, &c->read_retry_work);
}

static void cache_promote_done(struct closure *cl)
{
	struct cache_promote_op *op =
		container_of(cl, struct cache_promote_op, cl);

	bch2_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
	kfree(op);
}

/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c = rbio->c;
	int ret;

	ret = bio_checksum_uncompress(c, rbio);
	if (ret) {
		/*
		 * Checksum error: if the bio wasn't bounced, we may have been
		 * reading into buffers owned by userspace (that userspace can
		 * scribble over) - retry the read, bouncing it this time:
		 */
		if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
			rbio->flags |= BCH_READ_FORCE_BOUNCE;
			bch2_rbio_retry(c, rbio);
		} else {
			bch2_rbio_error(rbio, -EIO);
		}
		return;
	}

	if (rbio->promote) {
		struct cache_promote_op *promote = rbio->promote;
		struct closure *cl = &promote->cl;

		BUG_ON(!rbio->split || !rbio->bounce);

		trace_promote(&rbio->bio);

		/* we now own pages: */
		swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt);
		rbio->promote = NULL;

		bch2_rbio_done(rbio);

		closure_init(cl, &c->cl);
		closure_call(&promote->write.op.cl, bch2_write, c->wq, cl);
		closure_return_with_destructor(cl, cache_promote_done);
	} else {
		bch2_rbio_done(rbio);
	}
}

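/*
 * Read completion: on IO error or a stale cached pointer, fail or retry the
 * read; otherwise run the checksum/decrypt/decompress work, queueing it on a
 * workqueue unless there's nothing to do.
 */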
static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c = rbio->c;

	if (bch2_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) {
		/* XXX: retry IO errors when we have another replica */
		bch2_rbio_error(rbio, bio->bi_error);
		return;
	}

	if (rbio->ptr.cached &&
	    (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	     ptr_stale(rbio->ca, &rbio->ptr))) {
		atomic_long_inc(&c->read_realloc_races);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_retry(c, rbio);
		else
			bch2_rbio_error(rbio, -EINTR);
		return;
	}

	if (rbio->crc.compression_type ||
	    bch2_csum_type_is_encryption(rbio->crc.csum_type))
		queue_work(system_unbound_wq, &rbio->work);
	else if (rbio->crc.csum_type)
		queue_work(system_highpri_wq, &rbio->work);
	else
		__bch2_read_endio(&rbio->work);
}

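/*
 * Only promote if the caller asked for it, the filesystem is still accepting
 * writes, and the data wasn't read from the fastest tier already.
 */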
static bool should_promote(struct bch_fs *c,
			   struct extent_pick_ptr *pick, unsigned flags)
{
	if (!(flags & BCH_READ_PROMOTE))
		return false;

	if (percpu_ref_is_dying(&c->writes))
		return false;

	return c->fastest_tier &&
		c->fastest_tier < c->tiers + pick->ca->mi.tier;
}

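/*
 * Read the part of extent @k covered by @iter, using the pointer/crc in
 * @pick: decide whether the read needs to be bounced (for checksums,
 * compression, encryption or promotion), optionally set up a promote to a
 * faster tier, fill out a bch_read_bio and submit it.
 */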
void bch2_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
			   struct bvec_iter iter, struct bkey_s_c k,
			   struct extent_pick_ptr *pick, unsigned flags)
{
	struct bch_read_bio *rbio;
	struct cache_promote_op *promote_op = NULL;
	unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
	bool bounce = false, split, read_full = false;

	bch2_increment_clock(c, bio_sectors(&orig->bio), READ);

	EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
		k.k->p.offset < bvec_iter_end_sector(iter));

	/* only promote if we're not reading from the fastest tier: */

	/*
	 * XXX: multiple promotes can race with each other, wastefully. Keep a
	 * list of outstanding promotes?
	 */
	if (should_promote(c, pick, flags)) {
		/*
		 * biovec needs to be big enough to hold decompressed data, if
		 * the bch2_write_extent() has to decompress/recompress it:
		 */
		unsigned sectors =
			max_t(unsigned, k.k->size,
			      crc_uncompressed_size(NULL, &pick->crc));
		unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);

		promote_op = kmalloc(sizeof(*promote_op) +
				sizeof(struct bio_vec) * pages, GFP_NOIO);
		if (promote_op) {
			struct bio *promote_bio = &promote_op->write.wbio.bio;

			bio_init(promote_bio,
				 promote_bio->bi_inline_vecs,
				 pages);
			bounce = true;
			/* could also set read_full */
		}
	}

	/*
	 * note: if compression_type and crc_type both == none, then
	 * compressed/uncompressed size is zero
	 */
	if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
	    (pick->crc.csum_type != BCH_CSUM_NONE &&
	     (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) ||
	      (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_FORCE_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (bounce) {
		unsigned sectors = read_full
			? (crc_compressed_size(NULL, &pick->crc) ?: k.k->size)
			: bvec_iter_sectors(iter);

		rbio = container_of(bio_alloc_bioset(GFP_NOIO,
					DIV_ROUND_UP(sectors, PAGE_SECTORS),
					&c->bio_read_split),
				    struct bch_read_bio, bio);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		split = true;
	} else if (!(flags & BCH_READ_MAY_REUSE_BIO) ||
		   !(flags & BCH_READ_IS_LAST)) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = container_of(bio_clone_fast(&orig->bio,
					GFP_NOIO, &c->bio_read_split),
				    struct bch_read_bio, bio);
		rbio->bio.bi_iter = iter;
		split = true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		split = false;
		BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	if (!(flags & BCH_READ_IS_LAST))
		__bio_inc_remaining(&orig->bio);

	if (split)
		rbio->parent	= orig;
	else
		rbio->orig_bi_end_io = orig->bio.bi_end_io;
	rbio->parent_iter	= iter;

	rbio->flags		= flags;
	rbio->bounce		= bounce;
	rbio->split		= split;
	rbio->c			= c;
	rbio->ca		= pick->ca;
	rbio->ptr		= pick->ptr;
	rbio->crc		= pick->crc;
	/*
	 * crc.compressed_size will be 0 if there wasn't any checksum
	 * information, also we need to stash the original size of the bio if we
	 * bounced (which isn't necessarily the original key size, if we bounced
	 * only for promoting)
	 */
	rbio->crc._compressed_size = bio_sectors(&rbio->bio) - 1;
	rbio->version		= k.k->version;
	rbio->promote		= promote_op;
	rbio->inode		= k.k->p.inode;
	INIT_WORK(&rbio->work, __bch2_read_endio);

	rbio->bio.bi_bdev	= pick->ca->disk_sb.bdev;
	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (promote_op) {
		struct bio *promote_bio = &promote_op->write.wbio.bio;

		promote_bio->bi_iter = rbio->bio.bi_iter;
		memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
		       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);

		bch2_migrate_write_init(c, &promote_op->write,
					&c->promote_write_point,
					k, NULL,
					BCH_WRITE_ALLOC_NOWAIT|
					BCH_WRITE_CACHED);
		promote_op->write.promote = true;

		if (rbio->crc.compression_type) {
			promote_op->write.op.flags |= BCH_WRITE_DATA_COMPRESSED;
			promote_op->write.op.crc = rbio->crc;
			promote_op->write.op.size = k.k->size;
		} else if (read_full) {
			/*
			 * Adjust bio to correspond to _live_ portion of @k -
			 * which might be less than what we're actually reading:
			 */
			bio_advance(promote_bio, rbio->crc.offset << 9);
			BUG_ON(bio_sectors(promote_bio) < k.k->size);
			promote_bio->bi_iter.bi_size = k.k->size << 9;
		} else {
			/*
			 * Set insert pos to correspond to what we're actually
			 * reading:
			 */
			promote_op->write.op.pos.offset = iter.bi_sector;
		}

		promote_bio->bi_iter.bi_sector =
			promote_op->write.op.pos.offset;
	}

	/* _after_ promote stuff has looked at rbio->crc.offset */
	if (read_full)
		rbio->crc.offset += skip;
	else
		rbio->bio.bi_iter.bi_sector += skip;

	rbio->submit_time_us = local_clock_us();

	if (bounce)
		trace_read_bounce(&rbio->bio);

	if (!(flags & BCH_READ_IS_LAST))
		trace_read_split(&rbio->bio);

	generic_make_request(&rbio->bio);
}

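/*
 * Walk the extent btree over the range covered by @bvec_iter, issuing a read
 * (or zero-filling, for holes) for each extent until the whole bio has been
 * handled.
 */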
static void bch2_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
			   struct bvec_iter bvec_iter, u64 inode,
			   unsigned flags)
{
	struct bio *bio = &rbio->bio;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode, bvec_iter.bi_sector),
			   BTREE_ITER_WITH_HOLES, k) {
		BKEY_PADDED(k) tmp;
		struct extent_pick_ptr pick;
		unsigned bytes, sectors;
		bool is_last;

		/*
		 * Unlock the iterator while the btree node's lock is still in
		 * cache, before doing the IO:
		 */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_btree_iter_unlock(&iter);

		bch2_extent_pick_ptr(c, k, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");
			bio_endio(bio);
			return;
		}

		sectors = min_t(u64, k.k->p.offset,
				bvec_iter_end_sector(bvec_iter)) -
			bvec_iter.bi_sector;
		bytes = sectors << 9;
		is_last = bytes == bvec_iter.bi_size;
		swap(bvec_iter.bi_size, bytes);

		if (is_last)
			flags |= BCH_READ_IS_LAST;

		if (pick.ca) {
			PTR_BUCKET(pick.ca, &pick.ptr)->prio[READ] =
				c->prio_clock[READ].hand;

			bch2_read_extent_iter(c, rbio, bvec_iter,
					      k, &pick, flags);

			flags &= ~BCH_READ_MAY_REUSE_BIO;
		} else {
			zero_fill_bio_iter(bio, bvec_iter);

			if (is_last)
				bio_endio(bio);
		}

		if (is_last)
			return;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(bio, &bvec_iter, bytes);
	}

	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
	ret = bch2_btree_iter_unlock(&iter);
	BUG_ON(!ret);
	bcache_io_error(c, bio, "btree IO error %i", ret);
	bio_endio(bio);
}

void bch2_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
{
	bch2_read_iter(c, bio, bio->bio.bi_iter, inode,
		       BCH_READ_RETRY_IF_STALE|
		       BCH_READ_PROMOTE|
		       BCH_READ_MAY_REUSE_BIO|
		       BCH_READ_USER_MAPPED);
}

/**
 * bch2_read_retry - re-submit a bio originally from bch2_read()
 */
static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
	struct bch_read_bio *parent = bch2_rbio_parent(rbio);
	struct bvec_iter iter = rbio->parent_iter;
	unsigned flags = rbio->flags;
	u64 inode = rbio->inode;

	trace_read_retry(&rbio->bio);

	if (rbio->split)
		bch2_rbio_free(rbio);
	else
		rbio->bio.bi_end_io = rbio->orig_bi_end_io;

	bch2_read_iter(c, parent, iter, inode, flags);
}

void bch2_read_retry_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					read_retry_work);
	struct bch_read_bio *rbio;
	struct bio *bio;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&c->read_retry_lock, flags);
		bio = bio_list_pop(&c->read_retry_list);
		spin_unlock_irqrestore(&c->read_retry_lock, flags);

		if (!bio)
			break;

		rbio = container_of(bio, struct bch_read_bio, bio);
		bch2_read_retry(c, rbio);
	}
}