/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "clock.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "super.h"
#include "super-io.h"

#include <linux/blkdev.h>
#include <linux/random.h>

#include <trace/events/bcachefs.h>
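/*
 * Update a per-device, per-direction moving average of IO latency:
 * submit_time_us is compared against local_clock() >> 10 (roughly
 * microseconds), and the sample is folded into ca->latency[rw] with
 * ewma_add() - with a weight of 6, each sample only contributes a
 * small fixed fraction (1/64, if ewma_add() is the usual shift-based
 * helper), so a single outlier barely moves the average. The cmpxchg
 * loop makes the update lockless.
 */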
void bch2_latency_acct(struct bch_dev *ca, unsigned submit_time_us, int rw)
{
	u64 now = local_clock();
	unsigned io_latency = (now >> 10) - submit_time_us;
	atomic_t *latency = &ca->latency[rw];
	unsigned old, new, v = atomic_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation -
		 * most of the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0 << 5))
			break;

		new = ewma_add((u64) old, io_latency, 6);
	} while ((v = atomic_cmpxchg(latency, old, new)) != old);
}
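/*
 * Bounce buffers come from c->bio_bounce_pages. Bios may contain
 * ZERO_PAGE(0) entries (presumably used for zero padding elsewhere);
 * those were never allocated from the pool and must not be freed back
 * to it.
 */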
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}
static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
				     bool *using_mempool)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];

	if (likely(!*using_mempool)) {
		bv->bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bv->bv_page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
	}

	bv->bv_len = PAGE_SIZE;
	bv->bv_offset = 0;
}
void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t bytes)
{
	bool using_mempool = false;

	BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);

	bio->bi_iter.bi_size = bytes;

	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
		bch2_bio_alloc_page_pool(c, bio, &using_mempool);

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}
void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
				    size_t bytes)
{
	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);

		bv->bv_page = alloc_page(GFP_NOIO);
		if (!bv->bv_page) {
			/*
			 * We already allocated from mempool, we can't
			 * allocate from it again without freeing the
			 * pages we already allocated or else we could
			 * deadlock:
			 */
			bch2_bio_free_pages_pool(c, bio);
			bch2_bio_alloc_pages_pool(c, bio, bytes);
			return;
		}

		bv->bv_len = PAGE_SIZE;
		bv->bv_offset = 0;
		bio->bi_vcnt++;
	}

	bio->bi_iter.bi_size = bytes;
}
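/*
 * Submit one write bio per extent pointer: every replica except the
 * last gets a clone of @wbio, while the last replica reuses @wbio
 * itself, so the common single-replica case does no cloning at all.
 * Replicas going to devices the journal doesn't flush get REQ_FUA.
 */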
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k)
{
	struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	extent_for_each_ptr(e, ptr) {
		BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
		       !c->devs[ptr->dev]);

		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr + 1 < &extent_entry_last(e)->ptr) {
			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
						   &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->ca			= ca;
		n->submit_time_us	= local_clock_us();
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (!journal_flushes_device(ca))
			n->bio.bi_opf |= REQ_FUA;

		if (likely(percpu_ref_tryget(&ca->io_ref))) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			n->have_io_ref		= true;
			bio_set_dev(&n->bio, ca->disk_sb.bdev);
			submit_bio(&n->bio);
		} else {
			n->have_io_ref		= false;
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}
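/*
 * The write path is a closure state machine: bch2_write() kicks off
 * __bch2_write(), which allocates space and submits the data IO;
 * bch2_write_index() then inserts the resulting keys into the extents
 * btree, and bch2_write_done() runs once journalling (if requested)
 * has completed.
 */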
static void __bch2_write(struct closure *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	BUG_ON(!(op->flags & BCH_WRITE_DONE));

	if (!op->error && (op->flags & BCH_WRITE_FLUSH))
		op->error = bch2_journal_error(&op->c->journal);

	if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
		bch2_disk_reservation_put(op->c, &op->res);
	percpu_ref_put(&op->c->writes);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);
	op->flags &= ~(BCH_WRITE_DONE|BCH_WRITE_LOOPED);

	closure_return(cl);
}
int bch2_write_index_default(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;
	int ret;

	bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_INTENT);

	ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
					NULL, op_journal_seq(op),
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_USE_RESERVE);
	bch2_btree_iter_unlock(&iter);

	return ret;
}
/**
 * bch2_write_index - after a write, update index to point to new data
 */
static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct bkey_i *src, *dst = keys->keys, *n;
	int ret;

	op->flags |= BCH_WRITE_LOOPED;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);
		bkey_copy(dst, src);

		/* Drop pointers to devices the write errored on: */
		e = bkey_i_to_s_extent(dst);
		extent_for_each_ptr_backwards(e, ptr)
			if (test_bit(ptr->dev, op->failed.d))
				bch2_extent_drop_ptr(e, ptr);

		if (!bch2_extent_nr_ptrs(e.c)) {
			ret = -EIO;
			goto err;
		}

		if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
			ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, e.s_c);
			if (ret)
				goto err;
		}

		dst = bkey_next(dst);
	}

	keys->top = dst;

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);
		int ret = op->index_update_fn(op);

		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret) {
			__bcache_io_error(c, "btree IO error %i", ret);
			op->error = ret;
		}
	}
out:
	bch2_open_bucket_put_refs(c, &op->open_buckets_nr, op->open_buckets);

	if (!(op->flags & BCH_WRITE_DONE))
		continue_at(cl, __bch2_write, op->io_wq);

	if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
		bch2_journal_flush_seq_async(&c->journal,
					     *op_journal_seq(op),
					     cl);
		continue_at(cl, bch2_write_done, index_update_wq(op));
	} else {
		continue_at_nobarrier(cl, bch2_write_done, NULL);
	}
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_DONE;
	goto out;
}
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_fs *c = wbio->c;
	struct bch_dev *ca = wbio->ca;

	bch2_latency_acct(ca, wbio->submit_time_us, WRITE);

	if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
		set_bit(ca->dev_idx, op->failed.d);

	if (wbio->have_io_ref)
		percpu_ref_put(&ca->io_ref);

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}
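/*
 * Append a key for the chunk just written to op->insert_keys: @crc
 * describes the encoded payload, the pointers come from the write
 * point's open buckets, and op->pos is advanced past the new extent.
 */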
static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);

	op->pos.offset += crc.uncompressed_size;
	e->k.p = op->pos;
	e->k.size = crc.uncompressed_size;
	e->k.version = version;
	bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);

	bch2_extent_crc_append(e, crc);
	bch2_alloc_sectors_append_ptrs(op->c, wp, e, crc.compressed_size);

	bch2_keylist_push(&op->insert_keys);
}
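/*
 * Allocate a bounce bio for the write path: plain alloc_page() first,
 * falling back to the bounce mempool (which is only guaranteed to be
 * sized for encoded_extent_max worth of pages) if allocation fails.
 */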
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);

	bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
	wbio = wbio_init(bio);
	wbio->bounce = true;
	wbio->put_bio = true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf = src->bi_opf;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	while (bio->bi_iter.bi_size < output_available) {
		unsigned len = min_t(unsigned, PAGE_SIZE,
				     output_available - bio->bi_iter.bi_size);
		struct page *p;

		p = alloc_page(GFP_NOIO);
		if (!p) {
			unsigned pool_max =
				min_t(unsigned, output_available,
				      c->sb.encoded_extent_max << 9);

			if (bio_sectors(bio) < pool_max)
				bch2_bio_alloc_pages_pool(c, bio, pool_max);
			break;
		}

		bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
			.bv_page	= p,
			.bv_len		= len,
			.bv_offset	= 0,
		};
		bio->bi_iter.bi_size += len;
	}

	*page_alloc_failed = bio->bi_vcnt < pages;
	return bio;
}
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}
static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum))
		return -EIO;

	bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return 0;
}
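/*
 * Writes of data that is already encoded (checksummed/compressed, e.g.
 * coming from the move path) may be passed through as-is,
 * re-checksummed, or decompressed/decrypted first, depending on what
 * the target write point and the op's settings allow:
 */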
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    op->crc.compression_type == op->compression_type) {
		if (!op->crc.compression_type &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type))
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (op->crc.compression_type) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum))
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type))
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_type ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}
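/*
 * Split the source bio into extent-sized chunks, optionally bouncing,
 * compressing, encrypting and checksumming each chunk, append a key
 * for each chunk, then hand the result to bch2_submit_wbio_replicas().
 * Returns nonzero if input remains (the caller loops), negative on
 * error.
 */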
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	struct bkey_i *key_to_write;
	unsigned key_to_write_offset = op->insert_keys.top_p -
		op->insert_keys.keys_p;
	unsigned total_output = 0;
	bool bounce = false, page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (op->compression_type ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src, &page_alloc_failed);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc =
			(struct bch_extent_crc_unpacked) { 0 };
		struct bversion version = op->version;
		size_t dst_len, src_len;

		if (page_alloc_failed &&
		    bio_sectors(dst) < wp->sectors_free &&
		    bio_sectors(dst) < c->sb.encoded_extent_max)
			break;

		BUG_ON(op->compression_type &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_type && !bounce);

		crc.compression_type = op->compression_type
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_type)
			: 0;
		if (!crc.compression_type) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->sb.encoded_extent_max << 9);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version) + 1;
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc.compression_type &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			bch2_encrypt_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum = bch2_checksum_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output += dst_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				       op->inline_keys,
				       ARRAY_SIZE(op->inline_keys),
				       BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (!bounce && more) {
		dst = bio_split(src, total_output >> 9,
				GFP_NOIO, &c->bio_write);
		wbio_init(dst)->put_bio = true;
	}

	dst->bi_iter.bi_size = total_output;

	/* Free unneeded pages after compressing: */
	if (bounce)
		while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
			mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
				     &c->bio_bounce_pages);
do_write:
	/* might have done a realloc... */

	key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);

	dst->bi_end_io	= bch2_write_endio;
	dst->bi_private	= &op->cl;
	bio_set_op_attrs(dst, REQ_OP_WRITE, 0);

	closure_get(dst->bi_private);

	bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
				  key_to_write);
	return more;
csum_err:
	bch_err(c, "error verifying existing checksum while "
		"rewriting existing data (memory corruption?)");
	ret = -EIO;
err:
	if (bounce) {
		bch2_bio_free_pages_pool(c, dst);
		bio_put(dst);
	}

	return ret;
}
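/*
 * Main write loop: allocate space from a write point, write as much as
 * fits, and repeat until the whole bio is consumed - punting to
 * bch2_write_index() whenever we run out of room for open bucket
 * references or keys.
 */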
static void __bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct write_point *wp;
	int ret;

	do {
		if (op->open_buckets_nr + op->nr_replicas >
		    ARRAY_SIZE(op->open_buckets))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		/* for the device pointers and 1 for the chksum */
		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 BKEY_EXTENT_U64s_MAX))
			continue_at(cl, bch2_write_index, index_update_wq(op));

		wp = bch2_alloc_sectors_start(c,
			op->devs,
			op->write_point,
			&op->devs_have,
			op->nr_replicas,
			op->nr_replicas_required,
			op->alloc_reserve,
			op->flags,
			(op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
		EBUG_ON(!wp);

		if (unlikely(IS_ERR(wp))) {
			if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
				ret = PTR_ERR(wp);
				goto err;
			}

			/*
			 * If we already have some keys, must insert them first
			 * before allocating another open bucket. We only hit
			 * this case if open_bucket_nr > 1.
			 */
			if (!bch2_keylist_empty(&op->insert_keys))
				continue_at(cl, bch2_write_index,
					    index_update_wq(op));

			/*
			 * If we've looped, we're running out of a workqueue -
			 * not the bch2_write() caller's context - and we don't
			 * want to block the workqueue:
			 */
			if (op->flags & BCH_WRITE_LOOPED)
				continue_at(cl, __bch2_write, op->io_wq);

			/*
			 * Otherwise, we do want to block the caller on alloc
			 * failure instead of letting it queue up more and more
			 * writes:
			 * XXX: this technically needs a try_to_freeze() -
			 * except that that's not safe because caller may have
			 * issued other IO... hmm..
			 */
			closure_sync(cl);
			continue;
		}

		ret = bch2_write_extent(op, wp);

		BUG_ON(op->open_buckets_nr + wp->nr_ptrs_can_use >
		       ARRAY_SIZE(op->open_buckets));
		bch2_open_bucket_get(c, wp,
				     &op->open_buckets_nr,
				     op->open_buckets);
		bch2_alloc_sectors_done(c, wp);

		if (ret < 0)
			goto err;
	} while (ret);

	op->flags |= BCH_WRITE_DONE;
	continue_at(cl, bch2_write_index, index_update_wq(op));
err:
	/*
	 * Right now we can only error here if we went RO - the
	 * allocation failed, but we already checked for -ENOSPC when we
	 * got our reservation.
	 *
	 * XXX capacity might have changed, but we don't check for that
	 * yet:
	 */
	op->error = ret;
	op->flags |= BCH_WRITE_DONE;

	/*
	 * No reason not to insert keys for whatever data was successfully
	 * written (especially for a cmpxchg operation that's moving data
	 * around)
	 */
	continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
		    ? bch2_write_index
		    : bch2_write_done, index_update_wq(op));
}
/**
 * bch2_write - handle a write to a cache device or flash only volume
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written the keys are journalled, and
 * after they've been added to the next journal write they're inserted into
 * the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(!bkey_cmp(op->pos, POS_MAX));
	BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);

	memset(&op->failed, 0, sizeof(op->failed));

	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(&op->wbio.bio)->put_bio = false;

	if (c->opts.nochanges ||
	    !percpu_ref_tryget(&c->writes)) {
		__bcache_io_error(c, "read only");
		op->error = -EROFS;
		if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
			bch2_disk_reservation_put(c, &op->res);
		closure_return(cl);
	}

	bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);

	continue_at_nobarrier(cl, __bch2_write, NULL);
}
/* Cache promotion on read */

struct promote_op {
	struct closure		cl;
	struct migrate_write	write;
	struct bio_vec		bi_inline_vecs[0]; /* must be last */
};

static void promote_done(struct closure *cl)
{
	struct promote_op *op =
		container_of(cl, struct promote_op, cl);
	struct bch_fs *c = op->write.op.c;

	percpu_ref_put(&c->writes);
	bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
	kfree(op);
}
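/*
 * Kick off the write half of a promotion: steal the bounce pages from
 * the completed read bio and hand them to the migrate/write machinery,
 * so the promoted copy is written without another memcpy.
 */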
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct closure *cl = &op->cl;
	struct bio *bio = &op->write.op.wbio.bio;

	BUG_ON(!rbio->split || !rbio->bounce);

	if (!percpu_ref_tryget(&c->writes))
		return;

	trace_promote(&rbio->bio);

	/* we now own pages: */
	BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
	swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
	rbio->promote = NULL;

	bch2_migrate_read_done(&op->write, rbio);

	closure_init(cl, NULL);
	closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
	closure_return_with_destructor(cl, promote_done);
}
/*
 * XXX: multiple promotes can race with each other, wastefully. Keep a list of
 * outstanding promotes?
 */
static struct promote_op *promote_alloc(struct bch_read_bio *rbio,
					struct bkey_s_c k)
{
	struct bch_fs *c = rbio->c;
	struct promote_op *op;
	struct bio *bio;
	/* data might have to be decompressed in the write path: */
	unsigned pages = DIV_ROUND_UP(rbio->pick.crc.uncompressed_size,
				      PAGE_SECTORS);
	int ret;

	BUG_ON(!rbio->bounce);
	BUG_ON(pages < rbio->bio.bi_vcnt);

	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages,
		     GFP_NOIO);
	if (!op)
		return NULL;

	bio = &op->write.op.wbio.bio;
	bio_init(bio, bio->bi_inline_vecs, pages);

	memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
	       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);

	ret = bch2_migrate_write_init(c, &op->write, c->fastest_devs,
				      writepoint_hashed((unsigned long) current),
				      rbio->opts,
				      DATA_PROMOTE,
				      (struct data_opts) { 0 },
				      k);
	BUG_ON(ret);

	return op;
}
/* only promote if we're not reading from the fastest tier: */
static bool should_promote(struct bch_fs *c,
			   struct extent_pick_ptr *pick, unsigned flags)
{
	if (!(flags & BCH_READ_MAY_PROMOTE))
		return false;

	if (percpu_ref_is_dying(&c->writes))
		return false;

	return c->fastest_tier &&
		c->fastest_tier < c->tiers + pick->ca->mi.tier;
}
static void bch2_read_nodecode_retry(struct bch_fs *, struct bch_read_bio *,
				     struct bvec_iter, u64,
				     struct bch_devs_mask *, unsigned);

#define READ_RETRY_AVOID	1
#define READ_RETRY		2
#define READ_ERR		3

enum rbio_context {
	RBIO_CONTEXT_NULL,
	RBIO_CONTEXT_HIGHPRI,
	RBIO_CONTEXT_UNBOUND,
};
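/*
 * rbio contexts are ordered: bch2_rbio_punt() runs the work inline if
 * the bio is already executing in a context at least as permissive as
 * the one requested, and otherwise punts it to the given workqueue.
 */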
static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}

static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
			   enum rbio_context context,
			   struct workqueue_struct *wq)
{
	if (context <= rbio->context) {
		fn(&rbio->work);
	} else {
		rbio->work.func		= fn;
		rbio->context		= context;
		queue_work(wq, &rbio->work);
	}
}
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
	struct bch_read_bio *parent = rbio->parent;

	BUG_ON(!rbio->split);

	if (rbio->promote)
		kfree(rbio->promote);
	if (rbio->bounce)
		bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
	bio_put(&rbio->bio);

	return parent;
}

static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	if (rbio->promote)
		kfree(rbio->promote);
	rbio->promote = NULL;

	if (rbio->split)
		rbio = bch2_rbio_free(rbio);
	bio_endio(&rbio->bio);
}
static void bch2_rbio_retry(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c		= rbio->c;
	struct bvec_iter iter		= rbio->bvec_iter;
	unsigned flags			= rbio->flags;
	u64 inode			= rbio->pos.inode;
	struct bch_devs_mask avoid;

	trace_read_retry(&rbio->bio);

	memset(&avoid, 0, sizeof(avoid));

	if (rbio->retry == READ_RETRY_AVOID)
		__set_bit(rbio->pick.ca->dev_idx, avoid.d);

	if (rbio->promote)
		kfree(rbio->promote);
	rbio->promote = NULL;

	if (rbio->split)
		rbio = bch2_rbio_free(rbio);
	else
		rbio->bio.bi_status = 0;

	if (!(flags & BCH_READ_NODECODE))
		flags |= BCH_READ_MUST_CLONE;
	flags |= BCH_READ_IN_RETRY;
	flags &= ~BCH_READ_MAY_PROMOTE;

	if (flags & BCH_READ_NODECODE)
		bch2_read_nodecode_retry(c, rbio, iter, inode, &avoid, flags);
	else
		__bch2_read(c, rbio, iter, inode, &avoid, flags);
}
static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
			    blk_status_t error)
{
	rbio->retry = retry;

	if (rbio->flags & BCH_READ_IN_RETRY)
		return;

	if (retry == READ_ERR) {
		bch2_rbio_parent(rbio)->bio.bi_status = error;
		bch2_rbio_done(rbio);
	} else {
		bch2_rbio_punt(rbio, bch2_rbio_retry,
			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
	}
}
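/*
 * "Narrowing" a checksum: if we read an entire extent whose stored
 * checksum covers more data than the live portion of the key, we can
 * rewrite the key with a checksum of just the live data - so future
 * reads of a partially overwritten extent don't have to read and
 * verify the whole original extent.
 */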
static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_extent *e;
	BKEY_PADDED(k) new;
	struct bch_extent_crc_unpacked new_crc;
	unsigned offset;
	int ret;

	if (rbio->pick.crc.compression_type)
		return;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
			     BTREE_ITER_INTENT);
retry:
	k = bch2_btree_iter_peek(&iter);
	if (IS_ERR_OR_NULL(k.k))
		goto out;

	if (!bkey_extent_is_data(k.k))
		goto out;

	bkey_reassemble(&new.k, k);
	e = bkey_i_to_extent(&new.k);

	if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
				     rbio->pick.ptr,
				     rbio->pos.offset -
				     rbio->pick.crc.offset) ||
	    bversion_cmp(e->k.version, rbio->version))
		goto out;

	/* Extent was merged? */
	if (bkey_start_offset(&e->k) < rbio->pos.offset ||
	    e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
		goto out;

	/* The extent might have been partially overwritten since we read it: */
	offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);

	if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
				rbio->pick.crc, NULL, &new_crc,
				offset, e->k.size,
				rbio->pick.crc.csum_type)) {
		bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
		goto out;
	}

	if (!bch2_extent_narrow_crcs(e, new_crc))
		goto out;

	ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
				   BTREE_INSERT_ATOMIC|
				   BTREE_INSERT_NOFAIL|
				   BTREE_INSERT_NOWAIT,
				   BTREE_INSERT_ENTRY(&iter, &e->k_i));
	if (ret == -EINTR)
		goto retry;
out:
	bch2_btree_iter_unlock(&iter);
}
static bool should_narrow_crcs(struct bkey_s_c_extent e,
			       struct extent_pick_ptr *pick,
			       unsigned flags)
{
	return !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(e, pick->crc);
}
/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c = rbio->c;
	struct bio *src = &rbio->bio, *dst = &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->bvec_iter;
	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
	struct nonce nonce = extent_nonce(rbio->version, crc);
	struct bch_csum csum;

	/* Reset iterator for checksumming and copying bounced data: */
	if (rbio->bounce) {
		src->bi_iter.bi_size		= crc.compressed_size << 9;
		src->bi_iter.bi_idx		= 0;
		src->bi_iter.bi_bvec_done	= 0;
	} else {
		src->bi_iter			= rbio->bvec_iter;
	}

	csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
	if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
		goto csum_err;

	if (unlikely(rbio->narrow_crcs))
		bch2_rbio_narrow_crcs(rbio);

	if (rbio->flags & BCH_READ_NODECODE)
		goto nodecode;

	/* Adjust crc to point to subset of data we want: */
	crc.offset	+= rbio->bvec_iter.bi_sector - rbio->pos.offset;
	crc.live_size	 = bvec_iter_sectors(rbio->bvec_iter);

	if (crc.compression_type != BCH_COMPRESSION_NONE) {
		bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
			goto decompression_err;
	} else {
		/* don't need to decrypt the entire bio: */
		nonce = nonce_add(nonce, crc.offset << 9);
		bio_advance(src, crc.offset << 9);

		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		bch2_encrypt_bio(c, crc.csum_type, nonce, src);

		if (rbio->bounce) {
			struct bvec_iter src_iter = src->bi_iter;
			bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
		}
	}

	if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
		bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		promote_start(rbio->promote, rbio);
	}
nodecode:
	if (likely(!(rbio->flags & BCH_READ_IN_RETRY)))
		bch2_rbio_done(rbio);
	return;
csum_err:
	/*
	 * Checksum error: if the bio wasn't bounced, we may have been
	 * reading into buffers owned by userspace (that userspace can
	 * scribble over) - retry the read, bouncing it this time:
	 */
	if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
		rbio->flags |= BCH_READ_MUST_BOUNCE;
		bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
		return;
	}

	bch2_dev_io_error(rbio->pick.ca,
		"data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
		rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
		rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
		csum.hi, csum.lo, crc.csum_type);
	bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
	return;
decompression_err:
	__bcache_io_error(c, "decompression error, inode %llu offset %llu",
			  rbio->pos.inode,
			  (u64) rbio->bvec_iter.bi_sector);
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	return;
}
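/*
 * Top half of read completion: decide how much completion work the bio
 * needs (none, checksum verification only, or decrypt/decompress/
 * narrow) and punt to a correspondingly permissive context.
 */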
static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c = rbio->c;
	struct workqueue_struct *wq = NULL;
	enum rbio_context context = RBIO_CONTEXT_NULL;

	bch2_latency_acct(rbio->pick.ca, rbio->submit_time_us, READ);

	percpu_ref_put(&rbio->pick.ca->io_ref);

	if (!rbio->split)
		rbio->bio.bi_end_io = rbio->end_io;

	if (bch2_dev_io_err_on(bio->bi_status, rbio->pick.ca, "data read")) {
		bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
		return;
	}

	if (rbio->pick.ptr.cached &&
	    (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	     ptr_stale(rbio->pick.ca, &rbio->pick.ptr))) {
		atomic_long_inc(&c->read_realloc_races);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
		else
			bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
		return;
	}

	if (rbio->narrow_crcs ||
	    rbio->pick.crc.compression_type ||
	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
	else if (rbio->pick.crc.csum_type)
		context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;

	bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}
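/*
 * Read a single extent (or a subset of one): decides whether to bounce
 * and/or clone the bio, whether the full extent must be read (needed
 * when it's compressed, encrypted, or the checksum covers more than
 * the requested range), and submits the IO.
 */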
int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bkey_s_c_extent e,
		       struct extent_pick_ptr *pick, unsigned flags)
{
	struct bch_read_bio *rbio;
	bool split = false, bounce = false, read_full = false;
	bool promote = false, narrow_crcs = false;
	struct bpos pos = bkey_start_pos(e.k);
	int ret = 0;

	lg_local_lock(&c->usage_lock);
	bucket_io_clock_reset(c, pick->ca,
			PTR_BUCKET_NR(pick->ca, &pick->ptr), READ);
	lg_local_unlock(&c->usage_lock);

	narrow_crcs = should_narrow_crcs(e, pick, flags);

	if (flags & BCH_READ_NODECODE) {
		BUG_ON(iter.bi_size < pick->crc.compressed_size << 9);
		iter.bi_size = pick->crc.compressed_size << 9;
		goto noclone;
	}

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	EBUG_ON(bkey_start_offset(e.k) > iter.bi_sector ||
		e.k->p.offset < bvec_iter_end_sector(iter));

	if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
	    (pick->crc.csum_type != BCH_CSUM_NONE &&
	     (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	promote = should_promote(c, pick, flags);
	/* could also set read_full */
	if (promote)
		bounce = true;

	if (!read_full) {
		EBUG_ON(pick->crc.compression_type);
		EBUG_ON(pick->crc.csum_type &&
			(bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick->crc.live_size ||
			 pick->crc.offset ||
			 iter.bi_sector != pos.offset));

		pick->ptr.offset += pick->crc.offset +
			(iter.bi_sector - pos.offset);
		pick->crc.compressed_size	= bvec_iter_sectors(iter);
		pick->crc.uncompressed_size	= bvec_iter_sectors(iter);
		pick->crc.offset		= 0;
		pick->crc.live_size		= bvec_iter_sectors(iter);
		pos.offset			= iter.bi_sector;
	}

	if (bounce) {
		unsigned sectors = pick->crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
					DIV_ROUND_UP(sectors, PAGE_SECTORS),
					&c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		split = true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
						&c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		split = true;
	} else {
noclone:
		rbio = orig;
		rbio->bio.bi_iter = iter;
		split = false;
		BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	BUG_ON(bio_sectors(&rbio->bio) != pick->crc.compressed_size);

	rbio->c			= c;
	if (split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->submit_time_us	= local_clock_us();
	rbio->flags		= flags;
	rbio->bounce		= bounce;
	rbio->split		= split;
	rbio->narrow_crcs	= narrow_crcs;
	rbio->retry		= 0;
	rbio->context		= 0;
	rbio->devs_have		= bch2_extent_devs(e);
	rbio->pick		= *pick;
	rbio->pos		= pos;
	rbio->version		= e.k->version;
	rbio->promote		= promote ? promote_alloc(rbio, e.s_c) : NULL;
	INIT_WORK(&rbio->work, NULL);

	bio_set_dev(&rbio->bio, pick->ca->disk_sb.bdev);
	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (bounce)
		trace_read_bounce(&rbio->bio);

	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
	this_cpu_add(pick->ca->io_done->sectors[READ][BCH_DATA_USER],
		     bio_sectors(&rbio->bio));

	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		submit_bio(&rbio->bio);
	} else {
		submit_bio_wait(&rbio->bio);

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		if (rbio->split)
			rbio = bch2_rbio_free(rbio);
		if (!ret)
			bch2_rbio_done(rbio);
	}

	return ret;
}
static void bch2_read_nodecode_retry(struct bch_fs *c, struct bch_read_bio *rbio,
				     struct bvec_iter bvec_iter, u64 inode,
				     struct bch_devs_mask *avoid, unsigned flags)
{
	struct extent_pick_ptr pick;
	struct btree_iter iter;
	BKEY_PADDED(k) tmp;
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     POS(inode, bvec_iter.bi_sector),
			     BTREE_ITER_SLOTS);
retry:
	k = bch2_btree_iter_peek_slot(&iter);
	if (btree_iter_err(k)) {
		bch2_btree_iter_unlock(&iter);
		goto err;
	}

	bkey_reassemble(&tmp.k, k);
	k = bkey_i_to_s_c(&tmp.k);
	bch2_btree_iter_unlock(&iter);

	if (!bkey_extent_is_data(k.k) ||
	    !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
				     rbio->pick.ptr,
				     rbio->pos.offset -
				     rbio->pick.crc.offset) ||
	    bkey_start_offset(k.k) != bvec_iter.bi_sector)
		goto err;

	bch2_extent_pick_ptr(c, k, avoid, &pick);
	if (IS_ERR(pick.ca)) {
		bcache_io_error(c, &rbio->bio, "no device to read from");
		bio_endio(&rbio->bio);
		return;
	}

	if (!pick.ca)
		goto err;

	if (pick.crc.compressed_size > bvec_iter_sectors(bvec_iter)) {
		percpu_ref_put(&pick.ca->io_ref);
		goto err;
	}

	ret = __bch2_read_extent(c, rbio, bvec_iter, bkey_s_c_to_extent(k),
				 &pick, flags);
	switch (ret) {
	case READ_RETRY_AVOID:
		__set_bit(pick.ca->dev_idx, avoid->d);
	case READ_RETRY:
		goto retry;
	case READ_ERR:
		bio_endio(&rbio->bio);
		return;
	};

	return;
err:
	/*
	 * extent we wanted to read no longer exists, or
	 * was merged or partially overwritten (and thus
	 * possibly bigger than the memory that was
	 * originally allocated)
	 */
	rbio->bio.bi_status = BLK_STS_AGAIN;
	bio_endio(&rbio->bio);
}
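/*
 * Entry point for reads: walk the extents btree, issuing a read (or a
 * zero-fill, for holes) for each extent overlapping the requested
 * range, splitting the bio as needed.
 */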
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
		 struct bvec_iter bvec_iter, u64 inode,
		 struct bch_devs_mask *avoid, unsigned flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(flags & BCH_READ_NODECODE);
retry:
	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode, bvec_iter.bi_sector),
			   BTREE_ITER_SLOTS, k) {
		BKEY_PADDED(k) tmp;
		struct extent_pick_ptr pick;
		struct bvec_iter fragment;

		/*
		 * Unlock the iterator while the btree node's lock is still in
		 * cache, before doing the IO:
		 */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_btree_iter_unlock(&iter);

		bch2_extent_pick_ptr(c, k, avoid, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, &rbio->bio, "no device to read from");
			bio_endio(&rbio->bio);
			return;
		}

		fragment = bvec_iter;
		fragment.bi_size = (min_t(u64, k.k->p.offset,
					  bvec_iter_end_sector(bvec_iter)) -
				    bvec_iter.bi_sector) << 9;

		if (pick.ca) {
			if (fragment.bi_size != bvec_iter.bi_size) {
				bio_inc_remaining(&rbio->bio);
				flags |= BCH_READ_MUST_CLONE;
				trace_read_split(&rbio->bio);
			}

			ret = __bch2_read_extent(c, rbio, fragment,
						 bkey_s_c_to_extent(k),
						 &pick, flags);
			switch (ret) {
			case READ_RETRY_AVOID:
				__set_bit(pick.ca->dev_idx, avoid->d);
			case READ_RETRY:
				goto retry;
			case READ_ERR:
				rbio->bio.bi_status = BLK_STS_IOERR;
				bio_endio(&rbio->bio);
				return;
			};
		} else {
			zero_fill_bio_iter(&rbio->bio, fragment);

			if (fragment.bi_size == bvec_iter.bi_size)
				bio_endio(&rbio->bio);
		}

		if (fragment.bi_size == bvec_iter.bi_size)
			return;

		bio_advance_iter(&rbio->bio, &bvec_iter, fragment.bi_size);
	}

	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
	ret = bch2_btree_iter_unlock(&iter);
	BUG_ON(!ret);
	bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
	bio_endio(&rbio->bio);
}