2 * Some low level IO code, and hacks for various block layer limitations
4 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5 * Copyright 2012 Google, Inc.
11 #include "btree_update.h"
26 #include <linux/blkdev.h>
27 #include <linux/random.h>
29 #include <trace/events/bcachefs.h>
31 /* Allocate, free from mempool: */
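/*
 * Per-device IO latency accounting: each completion folds its measured
 * latency into an exponentially weighted moving average in ca->latency[rw].
 * A rough sketch of the update, assuming ewma_add() weights new samples by
 * 1/2^6:
 *
 *	new = old + (sample - old) / 64
 *
 * The cmpxchg loop retries the read-modify-write if another completion raced
 * with us, so no lock is needed; local_clock() >> 10 approximates
 * microseconds, matching submit_time_us.
 */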
33 void bch2_latency_acct(struct bch_dev *ca, unsigned submit_time_us, int rw)
35 u64 now = local_clock();
36 unsigned io_latency = (now >> 10) - submit_time_us;
37 atomic_t *latency = &ca->latency[rw];
38 unsigned old, new, v = atomic_read(latency);
44 * If the io latency was reasonably close to the current
45 * latency, skip doing the update and atomic operation - most of the time there's no need:
48 if (abs((int) (old - io_latency)) < (old >> 1) &&
52 new = ewma_add((u64) old, io_latency, 6);
53 } while ((v = atomic_cmpxchg(latency, old, new)) != old);
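/*
 * Bounce pages for the IO paths come from c->bio_bounce_pages, a per
 * filesystem mempool: the helpers below return a bio's pages to it (skipping
 * the shared zero page) and fill a bio from it one page at a time.
 */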
56 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
61 bio_for_each_segment_all(bv, bio, i)
62 if (bv->bv_page != ZERO_PAGE(0))
63 mempool_free(bv->bv_page, &c->bio_bounce_pages);
67 static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
70 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
72 if (likely(!*using_mempool)) {
73 bv->bv_page = alloc_page(GFP_NOIO);
74 if (unlikely(!bv->bv_page)) {
75 mutex_lock(&c->bio_bounce_pages_lock);
76 *using_mempool = true;
82 bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
85 bv->bv_len = PAGE_SIZE;
89 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
92 bool using_mempool = false;
94 BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
96 bio->bi_iter.bi_size = bytes;
98 while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
99 bch2_bio_alloc_page_pool(c, bio, &using_mempool);
102 mutex_unlock(&c->bio_bounce_pages_lock);
105 void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
108 while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
109 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
111 BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
113 bv->bv_page = alloc_page(GFP_NOIO);
116 * We already allocated from mempool, we can't allocate from it again
117 * without freeing the pages we already allocated or else we could deadlock:
120 bch2_bio_free_pages_pool(c, bio);
121 bch2_bio_alloc_pages_pool(c, bio, bytes);
125 bv->bv_len = PAGE_SIZE;
130 bio->bi_iter.bi_size = bytes;
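/*
 * Fan a write out to all of an extent's pointers: every pointer but the last
 * gets a clone of the original bio sharing its completion, the last pointer
 * reuses the original.  REQ_FUA is added when the journal doesn't already
 * flush the device, and pointers whose device ref can't be taken are marked
 * BLK_STS_REMOVED so the endio path records the device as failed.
 */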
135 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
136 enum bch_data_type type,
137 const struct bkey_i *k)
139 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
140 const struct bch_extent_ptr *ptr;
141 struct bch_write_bio *n;
144 BUG_ON(c->opts.nochanges);
146 extent_for_each_ptr(e, ptr) {
147 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
150 ca = bch_dev_bkey_exists(c, ptr->dev);
152 if (ptr + 1 < &extent_entry_last(e)->ptr) {
153 n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
156 n->bio.bi_end_io = wbio->bio.bi_end_io;
157 n->bio.bi_private = wbio->bio.bi_private;
162 n->bio.bi_opf = wbio->bio.bi_opf;
163 bio_inc_remaining(&wbio->bio);
171 n->submit_time_us = local_clock_us();
172 n->bio.bi_iter.bi_sector = ptr->offset;
174 if (!journal_flushes_device(ca))
175 n->bio.bi_opf |= REQ_FUA;
177 if (likely(percpu_ref_tryget(&ca->io_ref))) {
178 this_cpu_add(ca->io_done->sectors[WRITE][type],
179 bio_sectors(&n->bio));
181 n->have_io_ref = true;
182 bio_set_dev(&n->bio, ca->disk_sb.bdev);
185 n->have_io_ref = false;
186 n->bio.bi_status = BLK_STS_REMOVED;
192 static void __bch2_write(struct closure *);
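/*
 * Final write completion: if the caller asked for a flush, propagate any
 * journal error into op->error, then drop the disk reservation and the
 * c->writes ref and free the keylist before the op's closure finishes.
 */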
194 static void bch2_write_done(struct closure *cl)
196 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
198 BUG_ON(!(op->flags & BCH_WRITE_DONE));
200 if (!op->error && (op->flags & BCH_WRITE_FLUSH))
201 op->error = bch2_journal_error(&op->c->journal);
203 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
204 bch2_disk_reservation_put(op->c, &op->res);
205 percpu_ref_put(&op->c->writes);
206 bch2_keylist_free(&op->insert_keys, op->inline_keys);
207 op->flags &= ~(BCH_WRITE_DONE|BCH_WRITE_LOOPED);
212 static u64 keylist_sectors(struct keylist *keys)
217 for_each_keylist_key(keys, k)
223 int bch2_write_index_default(struct bch_write_op *op)
225 struct keylist *keys = &op->insert_keys;
226 struct btree_iter iter;
229 bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
230 bkey_start_pos(&bch2_keylist_front(keys)->k),
233 ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
234 NULL, op_journal_seq(op),
235 BTREE_INSERT_NOFAIL);
236 bch2_btree_iter_unlock(&iter);
242 * bch_write_index - after a write, update index to point to new data
244 static void bch2_write_index(struct closure *cl)
246 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
247 struct bch_fs *c = op->c;
248 struct keylist *keys = &op->insert_keys;
249 struct bkey_s_extent e;
250 struct bch_extent_ptr *ptr;
251 struct bkey_i *src, *dst = keys->keys, *n;
254 op->flags |= BCH_WRITE_LOOPED;
256 for (src = keys->keys; src != keys->top; src = n) {
260 e = bkey_i_to_s_extent(dst);
261 extent_for_each_ptr_backwards(e, ptr)
262 if (test_bit(ptr->dev, op->failed.d))
263 bch2_extent_drop_ptr(e, ptr);
265 if (!bch2_extent_nr_ptrs(e.c)) {
270 if (!(op->flags & BCH_WRITE_NOMARK_REPLICAS)) {
271 ret = bch2_check_mark_super(c, BCH_DATA_USER,
272 bch2_extent_devs(e.c));
277 dst = bkey_next(dst);
282 if (!bch2_keylist_empty(keys)) {
283 u64 sectors_start = keylist_sectors(keys);
284 int ret = op->index_update_fn(op);
286 BUG_ON(keylist_sectors(keys) && !ret);
288 op->written += sectors_start - keylist_sectors(keys);
291 __bcache_io_error(c, "btree IO error %i", ret);
296 bch2_open_bucket_put_refs(c, &op->open_buckets_nr, op->open_buckets);
298 if (!(op->flags & BCH_WRITE_DONE))
299 continue_at(cl, __bch2_write, op->io_wq);
301 if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
302 bch2_journal_flush_seq_async(&c->journal,
305 continue_at(cl, bch2_write_done, index_update_wq(op));
307 continue_at_nobarrier(cl, bch2_write_done, NULL);
311 keys->top = keys->keys;
313 op->flags |= BCH_WRITE_DONE;
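/*
 * Per-replica write completion: account latency, remember any device that
 * errored in op->failed so bch2_write_index() can drop its pointer from the
 * extent, drop the device ref and bounce pages, and complete the parent bio
 * if this bio was a split.
 */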
317 static void bch2_write_endio(struct bio *bio)
319 struct closure *cl = bio->bi_private;
320 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
321 struct bch_write_bio *wbio = to_wbio(bio);
322 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
323 struct bch_fs *c = wbio->c;
324 struct bch_dev *ca = wbio->ca;
326 bch2_latency_acct(ca, wbio->submit_time_us, WRITE);
328 if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
329 set_bit(ca->dev_idx, op->failed.d);
331 if (wbio->have_io_ref)
332 percpu_ref_put(&ca->io_ref);
335 bch2_bio_free_pages_pool(c, bio);
341 bio_endio(&parent->bio);
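/*
 * Append a key for the extent we're about to write to op->insert_keys: the
 * size and checksum entry come from @crc, the version from the caller, and
 * the pointers from the write point's open buckets.
 */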
346 static void init_append_extent(struct bch_write_op *op,
347 struct write_point *wp,
348 struct bversion version,
349 struct bch_extent_crc_unpacked crc)
351 struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
353 op->pos.offset += crc.uncompressed_size;
355 e->k.size = crc.uncompressed_size;
356 e->k.version = version;
357 bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
359 bch2_extent_crc_append(e, crc);
360 bch2_alloc_sectors_append_ptrs(op->c, wp, e, crc.compressed_size);
362 bch2_keylist_push(&op->insert_keys);
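/*
 * Allocate a bounce bio for the write path, sized to the smaller of the
 * source bio and the space left at the write point.  Pages come from
 * alloc_page(); if that fails we fall back to the bounce mempool, which can
 * only cover up to encoded_extent_max, and *page_alloc_failed tells the
 * caller the bio may be smaller than asked for.
 */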
365 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
366 struct write_point *wp,
368 bool *page_alloc_failed)
370 struct bch_write_bio *wbio;
372 unsigned output_available =
373 min(wp->sectors_free << 9, src->bi_iter.bi_size);
374 unsigned pages = DIV_ROUND_UP(output_available, PAGE_SIZE);
376 bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
377 wbio = wbio_init(bio);
379 wbio->put_bio = true;
380 /* copy WRITE_SYNC flag */
381 wbio->bio.bi_opf = src->bi_opf;
384 * We can't use mempool for more than c->sb.encoded_extent_max
385 * worth of pages, but we'd like to allocate more if we can:
387 while (bio->bi_iter.bi_size < output_available) {
388 unsigned len = min_t(unsigned, PAGE_SIZE,
389 output_available - bio->bi_iter.bi_size);
392 p = alloc_page(GFP_NOIO);
395 min_t(unsigned, output_available,
396 c->sb.encoded_extent_max << 9);
398 if (bio_sectors(bio) < pool_max)
399 bch2_bio_alloc_pages_pool(c, bio, pool_max);
403 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
408 bio->bi_iter.bi_size += len;
411 *page_alloc_failed = bio->bi_vcnt < pages;
415 static int bch2_write_rechecksum(struct bch_fs *c,
416 struct bch_write_op *op,
417 unsigned new_csum_type)
419 struct bio *bio = &op->wbio.bio;
420 struct bch_extent_crc_unpacked new_crc;
423 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
425 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
426 bch2_csum_type_is_encryption(new_csum_type))
427 new_csum_type = op->crc.csum_type;
429 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
431 op->crc.offset, op->crc.live_size,
436 bio_advance(bio, op->crc.offset << 9);
437 bio->bi_iter.bi_size = op->crc.live_size << 9;
442 static int bch2_write_decrypt(struct bch_write_op *op)
444 struct bch_fs *c = op->c;
445 struct nonce nonce = extent_nonce(op->version, op->crc);
446 struct bch_csum csum;
448 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
452 * If we need to decrypt data in the write path, we'll no longer be able
453 * to verify the existing checksum (poly1305 mac, in this case) after
454 * it's decrypted - this is the last point we'll be able to reverify the checksum:
457 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
458 if (bch2_crc_cmp(op->crc.csum, csum))
461 bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
462 op->crc.csum_type = 0;
463 op->crc.csum = (struct bch_csum) { 0, 0 };
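/*
 * For writes of already encoded data (BCH_WRITE_DATA_ENCODED, typically data
 * being moved rather than newly written), work out how much the write path
 * still has to do: DO_WRITE means the payload can go to disk as is, OK means
 * it still needs the normal compress/checksum pass, and ERR/CHECKSUM_ERR
 * abort the write.
 */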
467 static enum prep_encoded_ret {
470 PREP_ENCODED_CHECKSUM_ERR,
471 PREP_ENCODED_DO_WRITE,
472 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
474 struct bch_fs *c = op->c;
475 struct bio *bio = &op->wbio.bio;
477 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
478 return PREP_ENCODED_OK;
480 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
482 /* Can we just write the entire extent as is? */
483 if (op->crc.uncompressed_size == op->crc.live_size &&
484 op->crc.compressed_size <= wp->sectors_free &&
485 op->crc.compression_type == op->compression_type) {
486 if (!op->crc.compression_type &&
487 op->csum_type != op->crc.csum_type &&
488 bch2_write_rechecksum(c, op, op->csum_type))
489 return PREP_ENCODED_CHECKSUM_ERR;
491 return PREP_ENCODED_DO_WRITE;
495 * If the data is compressed and we couldn't write the entire extent as
496 * is, we have to decompress it:
498 if (op->crc.compression_type) {
499 struct bch_csum csum;
501 if (bch2_write_decrypt(op))
502 return PREP_ENCODED_CHECKSUM_ERR;
504 /* Last point we can still verify checksum: */
505 csum = bch2_checksum_bio(c, op->crc.csum_type,
506 extent_nonce(op->version, op->crc),
508 if (bch2_crc_cmp(op->crc.csum, csum))
509 return PREP_ENCODED_CHECKSUM_ERR;
511 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
512 return PREP_ENCODED_ERR;
516 * No longer have compressed data after this point - data might be encrypted:
521 * If the data is checksummed and we're only writing a subset,
522 * rechecksum and adjust bio to point to currently live data:
524 if ((op->crc.live_size != op->crc.uncompressed_size ||
525 op->crc.csum_type != op->csum_type) &&
526 bch2_write_rechecksum(c, op, op->csum_type))
527 return PREP_ENCODED_CHECKSUM_ERR;
530 * If we want to compress the data, it has to be decrypted:
532 if ((op->compression_type ||
533 bch2_csum_type_is_encryption(op->crc.csum_type) !=
534 bch2_csum_type_is_encryption(op->csum_type)) &&
535 bch2_write_decrypt(op))
536 return PREP_ENCODED_CHECKSUM_ERR;
538 return PREP_ENCODED_OK;
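/*
 * The core of the write path: carve the source bio into extents that fit the
 * write point, bouncing, compressing, checksumming and encrypting each chunk
 * as needed, appending one key per chunk to op->insert_keys, and submitting
 * the result via bch2_submit_wbio_replicas().  The return value tells
 * __bch2_write() whether source data is still left to write.
 */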
541 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
543 struct bch_fs *c = op->c;
544 struct bio *src = &op->wbio.bio, *dst = src;
545 struct bvec_iter saved_iter;
546 struct bkey_i *key_to_write;
547 unsigned key_to_write_offset = op->insert_keys.top_p -
548 op->insert_keys.keys_p;
549 unsigned total_output = 0;
550 bool bounce = false, page_alloc_failed = false;
553 BUG_ON(!bio_sectors(src));
555 switch (bch2_write_prep_encoded_data(op, wp)) {
556 case PREP_ENCODED_OK:
558 case PREP_ENCODED_ERR:
561 case PREP_ENCODED_CHECKSUM_ERR:
563 case PREP_ENCODED_DO_WRITE:
564 init_append_extent(op, wp, op->version, op->crc);
568 if (op->compression_type ||
570 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
571 (bch2_csum_type_is_encryption(op->csum_type) &&
572 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
573 dst = bch2_write_bio_alloc(c, wp, src, &page_alloc_failed);
577 saved_iter = dst->bi_iter;
580 struct bch_extent_crc_unpacked crc =
581 (struct bch_extent_crc_unpacked) { 0 };
582 struct bversion version = op->version;
583 size_t dst_len, src_len;
585 if (page_alloc_failed &&
586 bio_sectors(dst) < wp->sectors_free &&
587 bio_sectors(dst) < c->sb.encoded_extent_max)
590 BUG_ON(op->compression_type &&
591 (op->flags & BCH_WRITE_DATA_ENCODED) &&
592 bch2_csum_type_is_encryption(op->crc.csum_type));
593 BUG_ON(op->compression_type && !bounce);
595 crc.compression_type = op->compression_type
596 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
597 op->compression_type)
599 if (!crc.compression_type) {
600 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
601 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
604 dst_len = min_t(unsigned, dst_len,
605 c->sb.encoded_extent_max << 9);
608 swap(dst->bi_iter.bi_size, dst_len);
609 bio_copy_data(dst, src);
610 swap(dst->bi_iter.bi_size, dst_len);
616 BUG_ON(!src_len || !dst_len);
618 if (bch2_csum_type_is_encryption(op->csum_type)) {
619 if (bversion_zero(version)) {
620 version.lo = atomic64_inc_return(&c->key_version) + 1;
622 crc.nonce = op->nonce;
623 op->nonce += src_len >> 9;
627 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
628 !crc.compression_type &&
629 bch2_csum_type_is_encryption(op->crc.csum_type) ==
630 bch2_csum_type_is_encryption(op->csum_type)) {
632 * Note: when we're using rechecksum(), we need to be
633 * checksumming @src because it has all the data our
634 * existing checksum covers - if we bounced (because we
635 * were trying to compress), @dst will only have the
636 * part of the data the new checksum will cover.
638 * But normally we want to be checksumming post bounce,
639 * because part of the reason for bouncing is so the
640 * data can't be modified (by userspace) while it's in flight:
643 if (bch2_rechecksum_bio(c, src, version, op->crc,
646 bio_sectors(src) - (src_len >> 9),
650 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
651 bch2_rechecksum_bio(c, src, version, op->crc,
654 bio_sectors(src) - (src_len >> 9),
658 crc.compressed_size = dst_len >> 9;
659 crc.uncompressed_size = src_len >> 9;
660 crc.live_size = src_len >> 9;
662 swap(dst->bi_iter.bi_size, dst_len);
663 bch2_encrypt_bio(c, op->csum_type,
664 extent_nonce(version, crc), dst);
665 crc.csum = bch2_checksum_bio(c, op->csum_type,
666 extent_nonce(version, crc), dst);
667 crc.csum_type = op->csum_type;
668 swap(dst->bi_iter.bi_size, dst_len);
671 init_append_extent(op, wp, version, crc);
674 bio_advance(dst, dst_len);
675 bio_advance(src, src_len);
676 total_output += dst_len;
677 } while (dst->bi_iter.bi_size &&
678 src->bi_iter.bi_size &&
680 !bch2_keylist_realloc(&op->insert_keys,
682 ARRAY_SIZE(op->inline_keys),
683 BKEY_EXTENT_U64s_MAX));
685 more = src->bi_iter.bi_size != 0;
687 dst->bi_iter = saved_iter;
689 if (!bounce && more) {
690 dst = bio_split(src, total_output >> 9,
691 GFP_NOIO, &c->bio_write);
692 wbio_init(dst)->put_bio = true;
695 dst->bi_iter.bi_size = total_output;
697 /* Free unneeded pages after compressing: */
699 while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
700 mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
701 &c->bio_bounce_pages);
703 /* might have done a realloc... */
705 key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
707 dst->bi_end_io = bch2_write_endio;
708 dst->bi_private = &op->cl;
709 bio_set_op_attrs(dst, REQ_OP_WRITE, 0);
711 closure_get(dst->bi_private);
713 bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
717 bch_err(c, "error verifying existing checksum while "
718 "rewriting existing data (memory corruption?)");
722 bch2_bio_free_pages_pool(c, dst);
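/*
 * Main write loop: grab space from a write point, write as much as fits, and
 * repeat until the source bio is drained.  Whenever the keylist or open
 * bucket array fills up we bounce through bch2_write_index() to insert the
 * keys generated so far; on allocation failure we either do the same,
 * requeue ourselves on op->io_wq, or block the caller, depending on context.
 */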
729 static void __bch2_write(struct closure *cl)
731 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
732 struct bch_fs *c = op->c;
733 struct write_point *wp;
737 if (op->open_buckets_nr + op->nr_replicas >
738 ARRAY_SIZE(op->open_buckets))
739 continue_at(cl, bch2_write_index, index_update_wq(op));
741 /* for the device pointers and 1 for the chksum */
742 if (bch2_keylist_realloc(&op->insert_keys,
744 ARRAY_SIZE(op->inline_keys),
745 BKEY_EXTENT_U64s_MAX))
746 continue_at(cl, bch2_write_index, index_update_wq(op));
748 wp = bch2_alloc_sectors_start(c,
753 op->nr_replicas_required,
756 (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
759 if (unlikely(IS_ERR(wp))) {
760 if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
766 * If we already have some keys, must insert them first
767 * before allocating another open bucket. We only hit
768 * this case if open_bucket_nr > 1.
770 if (!bch2_keylist_empty(&op->insert_keys))
771 continue_at(cl, bch2_write_index,
772 index_update_wq(op));
775 * If we've looped, we're running out of a workqueue -
776 * not the bch2_write() caller's context - and we don't
777 * want to block the workqueue:
779 if (op->flags & BCH_WRITE_LOOPED)
780 continue_at(cl, __bch2_write, op->io_wq);
783 * Otherwise, we do want to block the caller on alloc
784 * failure instead of letting it queue up more and more
786 * XXX: this technically needs a try_to_freeze() -
787 * except that that's not safe because caller may have
788 * issued other IO... hmm..
794 ret = bch2_write_extent(op, wp);
796 BUG_ON(op->open_buckets_nr + wp->nr_ptrs_can_use >
797 ARRAY_SIZE(op->open_buckets));
798 bch2_open_bucket_get(c, wp,
799 &op->open_buckets_nr,
801 bch2_alloc_sectors_done(c, wp);
807 op->flags |= BCH_WRITE_DONE;
808 continue_at(cl, bch2_write_index, index_update_wq(op));
811 * Right now we can only error here if we went RO - the
812 * allocation failed, but we already checked for -ENOSPC when we
813 * got our reservation.
815 * XXX capacity might have changed, but we don't check for that
819 op->flags |= BCH_WRITE_DONE;
822 * No reason not to insert keys for whatever data was successfully
823 * written (especially for a cmpxchg operation that's moving data around)
826 continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
828 : bch2_write_done, index_update_wq(op));
832 * bch_write - handle a write to a cache device or flash only volume
834 * This is the starting point for any data to end up in a cache device; it could
835 * be from a normal write, or a writeback write, or a write to a flash only
836 * volume - it's also used by the moving garbage collector to compact data in
837 * mostly empty buckets.
839 * It first writes the data to the cache, creating a list of keys to be inserted
840 * (if the data won't fit in a single open bucket, there will be multiple keys);
841 * after the data is written it calls bch_journal, and after the keys have been
842 * added to the next journal write they're inserted into the btree.
844 * If op->discard is true, instead of inserting the data it invalidates the
845 * region of the cache represented by op->bio and op->inode.
847 void bch2_write(struct closure *cl)
849 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
850 struct bch_fs *c = op->c;
852 BUG_ON(!op->nr_replicas);
853 BUG_ON(!op->write_point.v);
854 BUG_ON(!bkey_cmp(op->pos, POS_MAX));
855 BUG_ON(bio_sectors(&op->wbio.bio) > U16_MAX);
857 memset(&op->failed, 0, sizeof(op->failed));
859 bch2_keylist_init(&op->insert_keys, op->inline_keys);
860 wbio_init(&op->wbio.bio)->put_bio = false;
862 if (c->opts.nochanges ||
863 !percpu_ref_tryget(&c->writes)) {
864 __bcache_io_error(c, "read only");
866 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
867 bch2_disk_reservation_put(c, &op->res);
871 bch2_increment_clock(c, bio_sectors(&op->wbio.bio), WRITE);
873 continue_at_nobarrier(cl, __bch2_write, NULL);
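/*
 * Rough usage sketch, modelled on promote_start() below; the caller's bio
 * and key setup are elided and other fields may also need initializing:
 *
 *	struct bch_write_op *op = ...;
 *
 *	bch2_write_op_init(op, c);
 *	op->write_point	= writepoint_hashed((unsigned long) current);
 *	op->pos		= ...;		// position in the extents btree
 *	op->nr_replicas	= ...;		// must be nonzero, see the BUG_ON()s above
 *
 *	closure_call(&op->cl, bch2_write, c->wq, parent_cl);
 */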
876 /* Cache promotion on read */
880 struct migrate_write write;
881 struct bio_vec bi_inline_vecs[0]; /* must be last */
884 static void promote_done(struct closure *cl)
886 struct promote_op *op =
887 container_of(cl, struct promote_op, cl);
888 struct bch_fs *c = op->write.op.c;
890 percpu_ref_put(&c->writes);
891 bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
895 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
897 struct bch_fs *c = rbio->c;
898 struct closure *cl = &op->cl;
899 struct bio *bio = &op->write.op.wbio.bio;
901 BUG_ON(!rbio->split || !rbio->bounce);
903 if (!percpu_ref_tryget(&c->writes))
906 trace_promote(&rbio->bio);
908 /* we now own pages: */
909 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
910 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
911 rbio->promote = NULL;
913 bch2_write_op_init(&op->write.op, c);
914 op->write.op.csum_type = bch2_data_checksum_type(c, rbio->opts.data_checksum);
915 op->write.op.compression_type =
916 bch2_compression_opt_to_type(rbio->opts.compression);
918 op->write.move_dev = -1;
919 op->write.op.devs = c->fastest_devs;
920 op->write.op.write_point = writepoint_hashed((unsigned long) current);
921 op->write.op.flags |= BCH_WRITE_ALLOC_NOWAIT;
922 op->write.op.flags |= BCH_WRITE_CACHED;
924 bch2_migrate_write_init(&op->write, rbio);
926 closure_init(cl, NULL);
927 closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
928 closure_return_with_destructor(cl, promote_done);
932 * XXX: multiple promotes can race with each other, wastefully. Keep a list of
933 * outstanding promotes?
935 static struct promote_op *promote_alloc(struct bch_read_bio *rbio)
937 struct promote_op *op;
939 /* data might have to be decompressed in the write path: */
940 unsigned pages = DIV_ROUND_UP(rbio->pick.crc.uncompressed_size,
943 BUG_ON(!rbio->bounce);
944 BUG_ON(pages < rbio->bio.bi_vcnt);
946 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages,
951 bio = &op->write.op.wbio.bio;
952 bio_init(bio, bio->bi_inline_vecs, pages);
954 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
955 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
960 /* only promote if we're not reading from the fastest tier: */
961 static bool should_promote(struct bch_fs *c,
962 struct extent_pick_ptr *pick, unsigned flags)
964 if (!(flags & BCH_READ_MAY_PROMOTE))
967 if (percpu_ref_is_dying(&c->writes))
970 return c->fastest_tier &&
971 c->fastest_tier < c->tiers + pick->ca->mi.tier;
976 static void bch2_read_nodecode_retry(struct bch_fs *, struct bch_read_bio *,
977 struct bvec_iter, u64,
978 struct bch_devs_mask *, unsigned);
980 #define READ_RETRY_AVOID 1
986 RBIO_CONTEXT_HIGHPRI,
987 RBIO_CONTEXT_UNBOUND,
990 static inline struct bch_read_bio *
991 bch2_rbio_parent(struct bch_read_bio *rbio)
993 return rbio->split ? rbio->parent : rbio;
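/*
 * Read completions may need to run in progressively heavier contexts:
 * RBIO_CONTEXT_HIGHPRI when all we have to do is verify a checksum,
 * RBIO_CONTEXT_UNBOUND when decompressing, decrypting or narrowing crcs.
 * bch2_rbio_punt() runs @fn directly if we're already in a context at least
 * that heavy, otherwise it queues the work on the given workqueue.
 */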
997 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
998 enum rbio_context context,
999 struct workqueue_struct *wq)
1001 if (context <= rbio->context) {
1004 rbio->work.func = fn;
1005 rbio->context = context;
1006 queue_work(wq, &rbio->work);
1010 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1012 struct bch_read_bio *parent = rbio->parent;
1014 BUG_ON(!rbio->split);
1017 kfree(rbio->promote);
1019 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1020 bio_put(&rbio->bio);
1025 static void bch2_rbio_done(struct bch_read_bio *rbio)
1028 kfree(rbio->promote);
1029 rbio->promote = NULL;
1032 rbio = bch2_rbio_free(rbio);
1033 bio_endio(&rbio->bio);
1036 static void bch2_rbio_retry(struct work_struct *work)
1038 struct bch_read_bio *rbio =
1039 container_of(work, struct bch_read_bio, work);
1040 struct bch_fs *c = rbio->c;
1041 struct bvec_iter iter = rbio->bvec_iter;
1042 unsigned flags = rbio->flags;
1043 u64 inode = rbio->pos.inode;
1044 struct bch_devs_mask avoid;
1046 trace_read_retry(&rbio->bio);
1048 memset(&avoid, 0, sizeof(avoid));
1050 if (rbio->retry == READ_RETRY_AVOID)
1051 __set_bit(rbio->pick.ca->dev_idx, avoid.d);
1054 kfree(rbio->promote);
1055 rbio->promote = NULL;
1058 rbio = bch2_rbio_free(rbio);
1060 rbio->bio.bi_status = 0;
1062 if (!(flags & BCH_READ_NODECODE))
1063 flags |= BCH_READ_MUST_CLONE;
1064 flags |= BCH_READ_IN_RETRY;
1065 flags &= ~BCH_READ_MAY_PROMOTE;
1067 if (flags & BCH_READ_NODECODE)
1068 bch2_read_nodecode_retry(c, rbio, iter, inode, &avoid, flags);
1070 __bch2_read(c, rbio, iter, inode, &avoid, flags);
1073 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1076 rbio->retry = retry;
1078 if (rbio->flags & BCH_READ_IN_RETRY)
1081 if (retry == READ_ERR) {
1082 bch2_rbio_parent(rbio)->bio.bi_status = error;
1083 bch2_rbio_done(rbio);
1085 bch2_rbio_punt(rbio, bch2_rbio_retry,
1086 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
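/*
 * Crc "narrowing": when we read only part of an extent whose checksum covers
 * more data than is still live, we now know the checksum of just the live
 * part, so opportunistically update the extent in the btree with the
 * narrower checksum; future reads then don't have to read and checksum the
 * dead portions.  This is best effort - if the extent was merged, moved or
 * partially overwritten since we read it, we just give up.
 */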
1090 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1092 struct bch_fs *c = rbio->c;
1093 struct btree_iter iter;
1095 struct bkey_i_extent *e;
1097 struct bch_extent_crc_unpacked new_crc;
1101 if (rbio->pick.crc.compression_type)
1104 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, rbio->pos,
1107 k = bch2_btree_iter_peek(&iter);
1108 if (IS_ERR_OR_NULL(k.k))
1111 if (!bkey_extent_is_data(k.k))
1114 bkey_reassemble(&new.k, k);
1115 e = bkey_i_to_extent(&new.k);
1117 if (!bch2_extent_matches_ptr(c, extent_i_to_s_c(e),
1120 rbio->pick.crc.offset) ||
1121 bversion_cmp(e->k.version, rbio->version))
1124 /* Extent was merged? */
1125 if (bkey_start_offset(&e->k) < rbio->pos.offset ||
1126 e->k.p.offset > rbio->pos.offset + rbio->pick.crc.uncompressed_size)
1129 /* The extent might have been partially overwritten since we read it: */
1130 offset = rbio->pick.crc.offset + (bkey_start_offset(&e->k) - rbio->pos.offset);
1132 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1133 rbio->pick.crc, NULL, &new_crc,
1135 rbio->pick.crc.csum_type)) {
1136 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1140 if (!bch2_extent_narrow_crcs(e, new_crc))
1143 ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
1144 BTREE_INSERT_ATOMIC|
1145 BTREE_INSERT_NOFAIL|
1146 BTREE_INSERT_NOWAIT,
1147 BTREE_INSERT_ENTRY(&iter, &e->k_i));
1151 bch2_btree_iter_unlock(&iter);
1154 static bool should_narrow_crcs(struct bkey_s_c_extent e,
1155 struct extent_pick_ptr *pick,
1158 return !(flags & BCH_READ_IN_RETRY) &&
1159 bch2_can_narrow_extent_crcs(e, pick->crc);
1162 /* Inner part that may run in process context */
1163 static void __bch2_read_endio(struct work_struct *work)
1165 struct bch_read_bio *rbio =
1166 container_of(work, struct bch_read_bio, work);
1167 struct bch_fs *c = rbio->c;
1168 struct bio *src = &rbio->bio, *dst = &bch2_rbio_parent(rbio)->bio;
1169 struct bvec_iter dst_iter = rbio->bvec_iter;
1170 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1171 struct nonce nonce = extent_nonce(rbio->version, crc);
1172 struct bch_csum csum;
1174 /* Reset iterator for checksumming and copying bounced data: */
1176 src->bi_iter.bi_size = crc.compressed_size << 9;
1177 src->bi_iter.bi_idx = 0;
1178 src->bi_iter.bi_bvec_done = 0;
1180 src->bi_iter = rbio->bvec_iter;
1183 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1184 if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1187 if (unlikely(rbio->narrow_crcs))
1188 bch2_rbio_narrow_crcs(rbio);
1190 if (rbio->flags & BCH_READ_NODECODE)
1193 /* Adjust crc to point to subset of data we want: */
1194 crc.offset += rbio->bvec_iter.bi_sector - rbio->pos.offset;
1195 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
1197 if (crc.compression_type != BCH_COMPRESSION_NONE) {
1198 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1199 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1200 goto decompression_err;
1202 /* don't need to decrypt the entire bio: */
1203 nonce = nonce_add(nonce, crc.offset << 9);
1204 bio_advance(src, crc.offset << 9);
1206 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1207 src->bi_iter.bi_size = dst_iter.bi_size;
1209 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1212 struct bvec_iter src_iter = src->bi_iter;
1213 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1217 if (rbio->promote) {
1219 * Re encrypt data we decrypted, so it's consistent with
1222 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1223 promote_start(rbio->promote, rbio);
1226 if (likely(!(rbio->flags & BCH_READ_IN_RETRY)))
1227 bch2_rbio_done(rbio);
1231 * Checksum error: if the bio wasn't bounced, we may have been
1232 * reading into buffers owned by userspace (that userspace can
1233 * scribble over) - retry the read, bouncing it this time:
1235 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1236 rbio->flags |= BCH_READ_MUST_BOUNCE;
1237 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1241 bch2_dev_io_error(rbio->pick.ca,
1242 "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
1243 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1244 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1245 csum.hi, csum.lo, crc.csum_type);
1246 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1249 __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1251 (u64) rbio->bvec_iter.bi_sector);
1252 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1256 static void bch2_read_endio(struct bio *bio)
1258 struct bch_read_bio *rbio =
1259 container_of(bio, struct bch_read_bio, bio);
1260 struct bch_fs *c = rbio->c;
1261 struct workqueue_struct *wq = NULL;
1262 enum rbio_context context = RBIO_CONTEXT_NULL;
1264 bch2_latency_acct(rbio->pick.ca, rbio->submit_time_us, READ);
1266 percpu_ref_put(&rbio->pick.ca->io_ref);
1269 rbio->bio.bi_end_io = rbio->end_io;
1271 if (bch2_dev_io_err_on(bio->bi_status, rbio->pick.ca, "data read")) {
1272 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1276 if (rbio->pick.ptr.cached &&
1277 (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1278 ptr_stale(rbio->pick.ca, &rbio->pick.ptr))) {
1279 atomic_long_inc(&c->read_realloc_races);
1281 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1282 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1284 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1288 if (rbio->narrow_crcs ||
1289 rbio->pick.crc.compression_type ||
1290 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1291 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1292 else if (rbio->pick.crc.csum_type)
1293 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1295 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
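/*
 * Issue the read for a single extent (or a fragment of one).  We bounce and
 * read the full extent when it's compressed, or when it's checksummed and we
 * only want a subset, are reading encrypted data into user mapped pages, or
 * were told to bounce; otherwise the pointer and crc are adjusted so we read
 * just the requested sectors, into the caller's bio unless a clone is needed
 * for retries.
 */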
1298 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1299 struct bvec_iter iter, struct bkey_s_c_extent e,
1300 struct extent_pick_ptr *pick, unsigned flags)
1302 struct bch_read_bio *rbio;
1303 bool split = false, bounce = false, read_full = false;
1304 bool promote = false, narrow_crcs = false;
1305 struct bpos pos = bkey_start_pos(e.k);
1308 lg_local_lock(&c->usage_lock);
1309 bucket_io_clock_reset(c, pick->ca,
1310 PTR_BUCKET_NR(pick->ca, &pick->ptr), READ);
1311 lg_local_unlock(&c->usage_lock);
1313 narrow_crcs = should_narrow_crcs(e, pick, flags);
1315 if (flags & BCH_READ_NODECODE) {
1316 BUG_ON(iter.bi_size < pick->crc.compressed_size << 9);
1317 iter.bi_size = pick->crc.compressed_size << 9;
1321 if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1322 flags |= BCH_READ_MUST_BOUNCE;
1324 EBUG_ON(bkey_start_offset(e.k) > iter.bi_sector ||
1325 e.k->p.offset < bvec_iter_end_sector(iter));
1327 if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
1328 (pick->crc.csum_type != BCH_CSUM_NONE &&
1329 (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1330 (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
1331 (flags & BCH_READ_USER_MAPPED)) ||
1332 (flags & BCH_READ_MUST_BOUNCE)))) {
1337 promote = should_promote(c, pick, flags);
1338 /* could also set read_full */
1343 EBUG_ON(pick->crc.compression_type);
1344 EBUG_ON(pick->crc.csum_type &&
1345 (bvec_iter_sectors(iter) != pick->crc.uncompressed_size ||
1346 bvec_iter_sectors(iter) != pick->crc.live_size ||
1348 iter.bi_sector != pos.offset));
1350 pick->ptr.offset += pick->crc.offset +
1351 (iter.bi_sector - pos.offset);
1352 pick->crc.compressed_size = bvec_iter_sectors(iter);
1353 pick->crc.uncompressed_size = bvec_iter_sectors(iter);
1354 pick->crc.offset = 0;
1355 pick->crc.live_size = bvec_iter_sectors(iter);
1356 pos.offset = iter.bi_sector;
1360 unsigned sectors = pick->crc.compressed_size;
1362 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
1363 DIV_ROUND_UP(sectors, PAGE_SECTORS),
1364 &c->bio_read_split),
1367 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
1369 } else if (flags & BCH_READ_MUST_CLONE) {
1371 * Have to clone if there were any splits, due to error
1372 * reporting issues (if a split errored, and retrying didn't
1373 * work, when it reports the error to its parent (us) we don't
1374 * know if the error was from our bio, and we should retry, or
1375 * from the whole bio, in which case we don't want to retry and lose the error)
1378 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
1379 &c->bio_read_split),
1381 rbio->bio.bi_iter = iter;
1386 rbio->bio.bi_iter = iter;
1388 BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
1391 BUG_ON(bio_sectors(&rbio->bio) != pick->crc.compressed_size);
1395 rbio->parent = orig;
1397 rbio->end_io = orig->bio.bi_end_io;
1398 rbio->bvec_iter = iter;
1399 rbio->submit_time_us = local_clock_us();
1400 rbio->flags = flags;
1401 rbio->bounce = bounce;
1402 rbio->split = split;
1403 rbio->narrow_crcs = narrow_crcs;
1406 rbio->devs_have = bch2_extent_devs(e);
1409 rbio->version = e.k->version;
1410 rbio->promote = promote ? promote_alloc(rbio) : NULL;
1411 INIT_WORK(&rbio->work, NULL);
1413 bio_set_dev(&rbio->bio, pick->ca->disk_sb.bdev);
1414 rbio->bio.bi_opf = orig->bio.bi_opf;
1415 rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
1416 rbio->bio.bi_end_io = bch2_read_endio;
1419 trace_read_bounce(&rbio->bio);
1421 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
1422 this_cpu_add(pick->ca->io_done->sectors[READ][BCH_DATA_USER],
1423 bio_sectors(&rbio->bio));
1425 if (likely(!(flags & BCH_READ_IN_RETRY))) {
1426 submit_bio(&rbio->bio);
1428 submit_bio_wait(&rbio->bio);
1430 rbio->context = RBIO_CONTEXT_UNBOUND;
1431 bch2_read_endio(&rbio->bio);
1435 rbio = bch2_rbio_free(rbio);
1437 bch2_rbio_done(rbio);
1443 static void bch2_read_nodecode_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1444 struct bvec_iter bvec_iter, u64 inode,
1445 struct bch_devs_mask *avoid, unsigned flags)
1447 struct extent_pick_ptr pick;
1448 struct btree_iter iter;
1453 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
1454 POS(inode, bvec_iter.bi_sector),
1457 k = bch2_btree_iter_peek_slot(&iter);
1458 if (btree_iter_err(k)) {
1459 bch2_btree_iter_unlock(&iter);
1463 bkey_reassemble(&tmp.k, k);
1464 k = bkey_i_to_s_c(&tmp.k);
1465 bch2_btree_iter_unlock(&iter);
1467 if (!bkey_extent_is_data(k.k) ||
1468 !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k),
1471 rbio->pick.crc.offset) ||
1472 bkey_start_offset(k.k) != bvec_iter.bi_sector)
1475 bch2_extent_pick_ptr(c, k, avoid, &pick);
1476 if (IS_ERR(pick.ca)) {
1477 bcache_io_error(c, &rbio->bio, "no device to read from");
1478 bio_endio(&rbio->bio);
1485 if (pick.crc.compressed_size > bvec_iter_sectors(bvec_iter)) {
1486 percpu_ref_put(&pick.ca->io_ref);
1491 ret = __bch2_read_extent(c, rbio, bvec_iter, bkey_s_c_to_extent(k),
1494 case READ_RETRY_AVOID:
1495 __set_bit(pick.ca->dev_idx, avoid->d);
1499 bio_endio(&rbio->bio);
1506 * extent we wanted to read no longer exists, or
1507 * was merged or partially overwritten (and thus
1508 * possibly bigger than the memory that was
1509 * originally allocated)
1511 rbio->bio.bi_status = BLK_STS_AGAIN;
1512 bio_endio(&rbio->bio);
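/*
 * Top of the read path: walk the extents btree across the requested range,
 * issuing a read for each extent and zero filling holes, splitting the
 * request when it spans several extents; READ_RETRY_AVOID results retry the
 * read while avoiding the device that failed.
 */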
1516 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
1517 struct bvec_iter bvec_iter, u64 inode,
1518 struct bch_devs_mask *avoid, unsigned flags)
1520 struct btree_iter iter;
1524 EBUG_ON(flags & BCH_READ_NODECODE);
1526 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
1527 POS(inode, bvec_iter.bi_sector),
1528 BTREE_ITER_SLOTS, k) {
1530 struct extent_pick_ptr pick;
1531 struct bvec_iter fragment;
1534 * Unlock the iterator while the btree node's lock is still in
1535 * cache, before doing the IO:
1537 bkey_reassemble(&tmp.k, k);
1538 k = bkey_i_to_s_c(&tmp.k);
1539 bch2_btree_iter_unlock(&iter);
1541 bch2_extent_pick_ptr(c, k, avoid, &pick);
1542 if (IS_ERR(pick.ca)) {
1543 bcache_io_error(c, &rbio->bio, "no device to read from");
1544 bio_endio(&rbio->bio);
1548 fragment = bvec_iter;
1549 fragment.bi_size = (min_t(u64, k.k->p.offset,
1550 bvec_iter_end_sector(bvec_iter)) -
1551 bvec_iter.bi_sector) << 9;
1554 if (fragment.bi_size != bvec_iter.bi_size) {
1555 bio_inc_remaining(&rbio->bio);
1556 flags |= BCH_READ_MUST_CLONE;
1557 trace_read_split(&rbio->bio);
1560 ret = __bch2_read_extent(c, rbio, fragment,
1561 bkey_s_c_to_extent(k),
1564 case READ_RETRY_AVOID:
1565 __set_bit(pick.ca->dev_idx, avoid->d);
1569 rbio->bio.bi_status = BLK_STS_IOERR;
1570 bio_endio(&rbio->bio);
1574 zero_fill_bio_iter(&rbio->bio, fragment);
1576 if (fragment.bi_size == bvec_iter.bi_size)
1577 bio_endio(&rbio->bio);
1580 if (fragment.bi_size == bvec_iter.bi_size)
1583 bio_advance_iter(&rbio->bio, &bvec_iter, fragment.bi_size);
1587 * If we get here, it better have been because there was an error
1588 * reading a btree node
1590 ret = bch2_btree_iter_unlock(&iter);
1592 bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
1593 bio_endio(&rbio->bio);