// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "rebalance.h"
#include "super.h"
#include "super-io.h"

#include <linux/blkdev.h>
#include <linux/random.h>

#include <trace/events/bcachefs.h>
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	const struct bch_devs_mask *devs;
	unsigned d, nr = 0, total = 0;
	u64 now = local_clock(), last;
	s64 congested;
	struct bch_dev *ca;

	if (!target)
		return false;

	rcu_read_lock();
	devs = bch2_target_to_mask(c, target);
	for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[d]);
		if (!ca)
			continue;

		congested = atomic_read(&ca->congested);
		last = READ_ONCE(ca->congested_last);
		if (time_after64(now, last))
			congested -= (now - last) >> 12;

		total += max(congested, 0LL);
		nr++;
	}
	rcu_read_unlock();

	return bch2_rand_range(nr * CONGESTED_MAX) < total;
}
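
/*
 * Worked example of the check above (a sketch, not part of the build): with
 * two devices in the target (nr == 2) and a summed congestion score of
 * total == CONGESTED_MAX / 2, bch2_rand_range(2 * CONGESTED_MAX) returns a
 * uniform value in [0, 2 * CONGESTED_MAX), so the target reads as congested
 * with probability total / (nr * CONGESTED_MAX) = 25% - i.e. the answer is
 * probabilistic, scaled by how congested the devices are on average.
 */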
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}
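
/*
 * Sketch of the shift approximation used above: for latency_threshold = 1024
 * (ilog2 = 10) and latency_over = 2048, we add 2048 >> 8 = 8 to ->congested,
 * which matches latency_over * 4 / latency_threshold = 8 without a divide.
 * The approximation is exact when latency_threshold is a power of two and
 * within a factor of two otherwise - plenty for a congestion heuristic.
 */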
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new, v = atomic64_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0 << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
}
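
/*
 * ewma_add(old, io_latency, 5) above computes an exponentially weighted
 * moving average with weight 2^5. A minimal sketch of the update (assuming
 * the shift-based ewma_add macro from the bcachefs utility headers):
 *
 *	new = ((old << 5) - old + io_latency) >> 5;
 *
 * i.e. roughly new = old + (io_latency - old) / 32, so a single outlier
 * moves the tracked latency by only ~3%.
 */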
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);

	bio->bi_vcnt = 0;
}
static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOIO);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
	}

	return page;
}
void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min(PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}
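
/*
 * Design note: __bio_alloc_page_pool() above prefers plain alloc_page() and
 * only falls back to the bio_bounce_pages mempool (under
 * bio_bounce_pages_lock) once an allocation fails. The mempool guarantees
 * forward progress under memory pressure; the mutex ensures only one thread
 * at a time can be drawing down its reserve.
 */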
/* Writes */

void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	bkey_for_each_ptr(ptrs, ptr) {
		BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
		       !c->devs[ptr->dev]);

		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
						   &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= bch2_dev_get_ioref(ca, WRITE);
		n->submit_time		= local_clock();
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (!journal_flushes_device(ca))
			n->bio.bi_opf |= REQ_FUA;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);
			submit_bio(&n->bio);
		} else {
			n->bio.bi_status = BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}
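
/*
 * Note on the fan-out above: every pointer except the last gets a clone of
 * the original bio, and bio_inc_remaining() on the parent means
 * bio_endio(&wbio->bio) only fires after all the clones complete - so the
 * write op sees a single completion covering all replicas.
 */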
static void __bch2_write(struct closure *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	if (!op->error && (op->flags & BCH_WRITE_FLUSH))
		op->error = bch2_journal_error(&c->journal);

	if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
		bch2_disk_reservation_put(c, &op->res);
	percpu_ref_put(&c->writes);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);

	closure_return(cl);
}
int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct keylist *keys = &op->insert_keys;
	int ret;

	BUG_ON(bch2_keylist_empty(keys));
	bch2_verify_keylist_sorted(keys);

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&bch2_keylist_front(keys)->k),
				   BTREE_ITER_INTENT);

	do {
		BKEY_PADDED(k) split;

		bkey_copy(&split.k, bch2_keylist_front(keys));

		ret = bch2_extent_trim_atomic(&split.k, iter);
		if (ret)
			break;

		bch2_trans_update(&trans,
				  BTREE_INSERT_ENTRY(iter, &split.k));

		ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_USE_RESERVE);
		if (ret == -EINTR)
			goto retry;
		if (ret)
			break;

		if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
			bch2_cut_front(iter->pos, bch2_keylist_front(keys));
		else
			bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_exit(&trans);

	return ret;
}
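
/*
 * The loop above commits one atomically-trimmed extent at a time: if
 * bch2_extent_trim_atomic() shortened the key, the partial insert advances
 * iter->pos and bch2_cut_front() drops the inserted prefix from the keylist,
 * so the next iteration retries the remainder; -EINTR from
 * bch2_trans_commit() restarts the whole transaction.
 */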
/**
 * bch_write_index - after a write, update index to point to new data
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	struct bch_extent_ptr *ptr;
	struct bkey_i *src, *dst = keys->keys, *n, *k;
	unsigned dev;
	int ret;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);
		bkey_copy(dst, src);

		bch2_bkey_drop_ptrs(bkey_i_to_s(dst), ptr,
				    test_bit(ptr->dev, op->failed.d));

		if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(dst))) {
			ret = -EIO;
			goto err;
		}

		dst = bkey_next(dst);
	}

	keys->top = dst;

	/*
	 * probably not the ideal place to hook this in, but I don't
	 * particularly want to plumb io_opts all the way through the btree
	 * update stack right now
	 */
	for_each_keylist_key(keys, k)
		bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);
		int ret = op->index_update_fn(op);

		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret) {
			__bcache_io_error(c, "btree IO error %i", ret);
			op->error = ret;
		}
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	goto out;
}
static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	__bch2_write_index(op);

	if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
		bch2_journal_flush_seq_async(&c->journal,
					     *op_journal_seq(op),
					     cl);
		continue_at(cl, bch2_write_done, index_update_wq(op));
	} else {
		continue_at_nobarrier(cl, bch2_write_done, NULL);
	}
}
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);

	if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
		set_bit(wbio->dev, op->failed.d);

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}
static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bch_fs *c = op->c;
	struct bkey_i_extent *e = bkey_extent_init(op->insert_keys.top);
	struct extent_ptr_decoded p = { .crc = crc };
	struct open_bucket *ob;
	unsigned i;

	op->pos.offset += crc.uncompressed_size;
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.version	= version;

	BUG_ON(crc.compressed_size > wp->sectors_free);
	wp->sectors_free -= crc.compressed_size;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		p.ptr = ob->ptr;
		p.ptr.cached = !ca->mi.durability ||
			(op->flags & BCH_WRITE_CACHED) != 0;
		p.ptr.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_extent_ptr_decoded_append(&e->k_i, &p);

		BUG_ON(crc.compressed_size > ob->sectors_free);
		ob->sectors_free -= crc.compressed_size;
	}

	bch2_keylist_push(&op->insert_keys);
}
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->sb.encoded_extent_max << 9));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}
static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum))
		return -EIO;

	bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return 0;
}
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    op->crc.compression_type == op->compression_type) {
		if (!op->crc.compression_type &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type))
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (op->crc.compression_type) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum))
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type))
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_type ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	struct bkey_i *key_to_write;
	void *ec_buf;
	unsigned key_to_write_offset = op->insert_keys.top_p -
		op->insert_keys.keys_p;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (ec_buf ||
	    op->compression_type ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc =
			(struct bch_extent_crc_unpacked) { 0 };
		struct bversion version = op->version;
		size_t dst_len, src_len;

		if (page_alloc_failed &&
		    bio_sectors(dst) < wp->sectors_free &&
		    bio_sectors(dst) < c->sb.encoded_extent_max)
			break;

		BUG_ON(op->compression_type &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_type && !bounce);

		crc.compression_type = op->compression_type
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_type)
			: 0;
		if (!crc.compression_type) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->sb.encoded_extent_max << 9);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version) + 1;
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc.compression_type &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			bch2_encrypt_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum = bch2_checksum_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				       op->inline_keys,
				       ARRAY_SIZE(op->inline_keys),
				       BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOIO, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;

	/* might have done a realloc... */

	key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);

	bch2_ec_add_backpointer(c, wp,
				bkey_start_pos(&key_to_write->k),
				total_input >> 9);
do_write:
	dst->bi_end_io	= bch2_write_endio;
	dst->bi_private	= &op->cl;
	bio_set_op_attrs(dst, REQ_OP_WRITE, 0);

	closure_get(dst->bi_private);

	bch2_submit_wbio_replicas(to_wbio(dst), c, BCH_DATA_USER,
				  key_to_write);
	return more;
csum_err:
	bch_err(c, "error verifying existing checksum while "
		"rewriting existing data (memory corruption?)");
	ret = -EIO;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}
static void __bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;
	struct write_point *wp;
	int ret;
again:
	memset(&op->failed, 0, sizeof(op->failed));

	do {
		/* +1 for possible cache device: */
		if (op->open_buckets.nr + op->nr_replicas + 1 >
		    ARRAY_SIZE(op->open_buckets.v))
			goto flush_io;

		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 BKEY_EXTENT_U64s_MAX))
			goto flush_io;

		wp = bch2_alloc_sectors_start(c,
			op->target,
			op->opts.erasure_code,
			op->write_point,
			&op->devs_have,
			op->nr_replicas,
			op->nr_replicas_required,
			op->alloc_reserve,
			op->flags,
			(op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
		EBUG_ON(!wp);

		if (unlikely(IS_ERR(wp))) {
			if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
				ret = PTR_ERR(wp);
				goto err;
			}

			goto flush_io;
		}

		ret = bch2_write_extent(op, wp);

		bch2_open_bucket_get(c, wp, &op->open_buckets);
		bch2_alloc_sectors_done(c, wp);

		if (ret < 0)
			goto err;
	} while (ret);

	continue_at(cl, bch2_write_index, index_update_wq(op));
	return;
err:
	op->error = ret;

	continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
		    ? bch2_write_index
		    : bch2_write_done, index_update_wq(op));
	return;
flush_io:
	closure_sync(cl);

	if (!bch2_keylist_empty(&op->insert_keys)) {
		__bch2_write_index(op);

		if (op->error) {
			continue_at_nobarrier(cl, bch2_write_done, NULL);
			return;
		}
	}

	goto again;
}
/**
 * bch_write - handle a write to a cache device or flash only volume
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;

	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(!bkey_cmp(op->pos, POS_MAX));

	if (bio_sectors(bio) & (c->opts.block_size - 1)) {
		__bcache_io_error(c, "misaligned write");
		op->error = -EIO;
		goto err;
	}

	op->start_time = local_clock();

	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (c->opts.nochanges ||
	    !percpu_ref_tryget(&c->writes)) {
		__bcache_io_error(c, "read only");
		op->error = -EROFS;
		goto err;
	}

	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	continue_at_nobarrier(cl, __bch2_write, NULL);
	return;
err:
	if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
		bch2_disk_reservation_put(c, &op->res);
	closure_return(cl);
}
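
/*
 * Usage sketch (hypothetical caller; bch2_write_op_init and the surrounding
 * names are assumptions, not code from this file):
 *
 *	bch2_write_op_init(&op, c, io_opts);
 *	op.pos		= POS(inode, sector);
 *	op.write_point	= writepoint_hashed((unsigned long) current);
 *	closure_call(&op.cl, bch2_write, NULL, &parent_cl);
 *
 * i.e. bch2_write() is always invoked as a closure function; completion is
 * signalled by the closure being returned from bch2_write_done().
 */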
/* Cache promotion on read */

struct promote_op {
	struct closure		cl;
	struct rcu_head		rcu;
	u64			start_time;

	struct rhash_head	hash;
	struct bpos		pos;

	struct migrate_write	write;
	struct bio_vec		bi_inline_vecs[0]; /* must be last */
};

static const struct rhashtable_params bch_promote_params = {
	.head_offset	= offsetof(struct promote_op, hash),
	.key_offset	= offsetof(struct promote_op, pos),
	.key_len	= sizeof(struct bpos),
};
static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
				  struct bpos pos,
				  struct bch_io_opts opts,
				  unsigned flags)
{
	if (!(flags & BCH_READ_MAY_PROMOTE))
		return false;

	if (!opts.promote_target)
		return false;

	if (bch2_bkey_has_target(c, k, opts.promote_target))
		return false;

	if (bch2_target_congested(c, opts.promote_target))
		return false;

	if (rhashtable_lookup_fast(&c->promote_table, &pos,
				   bch_promote_params))
		return false;

	return true;
}
static void promote_free(struct bch_fs *c, struct promote_op *op)
{
	int ret;

	ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
				     bch_promote_params);
	BUG_ON(ret);
	percpu_ref_put(&c->writes);
	kfree_rcu(op, rcu);
}
static void promote_done(struct closure *cl)
{
	struct promote_op *op =
		container_of(cl, struct promote_op, cl);
	struct bch_fs *c = op->write.op.c;

	bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
			       op->start_time);

	bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
	promote_free(c, op);
}
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct closure *cl = &op->cl;
	struct bio *bio = &op->write.op.wbio.bio;

	trace_promote(&rbio->bio);

	/* we now own pages: */
	BUG_ON(!rbio->bounce);
	BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);

	memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
	       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
	swap(bio->bi_vcnt, rbio->bio.bi_vcnt);

	bch2_migrate_read_done(&op->write, rbio);

	closure_init(cl, NULL);
	closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
	closure_return_with_destructor(cl, promote_done);
}
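
/*
 * Promotion lifecycle, as a sketch: should_promote() filters (flags, target,
 * congestion, dedup via promote_table) -> __promote_alloc() allocates the op
 * plus a bounce rbio and inserts it into promote_table -> once the read
 * completes, promote_start() steals the bounce pages and kicks off an
 * internal write via bch2_write -> promote_done() records timing and frees
 * everything through promote_free().
 */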
static struct promote_op *__promote_alloc(struct bch_fs *c,
					  enum btree_id btree_id,
					  struct bpos pos,
					  struct extent_ptr_decoded *pick,
					  struct bch_io_opts opts,
					  unsigned sectors,
					  struct bch_read_bio **rbio)
{
	struct promote_op *op = NULL;
	struct bio *bio;
	unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	int ret;

	if (!percpu_ref_tryget(&c->writes))
		return NULL;

	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
	if (!op)
		goto err;

	op->start_time = local_clock();
	op->pos = pos;

	/*
	 * We don't use the mempool here because extents that aren't
	 * checksummed or compressed can be too big for the mempool:
	 */
	*rbio = kzalloc(sizeof(struct bch_read_bio) +
			sizeof(struct bio_vec) * pages,
			GFP_NOIO);
	if (!*rbio)
		goto err;

	rbio_init(&(*rbio)->bio, opts);
	bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);

	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
				 GFP_NOIO))
		goto err;

	(*rbio)->bounce		= true;
	(*rbio)->split		= true;
	(*rbio)->kmalloc	= true;

	if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
					  bch_promote_params))
		goto err;

	bio = &op->write.op.wbio.bio;
	bio_init(bio, bio->bi_inline_vecs, pages);

	ret = bch2_migrate_write_init(c, &op->write,
			writepoint_hashed((unsigned long) current),
			opts,
			DATA_PROMOTE,
			(struct data_opts) {
				.target = opts.promote_target
			},
			btree_id,
			bkey_s_c_null);
	BUG_ON(ret);

	return op;
err:
	if (*rbio)
		bio_free_pages(&(*rbio)->bio);
	kfree(*rbio);
	*rbio = NULL;
	kfree(op);
	percpu_ref_put(&c->writes);
	return NULL;
}
static inline struct promote_op *promote_alloc(struct bch_fs *c,
					       struct bvec_iter iter,
					       struct bkey_s_c k,
					       struct extent_ptr_decoded *pick,
					       struct bch_io_opts opts,
					       unsigned flags,
					       struct bch_read_bio **rbio,
					       bool *bounce,
					       bool *read_full)
{
	bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
	/* data might have to be decompressed in the write path: */
	unsigned sectors = promote_full
		? max(pick->crc.compressed_size, pick->crc.live_size)
		: bvec_iter_sectors(iter);
	struct bpos pos = promote_full
		? bkey_start_pos(k.k)
		: POS(k.k->p.inode, iter.bi_sector);
	struct promote_op *promote;

	if (!should_promote(c, k, pos, opts, flags))
		return NULL;

	promote = __promote_alloc(c,
				  k.k->type == KEY_TYPE_reflink_v
				  ? BTREE_ID_REFLINK
				  : BTREE_ID_EXTENTS,
				  pos, pick, opts, sectors, rbio);
	if (!promote)
		return NULL;

	*bounce		= true;
	*read_full	= promote_full;
	return promote;
}
/* Read */

#define READ_RETRY_AVOID	1
#define READ_RETRY		2
#define READ_ERR		3

enum rbio_context {
	RBIO_CONTEXT_NULL,
	RBIO_CONTEXT_HIGHPRI,
	RBIO_CONTEXT_UNBOUND,
};
static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}
static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
			   enum rbio_context context,
			   struct workqueue_struct *wq)
{
	if (context <= rbio->context) {
		fn(&rbio->work);
	} else {
		rbio->work.func		= fn;
		rbio->context		= context;
		queue_work(wq, &rbio->work);
	}
}
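
/*
 * Example of the ordering contract above: a checksummed read's endio work
 * needs at least RBIO_CONTEXT_HIGHPRI; if the bio is already completing in
 * RBIO_CONTEXT_UNBOUND (a retry), context <= rbio->context holds and fn runs
 * synchronously instead of being requeued - the context only ever ratchets
 * upward.
 */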
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
	BUG_ON(rbio->bounce && !rbio->split);

	if (rbio->promote)
		promote_free(rbio->c, rbio->promote);
	rbio->promote = NULL;

	if (rbio->bounce)
		bch2_bio_free_pages_pool(rbio->c, &rbio->bio);

	if (rbio->split) {
		struct bch_read_bio *parent = rbio->parent;

		if (rbio->kmalloc)
			kfree(rbio);
		else
			bio_put(&rbio->bio);

		rbio = parent;
	}

	return rbio;
}
static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
			       rbio->start_time);
	bio_endio(&rbio->bio);
}
static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
				     struct bvec_iter bvec_iter, u64 inode,
				     struct bch_io_failures *failed,
				     unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	BKEY_PADDED(k) tmp;
	struct bkey_s_c k;
	int ret;

	flags &= ~BCH_READ_LAST_FRAGMENT;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   rbio->pos, BTREE_ITER_SLOTS);
retry:
	rbio->bio.bi_status = 0;

	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k))
		goto err;

	bkey_reassemble(&tmp.k, k);
	k = bkey_i_to_s_c(&tmp.k);
	bch2_trans_unlock(&trans);

	if (!bch2_bkey_matches_ptr(c, bkey_i_to_s_c(&tmp.k),
				   rbio->pick.ptr,
				   rbio->pos.offset -
				   rbio->pick.crc.offset)) {
		/* extent we wanted to read no longer exists: */
		rbio->hole = true;
		goto out;
	}

	ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
	if (ret == READ_RETRY)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_rbio_done(rbio);
	bch2_trans_exit(&trans);
	return;
err:
	rbio->bio.bi_status = BLK_STS_IOERR;
	goto out;
}
static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
			    struct bvec_iter bvec_iter, u64 inode,
			    struct bch_io_failures *failed, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	flags &= ~BCH_READ_LAST_FRAGMENT;
	flags |= BCH_READ_MUST_CLONE;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
			   POS(inode, bvec_iter.bi_sector),
			   BTREE_ITER_SLOTS, k, ret) {
		BKEY_PADDED(k) tmp;
		unsigned bytes, sectors, offset_into_extent;

		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		ret = bch2_read_indirect_extent(&trans,
					&offset_into_extent, &tmp.k);
		if (ret)
			break;

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(&trans);

		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);

		ret = __bch2_read_extent(c, rbio, bvec_iter, k,
					 offset_into_extent, failed, flags);
		switch (ret) {
		case READ_RETRY:
			goto retry;
		case READ_ERR:
			goto err;
		};

		if (bytes == bvec_iter.bi_size)
			goto out;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
	}

	if (ret == -EINTR)
		goto retry;
	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
	BUG_ON(!ret);
	__bcache_io_error(c, "btree IO error: %i", ret);
err:
	rbio->bio.bi_status = BLK_STS_IOERR;
out:
	bch2_trans_exit(&trans);
	bch2_rbio_done(rbio);
}
static void bch2_rbio_retry(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bvec_iter iter	= rbio->bvec_iter;
	unsigned flags		= rbio->flags;
	u64 inode		= rbio->pos.inode;
	struct bch_io_failures failed = { .nr = 0 };

	trace_read_retry(&rbio->bio);

	if (rbio->retry == READ_RETRY_AVOID)
		bch2_mark_io_failure(&failed, &rbio->pick);

	rbio->bio.bi_status = 0;

	rbio = bch2_rbio_free(rbio);

	flags |= BCH_READ_IN_RETRY;
	flags &= ~BCH_READ_MAY_PROMOTE;

	if (flags & BCH_READ_NODECODE)
		bch2_read_retry_nodecode(c, rbio, iter, inode, &failed, flags);
	else
		bch2_read_retry(c, rbio, iter, inode, &failed, flags);
}
static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
			    blk_status_t error)
{
	rbio->retry = retry;

	if (rbio->flags & BCH_READ_IN_RETRY)
		return;

	if (retry == READ_ERR) {
		rbio = bch2_rbio_free(rbio);

		rbio->bio.bi_status = error;
		bch2_rbio_done(rbio);
	} else {
		bch2_rbio_punt(rbio, bch2_rbio_retry,
			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
	}
}
static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	BKEY_PADDED(k) new;
	struct bch_extent_crc_unpacked new_crc;
	u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
	int ret;

	if (rbio->pick.crc.compression_type)
		return;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
				   BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek(iter);
	if (IS_ERR_OR_NULL(k.k))
		goto out;

	bkey_reassemble(&new.k, k);
	k = bkey_i_to_s_c(&new.k);

	if (bversion_cmp(k.k->version, rbio->version) ||
	    !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
		goto out;

	/* Extent was merged? */
	if (bkey_start_offset(k.k) < data_offset ||
	    k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
		goto out;

	if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
			rbio->pick.crc, NULL, &new_crc,
			bkey_start_offset(k.k) - data_offset, k.k->size,
			rbio->pick.crc.csum_type)) {
		bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
		goto out;
	}

	if (!bch2_bkey_narrow_crcs(&new.k, new_crc))
		goto out;

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new.k));
	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_NOWAIT);
	if (ret == -EINTR)
		goto retry;
out:
	bch2_trans_exit(&trans);
}
/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct bio *src		= &rbio->bio;
	struct bio *dst		= &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->bvec_iter;
	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
	struct nonce nonce = extent_nonce(rbio->version, crc);
	struct bch_csum csum;

	/* Reset iterator for checksumming and copying bounced data: */
	if (rbio->bounce) {
		src->bi_iter.bi_size		= crc.compressed_size << 9;
		src->bi_iter.bi_idx		= 0;
		src->bi_iter.bi_bvec_done	= 0;
	} else {
		src->bi_iter			= rbio->bvec_iter;
	}

	csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
	if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
		goto csum_err;

	if (unlikely(rbio->narrow_crcs))
		bch2_rbio_narrow_crcs(rbio);

	if (rbio->flags & BCH_READ_NODECODE)
		goto nodecode;

	/* Adjust crc to point to subset of data we want: */
	crc.offset     += rbio->offset_into_extent;
	crc.live_size	= bvec_iter_sectors(rbio->bvec_iter);

	if (crc.compression_type != BCH_COMPRESSION_NONE) {
		bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
			goto decompression_err;
	} else {
		/* don't need to decrypt the entire bio: */
		nonce = nonce_add(nonce, crc.offset << 9);
		bio_advance(src, crc.offset << 9);

		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		bch2_encrypt_bio(c, crc.csum_type, nonce, src);

		if (rbio->bounce) {
			struct bvec_iter src_iter = src->bi_iter;
			bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
		}
	}

	if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
		bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		promote_start(rbio->promote, rbio);
		rbio->promote = NULL;
	}
nodecode:
	if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
		rbio = bch2_rbio_free(rbio);
		bch2_rbio_done(rbio);
	}
	return;
csum_err:
	/*
	 * Checksum error: if the bio wasn't bounced, we may have been
	 * reading into buffers owned by userspace (that userspace can
	 * scribble over) - retry the read, bouncing it this time:
	 */
	if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
		rbio->flags |= BCH_READ_MUST_BOUNCE;
		bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
		return;
	}

	bch2_dev_io_error(ca,
		"data checksum error, inode %llu offset %llu: expected %0llx:%0llx got %0llx:%0llx (type %u)",
		rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
		rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
		csum.hi, csum.lo, crc.csum_type);
	bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
	return;
decompression_err:
	__bcache_io_error(c, "decompression error, inode %llu offset %llu",
			  rbio->pos.inode,
			  (u64) rbio->bvec_iter.bi_sector);
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	return;
}
static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c	= rbio->c;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct workqueue_struct *wq = NULL;
	enum rbio_context context = RBIO_CONTEXT_NULL;

	if (rbio->have_ioref) {
		bch2_latency_acct(ca, rbio->submit_time, READ);
		percpu_ref_put(&ca->io_ref);
	}

	if (!rbio->split)
		rbio->bio.bi_end_io = rbio->end_io;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "data read")) {
		bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
		return;
	}

	if (rbio->pick.ptr.cached &&
	    (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	     ptr_stale(ca, &rbio->pick.ptr))) {
		atomic_long_inc(&c->read_realloc_races);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
		else
			bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
		return;
	}

	if (rbio->narrow_crcs ||
	    rbio->pick.crc.compression_type ||
	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
		context = RBIO_CONTEXT_UNBOUND,	wq = system_unbound_wq;
	else if (rbio->pick.crc.csum_type)
		context = RBIO_CONTEXT_HIGHPRI,	wq = system_highpri_wq;

	bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}
int __bch2_read_indirect_extent(struct btree_trans *trans,
				unsigned *offset_into_extent,
				struct bkey_i *orig_k)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 reflink_offset;
	int ret;

	reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k)->v.idx) +
		*offset_into_extent;

	iter = __bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
				     POS(0, reflink_offset),
				     BTREE_ITER_SLOTS, 1);
	ret = PTR_ERR_OR_ZERO(iter);
	if (ret)
		return ret;

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_reflink_v) {
		__bcache_io_error(trans->c,
				"pointer to nonexistent indirect extent");
		ret = -EIO;
		goto err;
	}

	*offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
	bkey_reassemble(orig_k, k);
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
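
/*
 * Reflink indirection, sketched: a KEY_TYPE_reflink_p extent stores an index
 * (v.idx) into the BTREE_ID_REFLINK btree, where the actual data extent
 * lives as a KEY_TYPE_reflink_v key at POS(0, idx). This helper rewrites
 * orig_k in place with that reflink_v key and adjusts offset_into_extent so
 * the read path below can treat it like any other extent.
 */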
int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct bch_dev *ca;
	struct promote_op *promote = NULL;
	bool bounce = false, read_full = false, narrow_crcs = false;
	struct bpos pos = bkey_start_pos(k.k);
	int pick_ret;

	pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

	/* hole or reservation - just zero fill: */
	if (!pick_ret)
		goto hole;

	if (pick_ret < 0) {
		__bcache_io_error(c, "no device to read from");
		goto err;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
			goto hole;

		iter.bi_size	= pick.crc.compressed_size << 9;
		goto get_bio;
	}

	if (!(flags & BCH_READ_LAST_FRAGMENT) ||
	    bio_flagged(&orig->bio, BIO_CHAIN))
		flags |= BCH_READ_MUST_CLONE;

	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(k, pick.crc);

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	BUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);

	if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
	    (pick.crc.csum_type != BCH_CSUM_NONE &&
	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
				&rbio, &bounce, &read_full);

	if (!read_full) {
		EBUG_ON(pick.crc.compression_type);
		EBUG_ON(pick.crc.csum_type &&
			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick.crc.live_size ||
			 pick.crc.offset ||
			 offset_into_extent));

		pos.offset += offset_into_extent;
		pick.ptr.offset += pick.crc.offset +
			offset_into_extent;
		offset_into_extent		= 0;
		pick.crc.compressed_size	= bvec_iter_sectors(iter);
		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
		pick.crc.offset			= 0;
		pick.crc.live_size		= bvec_iter_sectors(iter);
	}
get_bio:
	if (rbio) {
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
		BUG_ON(rbio->bio.bi_iter.bi_size <
		       pick.crc.compressed_size << 9);
		rbio->bio.bi_iter.bi_size =
			pick.crc.compressed_size << 9;
	} else if (bounce) {
		unsigned sectors = pick.crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  &c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		rbio->bounce	= true;
		rbio->split	= true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
						&c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		rbio->split	= true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		BUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	BUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

	rbio->c			= c;
	rbio->submit_time	= local_clock();
	if (rbio->split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->offset_into_extent= offset_into_extent;
	rbio->flags		= flags;
	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
	rbio->narrow_crcs	= narrow_crcs;
	rbio->hole		= 0;
	rbio->retry		= 0;
	rbio->context		= 0;
	rbio->devs_have		= bch2_bkey_devs(k);
	rbio->pick		= pick;
	rbio->pos		= pos;
	rbio->version		= k.k->version;
	rbio->promote		= promote;
	INIT_WORK(&rbio->work, NULL);

	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (rbio->bounce)
		trace_read_bounce(&rbio->bio);

	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);

	percpu_down_read(&c->mark_lock);
	bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
	percpu_up_read(&c->mark_lock);

	if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
		bio_inc_remaining(&orig->bio);
		trace_read_split(&orig->bio);
	}

	if (!rbio->pick.idx) {
		if (!rbio->have_ioref) {
			__bcache_io_error(c, "no device to read from");
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
			     bio_sectors(&rbio->bio));
		bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			submit_bio(&rbio->bio);
		else
			submit_bio_wait(&rbio->bio);
	} else {
		/* Attempting reconstruct read: */
		if (bch2_ec_read_extent(c, rbio)) {
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			bio_endio(&rbio->bio);
	}
out:
	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		return 0;
	} else {
		int ret;

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		rbio = bch2_rbio_free(rbio);

		if (ret == READ_RETRY_AVOID) {
			bch2_mark_io_failure(failed, &pick);
			ret = READ_RETRY;
		}

		return ret;
	}
err:
	if (flags & BCH_READ_IN_RETRY)
		return READ_ERR;

	orig->bio.bi_status = BLK_STS_IOERR;
	goto out_read_done;
hole:
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
	if (flags & BCH_READ_NODECODE)
		orig->hole = true;

	zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
	if (flags & BCH_READ_LAST_FRAGMENT)
		bch2_rbio_done(orig);
	return 0;
}
void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	unsigned flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE|
		BCH_READ_USER_MAPPED;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	BUG_ON(rbio->_state);
	BUG_ON(flags & BCH_READ_NODECODE);
	BUG_ON(flags & BCH_READ_IN_RETRY);

	rbio->c = c;
	rbio->start_time = local_clock();

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   POS(inode, rbio->bio.bi_iter.bi_sector),
				   BTREE_ITER_SLOTS);

	while (1) {
		BKEY_PADDED(k) tmp;
		unsigned bytes, sectors, offset_into_extent;

		bch2_btree_iter_set_pos(iter,
				POS(inode, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		ret = bch2_read_indirect_extent(&trans,
					&offset_into_extent, &tmp.k);
		if (ret)
			goto err;

		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
		sectors = min(sectors, k.k->size - offset_into_extent);

		/*
		 * Unlock the iterator while the btree node's lock is still in
		 * cache, before doing the IO:
		 */
		bch2_trans_unlock(&trans);

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_read_extent(c, rbio, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}
out:
	bch2_trans_exit(&trans);
	return;
err:
	bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
	bch2_rbio_done(rbio);
	goto out;
}
void bch2_fs_io_exit(struct bch_fs *c)
{
	if (c->promote_table.tbl)
		rhashtable_destroy(&c->promote_table);
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
}
int bch2_fs_io_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
			BIOSET_NEED_BVECS) ||
	    mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->sb.encoded_extent_max) /
				   PAGE_SECTORS, 0) ||
	    rhashtable_init(&c->promote_table, &bch_promote_params))
		return -ENOMEM;

	return 0;
}