// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_misc.h"
#include "io_write.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	const struct bch_devs_mask *devs;
	unsigned d, nr = 0, total = 0;
	u64 now = local_clock(), last;
	s64 congested;
	struct bch_dev *ca;

	if (!target)
		return false;

	rcu_read_lock();
	devs = bch2_target_to_mask(c, target) ?:
		&c->rw_devs[BCH_DATA_user];

	for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[d]);
		if (!ca)
			continue;

		congested = atomic_read(&ca->congested);
		last = READ_ONCE(ca->congested_last);
		if (time_after64(now, last))
			congested -= (now - last) >> 12;

		total += max(congested, 0LL);
		nr++;
	}
	rcu_read_unlock();

	return bch2_rand_range(nr * CONGESTED_MAX) < total;
}

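/*
 * Note on the heuristic above: a device's congestion counter decays by
 * one unit per 4096ns since it was last bumped (the >> 12 of a
 * local_clock() delta, which is in nanoseconds), and the target is
 * treated as congested with probability total / (nr * CONGESTED_MAX).
 * For example, two devices each sitting at half of CONGESTED_MAX cause
 * roughly half of reads to see the target as congested.
 */
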
#else

static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	return false;
}

#endif

/* Cache promotion on read */

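/*
 * A promote rewrites just-read data to opts.promote_target (typically
 * a faster cache device). The write is a data_update tagged
 * BCH_WRITE_CACHED, so the new copy can be invalidated under space
 * pressure, and in-flight promotes are deduplicated by extent position
 * via c->promote_table (see the nopromote_in_flight check below).
 */
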
struct promote_op {
	struct rcu_head		rcu;
	u64			start_time;

	struct rhash_head	hash;
	struct bpos		pos;

	struct data_update	write;
	struct bio_vec		bi_inline_vecs[0]; /* must be last */
};

static const struct rhashtable_params bch_promote_params = {
	.head_offset	= offsetof(struct promote_op, hash),
	.key_offset	= offsetof(struct promote_op, pos),
	.key_len	= sizeof(struct bpos),
};

static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
				 struct bpos pos,
				 struct bch_io_opts opts,
				 unsigned flags)
{
	BUG_ON(!opts.promote_target);

	if (!(flags & BCH_READ_MAY_PROMOTE))
		return -BCH_ERR_nopromote_may_not;

	if (bch2_bkey_has_target(c, k, opts.promote_target))
		return -BCH_ERR_nopromote_already_promoted;

	if (bkey_extent_is_unwritten(k))
		return -BCH_ERR_nopromote_unwritten;

	if (bch2_target_congested(c, opts.promote_target))
		return -BCH_ERR_nopromote_congested;

	if (rhashtable_lookup_fast(&c->promote_table, &pos,
				   bch_promote_params))
		return -BCH_ERR_nopromote_in_flight;

	return 0;
}

static void promote_free(struct bch_fs *c, struct promote_op *op)
{
	int ret;

	bch2_data_update_exit(&op->write);

	ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
				     bch_promote_params);
	BUG_ON(ret);
	bch2_write_ref_put(c, BCH_WRITE_REF_promote);
	kfree_rcu(op, rcu);
}

static void promote_done(struct bch_write_op *wop)
{
	struct promote_op *op =
		container_of(wop, struct promote_op, write.op);
	struct bch_fs *c = op->write.op.c;

	bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
			       op->start_time);
	promote_free(c, op);
}

static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
	struct bio *bio = &op->write.op.wbio.bio;

	trace_and_count(op->write.op.c, read_promote, &rbio->bio);

	/* we now own pages: */
	BUG_ON(!rbio->bounce);
	BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);

	memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
	       sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
	swap(bio->bi_vcnt, rbio->bio.bi_vcnt);

	bch2_data_update_read_done(&op->write, rbio->pick.crc);
}

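/*
 * Note that no data is copied above: the read completed into bounce
 * pages the rbio owns, so handing them to the write bio is just a
 * memcpy of the bio_vec array plus swapping bi_vcnt; the promote write
 * then operates on the same pages the read filled.
 */
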
static struct promote_op *__promote_alloc(struct btree_trans *trans,
					  enum btree_id btree_id,
					  struct bkey_s_c k,
					  struct bpos pos,
					  struct extent_ptr_decoded *pick,
					  struct bch_io_opts opts,
					  unsigned sectors,
					  struct bch_read_bio **rbio)
{
	struct bch_fs *c = trans->c;
	struct promote_op *op = NULL;
	struct bio *bio;
	unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	int ret;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
		return NULL;

	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
	if (!op)
		goto err;

	op->start_time = local_clock();
	op->pos = pos;

	/*
	 * We don't use the mempool here because extents that aren't
	 * checksummed or compressed can be too big for the mempool:
	 */
	*rbio = kzalloc(sizeof(struct bch_read_bio) +
			sizeof(struct bio_vec) * pages,
			GFP_NOFS);
	if (!*rbio)
		goto err;

	rbio_init(&(*rbio)->bio, opts);
	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);

	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
				 GFP_NOFS))
		goto err;

	(*rbio)->bounce		= true;
	(*rbio)->split		= true;
	(*rbio)->kmalloc	= true;

	if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
					  bch_promote_params))
		goto err;

	bio = &op->write.op.wbio.bio;
	bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);

	ret = bch2_data_update_init(trans, NULL, &op->write,
			writepoint_hashed((unsigned long) current),
			opts,
			(struct data_update_opts) {
				.target		= opts.promote_target,
				.extra_replicas	= 1,
				.write_flags	= BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
			},
			btree_id, k);
	/*
	 * possible errors: -BCH_ERR_nocow_lock_blocked,
	 * -BCH_ERR_ENOSPC_disk_reservation:
	 */
	if (ret) {
		ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
					     bch_promote_params);
		BUG_ON(ret);
		goto err;
	}

	op->write.op.end_io = promote_done;

	return op;
err:
	if (*rbio)
		bio_free_pages(&(*rbio)->bio);
	kfree(*rbio);
	*rbio = NULL;
	kfree(op);
	bch2_write_ref_put(c, BCH_WRITE_REF_promote);
	return NULL;
}

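/*
 * The unwind order above mirrors allocation order: free the bounce
 * pages and the kmalloc'd rbio, then the op itself, and finally drop
 * the BCH_WRITE_REF_promote ref taken at entry.
 */
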
static struct promote_op *promote_alloc(struct btree_trans *trans,
					struct bvec_iter iter,
					struct bkey_s_c k,
					struct extent_ptr_decoded *pick,
					struct bch_io_opts opts,
					unsigned flags,
					struct bch_read_bio **rbio,
					bool *bounce,
					bool *read_full)
{
	struct bch_fs *c = trans->c;
	bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
	/* data might have to be decompressed in the write path: */
	unsigned sectors = promote_full
		? max(pick->crc.compressed_size, pick->crc.live_size)
		: bvec_iter_sectors(iter);
	struct bpos pos = promote_full
		? bkey_start_pos(k.k)
		: POS(k.k->p.inode, iter.bi_sector);
	struct promote_op *promote;
	int ret;

	ret = should_promote(c, k, pos, opts, flags);
	if (ret)
		goto nopromote;

	promote = __promote_alloc(trans,
				  k.k->type == KEY_TYPE_reflink_v
				  ? BTREE_ID_reflink
				  : BTREE_ID_extents,
				  k, pos, pick, opts, sectors, rbio);
	if (!promote) {
		ret = -BCH_ERR_nopromote_enomem;
		goto nopromote;
	}

	*bounce		= true;
	*read_full	= promote_full;
	return promote;
nopromote:
	trace_read_nopromote(c, ret);
	return NULL;
}

#define READ_RETRY_AVOID	1
#define READ_RETRY		2
#define READ_ERR		0

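/*
 * Retry disposition for a failed read: READ_ERR completes the read
 * with an error, READ_RETRY retries it as is, and READ_RETRY_AVOID
 * additionally records the device that failed in struct
 * bch_io_failures so the next replica pick avoids it.
 */
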
enum rbio_context {
	RBIO_CONTEXT_NULL,
	RBIO_CONTEXT_HIGHPRI,
	RBIO_CONTEXT_UNBOUND,
};

static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}

static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
			   enum rbio_context context,
			   struct workqueue_struct *wq)
{
	if (context <= rbio->context) {
		fn(&rbio->work);
	} else {
		rbio->work.func		= fn;
		rbio->context		= context;
		queue_work(wq, &rbio->work);
	}
}

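/*
 * rbio contexts are ordered by how restricted the execution context
 * is: if we're already running in a context at least as permissive as
 * the one requested (context <= rbio->context), the work function is
 * called directly; otherwise it's punted to the given workqueue.
 */
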
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
	BUG_ON(rbio->bounce && !rbio->split);

	if (rbio->promote)
		promote_free(rbio->c, rbio->promote);
	rbio->promote = NULL;

	if (rbio->bounce)
		bch2_bio_free_pages_pool(rbio->c, &rbio->bio);

	if (rbio->split) {
		struct bch_read_bio *parent = rbio->parent;

		if (rbio->kmalloc)
			kfree(rbio);
		else
			bio_put(&rbio->bio);

		rbio = parent;
	}

	return rbio;
}

/*
 * Only called on a top level bch_read_bio to complete an entire read request,
 * not a split:
 */
static void bch2_rbio_done(struct bch_read_bio *rbio)
{
	if (rbio->start_time)
		bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
				       rbio->start_time);
	bio_endio(&rbio->bio);
}

static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
				     struct bvec_iter bvec_iter,
				     struct bch_io_failures *failed,
				     unsigned flags)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	int ret;

	flags &= ~BCH_READ_LAST_FRAGMENT;
	flags |= BCH_READ_MUST_CLONE;

	bch2_bkey_buf_init(&sk);

	bch2_trans_iter_init(trans, &iter, rbio->data_btree,
			     rbio->read_pos, BTREE_ITER_SLOTS);
retry:
	rbio->bio.bi_status = 0;

	k = bch2_btree_iter_peek_slot(&iter);
	if (bkey_err(k))
		goto err;

	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);
	bch2_trans_unlock(trans);

	if (!bch2_bkey_matches_ptr(c, k,
				   rbio->pick.ptr,
				   rbio->data_pos.offset -
				   rbio->pick.crc.offset)) {
		/* extent we wanted to read no longer exists: */
		rbio->hole = true;
		goto out;
	}

	ret = __bch2_read_extent(trans, rbio, bvec_iter,
				 rbio->read_pos,
				 rbio->data_btree,
				 k, 0, failed, flags);
	if (ret == READ_RETRY)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_rbio_done(rbio);
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);
	return;
err:
	rbio->bio.bi_status = BLK_STS_IOERR;
	goto out;
}

static void bch2_rbio_retry(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bvec_iter iter	= rbio->bvec_iter;
	unsigned flags		= rbio->flags;
	subvol_inum inum = {
		.subvol = rbio->subvol,
		.inum	= rbio->read_pos.inode,
	};
	struct bch_io_failures failed = { .nr = 0 };

	trace_and_count(c, read_retry, &rbio->bio);

	if (rbio->retry == READ_RETRY_AVOID)
		bch2_mark_io_failure(&failed, &rbio->pick);

	rbio->bio.bi_status = 0;

	rbio = bch2_rbio_free(rbio);

	flags |= BCH_READ_IN_RETRY;
	flags &= ~BCH_READ_MAY_PROMOTE;

	if (flags & BCH_READ_NODECODE) {
		bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
	} else {
		flags &= ~BCH_READ_LAST_FRAGMENT;
		flags |= BCH_READ_MUST_CLONE;

		__bch2_read(c, rbio, iter, inum, &failed, flags);
	}
}

static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
			    blk_status_t error)
{
	rbio->retry = retry;

	if (rbio->flags & BCH_READ_IN_RETRY)
		return;

	if (retry == READ_ERR) {
		rbio = bch2_rbio_free(rbio);

		rbio->bio.bi_status = error;
		bch2_rbio_done(rbio);
	} else {
		bch2_rbio_punt(rbio, bch2_rbio_retry,
			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
	}
}

static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
				   struct bch_read_bio *rbio)
{
	struct bch_fs *c = rbio->c;
	u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
	struct bch_extent_crc_unpacked new_crc;
	struct btree_iter iter;
	struct bkey_i *new;
	struct bkey_s_c k;
	int ret = 0;

	if (crc_is_compressed(rbio->pick.crc))
		return 0;

	k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
			       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	if ((ret = bkey_err(k)))
		goto out;

	if (bversion_cmp(k.k->version, rbio->version) ||
	    !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
		goto out;

	/* Extent was merged? */
	if (bkey_start_offset(k.k) < data_offset ||
	    k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
		goto out;

	if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
			rbio->pick.crc, NULL, &new_crc,
			bkey_start_offset(k.k) - data_offset, k.k->size,
			rbio->pick.crc.csum_type)) {
		bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
		ret = 0;
		goto out;
	}

	/*
	 * going to be temporarily appending another checksum entry:
	 */
	new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
				 sizeof(struct bch_extent_crc128));
	if ((ret = PTR_ERR_OR_ZERO(new)))
		goto out;

	bkey_reassemble(new, k);

	if (!bch2_bkey_narrow_crcs(new, new_crc))
		goto out;

	ret = bch2_trans_update(trans, &iter, new,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{
	bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
		      __bch2_rbio_narrow_crcs(trans, rbio));
}

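/*
 * CRC narrowing: if we read (and therefore checksummed) an entire
 * extent but only part of it is still live, we can rewrite the extent
 * with a checksum covering just the live portion, so future reads
 * don't have to read and checksum the dead parts; hence the
 * rechecksum of the region being kept before the btree update above.
 */
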
/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c	= rbio->c;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct bio *src		= &rbio->bio;
	struct bio *dst		= &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->bvec_iter;
	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
	struct nonce nonce = extent_nonce(rbio->version, crc);
	unsigned nofs_flags;
	struct bch_csum csum;
	int ret;

	nofs_flags = memalloc_nofs_save();

	/* Reset iterator for checksumming and copying bounced data: */
	if (rbio->bounce) {
		src->bi_iter.bi_size		= crc.compressed_size << 9;
		src->bi_iter.bi_idx		= 0;
		src->bi_iter.bi_bvec_done	= 0;
	} else {
		src->bi_iter			= rbio->bvec_iter;
	}

	csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
	if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
		goto csum_err;

	/*
	 * XXX
	 * We need to rework the narrow_crcs path to deliver the read completion
	 * first, and then punt to a different workqueue, otherwise we're
	 * holding up reads while doing btree updates which is bad for memory
	 * reclaim:
	 */
	if (unlikely(rbio->narrow_crcs))
		bch2_rbio_narrow_crcs(rbio);

	if (rbio->flags & BCH_READ_NODECODE)
		goto nodecode;

	/* Adjust crc to point to subset of data we want: */
	crc.offset	+= rbio->offset_into_extent;
	crc.live_size	= bvec_iter_sectors(rbio->bvec_iter);

	if (crc_is_compressed(crc)) {
		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
		    !c->opts.no_data_io)
			goto decompression_err;
	} else {
		/* don't need to decrypt the entire bio: */
		nonce = nonce_add(nonce, crc.offset << 9);
		bio_advance(src, crc.offset << 9);

		BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
		src->bi_iter.bi_size = dst_iter.bi_size;

		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		if (rbio->bounce) {
			struct bvec_iter src_iter = src->bi_iter;

			bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
		}
	}

	if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
		ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
		if (ret)
			goto decrypt_err;

		promote_start(rbio->promote, rbio);
		rbio->promote = NULL;
	}
nodecode:
	if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
		rbio = bch2_rbio_free(rbio);
		bch2_rbio_done(rbio);
	}
out:
	memalloc_nofs_restore(nofs_flags);
	return;
csum_err:
	/*
	 * Checksum error: if the bio wasn't bounced, we may have been
	 * reading into buffers owned by userspace (that userspace can
	 * scribble over) - retry the read, bouncing it this time:
	 */
	if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
		rbio->flags |= BCH_READ_MUST_BOUNCE;
		bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
		goto out;
	}

	bch_err_inum_offset_ratelimited(ca,
		rbio->read_pos.inode,
		rbio->read_pos.offset << 9,
		"data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
		rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
		csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
	bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
	goto out;
decompression_err:
	bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
					rbio->read_pos.offset << 9,
					"decompression error");
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	goto out;
decrypt_err:
	bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
					rbio->read_pos.offset << 9,
					"decrypt error");
	bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
	goto out;
}

static void bch2_read_endio(struct bio *bio)
{
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c	= rbio->c;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct workqueue_struct *wq = NULL;
	enum rbio_context context = RBIO_CONTEXT_NULL;

	if (rbio->have_ioref) {
		bch2_latency_acct(ca, rbio->submit_time, READ);
		percpu_ref_put(&ca->io_ref);
	}

	if (!rbio->split)
		rbio->bio.bi_end_io = rbio->end_io;

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
				    rbio->read_pos.inode,
				    rbio->read_pos.offset,
				    "data read error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
		return;
	}

	if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	    ptr_stale(ca, &rbio->pick.ptr)) {
		trace_and_count(c, read_reuse_race, &rbio->bio);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
		else
			bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
		return;
	}

	if (rbio->narrow_crcs ||
	    rbio->promote ||
	    crc_is_compressed(rbio->pick.crc) ||
	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
	else if (rbio->pick.crc.csum_type)
		context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;

	bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}

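/*
 * Context selection above: plain checksum verification is cheap enough
 * for the highpri workqueue, while decompression, decryption,
 * promotion and the btree update for narrow_crcs all go to the unbound
 * workqueue. With no checksum at all, context stays RBIO_CONTEXT_NULL
 * and __bch2_read_endio() runs directly from bio completion context.
 */
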
int __bch2_read_indirect_extent(struct btree_trans *trans,
				unsigned *offset_into_extent,
				struct bkey_buf *orig_k)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 reflink_offset;
	int ret;

	reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
		*offset_into_extent;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
			       POS(0, reflink_offset), 0);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_reflink_v &&
	    k.k->type != KEY_TYPE_indirect_inline_data) {
		bch_err_inum_offset_ratelimited(trans->c,
			orig_k->k->k.p.inode,
			orig_k->k->k.p.offset << 9,
			"%llu len %u points to nonexistent indirect extent %llu",
			orig_k->k->k.p.offset,
			orig_k->k->k.size,
			reflink_offset);
		bch2_inconsistent_error(trans->c);
		ret = -EIO;
		goto err;
	}

	*offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
	bch2_bkey_buf_reassemble(orig_k, trans->c, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

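/*
 * Worked example: a reflink pointer with idx 100 read at
 * offset_into_extent 8 looks up position 108 in the reflink btree; if
 * the indirect extent found there starts at offset 96,
 * offset_into_extent is rewritten to 108 - 96 = 12, now relative to
 * the indirect extent.
 */
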
static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
						   struct bkey_s_c k,
						   struct bch_extent_ptr ptr)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
	struct btree_iter iter;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     PTR_BUCKET_POS(c, &ptr),
			     BTREE_ITER_CACHED);

	prt_printf(&buf, "Attempting to read from stale dirty pointer:");
	printbuf_indent_add(&buf, 2);
	prt_newline(&buf);

	bch2_bkey_val_to_text(&buf, c, k);
	prt_newline(&buf);

	prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));

	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
	if (!ret) {
		prt_newline(&buf);
		bch2_bkey_val_to_text(&buf, c, k);
	}

	bch2_fs_inconsistent(c, "%s", buf.buf);

	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
}

int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bpos read_pos,
		       enum btree_id data_btree, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct bch_dev *ca = NULL;
	struct promote_op *promote = NULL;
	bool bounce = false, read_full = false, narrow_crcs = false;
	struct bpos data_pos = bkey_start_pos(k.k);
	int pick_ret;

	if (bkey_extent_is_inline_data(k.k)) {
		unsigned bytes = min_t(unsigned, iter.bi_size,
				       bkey_inline_data_bytes(k.k));

		swap(iter.bi_size, bytes);
		memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
		swap(iter.bi_size, bytes);
		bio_advance_iter(&orig->bio, &iter, bytes);
		zero_fill_bio_iter(&orig->bio, iter);
		goto out_read_done;
	}
retry_pick:
	pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

	/* hole or reservation - just zero fill: */
	if (!pick_ret)
		goto hole;

	if (pick_ret < 0) {
		bch_err_inum_offset_ratelimited(c,
				read_pos.inode, read_pos.offset << 9,
				"no device to read from");
		goto err;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	/*
	 * Stale dirty pointers are treated as IO errors, but @failed isn't
	 * allocated unless we're in the retry path - so if we're not in the
	 * retry path, don't check here, it'll be caught in bch2_read_endio()
	 * and we'll end up in the retry path:
	 */
	if ((flags & BCH_READ_IN_RETRY) &&
	    !pick.ptr.cached &&
	    unlikely(ptr_stale(ca, &pick.ptr))) {
		read_from_stale_dirty_pointer(trans, k, pick.ptr);
		bch2_mark_io_failure(failed, &pick);
		goto retry_pick;
	}

	/*
	 * Unlock the iterator while the btree node's lock is still in
	 * cache, before doing the IO:
	 */
	bch2_trans_unlock(trans);

	if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
			goto hole;

		iter.bi_size	= pick.crc.compressed_size << 9;
		goto get_bio;
	}

	if (!(flags & BCH_READ_LAST_FRAGMENT) ||
	    bio_flagged(&orig->bio, BIO_CHAIN))
		flags |= BCH_READ_MUST_CLONE;

	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(k, pick.crc);

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);

	if (crc_is_compressed(pick.crc) ||
	    (pick.crc.csum_type != BCH_CSUM_none &&
	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (orig->opts.promote_target)
		promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
					&rbio, &bounce, &read_full);

	if (!read_full) {
		EBUG_ON(crc_is_compressed(pick.crc));
		EBUG_ON(pick.crc.csum_type &&
			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick.crc.live_size ||
			 pick.crc.offset ||
			 offset_into_extent));

		data_pos.offset += offset_into_extent;
		pick.ptr.offset += pick.crc.offset +
			offset_into_extent;
		offset_into_extent		= 0;
		pick.crc.compressed_size	= bvec_iter_sectors(iter);
		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
		pick.crc.offset			= 0;
		pick.crc.live_size		= bvec_iter_sectors(iter);
	}
get_bio:
	if (rbio) {
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
		EBUG_ON(rbio->bio.bi_iter.bi_size <
			pick.crc.compressed_size << 9);
		rbio->bio.bi_iter.bi_size =
			pick.crc.compressed_size << 9;
	} else if (bounce) {
		unsigned sectors = pick.crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(NULL,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  0,
						  GFP_NOFS,
						  &c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		rbio->bounce	= true;
		rbio->split	= true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
						 &c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		rbio->split	= true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

	rbio->c			= c;
	rbio->submit_time	= local_clock();
	if (rbio->split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->offset_into_extent= offset_into_extent;
	rbio->flags		= flags;
	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
	rbio->narrow_crcs	= narrow_crcs;
	rbio->hole		= 0;
	rbio->retry		= 0;
	rbio->context		= 0;
	/* XXX: only initialize this if needed */
	rbio->devs_have		= bch2_bkey_devs(k);
	rbio->pick		= pick;
	rbio->subvol		= orig->subvol;
	rbio->read_pos		= read_pos;
	rbio->data_btree	= data_btree;
	rbio->data_pos		= data_pos;
	rbio->version		= k.k->version;
	rbio->promote		= promote;
	INIT_WORK(&rbio->work, NULL);

	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (rbio->bounce)
		trace_and_count(c, read_bounce, &rbio->bio);

	this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);

	/*
	 * If it's being moved internally, we don't want to flag it as a cache
	 * hit:
	 */
	if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
			PTR_BUCKET_NR(ca, &pick.ptr), READ);

	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
		bio_inc_remaining(&orig->bio);
		trace_and_count(c, read_split, &orig->bio);
	}

	if (!rbio->pick.idx) {
		if (!rbio->have_ioref) {
			bch_err_inum_offset_ratelimited(c,
					read_pos.inode,
					read_pos.offset << 9,
					"no device to read from");
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
			     bio_sectors(&rbio->bio));
		bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

		if (unlikely(c->opts.no_data_io)) {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				bio_endio(&rbio->bio);
		} else {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				submit_bio(&rbio->bio);
			else
				submit_bio_wait(&rbio->bio);
		}

		/*
		 * We just submitted IO which may block, we expect relock fail
		 * events and shouldn't count them:
		 */
		trans->notrace_relock_fail = true;
	} else {
		/* Attempting reconstruct read: */
		if (bch2_ec_read_extent(c, rbio)) {
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			bio_endio(&rbio->bio);
	}
out:
	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		return 0;
	} else {
		int ret;

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		rbio = bch2_rbio_free(rbio);

		if (ret == READ_RETRY_AVOID) {
			bch2_mark_io_failure(failed, &pick);
			ret = READ_RETRY;
		}

		if (!ret)
			goto out_read_done;

		return ret;
	}

err:
	if (flags & BCH_READ_IN_RETRY)
		return READ_ERR;

	orig->bio.bi_status = BLK_STS_IOERR;
	goto out_read_done;

hole:
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
	if (flags & BCH_READ_NODECODE)
		orig->hole = true;

	zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
	if (flags & BCH_READ_LAST_FRAGMENT)
		bch2_rbio_done(orig);
	return 0;
}

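/*
 * To summarize the flow above: pick a replica, decide whether the read
 * must be bounced (compression, encryption, checksum granularity,
 * promotion), allocate or clone an rbio accordingly, then submit. In
 * the BCH_READ_IN_RETRY case everything runs synchronously and the
 * retry disposition (READ_ERR/READ_RETRY) is returned to the caller.
 */
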
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
		 struct bvec_iter bvec_iter, subvol_inum inum,
		 struct bch_io_failures *failed, unsigned flags)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	u32 snapshot;
	int ret;

	BUG_ON(flags & BCH_READ_NODECODE);

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, bvec_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
		sectors = min(sectors, k.k->size - offset_into_extent);

		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);

		if (bvec_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
					 data_btree, k,
					 offset_into_extent, failed, flags);
		if (ret)
			break;

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    ret == READ_RETRY ||
	    ret == READ_RETRY_AVOID)
		goto retry;

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	if (ret) {
		bch_err_inum_offset_ratelimited(c, inum.inum,
						bvec_iter.bi_sector << 9,
						"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bch2_rbio_done(rbio);
	}
}

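/*
 * Note the fragment protocol above: each pass reads the extent
 * overlapping the front of bvec_iter, temporarily shrinking
 * bvec_iter.bi_size to that fragment via swap(), and
 * BCH_READ_LAST_FRAGMENT tells __bch2_read_extent() to complete the
 * parent rbio once the final fragment finishes.
 */
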
void bch2_fs_io_read_exit(struct bch_fs *c)
{
	if (c->promote_table.tbl)
		rhashtable_destroy(&c->promote_table);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
}

int bch2_fs_io_read_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_init;

	if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_split_init;

	if (rhashtable_init(&c->promote_table, &bch_promote_params))
		return -BCH_ERR_ENOMEM_promote_table_init;

	return 0;
}
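
/*
 * c->bio_read backs top level read bios; c->bio_read_split backs the
 * bounce/clone rbios allocated in __bch2_read_extent(). Both reserve
 * front padding for struct bch_read_bio, which is what lets a bio be
 * mapped back to its rbio with container_of().
 */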