// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "disk_groups.h"
#include "extent_update.h"
#include "rebalance.h"

#include <linux/blkdev.h>
#include <linux/random.h>

#include <trace/events/bcachefs.h>
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
	const struct bch_devs_mask *devs;
	struct bch_dev *ca;
	unsigned d, nr = 0, total = 0;
	u64 now = local_clock(), last;
	s64 congested;

	if (!target)
		return false;

	rcu_read_lock();
	devs = bch2_target_to_mask(c, target);
	for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
		ca = rcu_dereference(c->devs[d]);
		if (!ca)
			continue;

		congested = atomic_read(&ca->congested);
		last = READ_ONCE(ca->congested_last);
		if (time_after64(now, last))
			congested -= (now - last) >> 12;

		total += max(congested, 0LL);
		nr++;
	}
	rcu_read_unlock();

	return bch2_rand_range(nr * CONGESTED_MAX) < total;
}
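/*
 * Roughly: each device in the target contributes up to CONGESTED_MAX to
 * total, decaying by one per ~4096ns since it was last bumped, and the
 * caller is told the target is congested with probability
 * total / (nr * CONGESTED_MAX).  E.g. (illustrative numbers) two devices
 * with total == CONGESTED_MAX report congested about half the time.
 */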
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}
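/*
 * The shift above approximates latency_over * 4 / latency_threshold without
 * a divide.  Illustrative numbers: with latency_threshold = 1024 the shift
 * is ilog2(1024) - 2 = 8, so latency_over = 4096 bumps congested by
 * 4096 >> 8 = 16, which matches 4096 * 4 / 1024.
 */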
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new, v = atomic64_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time there's no need:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0 << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);
	bch2_congested_acct(ca, io_latency, now, rw);
	__bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
}
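/*
 * ewma_add(old, io_latency, 5) maintains an exponentially weighted moving
 * average with weight 1/32 - roughly new = old + (io_latency - old) / 32 -
 * so a single slow IO nudges the tracked latency rather than replacing it.
 */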
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}
136 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
140 if (likely(!*using_mempool)) {
141 page = alloc_page(GFP_NOIO);
142 if (unlikely(!page)) {
143 mutex_lock(&c->bio_bounce_pages_lock);
144 *using_mempool = true;
150 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
156 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
159 bool using_mempool = false;
162 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
163 unsigned len = min(PAGE_SIZE, size);
165 BUG_ON(!bio_add_page(bio, page, len, 0));
170 mutex_unlock(&c->bio_bounce_pages_lock);
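/*
 * Bounce pages come from plain alloc_page() until an allocation fails;
 * after that we switch to the bio_bounce_pages mempool and hold
 * bio_bounce_pages_lock until the whole bio has been filled, so only one
 * thread at a time is drawing from the (bounded) mempool reserve.
 */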
173 /* Extent update path: */
175 static int sum_sector_overwrites(struct btree_trans *trans,
176 struct btree_iter *extent_iter,
179 bool *maybe_extending,
182 struct btree_iter *iter;
186 *maybe_extending = true;
189 iter = bch2_trans_copy_iter(trans, extent_iter);
191 return PTR_ERR(iter);
193 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
195 bch2_bkey_nr_ptrs_fully_allocated(old) <
196 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new))) {
201 *delta += (min(new->k.p.offset,
203 max(bkey_start_offset(&new->k),
204 bkey_start_offset(old.k))) *
205 (bkey_extent_is_allocation(&new->k) -
206 bkey_extent_is_allocation(old.k));
208 if (bkey_cmp(old.k->p, new->k.p) >= 0) {
			/*
			 * Check if there's already data above where we're
			 * going to be writing to - this means we're definitely
			 * not extending the file:
			 *
			 * Note that it's not sufficient to check if there's
			 * data up to the sector offset we're going to be
			 * writing to, because i_size could be up to one block
			 * less:
			 */
219 if (!bkey_cmp(old.k->p, new->k.p))
220 old = bch2_btree_iter_next(iter);
222 if (old.k && !bkey_err(old) &&
223 old.k->p.inode == extent_iter->pos.inode &&
224 bkey_extent_is_data(old.k))
225 *maybe_extending = false;
231 bch2_trans_iter_put(trans, iter);
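/*
 * sum_sector_overwrites() above walks the extents the new key overlaps to
 * work out, before anything is committed, how many allocated sectors the
 * write adds or removes (delta) and whether it could possibly extend the
 * file (maybe_extending); bch2_extent_update() uses both to decide whether
 * the inode has to be updated in the same transaction.
 */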
235 int bch2_extent_update(struct btree_trans *trans,
236 struct btree_iter *iter,
238 struct disk_reservation *disk_res,
241 s64 *i_sectors_delta)
243 /* this must live until after bch2_trans_commit(): */
244 struct bkey_inode_buf inode_p;
245 bool extending = false;
249 ret = bch2_extent_trim_atomic(k, iter);
253 ret = sum_sector_overwrites(trans, iter, k,
254 disk_res && disk_res->sectors != 0,
259 new_i_size = extending
260 ? min(k->k.p.offset << 9, new_i_size)
263 if (delta || new_i_size) {
264 struct btree_iter *inode_iter;
265 struct bch_inode_unpacked inode_u;
267 inode_iter = bch2_inode_peek(trans, &inode_u,
268 k->k.p.inode, BTREE_ITER_INTENT);
269 if (IS_ERR(inode_iter))
270 return PTR_ERR(inode_iter);
		/*
		 * writeback can race a bit with truncate, because truncate
		 * first updates the inode then truncates the pagecache. This is
		 * ugly, but lets us preserve the invariant that the in memory
		 * i_size is always >= the on disk i_size.
		 */
279 BUG_ON(new_i_size > inode_u.bi_size &&
280 (inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY));
282 BUG_ON(new_i_size > inode_u.bi_size && !extending);
284 if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
285 new_i_size > inode_u.bi_size)
286 inode_u.bi_size = new_i_size;
290 inode_u.bi_sectors += delta;
292 if (delta || new_i_size) {
293 bch2_inode_pack(&inode_p, &inode_u);
294 bch2_trans_update(trans, inode_iter,
298 bch2_trans_iter_put(trans, inode_iter);
301 bch2_trans_update(trans, iter, k);
303 ret = bch2_trans_commit(trans, disk_res, journal_seq,
304 BTREE_INSERT_NOCHECK_RW|
307 BTREE_INSERT_USE_RESERVE);
308 if (!ret && i_sectors_delta)
309 *i_sectors_delta += delta;
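	/*
	 * The extent and the inode update above go through a single
	 * bch2_trans_commit(), so on disk they land together or not at all;
	 * *i_sectors_delta is only reported back to the caller once the
	 * commit has succeeded.
	 */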
314 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
315 struct bpos end, u64 *journal_seq,
316 s64 *i_sectors_delta)
318 struct bch_fs *c = trans->c;
319 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
321 int ret = 0, ret2 = 0;
323 while ((k = bch2_btree_iter_peek(iter)).k &&
324 bkey_cmp(iter->pos, end) < 0) {
325 struct disk_reservation disk_res =
326 bch2_disk_reservation_init(c, 0);
327 struct bkey_i delete;
333 bkey_init(&delete.k);
334 delete.k.p = iter->pos;
336 /* create the biggest key we can */
337 bch2_key_resize(&delete.k, max_sectors);
338 bch2_cut_back(end, &delete);
340 bch2_trans_begin_updates(trans);
342 ret = bch2_extent_update(trans, iter, &delete,
343 &disk_res, journal_seq,
345 bch2_disk_reservation_put(c, &disk_res);
355 if (bkey_cmp(iter->pos, end) > 0) {
356 bch2_btree_iter_set_pos(iter, end);
357 ret = bch2_btree_iter_traverse(iter);
363 int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
364 u64 *journal_seq, s64 *i_sectors_delta)
366 struct btree_trans trans;
367 struct btree_iter *iter;
370 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
371 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
375 ret = bch2_fpunch_at(&trans, iter, POS(inum, end),
376 journal_seq, i_sectors_delta);
377 bch2_trans_exit(&trans);
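/*
 * Rough usage sketch (illustrative only - names and error handling are not
 * from this file): a truncate-style caller discards everything from a given
 * sector onwards with something like
 *
 *	s64 i_sectors_delta = 0;
 *	int ret = bch2_fpunch(c, inum, start_sector, U64_MAX,
 *			      &journal_seq, &i_sectors_delta);
 *
 * where start/end are in 512 byte sectors and i_sectors_delta reports how
 * many allocated sectors were actually dropped.
 */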
385 int bch2_write_index_default(struct bch_write_op *op)
387 struct bch_fs *c = op->c;
388 struct bkey_on_stack sk;
389 struct keylist *keys = &op->insert_keys;
390 struct bkey_i *k = bch2_keylist_front(keys);
391 struct btree_trans trans;
392 struct btree_iter *iter;
395 bkey_on_stack_init(&sk);
396 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
398 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
399 bkey_start_pos(&k->k),
400 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
403 k = bch2_keylist_front(keys);
405 bkey_on_stack_realloc(&sk, c, k->k.u64s);
407 bch2_cut_front(iter->pos, sk.k);
409 bch2_trans_begin_updates(&trans);
411 ret = bch2_extent_update(&trans, iter, sk.k,
412 &op->res, op_journal_seq(op),
413 op->new_i_size, &op->i_sectors_delta);
419 if (bkey_cmp(iter->pos, k->k.p) >= 0)
420 bch2_keylist_pop_front(keys);
421 } while (!bch2_keylist_empty(keys));
423 bch2_trans_exit(&trans);
424 bkey_on_stack_exit(&sk, c);
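/*
 * Flow of the loop above: each key the write produced is copied onto the
 * stack, trimmed to start at the iterator's current position, and passed to
 * bch2_extent_update(); a key is only popped off the keylist once the
 * iterator has advanced past its end, so a partially applied update resumes
 * from iter->pos on the next iteration.
 */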
431 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
432 enum bch_data_type type,
433 const struct bkey_i *k)
435 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
436 const struct bch_extent_ptr *ptr;
437 struct bch_write_bio *n;
440 BUG_ON(c->opts.nochanges);
442 bkey_for_each_ptr(ptrs, ptr) {
443 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
446 ca = bch_dev_bkey_exists(c, ptr->dev);
448 if (to_entry(ptr + 1) < ptrs.end) {
449 n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
452 n->bio.bi_end_io = wbio->bio.bi_end_io;
453 n->bio.bi_private = wbio->bio.bi_private;
458 n->bio.bi_opf = wbio->bio.bi_opf;
459 bio_inc_remaining(&wbio->bio);
467 n->have_ioref = bch2_dev_get_ioref(ca, WRITE);
468 n->submit_time = local_clock();
469 n->bio.bi_iter.bi_sector = ptr->offset;
471 if (!journal_flushes_device(ca))
472 n->bio.bi_opf |= REQ_FUA;
474 if (likely(n->have_ioref)) {
475 this_cpu_add(ca->io_done->sectors[WRITE][type],
476 bio_sectors(&n->bio));
478 bio_set_dev(&n->bio, ca->disk_sb.bdev);
481 n->bio.bi_status = BLK_STS_REMOVED;
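	/*
	 * Each pointer in the key gets its own bio here - clones of the
	 * original for all but the last pointer, each aimed at its device's
	 * offset - and bio_inc_remaining() keeps the parent from completing
	 * until every clone has.  A device we can't get an io ref for has its
	 * bio failed with BLK_STS_REMOVED rather than submitted.
	 */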
487 static void __bch2_write(struct closure *);
489 static void bch2_write_done(struct closure *cl)
491 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
492 struct bch_fs *c = op->c;
494 if (!op->error && (op->flags & BCH_WRITE_FLUSH))
495 op->error = bch2_journal_error(&c->journal);
497 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
498 bch2_disk_reservation_put(c, &op->res);
499 percpu_ref_put(&c->writes);
500 bch2_keylist_free(&op->insert_keys, op->inline_keys);
502 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
509 closure_debug_destroy(cl);
/**
 * bch_write_index - after a write, update index to point to new data
 */
515 static void __bch2_write_index(struct bch_write_op *op)
517 struct bch_fs *c = op->c;
518 struct keylist *keys = &op->insert_keys;
519 struct bch_extent_ptr *ptr;
520 struct bkey_i *src, *dst = keys->keys, *n, *k;
524 for (src = keys->keys; src != keys->top; src = n) {
527 if (bkey_extent_is_direct_data(&src->k)) {
528 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
529 test_bit(ptr->dev, op->failed.d));
531 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src))) {
538 memmove_u64s_down(dst, src, src->u64s);
539 dst = bkey_next(dst);
	/*
	 * probably not the ideal place to hook this in, but I don't
	 * particularly want to plumb io_opts all the way through the btree
	 * update stack right now
	 */
549 for_each_keylist_key(keys, k)
550 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
552 if (!bch2_keylist_empty(keys)) {
553 u64 sectors_start = keylist_sectors(keys);
554 int ret = op->index_update_fn(op);
556 BUG_ON(ret == -EINTR);
557 BUG_ON(keylist_sectors(keys) && !ret);
559 op->written += sectors_start - keylist_sectors(keys);
562 __bcache_io_error(c, "btree IO error %i", ret);
	/* If some bucket wasn't written, we can't erasure code it: */
568 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
569 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
571 bch2_open_buckets_put(c, &op->open_buckets);
574 keys->top = keys->keys;
579 static void bch2_write_index(struct closure *cl)
581 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
582 struct bch_fs *c = op->c;
584 __bch2_write_index(op);
586 if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
587 bch2_journal_flush_seq_async(&c->journal,
590 continue_at(cl, bch2_write_done, index_update_wq(op));
592 continue_at_nobarrier(cl, bch2_write_done, NULL);
596 static void bch2_write_endio(struct bio *bio)
598 struct closure *cl = bio->bi_private;
599 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
600 struct bch_write_bio *wbio = to_wbio(bio);
601 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
602 struct bch_fs *c = wbio->c;
603 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
605 if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
606 set_bit(wbio->dev, op->failed.d);
608 if (wbio->have_ioref) {
609 bch2_latency_acct(ca, wbio->submit_time, WRITE);
610 percpu_ref_put(&ca->io_ref);
614 bch2_bio_free_pages_pool(c, bio);
620 bio_endio(&parent->bio);
621 else if (!(op->flags & BCH_WRITE_SKIP_CLOSURE_PUT))
624 continue_at_nobarrier(cl, bch2_write_index, index_update_wq(op));
627 static void init_append_extent(struct bch_write_op *op,
628 struct write_point *wp,
629 struct bversion version,
630 struct bch_extent_crc_unpacked crc)
632 struct bch_fs *c = op->c;
633 struct bkey_i_extent *e;
634 struct open_bucket *ob;
637 BUG_ON(crc.compressed_size > wp->sectors_free);
638 wp->sectors_free -= crc.compressed_size;
639 op->pos.offset += crc.uncompressed_size;
641 e = bkey_extent_init(op->insert_keys.top);
643 e->k.size = crc.uncompressed_size;
644 e->k.version = version;
647 crc.compression_type ||
649 bch2_extent_crc_append(&e->k_i, crc);
651 open_bucket_for_each(c, &wp->ptrs, ob, i) {
652 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
653 union bch_extent_entry *end =
654 bkey_val_end(bkey_i_to_s(&e->k_i));
657 end->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
658 end->ptr.cached = !ca->mi.durability ||
659 (op->flags & BCH_WRITE_CACHED) != 0;
660 end->ptr.offset += ca->mi.bucket_size - ob->sectors_free;
664 BUG_ON(crc.compressed_size > ob->sectors_free);
665 ob->sectors_free -= crc.compressed_size;
668 bch2_keylist_push(&op->insert_keys);
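/*
 * The key built above is an extent sized by the uncompressed data, with a
 * crc entry describing checksum/compression appended only when there is
 * something to record, and one pointer per open bucket being written to -
 * marked cached if the device adds no durability or the write is
 * BCH_WRITE_CACHED.
 */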
671 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
672 struct write_point *wp,
674 bool *page_alloc_failed,
677 struct bch_write_bio *wbio;
679 unsigned output_available =
680 min(wp->sectors_free << 9, src->bi_iter.bi_size);
681 unsigned pages = DIV_ROUND_UP(output_available +
683 ? ((unsigned long) buf & (PAGE_SIZE - 1))
686 bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
687 wbio = wbio_init(bio);
688 wbio->put_bio = true;
689 /* copy WRITE_SYNC flag */
690 wbio->bio.bi_opf = src->bi_opf;
693 bch2_bio_map(bio, buf, output_available);
	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
703 bch2_bio_alloc_pages_pool(c, bio,
704 min_t(unsigned, output_available,
705 c->sb.encoded_extent_max << 9));
707 if (bio->bi_iter.bi_size < output_available)
709 bch2_bio_alloc_pages(bio,
711 bio->bi_iter.bi_size,
717 static int bch2_write_rechecksum(struct bch_fs *c,
718 struct bch_write_op *op,
719 unsigned new_csum_type)
721 struct bio *bio = &op->wbio.bio;
722 struct bch_extent_crc_unpacked new_crc;
725 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
727 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
728 bch2_csum_type_is_encryption(new_csum_type))
729 new_csum_type = op->crc.csum_type;
731 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
733 op->crc.offset, op->crc.live_size,
738 bio_advance(bio, op->crc.offset << 9);
739 bio->bi_iter.bi_size = op->crc.live_size << 9;
744 static int bch2_write_decrypt(struct bch_write_op *op)
746 struct bch_fs *c = op->c;
747 struct nonce nonce = extent_nonce(op->version, op->crc);
748 struct bch_csum csum;
750 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
759 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
760 if (bch2_crc_cmp(op->crc.csum, csum))
763 bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
764 op->crc.csum_type = 0;
765 op->crc.csum = (struct bch_csum) { 0, 0 };
769 static enum prep_encoded_ret {
772 PREP_ENCODED_CHECKSUM_ERR,
773 PREP_ENCODED_DO_WRITE,
774 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
776 struct bch_fs *c = op->c;
777 struct bio *bio = &op->wbio.bio;
779 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
780 return PREP_ENCODED_OK;
782 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
784 /* Can we just write the entire extent as is? */
785 if (op->crc.uncompressed_size == op->crc.live_size &&
786 op->crc.compressed_size <= wp->sectors_free &&
787 op->crc.compression_type == op->compression_type) {
788 if (!op->crc.compression_type &&
789 op->csum_type != op->crc.csum_type &&
790 bch2_write_rechecksum(c, op, op->csum_type))
791 return PREP_ENCODED_CHECKSUM_ERR;
793 return PREP_ENCODED_DO_WRITE;
	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
800 if (op->crc.compression_type) {
801 struct bch_csum csum;
803 if (bch2_write_decrypt(op))
804 return PREP_ENCODED_CHECKSUM_ERR;
806 /* Last point we can still verify checksum: */
807 csum = bch2_checksum_bio(c, op->crc.csum_type,
808 extent_nonce(op->version, op->crc),
810 if (bch2_crc_cmp(op->crc.csum, csum))
811 return PREP_ENCODED_CHECKSUM_ERR;
813 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
814 return PREP_ENCODED_ERR;
	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
826 if ((op->crc.live_size != op->crc.uncompressed_size ||
827 op->crc.csum_type != op->csum_type) &&
828 bch2_write_rechecksum(c, op, op->csum_type))
829 return PREP_ENCODED_CHECKSUM_ERR;
	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
834 if ((op->compression_type ||
835 bch2_csum_type_is_encryption(op->crc.csum_type) !=
836 bch2_csum_type_is_encryption(op->csum_type)) &&
837 bch2_write_decrypt(op))
838 return PREP_ENCODED_CHECKSUM_ERR;
840 return PREP_ENCODED_OK;
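/*
 * Summary of the cases above: data that arrives already encoded
 * (BCH_WRITE_DATA_ENCODED) is written out as is when it fits in the write
 * point and matches the wanted compression type, possibly after a
 * rechecksum; otherwise it is decompressed and/or decrypted here so the
 * normal path in bch2_write_extent() can re-encode it, and a checksum
 * mismatch at any step fails the write rather than writing data we couldn't
 * verify.
 */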
843 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
846 struct bch_fs *c = op->c;
847 struct bio *src = &op->wbio.bio, *dst = src;
848 struct bvec_iter saved_iter;
850 struct bpos ec_pos = op->pos;
851 unsigned total_output = 0, total_input = 0;
853 bool page_alloc_failed = false;
856 BUG_ON(!bio_sectors(src));
858 ec_buf = bch2_writepoint_ec_buf(c, wp);
860 switch (bch2_write_prep_encoded_data(op, wp)) {
861 case PREP_ENCODED_OK:
863 case PREP_ENCODED_ERR:
866 case PREP_ENCODED_CHECKSUM_ERR:
868 case PREP_ENCODED_DO_WRITE:
869 /* XXX look for bug here */
871 dst = bch2_write_bio_alloc(c, wp, src,
874 bio_copy_data(dst, src);
877 init_append_extent(op, wp, op->version, op->crc);
882 op->compression_type ||
884 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
885 (bch2_csum_type_is_encryption(op->csum_type) &&
886 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
887 dst = bch2_write_bio_alloc(c, wp, src,
893 saved_iter = dst->bi_iter;
896 struct bch_extent_crc_unpacked crc =
897 (struct bch_extent_crc_unpacked) { 0 };
898 struct bversion version = op->version;
899 size_t dst_len, src_len;
901 if (page_alloc_failed &&
902 bio_sectors(dst) < wp->sectors_free &&
903 bio_sectors(dst) < c->sb.encoded_extent_max)
906 BUG_ON(op->compression_type &&
907 (op->flags & BCH_WRITE_DATA_ENCODED) &&
908 bch2_csum_type_is_encryption(op->crc.csum_type));
909 BUG_ON(op->compression_type && !bounce);
911 crc.compression_type = op->compression_type
912 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
913 op->compression_type)
915 if (!crc.compression_type) {
916 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
917 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
920 dst_len = min_t(unsigned, dst_len,
921 c->sb.encoded_extent_max << 9);
924 swap(dst->bi_iter.bi_size, dst_len);
925 bio_copy_data(dst, src);
926 swap(dst->bi_iter.bi_size, dst_len);
932 BUG_ON(!src_len || !dst_len);
934 if (bch2_csum_type_is_encryption(op->csum_type)) {
935 if (bversion_zero(version)) {
936 version.lo = atomic64_inc_return(&c->key_version) + 1;
938 crc.nonce = op->nonce;
939 op->nonce += src_len >> 9;
943 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
944 !crc.compression_type &&
945 bch2_csum_type_is_encryption(op->crc.csum_type) ==
946 bch2_csum_type_is_encryption(op->csum_type)) {
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
959 if (bch2_rechecksum_bio(c, src, version, op->crc,
962 bio_sectors(src) - (src_len >> 9),
966 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
967 bch2_rechecksum_bio(c, src, version, op->crc,
970 bio_sectors(src) - (src_len >> 9),
974 crc.compressed_size = dst_len >> 9;
975 crc.uncompressed_size = src_len >> 9;
976 crc.live_size = src_len >> 9;
978 swap(dst->bi_iter.bi_size, dst_len);
979 bch2_encrypt_bio(c, op->csum_type,
980 extent_nonce(version, crc), dst);
981 crc.csum = bch2_checksum_bio(c, op->csum_type,
982 extent_nonce(version, crc), dst);
983 crc.csum_type = op->csum_type;
984 swap(dst->bi_iter.bi_size, dst_len);
987 init_append_extent(op, wp, version, crc);
990 bio_advance(dst, dst_len);
991 bio_advance(src, src_len);
992 total_output += dst_len;
993 total_input += src_len;
994 } while (dst->bi_iter.bi_size &&
995 src->bi_iter.bi_size &&
997 !bch2_keylist_realloc(&op->insert_keys,
999 ARRAY_SIZE(op->inline_keys),
1000 BKEY_EXTENT_U64s_MAX));
1002 more = src->bi_iter.bi_size != 0;
1004 dst->bi_iter = saved_iter;
1006 if (dst == src && more) {
1007 BUG_ON(total_output != total_input);
1009 dst = bio_split(src, total_input >> 9,
1010 GFP_NOIO, &c->bio_write);
1011 wbio_init(dst)->put_bio = true;
1012 /* copy WRITE_SYNC flag */
1013 dst->bi_opf = src->bi_opf;
1016 dst->bi_iter.bi_size = total_output;
1018 /* might have done a realloc... */
1019 bch2_ec_add_backpointer(c, wp, ec_pos, total_input >> 9);
1024 bch_err(c, "error verifying existing checksum while "
1025 "rewriting existing data (memory corruption?)");
1028 if (to_wbio(dst)->bounce)
1029 bch2_bio_free_pages_pool(c, dst);
1030 if (to_wbio(dst)->put_bio)
1036 static void __bch2_write(struct closure *cl)
1038 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1039 struct bch_fs *c = op->c;
1040 struct write_point *wp;
1042 bool skip_put = true;
1045 memset(&op->failed, 0, sizeof(op->failed));
1048 struct bkey_i *key_to_write;
1049 unsigned key_to_write_offset = op->insert_keys.top_p -
1050 op->insert_keys.keys_p;
1052 /* +1 for possible cache device: */
1053 if (op->open_buckets.nr + op->nr_replicas + 1 >
1054 ARRAY_SIZE(op->open_buckets.v))
1057 if (bch2_keylist_realloc(&op->insert_keys,
1059 ARRAY_SIZE(op->inline_keys),
1060 BKEY_EXTENT_U64s_MAX))
1063 wp = bch2_alloc_sectors_start(c,
1065 op->opts.erasure_code,
1069 op->nr_replicas_required,
1072 (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
1075 if (unlikely(IS_ERR(wp))) {
1076 if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
1084 bch2_open_bucket_get(c, wp, &op->open_buckets);
1085 ret = bch2_write_extent(op, wp, &bio);
1086 bch2_alloc_sectors_done(c, wp);
1094 bio->bi_end_io = bch2_write_endio;
1095 bio->bi_private = &op->cl;
1096 bio->bi_opf |= REQ_OP_WRITE;
1099 closure_get(bio->bi_private);
1101 op->flags |= BCH_WRITE_SKIP_CLOSURE_PUT;
1103 key_to_write = (void *) (op->insert_keys.keys_p +
1104 key_to_write_offset);
1106 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_USER,
1111 continue_at(cl, bch2_write_index, index_update_wq(op));
1116 continue_at(cl, bch2_write_index, index_update_wq(op));
1121 if (!bch2_keylist_empty(&op->insert_keys)) {
1122 __bch2_write_index(op);
1125 continue_at_nobarrier(cl, bch2_write_done, NULL);
1133 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1135 struct closure *cl = &op->cl;
1136 struct bio *bio = &op->wbio.bio;
1137 struct bvec_iter iter;
1138 struct bkey_i_inline_data *id;
1142 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1143 ARRAY_SIZE(op->inline_keys),
1144 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1150 sectors = bio_sectors(bio);
1151 op->pos.offset += sectors;
1153 id = bkey_inline_data_init(op->insert_keys.top);
1155 id->k.version = op->version;
1156 id->k.size = sectors;
1158 iter = bio->bi_iter;
1159 iter.bi_size = data_len;
1160 memcpy_from_bio(id->v.data, bio, iter);
1162 while (data_len & 7)
1163 id->v.data[data_len++] = '\0';
1164 set_bkey_val_bytes(&id->k, data_len);
1165 bch2_keylist_push(&op->insert_keys);
1167 op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1168 continue_at_nobarrier(cl, bch2_write_index, NULL);
1171 bch2_write_done(&op->cl);
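/*
 * Inline data writes bypass the allocator entirely: the payload is copied
 * straight into the value of a KEY_TYPE_inline_data key, zero padded so the
 * value is a whole number of u64s, and then goes through the usual
 * bch2_write_index() path like any other key.
 */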
/**
 * bch_write - handle a write to a cache device or flash only volume
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data won't fit in a single open bucket, there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
1190 void bch2_write(struct closure *cl)
1192 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1193 struct bio *bio = &op->wbio.bio;
1194 struct bch_fs *c = op->c;
1197 BUG_ON(!op->nr_replicas);
1198 BUG_ON(!op->write_point.v);
1199 BUG_ON(!bkey_cmp(op->pos, POS_MAX));
1201 op->start_time = local_clock();
1202 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1203 wbio_init(bio)->put_bio = false;
1205 if (bio_sectors(bio) & (c->opts.block_size - 1)) {
1206 __bcache_io_error(c, "misaligned write");
1211 if (c->opts.nochanges ||
1212 !percpu_ref_tryget(&c->writes)) {
1213 __bcache_io_error(c, "read only");
1218 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1220 data_len = min_t(u64, bio->bi_iter.bi_size,
1221 op->new_i_size - (op->pos.offset << 9));
1223 if (data_len <= min(block_bytes(c) / 2, 1024U)) {
1224 bch2_write_data_inline(op, data_len);
1228 continue_at_nobarrier(cl, __bch2_write, NULL);
1231 if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
1232 bch2_disk_reservation_put(c, &op->res);
1238 closure_debug_destroy(cl);
1241 /* Cache promotion on read */
1245 struct rcu_head rcu;
1248 struct rhash_head hash;
1251 struct migrate_write write;
1252 struct bio_vec bi_inline_vecs[0]; /* must be last */
1255 static const struct rhashtable_params bch_promote_params = {
1256 .head_offset = offsetof(struct promote_op, hash),
1257 .key_offset = offsetof(struct promote_op, pos),
1258 .key_len = sizeof(struct bpos),
1261 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
1263 struct bch_io_opts opts,
1266 if (!(flags & BCH_READ_MAY_PROMOTE))
1269 if (!opts.promote_target)
1272 if (bch2_bkey_has_target(c, k, opts.promote_target))
1275 if (bch2_target_congested(c, opts.promote_target)) {
1276 /* XXX trace this */
1280 if (rhashtable_lookup_fast(&c->promote_table, &pos,
1281 bch_promote_params))
1287 static void promote_free(struct bch_fs *c, struct promote_op *op)
1291 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1292 bch_promote_params);
1294 percpu_ref_put(&c->writes);
1298 static void promote_done(struct closure *cl)
1300 struct promote_op *op =
1301 container_of(cl, struct promote_op, cl);
1302 struct bch_fs *c = op->write.op.c;
1304 bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1307 bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
1308 promote_free(c, op);
1311 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1313 struct bch_fs *c = rbio->c;
1314 struct closure *cl = &op->cl;
1315 struct bio *bio = &op->write.op.wbio.bio;
1317 trace_promote(&rbio->bio);
1319 /* we now own pages: */
1320 BUG_ON(!rbio->bounce);
1321 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1323 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1324 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1325 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1327 bch2_migrate_read_done(&op->write, rbio);
1329 closure_init(cl, NULL);
1330 closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
1331 closure_return_with_destructor(cl, promote_done);
1334 static struct promote_op *__promote_alloc(struct bch_fs *c,
1335 enum btree_id btree_id,
1337 struct extent_ptr_decoded *pick,
1338 struct bch_io_opts opts,
1340 struct bch_read_bio **rbio)
1342 struct promote_op *op = NULL;
1344 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1347 if (!percpu_ref_tryget(&c->writes))
1350 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
1354 op->start_time = local_clock();
	/*
	 * We don't use the mempool here because extents that aren't
	 * checksummed or compressed can be too big for the mempool:
	 */
1361 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1362 sizeof(struct bio_vec) * pages,
1367 rbio_init(&(*rbio)->bio, opts);
1368 bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
1370 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
1374 (*rbio)->bounce = true;
1375 (*rbio)->split = true;
1376 (*rbio)->kmalloc = true;
1378 if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1379 bch_promote_params))
1382 bio = &op->write.op.wbio.bio;
1383 bio_init(bio, bio->bi_inline_vecs, pages);
1385 ret = bch2_migrate_write_init(c, &op->write,
1386 writepoint_hashed((unsigned long) current),
1389 (struct data_opts) {
1390 .target = opts.promote_target
1399 bio_free_pages(&(*rbio)->bio);
1403 percpu_ref_put(&c->writes);
1408 static struct promote_op *promote_alloc(struct bch_fs *c,
1409 struct bvec_iter iter,
1411 struct extent_ptr_decoded *pick,
1412 struct bch_io_opts opts,
1414 struct bch_read_bio **rbio,
1418 bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1419 /* data might have to be decompressed in the write path: */
1420 unsigned sectors = promote_full
1421 ? max(pick->crc.compressed_size, pick->crc.live_size)
1422 : bvec_iter_sectors(iter);
1423 struct bpos pos = promote_full
1424 ? bkey_start_pos(k.k)
1425 : POS(k.k->p.inode, iter.bi_sector);
1426 struct promote_op *promote;
1428 if (!should_promote(c, k, pos, opts, flags))
1431 promote = __promote_alloc(c,
1432 k.k->type == KEY_TYPE_reflink_v
1435 pos, pick, opts, sectors, rbio);
1440 *read_full = promote_full;
1446 #define READ_RETRY_AVOID 1
1447 #define READ_RETRY 2
1452 RBIO_CONTEXT_HIGHPRI,
1453 RBIO_CONTEXT_UNBOUND,
1456 static inline struct bch_read_bio *
1457 bch2_rbio_parent(struct bch_read_bio *rbio)
1459 return rbio->split ? rbio->parent : rbio;
1463 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
1464 enum rbio_context context,
1465 struct workqueue_struct *wq)
1467 if (context <= rbio->context) {
1470 rbio->work.func = fn;
1471 rbio->context = context;
1472 queue_work(wq, &rbio->work);
1476 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1478 BUG_ON(rbio->bounce && !rbio->split);
1481 promote_free(rbio->c, rbio->promote);
1482 rbio->promote = NULL;
1485 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1488 struct bch_read_bio *parent = rbio->parent;
1493 bio_put(&rbio->bio);
/*
 * Only called on a top level bch_read_bio to complete an entire read request,
 * not a split:
 */
1505 static void bch2_rbio_done(struct bch_read_bio *rbio)
1507 if (rbio->start_time)
1508 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
1510 bio_endio(&rbio->bio);
1513 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
1514 struct bvec_iter bvec_iter, u64 inode,
1515 struct bch_io_failures *failed,
1518 struct btree_trans trans;
1519 struct btree_iter *iter;
1520 struct bkey_on_stack sk;
1524 flags &= ~BCH_READ_LAST_FRAGMENT;
1525 flags |= BCH_READ_MUST_CLONE;
1527 bkey_on_stack_init(&sk);
1528 bch2_trans_init(&trans, c, 0, 0);
1530 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
1531 rbio->pos, BTREE_ITER_SLOTS);
1533 rbio->bio.bi_status = 0;
1535 k = bch2_btree_iter_peek_slot(iter);
1539 bkey_on_stack_realloc(&sk, c, k.k->u64s);
1540 bkey_reassemble(sk.k, k);
1541 k = bkey_i_to_s_c(sk.k);
1542 bch2_trans_unlock(&trans);
1544 if (!bch2_bkey_matches_ptr(c, k,
1547 rbio->pick.crc.offset)) {
1548 /* extent we wanted to read no longer exists: */
1553 ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
1554 if (ret == READ_RETRY)
1559 bch2_rbio_done(rbio);
1560 bch2_trans_exit(&trans);
1561 bkey_on_stack_exit(&sk, c);
1564 rbio->bio.bi_status = BLK_STS_IOERR;
1568 static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
1569 struct bvec_iter bvec_iter, u64 inode,
1570 struct bch_io_failures *failed, unsigned flags)
1572 struct btree_trans trans;
1573 struct btree_iter *iter;
1574 struct bkey_on_stack sk;
1578 flags &= ~BCH_READ_LAST_FRAGMENT;
1579 flags |= BCH_READ_MUST_CLONE;
1581 bkey_on_stack_init(&sk);
1582 bch2_trans_init(&trans, c, 0, 0);
1584 bch2_trans_begin(&trans);
1586 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
1587 POS(inode, bvec_iter.bi_sector),
1588 BTREE_ITER_SLOTS, k, ret) {
1589 unsigned bytes, sectors, offset_into_extent;
1591 bkey_on_stack_realloc(&sk, c, k.k->u64s);
1592 bkey_reassemble(sk.k, k);
1593 k = bkey_i_to_s_c(sk.k);
1595 offset_into_extent = iter->pos.offset -
1596 bkey_start_offset(k.k);
1597 sectors = k.k->size - offset_into_extent;
1599 ret = bch2_read_indirect_extent(&trans,
1600 &offset_into_extent, sk.k);
1604 sectors = min(sectors, k.k->size - offset_into_extent);
1606 bch2_trans_unlock(&trans);
1608 bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
1609 swap(bvec_iter.bi_size, bytes);
1611 ret = __bch2_read_extent(c, rbio, bvec_iter, k,
1612 offset_into_extent, failed, flags);
1620 if (bytes == bvec_iter.bi_size)
1623 swap(bvec_iter.bi_size, bytes);
1624 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
1634 __bcache_io_error(c, "btree IO error: %i", ret);
1636 rbio->bio.bi_status = BLK_STS_IOERR;
1638 bch2_trans_exit(&trans);
1639 bkey_on_stack_exit(&sk, c);
1640 bch2_rbio_done(rbio);
1643 static void bch2_rbio_retry(struct work_struct *work)
1645 struct bch_read_bio *rbio =
1646 container_of(work, struct bch_read_bio, work);
1647 struct bch_fs *c = rbio->c;
1648 struct bvec_iter iter = rbio->bvec_iter;
1649 unsigned flags = rbio->flags;
1650 u64 inode = rbio->pos.inode;
1651 struct bch_io_failures failed = { .nr = 0 };
1653 trace_read_retry(&rbio->bio);
1655 if (rbio->retry == READ_RETRY_AVOID)
1656 bch2_mark_io_failure(&failed, &rbio->pick);
1658 rbio->bio.bi_status = 0;
1660 rbio = bch2_rbio_free(rbio);
1662 flags |= BCH_READ_IN_RETRY;
1663 flags &= ~BCH_READ_MAY_PROMOTE;
1665 if (flags & BCH_READ_NODECODE)
1666 bch2_read_retry_nodecode(c, rbio, iter, inode, &failed, flags);
1668 bch2_read_retry(c, rbio, iter, inode, &failed, flags);
1671 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1674 rbio->retry = retry;
1676 if (rbio->flags & BCH_READ_IN_RETRY)
1679 if (retry == READ_ERR) {
1680 rbio = bch2_rbio_free(rbio);
1682 rbio->bio.bi_status = error;
1683 bch2_rbio_done(rbio);
1685 bch2_rbio_punt(rbio, bch2_rbio_retry,
1686 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1690 static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1692 struct bch_fs *c = rbio->c;
1693 struct btree_trans trans;
1694 struct btree_iter *iter;
1696 struct bkey_on_stack new;
1697 struct bch_extent_crc_unpacked new_crc;
1698 u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
1701 if (rbio->pick.crc.compression_type)
1704 bkey_on_stack_init(&new);
1705 bch2_trans_init(&trans, c, 0, 0);
1707 bch2_trans_begin(&trans);
1709 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
1710 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1711 k = bch2_btree_iter_peek_slot(iter);
1712 if (IS_ERR_OR_NULL(k.k))
1715 bkey_on_stack_realloc(&new, c, k.k->u64s);
1716 bkey_reassemble(new.k, k);
1717 k = bkey_i_to_s_c(new.k);
1719 if (bversion_cmp(k.k->version, rbio->version) ||
1720 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
1723 /* Extent was merged? */
1724 if (bkey_start_offset(k.k) < data_offset ||
1725 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
1728 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1729 rbio->pick.crc, NULL, &new_crc,
1730 bkey_start_offset(k.k) - data_offset, k.k->size,
1731 rbio->pick.crc.csum_type)) {
1732 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1736 if (!bch2_bkey_narrow_crcs(new.k, new_crc))
1739 bch2_trans_update(&trans, iter, new.k);
1740 ret = bch2_trans_commit(&trans, NULL, NULL,
1741 BTREE_INSERT_ATOMIC|
1742 BTREE_INSERT_NOFAIL|
1743 BTREE_INSERT_NOWAIT);
1747 bch2_trans_exit(&trans);
1748 bkey_on_stack_exit(&new, c);
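/*
 * "Narrowing" the crc: when a key's existing crc covers more data than the
 * key itself references and we've just read and verified that whole region,
 * we can replace the crc with one covering only the live range, so future
 * reads don't have to fetch and checksum the extra data.  It's purely an
 * optimization, hence BTREE_INSERT_NOWAIT and the cheap bail-outs above.
 */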
1751 /* Inner part that may run in process context */
1752 static void __bch2_read_endio(struct work_struct *work)
1754 struct bch_read_bio *rbio =
1755 container_of(work, struct bch_read_bio, work);
1756 struct bch_fs *c = rbio->c;
1757 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1758 struct bio *src = &rbio->bio;
1759 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
1760 struct bvec_iter dst_iter = rbio->bvec_iter;
1761 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1762 struct nonce nonce = extent_nonce(rbio->version, crc);
1763 struct bch_csum csum;
1765 /* Reset iterator for checksumming and copying bounced data: */
1767 src->bi_iter.bi_size = crc.compressed_size << 9;
1768 src->bi_iter.bi_idx = 0;
1769 src->bi_iter.bi_bvec_done = 0;
1771 src->bi_iter = rbio->bvec_iter;
1774 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1775 if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1778 if (unlikely(rbio->narrow_crcs))
1779 bch2_rbio_narrow_crcs(rbio);
1781 if (rbio->flags & BCH_READ_NODECODE)
1784 /* Adjust crc to point to subset of data we want: */
1785 crc.offset += rbio->offset_into_extent;
1786 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
1788 if (crc.compression_type != BCH_COMPRESSION_NONE) {
1789 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1790 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1791 goto decompression_err;
1793 /* don't need to decrypt the entire bio: */
1794 nonce = nonce_add(nonce, crc.offset << 9);
1795 bio_advance(src, crc.offset << 9);
1797 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1798 src->bi_iter.bi_size = dst_iter.bi_size;
1800 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1803 struct bvec_iter src_iter = src->bi_iter;
1804 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1808 if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
1813 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1814 promote_start(rbio->promote, rbio);
1815 rbio->promote = NULL;
1818 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
1819 rbio = bch2_rbio_free(rbio);
1820 bch2_rbio_done(rbio);
	/*
	 * Checksum error: if the bio wasn't bounced, we may have been
	 * reading into buffers owned by userspace (that userspace can
	 * scribble over) - retry the read, bouncing it this time:
	 */
1829 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1830 rbio->flags |= BCH_READ_MUST_BOUNCE;
1831 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1835 bch2_dev_io_error(ca,
1836 "data checksum error, inode %llu offset %llu: expected %0llx:%0llx got %0llx:%0llx (type %u)",
1837 rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
1838 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1839 csum.hi, csum.lo, crc.csum_type);
1840 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1843 __bcache_io_error(c, "decompression error, inode %llu offset %llu",
1845 (u64) rbio->bvec_iter.bi_sector);
1846 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1850 static void bch2_read_endio(struct bio *bio)
1852 struct bch_read_bio *rbio =
1853 container_of(bio, struct bch_read_bio, bio);
1854 struct bch_fs *c = rbio->c;
1855 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1856 struct workqueue_struct *wq = NULL;
1857 enum rbio_context context = RBIO_CONTEXT_NULL;
1859 if (rbio->have_ioref) {
1860 bch2_latency_acct(ca, rbio->submit_time, READ);
1861 percpu_ref_put(&ca->io_ref);
1865 rbio->bio.bi_end_io = rbio->end_io;
1867 if (bch2_dev_io_err_on(bio->bi_status, ca, "data read")) {
1868 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1872 if (rbio->pick.ptr.cached &&
1873 (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1874 ptr_stale(ca, &rbio->pick.ptr))) {
1875 atomic_long_inc(&c->read_realloc_races);
1877 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1878 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1880 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1884 if (rbio->narrow_crcs ||
1885 rbio->pick.crc.compression_type ||
1886 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1887 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1888 else if (rbio->pick.crc.csum_type)
1889 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1891 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1894 int __bch2_read_indirect_extent(struct btree_trans *trans,
1895 unsigned *offset_into_extent,
1896 struct bkey_i *orig_k)
1898 struct btree_iter *iter;
1903 reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k)->v.idx) +
1904 *offset_into_extent;
1906 iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
1907 POS(0, reflink_offset),
1909 ret = PTR_ERR_OR_ZERO(iter);
1913 k = bch2_btree_iter_peek_slot(iter);
1918 if (k.k->type != KEY_TYPE_reflink_v) {
1919 __bcache_io_error(trans->c,
1920 "pointer to nonexistent indirect extent");
1925 *offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
1926 bkey_reassemble(orig_k, k);
1928 bch2_trans_iter_put(trans, iter);
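/*
 * Reflinked extents add one level of indirection to reads: the extent btree
 * holds a KEY_TYPE_reflink_p whose idx points into the reflink btree, and
 * the helper above swaps the caller's key for the KEY_TYPE_reflink_v it
 * points to, adjusting *offset_into_extent to be relative to the indirect
 * extent.
 */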
1932 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
1933 struct bvec_iter iter, struct bkey_s_c k,
1934 unsigned offset_into_extent,
1935 struct bch_io_failures *failed, unsigned flags)
1937 struct extent_ptr_decoded pick;
1938 struct bch_read_bio *rbio = NULL;
1940 struct promote_op *promote = NULL;
1941 bool bounce = false, read_full = false, narrow_crcs = false;
1942 struct bpos pos = bkey_start_pos(k.k);
1945 if (k.k->type == KEY_TYPE_inline_data) {
1946 struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
1947 unsigned bytes = min_t(unsigned, iter.bi_size,
1948 bkey_val_bytes(d.k));
1950 swap(iter.bi_size, bytes);
1951 memcpy_to_bio(&orig->bio, iter, d.v->data);
1952 swap(iter.bi_size, bytes);
1953 bio_advance_iter(&orig->bio, &iter, bytes);
1954 zero_fill_bio_iter(&orig->bio, iter);
1958 pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
1960 /* hole or reservation - just zero fill: */
1965 __bcache_io_error(c, "no device to read from");
1970 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1972 if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
1977 if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
1980 iter.bi_size = pick.crc.compressed_size << 9;
1984 if (!(flags & BCH_READ_LAST_FRAGMENT) ||
1985 bio_flagged(&orig->bio, BIO_CHAIN))
1986 flags |= BCH_READ_MUST_CLONE;
1988 narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
1989 bch2_can_narrow_extent_crcs(k, pick.crc);
1991 if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
1992 flags |= BCH_READ_MUST_BOUNCE;
1994 EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
1996 if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
1997 (pick.crc.csum_type != BCH_CSUM_NONE &&
1998 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
1999 (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
2000 (flags & BCH_READ_USER_MAPPED)) ||
2001 (flags & BCH_READ_MUST_BOUNCE)))) {
2006 if (orig->opts.promote_target)
2007 promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
2008 &rbio, &bounce, &read_full);
2011 EBUG_ON(pick.crc.compression_type);
2012 EBUG_ON(pick.crc.csum_type &&
2013 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
2014 bvec_iter_sectors(iter) != pick.crc.live_size ||
2016 offset_into_extent));
2018 pos.offset += offset_into_extent;
2019 pick.ptr.offset += pick.crc.offset +
2021 offset_into_extent = 0;
2022 pick.crc.compressed_size = bvec_iter_sectors(iter);
2023 pick.crc.uncompressed_size = bvec_iter_sectors(iter);
2024 pick.crc.offset = 0;
2025 pick.crc.live_size = bvec_iter_sectors(iter);
2026 offset_into_extent = 0;
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
2036 EBUG_ON(rbio->bio.bi_iter.bi_size <
2037 pick.crc.compressed_size << 9);
2038 rbio->bio.bi_iter.bi_size =
2039 pick.crc.compressed_size << 9;
2040 } else if (bounce) {
2041 unsigned sectors = pick.crc.compressed_size;
2043 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
2044 DIV_ROUND_UP(sectors, PAGE_SECTORS),
2045 &c->bio_read_split),
2048 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
2049 rbio->bounce = true;
2051 } else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
2060 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
2061 &c->bio_read_split),
2063 rbio->bio.bi_iter = iter;
2068 rbio->bio.bi_iter = iter;
2069 EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
2072 EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
2075 rbio->submit_time = local_clock();
2077 rbio->parent = orig;
2079 rbio->end_io = orig->bio.bi_end_io;
2080 rbio->bvec_iter = iter;
2081 rbio->offset_into_extent= offset_into_extent;
2082 rbio->flags = flags;
2083 rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
2084 rbio->narrow_crcs = narrow_crcs;
2088 /* XXX: only initialize this if needed */
2089 rbio->devs_have = bch2_bkey_devs(k);
2092 rbio->version = k.k->version;
2093 rbio->promote = promote;
2094 INIT_WORK(&rbio->work, NULL);
2096 rbio->bio.bi_opf = orig->bio.bi_opf;
2097 rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
2098 rbio->bio.bi_end_io = bch2_read_endio;
2101 trace_read_bounce(&rbio->bio);
2103 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
2106 bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
2109 if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
2110 bio_inc_remaining(&orig->bio);
2111 trace_read_split(&orig->bio);
2114 if (!rbio->pick.idx) {
2115 if (!rbio->have_ioref) {
2116 __bcache_io_error(c, "no device to read from");
2117 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2121 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
2122 bio_sectors(&rbio->bio));
2123 bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
2125 if (likely(!(flags & BCH_READ_IN_RETRY)))
2126 submit_bio(&rbio->bio);
2128 submit_bio_wait(&rbio->bio);
2130 /* Attempting reconstruct read: */
2131 if (bch2_ec_read_extent(c, rbio)) {
2132 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2136 if (likely(!(flags & BCH_READ_IN_RETRY)))
2137 bio_endio(&rbio->bio);
2140 if (likely(!(flags & BCH_READ_IN_RETRY))) {
2145 rbio->context = RBIO_CONTEXT_UNBOUND;
2146 bch2_read_endio(&rbio->bio);
2149 rbio = bch2_rbio_free(rbio);
2151 if (ret == READ_RETRY_AVOID) {
2152 bch2_mark_io_failure(failed, &pick);
2160 if (flags & BCH_READ_IN_RETRY)
2163 orig->bio.bi_status = BLK_STS_IOERR;
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
2172 if (flags & BCH_READ_NODECODE)
2175 zero_fill_bio_iter(&orig->bio, iter);
2177 if (flags & BCH_READ_LAST_FRAGMENT)
2178 bch2_rbio_done(orig);
2182 void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
2184 struct btree_trans trans;
2185 struct btree_iter *iter;
2186 struct bkey_on_stack sk;
2188 unsigned flags = BCH_READ_RETRY_IF_STALE|
2189 BCH_READ_MAY_PROMOTE|
2190 BCH_READ_USER_MAPPED;
2193 BUG_ON(rbio->_state);
2194 BUG_ON(flags & BCH_READ_NODECODE);
2195 BUG_ON(flags & BCH_READ_IN_RETRY);
2198 rbio->start_time = local_clock();
2200 bkey_on_stack_init(&sk);
2201 bch2_trans_init(&trans, c, 0, 0);
2203 bch2_trans_begin(&trans);
2205 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2206 POS(inode, rbio->bio.bi_iter.bi_sector),
2209 unsigned bytes, sectors, offset_into_extent;
2211 bch2_btree_iter_set_pos(iter,
2212 POS(inode, rbio->bio.bi_iter.bi_sector));
2214 k = bch2_btree_iter_peek_slot(iter);
2219 offset_into_extent = iter->pos.offset -
2220 bkey_start_offset(k.k);
2221 sectors = k.k->size - offset_into_extent;
2223 bkey_on_stack_realloc(&sk, c, k.k->u64s);
2224 bkey_reassemble(sk.k, k);
2225 k = bkey_i_to_s_c(sk.k);
2227 ret = bch2_read_indirect_extent(&trans,
2228 &offset_into_extent, sk.k);
		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
2236 sectors = min(sectors, k.k->size - offset_into_extent);
		/*
		 * Unlock the iterator while the btree node's lock is still in
		 * cache, before doing the IO:
		 */
2242 bch2_trans_unlock(&trans);
2244 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
2245 swap(rbio->bio.bi_iter.bi_size, bytes);
2247 if (rbio->bio.bi_iter.bi_size == bytes)
2248 flags |= BCH_READ_LAST_FRAGMENT;
2250 bch2_read_extent(c, rbio, k, offset_into_extent, flags);
2252 if (flags & BCH_READ_LAST_FRAGMENT)
2255 swap(rbio->bio.bi_iter.bi_size, bytes);
2256 bio_advance(&rbio->bio, bytes);
2259 bch2_trans_exit(&trans);
2260 bkey_on_stack_exit(&sk, c);
2266 bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
2267 bch2_rbio_done(rbio);
2271 void bch2_fs_io_exit(struct bch_fs *c)
2273 if (c->promote_table.tbl)
2274 rhashtable_destroy(&c->promote_table);
2275 mempool_exit(&c->bio_bounce_pages);
2276 bioset_exit(&c->bio_write);
2277 bioset_exit(&c->bio_read_split);
2278 bioset_exit(&c->bio_read);
2281 int bch2_fs_io_init(struct bch_fs *c)
2283 if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
2284 BIOSET_NEED_BVECS) ||
2285 bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
2286 BIOSET_NEED_BVECS) ||
2287 bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
2288 BIOSET_NEED_BVECS) ||
2289 mempool_init_page_pool(&c->bio_bounce_pages,
2291 c->opts.btree_node_size,
2292 c->sb.encoded_extent_max) /
2294 rhashtable_init(&c->promote_table, &bch_promote_params))