// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
10 #include "alloc_background.h"
11 #include "alloc_foreground.h"
14 #include "btree_update.h"
20 #include "disk_groups.h"
23 #include "extent_update.h"
29 #include "rebalance.h"
33 #include <linux/blkdev.h>
34 #include <linux/random.h>
35 #include <linux/sched/mm.h>
37 #include <trace/events/bcachefs.h>
39 const char *bch2_blk_status_to_str(blk_status_t status)
41 if (status == BLK_STS_REMOVED)
42 return "device removed";
43 return blk_status_to_str(status);
46 static bool bch2_target_congested(struct bch_fs *c, u16 target)
48 const struct bch_devs_mask *devs;
49 unsigned d, nr = 0, total = 0;
50 u64 now = local_clock(), last;
58 devs = bch2_target_to_mask(c, target) ?:
59 &c->rw_devs[BCH_DATA_user];
61 for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
62 ca = rcu_dereference(c->devs[d]);
66 congested = atomic_read(&ca->congested);
67 last = READ_ONCE(ca->congested_last);
68 if (time_after64(now, last))
69 congested -= (now - last) >> 12;
71 total += max(congested, 0LL);
76 return bch2_rand_range(nr * CONGESTED_MAX) < total;
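/*
 * Note added for clarity (not in the original source): the check above is a
 * probabilistic throttle.  Each device in the target contributes up to
 * CONGESTED_MAX to 'total', so with nr devices the target is treated as
 * congested with probability total / (nr * CONGESTED_MAX) - e.g. two devices,
 * one at half of CONGESTED_MAX and one idle, give roughly a 25% chance.
 */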
79 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
83 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
84 /* ideally we'd be taking into account the device's variance here: */
85 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
86 s64 latency_over = io_latency - latency_threshold;
88 if (latency_threshold && latency_over > 0) {
90 * bump up congested by approximately latency_over * 4 /
91 * latency_threshold - we don't need much accuracy here so don't
92 * bother with the divide:
94 if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);
99 ca->congested_last = now;
100 } else if (atomic_read(&ca->congested) > 0) {
101 atomic_dec(&ca->congested);
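/*
 * Worked example (illustrative, not in the original source) of the shift
 * above approximating latency_over * 4 / latency_threshold: with
 * latency_threshold = 8192us, ilog2() is 13 and the shift is 11, so
 * latency_over = 6000us bumps congested by 6000 >> 11 = 2, close to the
 * exact 6000 * 4 / 8192 ~= 2.9.
 */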
105 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
107 atomic64_t *latency = &ca->cur_latency[rw];
108 u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
112 u64 old, new, v = atomic64_read(latency);
		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation -
		 * most of the time:
		 */
122 if (abs((int) (old - io_latency)) < (old >> 1) &&
126 new = ewma_add(old, io_latency, 5);
127 } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
129 bch2_congested_acct(ca, io_latency, now, rw);
131 __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
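/*
 * Sketch (an assumption about ewma_add() from util.h, not taken from this
 * file): with weight 5 the update above behaves like an exponentially
 * weighted moving average,
 *
 *	new = old + (io_latency - old) / 32;
 *
 * so one slow io only nudges the tracked latency, while a sustained change
 * converges over a few dozen ios.
 */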
134 /* Allocate, free from mempool: */
136 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
138 struct bvec_iter_all iter;
141 bio_for_each_segment_all(bv, bio, iter)
142 if (bv->bv_page != ZERO_PAGE(0))
143 mempool_free(bv->bv_page, &c->bio_bounce_pages);
147 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
151 if (likely(!*using_mempool)) {
152 page = alloc_page(GFP_NOIO);
153 if (unlikely(!page)) {
154 mutex_lock(&c->bio_bounce_pages_lock);
155 *using_mempool = true;
161 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
167 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
170 bool using_mempool = false;
173 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
174 unsigned len = min_t(size_t, PAGE_SIZE, size);
176 BUG_ON(!bio_add_page(bio, page, len, 0));
181 mutex_unlock(&c->bio_bounce_pages_lock);
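/*
 * Illustrative usage (hypothetical caller, not in the original source) of the
 * bounce page helpers above:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, DIV_ROUND_UP(bytes, PAGE_SIZE),
 *			       &c->bio_write);
 *	bch2_bio_alloc_pages_pool(c, bio, bytes);
 *	...do the IO...
 *	bch2_bio_free_pages_pool(c, bio);
 *
 * Pages obtained here must go back via the _pool variant so mempool pages are
 * returned to c->bio_bounce_pages instead of the page allocator.
 */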
184 /* Extent update path: */
186 int bch2_sum_sector_overwrites(struct btree_trans *trans,
187 struct btree_iter *extent_iter,
189 bool *maybe_extending,
190 bool *usage_increasing,
191 s64 *i_sectors_delta,
192 s64 *disk_sectors_delta)
194 struct bch_fs *c = trans->c;
195 struct btree_iter *iter;
197 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
198 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
201 *maybe_extending = true;
202 *usage_increasing = false;
203 *i_sectors_delta = 0;
204 *disk_sectors_delta = 0;
206 iter = bch2_trans_copy_iter(trans, extent_iter);
208 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
209 s64 sectors = min(new->k.p.offset, old.k->p.offset) -
210 max(bkey_start_offset(&new->k),
211 bkey_start_offset(old.k));
213 *i_sectors_delta += sectors *
214 (bkey_extent_is_allocation(&new->k) -
215 bkey_extent_is_allocation(old.k));
217 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
218 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
219 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
222 if (!*usage_increasing &&
223 (new_replicas > bch2_bkey_replicas(c, old) ||
224 (!new_compressed && bch2_bkey_sectors_compressed(old))))
225 *usage_increasing = true;
227 if (bkey_cmp(old.k->p, new->k.p) >= 0) {
229 * Check if there's already data above where we're
230 * going to be writing to - this means we're definitely
231 * not extending the file:
233 * Note that it's not sufficient to check if there's
234 * data up to the sector offset we're going to be
			 * writing to, because i_size could be up to one block
			 * less than our current position:
			 */
238 if (!bkey_cmp(old.k->p, new->k.p))
239 old = bch2_btree_iter_next(iter);
241 if (old.k && !bkey_err(old) &&
242 old.k->p.inode == extent_iter->pos.inode &&
243 bkey_extent_is_data(old.k))
244 *maybe_extending = false;
250 bch2_trans_iter_put(trans, iter);
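/*
 * Worked example (illustrative, not in the original source): overwriting 8
 * sectors of an existing allocated extent with a new 2-replica allocated
 * extent gives i_sectors_delta = 8 * (1 - 1) = 0, while disk_sectors_delta
 * gains 8 * 2 for the new copies and, when the snapshots match, loses
 * 8 * the old extent's fully allocated pointers.
 */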
254 int bch2_extent_update(struct btree_trans *trans,
255 struct btree_iter *iter,
257 struct disk_reservation *disk_res,
260 s64 *i_sectors_delta_total,
263 /* this must live until after bch2_trans_commit(): */
264 struct bkey_inode_buf inode_p;
265 bool extending = false, usage_increasing;
266 s64 i_sectors_delta = 0, disk_sectors_delta = 0;
269 ret = bch2_extent_trim_atomic(k, iter);
273 ret = bch2_sum_sector_overwrites(trans, iter, k,
277 &disk_sectors_delta);
281 if (!usage_increasing)
282 check_enospc = false;
285 disk_sectors_delta > (s64) disk_res->sectors) {
286 ret = bch2_disk_reservation_add(trans->c, disk_res,
287 disk_sectors_delta - disk_res->sectors,
289 ? BCH_DISK_RESERVATION_NOFAIL : 0);
294 new_i_size = extending
295 ? min(k->k.p.offset << 9, new_i_size)
298 if (i_sectors_delta || new_i_size) {
299 struct btree_iter *inode_iter;
300 struct bch_inode_unpacked inode_u;
302 inode_iter = bch2_inode_peek(trans, &inode_u,
303 k->k.p.inode, BTREE_ITER_INTENT);
304 ret = PTR_ERR_OR_ZERO(inode_iter);
310 * writeback can race a bit with truncate, because truncate
311 * first updates the inode then truncates the pagecache. This is
312 * ugly, but lets us preserve the invariant that the in memory
313 * i_size is always >= the on disk i_size.
315 BUG_ON(new_i_size > inode_u.bi_size &&
316 (inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY));
318 BUG_ON(new_i_size > inode_u.bi_size && !extending);
320 if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
321 new_i_size > inode_u.bi_size)
322 inode_u.bi_size = new_i_size;
326 inode_u.bi_sectors += i_sectors_delta;
328 if (i_sectors_delta || new_i_size) {
329 bch2_inode_pack(trans->c, &inode_p, &inode_u);
331 inode_p.inode.k.p.snapshot = iter->snapshot;
333 ret = bch2_trans_update(trans, inode_iter,
334 &inode_p.inode.k_i, 0);
337 bch2_trans_iter_put(trans, inode_iter);
343 ret = bch2_trans_update(trans, iter, k, 0) ?:
344 bch2_trans_commit(trans, disk_res, journal_seq,
345 BTREE_INSERT_NOCHECK_RW|
346 BTREE_INSERT_NOFAIL);
347 BUG_ON(ret == -ENOSPC);
351 if (i_sectors_delta_total)
352 *i_sectors_delta_total += i_sectors_delta;
356 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
357 struct bpos end, u64 *journal_seq,
358 s64 *i_sectors_delta)
360 struct bch_fs *c = trans->c;
361 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
363 int ret = 0, ret2 = 0;
365 while ((k = bch2_btree_iter_peek(iter)).k &&
366 bkey_cmp(iter->pos, end) < 0) {
367 struct disk_reservation disk_res =
368 bch2_disk_reservation_init(c, 0);
369 struct bkey_i delete;
371 bch2_trans_begin(trans);
377 bkey_init(&delete.k);
378 delete.k.p = iter->pos;
380 /* create the biggest key we can */
381 bch2_key_resize(&delete.k, max_sectors);
382 bch2_cut_back(end, &delete);
384 ret = bch2_extent_update(trans, iter, &delete,
385 &disk_res, journal_seq,
386 0, i_sectors_delta, false);
387 bch2_disk_reservation_put(c, &disk_res);
397 if (bkey_cmp(iter->pos, end) > 0) {
398 bch2_btree_iter_set_pos(iter, end);
399 ret = bch2_btree_iter_traverse(iter);
405 int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
406 u64 *journal_seq, s64 *i_sectors_delta)
408 struct btree_trans trans;
409 struct btree_iter *iter;
412 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
413 iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
417 ret = bch2_fpunch_at(&trans, iter, POS(inum, end),
418 journal_seq, i_sectors_delta);
420 bch2_trans_iter_put(&trans, iter);
421 bch2_trans_exit(&trans);
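/*
 * Illustrative usage (hypothetical caller, not in the original source),
 * assuming byte offsets are converted to 512-byte sectors:
 *
 *	s64 i_sectors_delta = 0;
 *	ret = bch2_fpunch(c, inum, offset >> 9, (offset + len) >> 9,
 *			  &journal_seq, &i_sectors_delta);
 *
 * i_sectors_delta can then be folded back into the in-memory inode's block
 * count.
 */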
429 int bch2_write_index_default(struct bch_write_op *op)
431 struct bch_fs *c = op->c;
433 struct keylist *keys = &op->insert_keys;
434 struct bkey_i *k = bch2_keylist_front(keys);
435 struct btree_trans trans;
436 struct btree_iter *iter;
439 bch2_bkey_buf_init(&sk);
440 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
442 iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
443 bkey_start_pos(&k->k),
444 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
447 bch2_trans_begin(&trans);
449 k = bch2_keylist_front(keys);
451 k->k.p.snapshot = iter->snapshot;
453 bch2_bkey_buf_realloc(&sk, c, k->k.u64s);
455 bch2_cut_front(iter->pos, sk.k);
457 ret = bch2_extent_update(&trans, iter, sk.k,
458 &op->res, op_journal_seq(op),
459 op->new_i_size, &op->i_sectors_delta,
460 op->flags & BCH_WRITE_CHECK_ENOSPC);
466 if (bkey_cmp(iter->pos, k->k.p) >= 0)
467 bch2_keylist_pop_front(keys);
468 } while (!bch2_keylist_empty(keys));
470 bch2_trans_iter_put(&trans, iter);
471 bch2_trans_exit(&trans);
472 bch2_bkey_buf_exit(&sk, c);
479 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
480 enum bch_data_type type,
481 const struct bkey_i *k)
483 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
484 const struct bch_extent_ptr *ptr;
485 struct bch_write_bio *n;
488 BUG_ON(c->opts.nochanges);
490 bkey_for_each_ptr(ptrs, ptr) {
491 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
494 ca = bch_dev_bkey_exists(c, ptr->dev);
496 if (to_entry(ptr + 1) < ptrs.end) {
497 n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
500 n->bio.bi_end_io = wbio->bio.bi_end_io;
501 n->bio.bi_private = wbio->bio.bi_private;
506 n->bio.bi_opf = wbio->bio.bi_opf;
507 bio_inc_remaining(&wbio->bio);
515 n->have_ioref = bch2_dev_get_ioref(ca,
516 type == BCH_DATA_btree ? READ : WRITE);
517 n->submit_time = local_clock();
518 n->bio.bi_iter.bi_sector = ptr->offset;
520 if (likely(n->have_ioref)) {
521 this_cpu_add(ca->io_done->sectors[WRITE][type],
522 bio_sectors(&n->bio));
524 bio_set_dev(&n->bio, ca->disk_sb.bdev);
527 n->bio.bi_status = BLK_STS_REMOVED;
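/*
 * Note added for clarity (not in the original source): the loop above fans
 * one write out to every pointer in the key.  Every pointer except the last
 * gets a clone of the original bio and bumps the parent's remaining count,
 * so the caller's completion runs only once all replicas have finished; the
 * final pointer submits the original bio itself.
 */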
533 static void __bch2_write(struct closure *);
535 static void bch2_write_done(struct closure *cl)
537 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
538 struct bch_fs *c = op->c;
540 if (!op->error && (op->flags & BCH_WRITE_FLUSH))
541 op->error = bch2_journal_error(&c->journal);
543 bch2_disk_reservation_put(c, &op->res);
544 percpu_ref_put(&c->writes);
545 bch2_keylist_free(&op->insert_keys, op->inline_keys);
547 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
551 closure_debug_destroy(cl);
 * __bch2_write_index - after a write, update the index to point to the new data
561 static void __bch2_write_index(struct bch_write_op *op)
563 struct bch_fs *c = op->c;
564 struct keylist *keys = &op->insert_keys;
565 struct bch_extent_ptr *ptr;
566 struct bkey_i *src, *dst = keys->keys, *n, *k;
570 for (src = keys->keys; src != keys->top; src = n) {
573 if (bkey_extent_is_direct_data(&src->k)) {
574 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
575 test_bit(ptr->dev, op->failed.d));
577 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src))) {
584 memmove_u64s_down(dst, src, src->u64s);
585 dst = bkey_next(dst);
591 * probably not the ideal place to hook this in, but I don't
592 * particularly want to plumb io_opts all the way through the btree
593 * update stack right now
595 for_each_keylist_key(keys, k) {
596 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
598 if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
599 bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
603 if (!bch2_keylist_empty(keys)) {
604 u64 sectors_start = keylist_sectors(keys);
605 int ret = op->index_update_fn(op);
607 BUG_ON(ret == -EINTR);
608 BUG_ON(keylist_sectors(keys) && !ret);
610 op->written += sectors_start - keylist_sectors(keys);
613 bch_err_inum_ratelimited(c, op->pos.inode,
614 "write error %i from btree update", ret);
	/* If a bucket wasn't written, we can't erasure code it: */
620 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
621 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
623 bch2_open_buckets_put(c, &op->open_buckets);
626 keys->top = keys->keys;
631 static void bch2_write_index(struct closure *cl)
633 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
634 struct bch_fs *c = op->c;
636 __bch2_write_index(op);
638 if (!(op->flags & BCH_WRITE_DONE)) {
639 continue_at(cl, __bch2_write, index_update_wq(op));
640 } else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
641 bch2_journal_flush_seq_async(&c->journal,
644 continue_at(cl, bch2_write_done, index_update_wq(op));
646 continue_at_nobarrier(cl, bch2_write_done, NULL);
650 static void bch2_write_endio(struct bio *bio)
652 struct closure *cl = bio->bi_private;
653 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
654 struct bch_write_bio *wbio = to_wbio(bio);
655 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
656 struct bch_fs *c = wbio->c;
657 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
659 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
661 op->pos.offset - bio_sectors(bio), /* XXX definitely wrong */
662 "data write error: %s",
663 bch2_blk_status_to_str(bio->bi_status)))
664 set_bit(wbio->dev, op->failed.d);
666 if (wbio->have_ioref) {
667 bch2_latency_acct(ca, wbio->submit_time, WRITE);
668 percpu_ref_put(&ca->io_ref);
672 bch2_bio_free_pages_pool(c, bio);
678 bio_endio(&parent->bio);
679 else if (!(op->flags & BCH_WRITE_SKIP_CLOSURE_PUT))
682 continue_at_nobarrier(cl, bch2_write_index, index_update_wq(op));
685 static void init_append_extent(struct bch_write_op *op,
686 struct write_point *wp,
687 struct bversion version,
688 struct bch_extent_crc_unpacked crc)
690 struct bch_fs *c = op->c;
691 struct bkey_i_extent *e;
692 struct open_bucket *ob;
695 BUG_ON(crc.compressed_size > wp->sectors_free);
696 wp->sectors_free -= crc.compressed_size;
697 op->pos.offset += crc.uncompressed_size;
699 e = bkey_extent_init(op->insert_keys.top);
701 e->k.size = crc.uncompressed_size;
702 e->k.version = version;
705 crc.compression_type ||
707 bch2_extent_crc_append(&e->k_i, crc);
709 open_bucket_for_each(c, &wp->ptrs, ob, i) {
710 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
711 union bch_extent_entry *end =
712 bkey_val_end(bkey_i_to_s(&e->k_i));
715 end->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
716 end->ptr.cached = !ca->mi.durability ||
717 (op->flags & BCH_WRITE_CACHED) != 0;
718 end->ptr.offset += ca->mi.bucket_size - ob->sectors_free;
722 BUG_ON(crc.compressed_size > ob->sectors_free);
723 ob->sectors_free -= crc.compressed_size;
726 bch2_keylist_push(&op->insert_keys);
729 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
730 struct write_point *wp,
732 bool *page_alloc_failed,
735 struct bch_write_bio *wbio;
737 unsigned output_available =
738 min(wp->sectors_free << 9, src->bi_iter.bi_size);
739 unsigned pages = DIV_ROUND_UP(output_available +
741 ? ((unsigned long) buf & (PAGE_SIZE - 1))
744 bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
745 wbio = wbio_init(bio);
746 wbio->put_bio = true;
747 /* copy WRITE_SYNC flag */
748 wbio->bio.bi_opf = src->bi_opf;
751 bch2_bio_map(bio, buf, output_available);
758 * We can't use mempool for more than c->sb.encoded_extent_max
759 * worth of pages, but we'd like to allocate more if we can:
761 bch2_bio_alloc_pages_pool(c, bio,
762 min_t(unsigned, output_available,
763 c->sb.encoded_extent_max << 9));
765 if (bio->bi_iter.bi_size < output_available)
767 bch2_bio_alloc_pages(bio,
769 bio->bi_iter.bi_size,
775 static int bch2_write_rechecksum(struct bch_fs *c,
776 struct bch_write_op *op,
777 unsigned new_csum_type)
779 struct bio *bio = &op->wbio.bio;
780 struct bch_extent_crc_unpacked new_crc;
783 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
785 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
786 bch2_csum_type_is_encryption(new_csum_type))
787 new_csum_type = op->crc.csum_type;
789 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
791 op->crc.offset, op->crc.live_size,
796 bio_advance(bio, op->crc.offset << 9);
797 bio->bi_iter.bi_size = op->crc.live_size << 9;
802 static int bch2_write_decrypt(struct bch_write_op *op)
804 struct bch_fs *c = op->c;
805 struct nonce nonce = extent_nonce(op->version, op->crc);
806 struct bch_csum csum;
808 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
817 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
818 if (bch2_crc_cmp(op->crc.csum, csum))
821 bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
822 op->crc.csum_type = 0;
823 op->crc.csum = (struct bch_csum) { 0, 0 };
827 static enum prep_encoded_ret {
830 PREP_ENCODED_CHECKSUM_ERR,
831 PREP_ENCODED_DO_WRITE,
832 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
834 struct bch_fs *c = op->c;
835 struct bio *bio = &op->wbio.bio;
837 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
838 return PREP_ENCODED_OK;
840 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
842 /* Can we just write the entire extent as is? */
843 if (op->crc.uncompressed_size == op->crc.live_size &&
844 op->crc.compressed_size <= wp->sectors_free &&
845 (op->crc.compression_type == op->compression_type ||
846 op->incompressible)) {
847 if (!crc_is_compressed(op->crc) &&
848 op->csum_type != op->crc.csum_type &&
849 bch2_write_rechecksum(c, op, op->csum_type))
850 return PREP_ENCODED_CHECKSUM_ERR;
852 return PREP_ENCODED_DO_WRITE;
856 * If the data is compressed and we couldn't write the entire extent as
857 * is, we have to decompress it:
859 if (crc_is_compressed(op->crc)) {
860 struct bch_csum csum;
862 if (bch2_write_decrypt(op))
863 return PREP_ENCODED_CHECKSUM_ERR;
865 /* Last point we can still verify checksum: */
866 csum = bch2_checksum_bio(c, op->crc.csum_type,
867 extent_nonce(op->version, op->crc),
869 if (bch2_crc_cmp(op->crc.csum, csum))
870 return PREP_ENCODED_CHECKSUM_ERR;
872 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
873 return PREP_ENCODED_ERR;
	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */
882 * If the data is checksummed and we're only writing a subset,
883 * rechecksum and adjust bio to point to currently live data:
885 if ((op->crc.live_size != op->crc.uncompressed_size ||
886 op->crc.csum_type != op->csum_type) &&
887 bch2_write_rechecksum(c, op, op->csum_type))
888 return PREP_ENCODED_CHECKSUM_ERR;
891 * If we want to compress the data, it has to be decrypted:
893 if ((op->compression_type ||
894 bch2_csum_type_is_encryption(op->crc.csum_type) !=
895 bch2_csum_type_is_encryption(op->csum_type)) &&
896 bch2_write_decrypt(op))
897 return PREP_ENCODED_CHECKSUM_ERR;
899 return PREP_ENCODED_OK;
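/*
 * Summary added for clarity (not in the original source):
 * PREP_ENCODED_DO_WRITE means the already-encoded extent can be written out
 * as is; PREP_ENCODED_OK means it still goes through the normal encode path
 * (it may have been decompressed/decrypted above); PREP_ENCODED_ERR and
 * PREP_ENCODED_CHECKSUM_ERR abort the write.
 */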
902 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
905 struct bch_fs *c = op->c;
906 struct bio *src = &op->wbio.bio, *dst = src;
907 struct bvec_iter saved_iter;
909 struct bpos ec_pos = op->pos;
910 unsigned total_output = 0, total_input = 0;
912 bool page_alloc_failed = false;
915 BUG_ON(!bio_sectors(src));
917 ec_buf = bch2_writepoint_ec_buf(c, wp);
919 switch (bch2_write_prep_encoded_data(op, wp)) {
920 case PREP_ENCODED_OK:
922 case PREP_ENCODED_ERR:
925 case PREP_ENCODED_CHECKSUM_ERR:
928 case PREP_ENCODED_DO_WRITE:
929 /* XXX look for bug here */
931 dst = bch2_write_bio_alloc(c, wp, src,
934 bio_copy_data(dst, src);
937 init_append_extent(op, wp, op->version, op->crc);
942 op->compression_type ||
944 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
945 (bch2_csum_type_is_encryption(op->csum_type) &&
946 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
947 dst = bch2_write_bio_alloc(c, wp, src,
953 saved_iter = dst->bi_iter;
956 struct bch_extent_crc_unpacked crc =
957 (struct bch_extent_crc_unpacked) { 0 };
958 struct bversion version = op->version;
959 size_t dst_len, src_len;
961 if (page_alloc_failed &&
962 bio_sectors(dst) < wp->sectors_free &&
963 bio_sectors(dst) < c->sb.encoded_extent_max)
966 BUG_ON(op->compression_type &&
967 (op->flags & BCH_WRITE_DATA_ENCODED) &&
968 bch2_csum_type_is_encryption(op->crc.csum_type));
969 BUG_ON(op->compression_type && !bounce);
971 crc.compression_type = op->incompressible
972 ? BCH_COMPRESSION_TYPE_incompressible
973 : op->compression_type
974 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
975 op->compression_type)
977 if (!crc_is_compressed(crc)) {
978 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
979 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
982 dst_len = min_t(unsigned, dst_len,
983 c->sb.encoded_extent_max << 9);
986 swap(dst->bi_iter.bi_size, dst_len);
987 bio_copy_data(dst, src);
988 swap(dst->bi_iter.bi_size, dst_len);
994 BUG_ON(!src_len || !dst_len);
996 if (bch2_csum_type_is_encryption(op->csum_type)) {
997 if (bversion_zero(version)) {
998 version.lo = atomic64_inc_return(&c->key_version);
1000 crc.nonce = op->nonce;
1001 op->nonce += src_len >> 9;
1005 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1006 !crc_is_compressed(crc) &&
1007 bch2_csum_type_is_encryption(op->crc.csum_type) ==
1008 bch2_csum_type_is_encryption(op->csum_type)) {
1010 * Note: when we're using rechecksum(), we need to be
1011 * checksumming @src because it has all the data our
1012 * existing checksum covers - if we bounced (because we
1013 * were trying to compress), @dst will only have the
1014 * part of the data the new checksum will cover.
1016 * But normally we want to be checksumming post bounce,
1017 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
1021 if (bch2_rechecksum_bio(c, src, version, op->crc,
1024 bio_sectors(src) - (src_len >> 9),
1028 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1029 bch2_rechecksum_bio(c, src, version, op->crc,
1032 bio_sectors(src) - (src_len >> 9),
1036 crc.compressed_size = dst_len >> 9;
1037 crc.uncompressed_size = src_len >> 9;
1038 crc.live_size = src_len >> 9;
1040 swap(dst->bi_iter.bi_size, dst_len);
1041 bch2_encrypt_bio(c, op->csum_type,
1042 extent_nonce(version, crc), dst);
1043 crc.csum = bch2_checksum_bio(c, op->csum_type,
1044 extent_nonce(version, crc), dst);
1045 crc.csum_type = op->csum_type;
1046 swap(dst->bi_iter.bi_size, dst_len);
1049 init_append_extent(op, wp, version, crc);
1052 bio_advance(dst, dst_len);
1053 bio_advance(src, src_len);
1054 total_output += dst_len;
1055 total_input += src_len;
1056 } while (dst->bi_iter.bi_size &&
1057 src->bi_iter.bi_size &&
1059 !bch2_keylist_realloc(&op->insert_keys,
1061 ARRAY_SIZE(op->inline_keys),
1062 BKEY_EXTENT_U64s_MAX));
1064 more = src->bi_iter.bi_size != 0;
1066 dst->bi_iter = saved_iter;
1068 if (dst == src && more) {
1069 BUG_ON(total_output != total_input);
1071 dst = bio_split(src, total_input >> 9,
1072 GFP_NOIO, &c->bio_write);
1073 wbio_init(dst)->put_bio = true;
1074 /* copy WRITE_SYNC flag */
1075 dst->bi_opf = src->bi_opf;
1078 dst->bi_iter.bi_size = total_output;
1080 /* might have done a realloc... */
1081 bch2_ec_add_backpointer(c, wp, ec_pos, total_input >> 9);
1086 bch_err(c, "error verifying existing checksum while "
1087 "rewriting existing data (memory corruption?)");
1090 if (to_wbio(dst)->bounce)
1091 bch2_bio_free_pages_pool(c, dst);
1092 if (to_wbio(dst)->put_bio)
1098 static void __bch2_write(struct closure *cl)
1100 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1101 struct bch_fs *c = op->c;
1102 struct write_point *wp;
1104 bool skip_put = true;
1105 unsigned nofs_flags;
1108 nofs_flags = memalloc_nofs_save();
1110 memset(&op->failed, 0, sizeof(op->failed));
1113 struct bkey_i *key_to_write;
1114 unsigned key_to_write_offset = op->insert_keys.top_p -
1115 op->insert_keys.keys_p;
1117 /* +1 for possible cache device: */
1118 if (op->open_buckets.nr + op->nr_replicas + 1 >
1119 ARRAY_SIZE(op->open_buckets.v))
1122 if (bch2_keylist_realloc(&op->insert_keys,
1124 ARRAY_SIZE(op->inline_keys),
1125 BKEY_EXTENT_U64s_MAX))
1128 if ((op->flags & BCH_WRITE_FROM_INTERNAL) &&
1129 percpu_ref_is_dying(&c->writes)) {
1135 * The copygc thread is now global, which means it's no longer
1136 * freeing up space on specific disks, which means that
1137 * allocations for specific disks may hang arbitrarily long:
1139 wp = bch2_alloc_sectors_start(c,
1141 op->opts.erasure_code,
1145 op->nr_replicas_required,
1148 (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
1149 BCH_WRITE_ONLY_SPECIFIED_DEVS)) ? NULL : cl);
1152 if (unlikely(IS_ERR(wp))) {
1153 if (unlikely(PTR_ERR(wp) != -EAGAIN)) {
1162 * It's possible for the allocator to fail, put us on the
1163 * freelist waitlist, and then succeed in one of various retry
1164 * paths: if that happens, we need to disable the skip_put
1165 * optimization because otherwise there won't necessarily be a
1166 * barrier before we free the bch_write_op:
1168 if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
1171 bch2_open_bucket_get(c, wp, &op->open_buckets);
1172 ret = bch2_write_extent(op, wp, &bio);
1173 bch2_alloc_sectors_done(c, wp);
1182 * for the skip_put optimization this has to be set
1183 * before we submit the bio:
1185 op->flags |= BCH_WRITE_DONE;
1188 bio->bi_end_io = bch2_write_endio;
1189 bio->bi_private = &op->cl;
1190 bio->bi_opf |= REQ_OP_WRITE;
1193 closure_get(bio->bi_private);
1195 op->flags |= BCH_WRITE_SKIP_CLOSURE_PUT;
1197 key_to_write = (void *) (op->insert_keys.keys_p +
1198 key_to_write_offset);
1200 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1205 continue_at(cl, bch2_write_index, index_update_wq(op));
1207 memalloc_nofs_restore(nofs_flags);
1211 op->flags |= BCH_WRITE_DONE;
1213 continue_at(cl, bch2_write_index, index_update_wq(op));
1217 * If the write can't all be submitted at once, we generally want to
1218 * block synchronously as that signals backpressure to the caller.
1220 * However, if we're running out of a workqueue, we can't block here
1221 * because we'll be blocking other work items from completing:
1223 if (current->flags & PF_WQ_WORKER) {
1224 continue_at(cl, bch2_write_index, index_update_wq(op));
1230 if (!bch2_keylist_empty(&op->insert_keys)) {
1231 __bch2_write_index(op);
1234 op->flags |= BCH_WRITE_DONE;
1235 continue_at_nobarrier(cl, bch2_write_done, NULL);
1243 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1245 struct closure *cl = &op->cl;
1246 struct bio *bio = &op->wbio.bio;
1247 struct bvec_iter iter;
1248 struct bkey_i_inline_data *id;
1252 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1254 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1255 ARRAY_SIZE(op->inline_keys),
1256 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1262 sectors = bio_sectors(bio);
1263 op->pos.offset += sectors;
1265 id = bkey_inline_data_init(op->insert_keys.top);
1267 id->k.version = op->version;
1268 id->k.size = sectors;
1270 iter = bio->bi_iter;
1271 iter.bi_size = data_len;
1272 memcpy_from_bio(id->v.data, bio, iter);
1274 while (data_len & 7)
1275 id->v.data[data_len++] = '\0';
1276 set_bkey_val_bytes(&id->k, data_len);
1277 bch2_keylist_push(&op->insert_keys);
1279 op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1280 op->flags |= BCH_WRITE_DONE;
1282 continue_at_nobarrier(cl, bch2_write_index, NULL);
1285 bch2_write_done(&op->cl);
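/*
 * Worked example (illustrative, not in the original source): data_len = 13
 * bytes is zero padded above to 16 so the value is a whole number of u64s,
 * and the key needs BKEY_U64s + DIV_ROUND_UP(13, 8) = BKEY_U64s + 2 u64s,
 * matching the bch2_keylist_realloc() size at the top of this function.
 */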
/*
 * bch2_write - handle a write to a cache device or flash only volume
 *
1291 * This is the starting point for any data to end up in a cache device; it could
1292 * be from a normal write, or a writeback write, or a write to a flash only
1293 * volume - it's also used by the moving garbage collector to compact data in
1294 * mostly empty buckets.
1296 * It first writes the data to the cache, creating a list of keys to be inserted
1297 * (if the data won't fit in a single open bucket, there will be multiple keys);
1298 * after the data is written it calls bch_journal, and after the keys have been
1299 * added to the next journal write they're inserted into the btree.
1301 * If op->discard is true, instead of inserting the data it invalidates the
1302 * region of the cache represented by op->bio and op->inode.
1304 void bch2_write(struct closure *cl)
1306 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1307 struct bio *bio = &op->wbio.bio;
1308 struct bch_fs *c = op->c;
1311 BUG_ON(!op->nr_replicas);
1312 BUG_ON(!op->write_point.v);
1313 BUG_ON(!bkey_cmp(op->pos, POS_MAX));
1315 op->start_time = local_clock();
1316 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1317 wbio_init(bio)->put_bio = false;
1319 if (bio_sectors(bio) & (c->opts.block_size - 1)) {
1320 bch_err_inum_ratelimited(c, op->pos.inode,
1321 "misaligned write");
1326 if (c->opts.nochanges ||
1327 !percpu_ref_tryget(&c->writes)) {
1332 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1334 data_len = min_t(u64, bio->bi_iter.bi_size,
1335 op->new_i_size - (op->pos.offset << 9));
1337 if (c->opts.inline_data &&
1338 data_len <= min(block_bytes(c) / 2, 1024U)) {
1339 bch2_write_data_inline(op, data_len);
1343 continue_at_nobarrier(cl, __bch2_write, NULL);
1346 bch2_disk_reservation_put(c, &op->res);
1349 EBUG_ON(cl->parent);
1350 closure_debug_destroy(cl);
1357 /* Cache promotion on read */
1361 struct rcu_head rcu;
1364 struct rhash_head hash;
1367 struct migrate_write write;
1368 struct bio_vec bi_inline_vecs[0]; /* must be last */
1371 static const struct rhashtable_params bch_promote_params = {
1372 .head_offset = offsetof(struct promote_op, hash),
1373 .key_offset = offsetof(struct promote_op, pos),
1374 .key_len = sizeof(struct bpos),
1377 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
1379 struct bch_io_opts opts,
1382 if (!(flags & BCH_READ_MAY_PROMOTE))
1385 if (!opts.promote_target)
1388 if (bch2_bkey_has_target(c, k, opts.promote_target))
1391 if (bch2_target_congested(c, opts.promote_target)) {
1392 /* XXX trace this */
1396 if (rhashtable_lookup_fast(&c->promote_table, &pos,
1397 bch_promote_params))
1403 static void promote_free(struct bch_fs *c, struct promote_op *op)
1407 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1408 bch_promote_params);
1410 percpu_ref_put(&c->writes);
1414 static void promote_done(struct closure *cl)
1416 struct promote_op *op =
1417 container_of(cl, struct promote_op, cl);
1418 struct bch_fs *c = op->write.op.c;
1420 bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1423 bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
1424 promote_free(c, op);
1427 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1429 struct bch_fs *c = rbio->c;
1430 struct closure *cl = &op->cl;
1431 struct bio *bio = &op->write.op.wbio.bio;
1433 trace_promote(&rbio->bio);
1435 /* we now own pages: */
1436 BUG_ON(!rbio->bounce);
1437 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1439 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1440 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1441 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1443 bch2_migrate_read_done(&op->write, rbio);
1445 closure_init(cl, NULL);
1446 closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
1447 closure_return_with_destructor(cl, promote_done);
1450 static struct promote_op *__promote_alloc(struct bch_fs *c,
1451 enum btree_id btree_id,
1454 struct extent_ptr_decoded *pick,
1455 struct bch_io_opts opts,
1457 struct bch_read_bio **rbio)
1459 struct promote_op *op = NULL;
1461 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1464 if (!percpu_ref_tryget(&c->writes))
1467 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
1471 op->start_time = local_clock();
1475 * We don't use the mempool here because extents that aren't
1476 * checksummed or compressed can be too big for the mempool:
1478 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1479 sizeof(struct bio_vec) * pages,
1484 rbio_init(&(*rbio)->bio, opts);
1485 bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
1487 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
1491 (*rbio)->bounce = true;
1492 (*rbio)->split = true;
1493 (*rbio)->kmalloc = true;
1495 if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1496 bch_promote_params))
1499 bio = &op->write.op.wbio.bio;
1500 bio_init(bio, bio->bi_inline_vecs, pages);
1502 ret = bch2_migrate_write_init(c, &op->write,
1503 writepoint_hashed((unsigned long) current),
1506 (struct data_opts) {
1507 .target = opts.promote_target,
1516 bio_free_pages(&(*rbio)->bio);
1520 percpu_ref_put(&c->writes);
1525 static struct promote_op *promote_alloc(struct bch_fs *c,
1526 struct bvec_iter iter,
1528 struct extent_ptr_decoded *pick,
1529 struct bch_io_opts opts,
1531 struct bch_read_bio **rbio,
1535 bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1536 /* data might have to be decompressed in the write path: */
1537 unsigned sectors = promote_full
1538 ? max(pick->crc.compressed_size, pick->crc.live_size)
1539 : bvec_iter_sectors(iter);
1540 struct bpos pos = promote_full
1541 ? bkey_start_pos(k.k)
1542 : POS(k.k->p.inode, iter.bi_sector);
1543 struct promote_op *promote;
1545 if (!should_promote(c, k, pos, opts, flags))
1548 promote = __promote_alloc(c,
1549 k.k->type == KEY_TYPE_reflink_v
1552 k, pos, pick, opts, sectors, rbio);
1557 *read_full = promote_full;
1563 #define READ_RETRY_AVOID 1
1564 #define READ_RETRY 2
1569 RBIO_CONTEXT_HIGHPRI,
1570 RBIO_CONTEXT_UNBOUND,
1573 static inline struct bch_read_bio *
1574 bch2_rbio_parent(struct bch_read_bio *rbio)
1576 return rbio->split ? rbio->parent : rbio;
1580 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
1581 enum rbio_context context,
1582 struct workqueue_struct *wq)
1584 if (context <= rbio->context) {
1587 rbio->work.func = fn;
1588 rbio->context = context;
1589 queue_work(wq, &rbio->work);
1593 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1595 BUG_ON(rbio->bounce && !rbio->split);
1598 promote_free(rbio->c, rbio->promote);
1599 rbio->promote = NULL;
1602 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1605 struct bch_read_bio *parent = rbio->parent;
1610 bio_put(&rbio->bio);
/*
 * Only called on a top level bch_read_bio to complete an entire read request,
 * not a split:
 */
1622 static void bch2_rbio_done(struct bch_read_bio *rbio)
1624 if (rbio->start_time)
1625 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
1627 bio_endio(&rbio->bio);
1630 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
1631 struct bvec_iter bvec_iter, u64 inode,
1632 struct bch_io_failures *failed,
1635 struct btree_trans trans;
1636 struct btree_iter *iter;
1641 flags &= ~BCH_READ_LAST_FRAGMENT;
1642 flags |= BCH_READ_MUST_CLONE;
1644 bch2_bkey_buf_init(&sk);
1645 bch2_trans_init(&trans, c, 0, 0);
1647 iter = bch2_trans_get_iter(&trans, rbio->data_btree,
1648 rbio->read_pos, BTREE_ITER_SLOTS);
1650 rbio->bio.bi_status = 0;
1652 k = bch2_btree_iter_peek_slot(iter);
1656 bch2_bkey_buf_reassemble(&sk, c, k);
1657 k = bkey_i_to_s_c(sk.k);
1658 bch2_trans_unlock(&trans);
1660 if (!bch2_bkey_matches_ptr(c, k,
1662 rbio->data_pos.offset -
1663 rbio->pick.crc.offset)) {
1664 /* extent we wanted to read no longer exists: */
1669 ret = __bch2_read_extent(&trans, rbio, bvec_iter,
1672 k, 0, failed, flags);
1673 if (ret == READ_RETRY)
1678 bch2_rbio_done(rbio);
1679 bch2_trans_iter_put(&trans, iter);
1680 bch2_trans_exit(&trans);
1681 bch2_bkey_buf_exit(&sk, c);
1684 rbio->bio.bi_status = BLK_STS_IOERR;
1688 static void bch2_rbio_retry(struct work_struct *work)
1690 struct bch_read_bio *rbio =
1691 container_of(work, struct bch_read_bio, work);
1692 struct bch_fs *c = rbio->c;
1693 struct bvec_iter iter = rbio->bvec_iter;
1694 unsigned flags = rbio->flags;
1695 u64 inode = rbio->read_pos.inode;
1696 struct bch_io_failures failed = { .nr = 0 };
1698 trace_read_retry(&rbio->bio);
1700 if (rbio->retry == READ_RETRY_AVOID)
1701 bch2_mark_io_failure(&failed, &rbio->pick);
1703 rbio->bio.bi_status = 0;
1705 rbio = bch2_rbio_free(rbio);
1707 flags |= BCH_READ_IN_RETRY;
1708 flags &= ~BCH_READ_MAY_PROMOTE;
1710 if (flags & BCH_READ_NODECODE) {
1711 bch2_read_retry_nodecode(c, rbio, iter, inode, &failed, flags);
1713 flags &= ~BCH_READ_LAST_FRAGMENT;
1714 flags |= BCH_READ_MUST_CLONE;
1716 __bch2_read(c, rbio, iter, inode, &failed, flags);
1720 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1723 rbio->retry = retry;
1725 if (rbio->flags & BCH_READ_IN_RETRY)
1728 if (retry == READ_ERR) {
1729 rbio = bch2_rbio_free(rbio);
1731 rbio->bio.bi_status = error;
1732 bch2_rbio_done(rbio);
1734 bch2_rbio_punt(rbio, bch2_rbio_retry,
1735 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
1739 static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
1740 struct bch_read_bio *rbio)
1742 struct bch_fs *c = rbio->c;
1743 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
1744 struct bch_extent_crc_unpacked new_crc;
1745 struct btree_iter *iter = NULL;
1750 if (crc_is_compressed(rbio->pick.crc))
1753 iter = bch2_trans_get_iter(trans, rbio->data_btree, rbio->data_pos,
1754 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1755 k = bch2_btree_iter_peek_slot(iter);
1756 if ((ret = bkey_err(k)))
1759 if (bversion_cmp(k.k->version, rbio->version) ||
1760 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
1763 /* Extent was merged? */
1764 if (bkey_start_offset(k.k) < data_offset ||
1765 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
1768 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1769 rbio->pick.crc, NULL, &new_crc,
1770 bkey_start_offset(k.k) - data_offset, k.k->size,
1771 rbio->pick.crc.csum_type)) {
1772 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1778 * going to be temporarily appending another checksum entry:
1780 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
1781 sizeof(struct bch_extent_crc128));
1782 if ((ret = PTR_ERR_OR_ZERO(new)))
1785 bkey_reassemble(new, k);
1787 if (!bch2_bkey_narrow_crcs(new, new_crc))
1790 ret = bch2_trans_update(trans, iter, new, 0);
1792 bch2_trans_iter_put(trans, iter);
1796 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1798 bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
1799 __bch2_rbio_narrow_crcs(&trans, rbio));
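/*
 * Note added for clarity (not in the original source): "narrowing" the crc
 * means replacing a checksum that covers the whole (possibly mostly dead)
 * checksummed region with one covering only the live part, computed from the
 * data we just read and verified, so future reads don't have to read and
 * checksum the dead regions.
 */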
1802 /* Inner part that may run in process context */
1803 static void __bch2_read_endio(struct work_struct *work)
1805 struct bch_read_bio *rbio =
1806 container_of(work, struct bch_read_bio, work);
1807 struct bch_fs *c = rbio->c;
1808 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1809 struct bio *src = &rbio->bio;
1810 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
1811 struct bvec_iter dst_iter = rbio->bvec_iter;
1812 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1813 struct nonce nonce = extent_nonce(rbio->version, crc);
1814 unsigned nofs_flags;
1815 struct bch_csum csum;
1817 nofs_flags = memalloc_nofs_save();
1819 /* Reset iterator for checksumming and copying bounced data: */
1821 src->bi_iter.bi_size = crc.compressed_size << 9;
1822 src->bi_iter.bi_idx = 0;
1823 src->bi_iter.bi_bvec_done = 0;
1825 src->bi_iter = rbio->bvec_iter;
1828 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1829 if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1834 * We need to rework the narrow_crcs path to deliver the read completion
1835 * first, and then punt to a different workqueue, otherwise we're
	 * holding up reads while doing btree updates which is bad for memory
	 * reclaim.
	 */
1839 if (unlikely(rbio->narrow_crcs))
1840 bch2_rbio_narrow_crcs(rbio);
1842 if (rbio->flags & BCH_READ_NODECODE)
1845 /* Adjust crc to point to subset of data we want: */
1846 crc.offset += rbio->offset_into_extent;
1847 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
1849 if (crc_is_compressed(crc)) {
1850 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1851 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1852 goto decompression_err;
1854 /* don't need to decrypt the entire bio: */
1855 nonce = nonce_add(nonce, crc.offset << 9);
1856 bio_advance(src, crc.offset << 9);
1858 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1859 src->bi_iter.bi_size = dst_iter.bi_size;
1861 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1864 struct bvec_iter src_iter = src->bi_iter;
1865 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1869 if (rbio->promote) {
		/*
		 * Re encrypt data we decrypted, so it's consistent with
		 * rbio->crc:
		 */
1874 bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1875 promote_start(rbio->promote, rbio);
1876 rbio->promote = NULL;
1879 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
1880 rbio = bch2_rbio_free(rbio);
1881 bch2_rbio_done(rbio);
1884 memalloc_nofs_restore(nofs_flags);
1888 * Checksum error: if the bio wasn't bounced, we may have been
1889 * reading into buffers owned by userspace (that userspace can
1890 * scribble over) - retry the read, bouncing it this time:
1892 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1893 rbio->flags |= BCH_READ_MUST_BOUNCE;
1894 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1898 bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector,
1899 "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %u)",
1900 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1901 csum.hi, csum.lo, crc.csum_type);
1902 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1905 bch_err_inum_ratelimited(c, rbio->read_pos.inode,
1906 "decompression error");
1907 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1911 static void bch2_read_endio(struct bio *bio)
1913 struct bch_read_bio *rbio =
1914 container_of(bio, struct bch_read_bio, bio);
1915 struct bch_fs *c = rbio->c;
1916 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1917 struct workqueue_struct *wq = NULL;
1918 enum rbio_context context = RBIO_CONTEXT_NULL;
1920 if (rbio->have_ioref) {
1921 bch2_latency_acct(ca, rbio->submit_time, READ);
1922 percpu_ref_put(&ca->io_ref);
1926 rbio->bio.bi_end_io = rbio->end_io;
1928 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
1929 rbio->read_pos.inode,
1930 rbio->read_pos.offset,
1931 "data read error: %s",
1932 bch2_blk_status_to_str(bio->bi_status))) {
1933 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1937 if (rbio->pick.ptr.cached &&
1938 (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1939 ptr_stale(ca, &rbio->pick.ptr))) {
1940 atomic_long_inc(&c->read_realloc_races);
1942 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1943 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1945 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1949 if (rbio->narrow_crcs ||
1950 crc_is_compressed(rbio->pick.crc) ||
1951 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1952 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1953 else if (rbio->pick.crc.csum_type)
1954 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1956 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1959 int __bch2_read_indirect_extent(struct btree_trans *trans,
1960 unsigned *offset_into_extent,
1961 struct bkey_buf *orig_k)
1963 struct btree_iter *iter;
1968 reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
1969 *offset_into_extent;
1971 iter = bch2_trans_get_iter(trans, BTREE_ID_reflink,
1972 POS(0, reflink_offset),
1974 k = bch2_btree_iter_peek_slot(iter);
1979 if (k.k->type != KEY_TYPE_reflink_v &&
1980 k.k->type != KEY_TYPE_indirect_inline_data) {
1981 bch_err_inum_ratelimited(trans->c, orig_k->k->k.p.inode,
1982 "%llu len %u points to nonexistent indirect extent %llu",
1983 orig_k->k->k.p.offset,
1986 bch2_inconsistent_error(trans->c);
1991 *offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
1992 bch2_bkey_buf_reassemble(orig_k, trans->c, k);
1994 bch2_trans_iter_put(trans, iter);
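/*
 * Worked example (illustrative, not in the original source): a reflink
 * pointer with v.idx = 1000, read 16 sectors into the extent, resolves to
 * reflink_offset = 1016 and is looked up in the reflink btree at
 * POS(0, 1016); offset_into_extent is then recomputed relative to the start
 * of the indirect extent found there.
 */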
1998 int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
1999 struct bvec_iter iter, struct bpos read_pos,
2000 enum btree_id data_btree, struct bkey_s_c k,
2001 unsigned offset_into_extent,
2002 struct bch_io_failures *failed, unsigned flags)
2004 struct bch_fs *c = trans->c;
2005 struct extent_ptr_decoded pick;
2006 struct bch_read_bio *rbio = NULL;
2008 struct promote_op *promote = NULL;
2009 bool bounce = false, read_full = false, narrow_crcs = false;
2010 struct bpos data_pos = bkey_start_pos(k.k);
2013 if (bkey_extent_is_inline_data(k.k)) {
2014 unsigned bytes = min_t(unsigned, iter.bi_size,
2015 bkey_inline_data_bytes(k.k));
2017 swap(iter.bi_size, bytes);
2018 memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
2019 swap(iter.bi_size, bytes);
2020 bio_advance_iter(&orig->bio, &iter, bytes);
2021 zero_fill_bio_iter(&orig->bio, iter);
2025 pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
2027 /* hole or reservation - just zero fill: */
2032 bch_err_inum_ratelimited(c, k.k->p.inode,
2033 "no device to read from");
2038 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
2040 if (flags & BCH_READ_NODECODE) {
2042 * can happen if we retry, and the extent we were going to read
2043 * has been merged in the meantime:
2045 if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
2048 iter.bi_size = pick.crc.compressed_size << 9;
2052 if (!(flags & BCH_READ_LAST_FRAGMENT) ||
2053 bio_flagged(&orig->bio, BIO_CHAIN))
2054 flags |= BCH_READ_MUST_CLONE;
2056 narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
2057 bch2_can_narrow_extent_crcs(k, pick.crc);
2059 if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
2060 flags |= BCH_READ_MUST_BOUNCE;
2062 EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
2064 if (crc_is_compressed(pick.crc) ||
2065 (pick.crc.csum_type != BCH_CSUM_NONE &&
2066 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
2067 (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
2068 (flags & BCH_READ_USER_MAPPED)) ||
2069 (flags & BCH_READ_MUST_BOUNCE)))) {
2074 if (orig->opts.promote_target)
2075 promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
2076 &rbio, &bounce, &read_full);
2079 EBUG_ON(crc_is_compressed(pick.crc));
2080 EBUG_ON(pick.crc.csum_type &&
2081 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
2082 bvec_iter_sectors(iter) != pick.crc.live_size ||
2084 offset_into_extent));
2086 data_pos.offset += offset_into_extent;
2087 pick.ptr.offset += pick.crc.offset +
2089 offset_into_extent = 0;
2090 pick.crc.compressed_size = bvec_iter_sectors(iter);
2091 pick.crc.uncompressed_size = bvec_iter_sectors(iter);
2092 pick.crc.offset = 0;
2093 pick.crc.live_size = bvec_iter_sectors(iter);
2094 offset_into_extent = 0;
2099 * promote already allocated bounce rbio:
2100 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
2104 EBUG_ON(rbio->bio.bi_iter.bi_size <
2105 pick.crc.compressed_size << 9);
2106 rbio->bio.bi_iter.bi_size =
2107 pick.crc.compressed_size << 9;
2108 } else if (bounce) {
2109 unsigned sectors = pick.crc.compressed_size;
2111 rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
2112 DIV_ROUND_UP(sectors, PAGE_SECTORS),
2113 &c->bio_read_split),
2116 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
2117 rbio->bounce = true;
2119 } else if (flags & BCH_READ_MUST_CLONE) {
2121 * Have to clone if there were any splits, due to error
2122 * reporting issues (if a split errored, and retrying didn't
2123 * work, when it reports the error to its parent (us) we don't
2124 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
2128 rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
2129 &c->bio_read_split),
2131 rbio->bio.bi_iter = iter;
2135 rbio->bio.bi_iter = iter;
2136 EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
2139 EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
2142 rbio->submit_time = local_clock();
2144 rbio->parent = orig;
2146 rbio->end_io = orig->bio.bi_end_io;
2147 rbio->bvec_iter = iter;
2148 rbio->offset_into_extent= offset_into_extent;
2149 rbio->flags = flags;
2150 rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
2151 rbio->narrow_crcs = narrow_crcs;
2155 /* XXX: only initialize this if needed */
2156 rbio->devs_have = bch2_bkey_devs(k);
2158 rbio->read_pos = read_pos;
2159 rbio->data_btree = data_btree;
2160 rbio->data_pos = data_pos;
2161 rbio->version = k.k->version;
2162 rbio->promote = promote;
2163 INIT_WORK(&rbio->work, NULL);
2165 rbio->bio.bi_opf = orig->bio.bi_opf;
2166 rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
2167 rbio->bio.bi_end_io = bch2_read_endio;
2170 trace_read_bounce(&rbio->bio);
2172 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
	/*
	 * If it's being moved internally, we don't want to flag it as a cache
	 * hit:
	 */
2178 if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
2179 bch2_bucket_io_time_reset(trans, pick.ptr.dev,
2180 PTR_BUCKET_NR(ca, &pick.ptr), READ);
2182 if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
2183 bio_inc_remaining(&orig->bio);
2184 trace_read_split(&orig->bio);
2187 if (!rbio->pick.idx) {
2188 if (!rbio->have_ioref) {
2189 bch_err_inum_ratelimited(c, k.k->p.inode,
2190 "no device to read from");
2191 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2195 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
2196 bio_sectors(&rbio->bio));
2197 bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
2199 if (likely(!(flags & BCH_READ_IN_RETRY)))
2200 submit_bio(&rbio->bio);
2202 submit_bio_wait(&rbio->bio);
2204 /* Attempting reconstruct read: */
2205 if (bch2_ec_read_extent(c, rbio)) {
2206 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2210 if (likely(!(flags & BCH_READ_IN_RETRY)))
2211 bio_endio(&rbio->bio);
2214 if (likely(!(flags & BCH_READ_IN_RETRY))) {
2219 rbio->context = RBIO_CONTEXT_UNBOUND;
2220 bch2_read_endio(&rbio->bio);
2223 rbio = bch2_rbio_free(rbio);
2225 if (ret == READ_RETRY_AVOID) {
2226 bch2_mark_io_failure(failed, &pick);
2237 if (flags & BCH_READ_IN_RETRY)
2240 orig->bio.bi_status = BLK_STS_IOERR;
2245 * won't normally happen in the BCH_READ_NODECODE
2246 * (bch2_move_extent()) path, but if we retry and the extent we wanted
2247 * to read no longer exists we have to signal that:
2249 if (flags & BCH_READ_NODECODE)
2252 zero_fill_bio_iter(&orig->bio, iter);
2254 if (flags & BCH_READ_LAST_FRAGMENT)
2255 bch2_rbio_done(orig);
2259 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
2260 struct bvec_iter bvec_iter, u64 inode,
2261 struct bch_io_failures *failed, unsigned flags)
2263 struct btree_trans trans;
2264 struct btree_iter *iter;
2269 BUG_ON(flags & BCH_READ_NODECODE);
2271 bch2_bkey_buf_init(&sk);
2272 bch2_trans_init(&trans, c, 0, 0);
2274 bch2_trans_begin(&trans);
2276 iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
2277 POS(inode, bvec_iter.bi_sector),
2280 unsigned bytes, sectors, offset_into_extent;
2281 enum btree_id data_btree = BTREE_ID_extents;
2283 bch2_btree_iter_set_pos(iter,
2284 POS(inode, bvec_iter.bi_sector));
2286 k = bch2_btree_iter_peek_slot(iter);
2291 offset_into_extent = iter->pos.offset -
2292 bkey_start_offset(k.k);
2293 sectors = k.k->size - offset_into_extent;
2295 bch2_bkey_buf_reassemble(&sk, c, k);
2297 ret = bch2_read_indirect_extent(&trans, &data_btree,
2298 &offset_into_extent, &sk);
2302 k = bkey_i_to_s_c(sk.k);
2305 * With indirect extents, the amount of data to read is the min
2306 * of the original extent and the indirect extent:
2308 sectors = min(sectors, k.k->size - offset_into_extent);
2311 * Unlock the iterator while the btree node's lock is still in
2312 * cache, before doing the IO:
2314 bch2_trans_unlock(&trans);
2316 bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
2317 swap(bvec_iter.bi_size, bytes);
2319 if (bvec_iter.bi_size == bytes)
2320 flags |= BCH_READ_LAST_FRAGMENT;
2322 ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter->pos,
2324 offset_into_extent, failed, flags);
2328 if (flags & BCH_READ_LAST_FRAGMENT)
2331 swap(bvec_iter.bi_size, bytes);
2332 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
2334 bch2_trans_iter_put(&trans, iter);
2336 if (ret == -EINTR || ret == READ_RETRY || ret == READ_RETRY_AVOID)
2340 bch_err_inum_ratelimited(c, inode,
2341 "read error %i from btree lookup", ret);
2342 rbio->bio.bi_status = BLK_STS_IOERR;
2343 bch2_rbio_done(rbio);
2345 bch2_trans_exit(&trans);
2346 bch2_bkey_buf_exit(&sk, c);
2349 void bch2_fs_io_exit(struct bch_fs *c)
2351 if (c->promote_table.tbl)
2352 rhashtable_destroy(&c->promote_table);
2353 mempool_exit(&c->bio_bounce_pages);
2354 bioset_exit(&c->bio_write);
2355 bioset_exit(&c->bio_read_split);
2356 bioset_exit(&c->bio_read);
2359 int bch2_fs_io_init(struct bch_fs *c)
2361 if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
2362 BIOSET_NEED_BVECS) ||
2363 bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
2364 BIOSET_NEED_BVECS) ||
2365 bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
2366 BIOSET_NEED_BVECS) ||
2367 mempool_init_page_pool(&c->bio_bounce_pages,
2369 c->opts.btree_node_size,
				   c->sb.encoded_extent_max) /
				   PAGE_SECTORS, 0) ||
	    rhashtable_init(&c->promote_table, &bch_promote_params))
		return -ENOMEM;

	return 0;
}
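/*
 * Sizing note added for clarity (not in the original source): the bounce page
 * pool reserves max(btree_node_size, encoded_extent_max) worth of pages -
 * e.g. a 256k btree node with 4k pages reserves 64 pages - so a read or
 * write that has to bounce can always make progress.
 */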