1 // SPDX-License-Identifier: GPL-2.0
3 * Some low level IO code, and hacks for various block layer limitations
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
10 #include "alloc_background.h"
11 #include "alloc_foreground.h"
14 #include "btree_update.h"
20 #include "disk_groups.h"
23 #include "extent_update.h"
29 #include "rebalance.h"
30 #include "subvolume.h"
34 #include <linux/blkdev.h>
35 #include <linux/random.h>
36 #include <linux/sched/mm.h>
38 #include <trace/events/bcachefs.h>
40 const char *bch2_blk_status_to_str(blk_status_t status)
42 if (status == BLK_STS_REMOVED)
43 return "device removed";
44 return blk_status_to_str(status);
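/*
 * Rough summary of the helper below: each device's congestion counter decays
 * over time, and the read path uses it to probabilistically avoid congested
 * targets - the function returns true with probability roughly
 * total_congestion / (nr_devices * CONGESTED_MAX).
 */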
47 static bool bch2_target_congested(struct bch_fs *c, u16 target)
49 const struct bch_devs_mask *devs;
50 unsigned d, nr = 0, total = 0;
51 u64 now = local_clock(), last;
59 devs = bch2_target_to_mask(c, target) ?:
60 &c->rw_devs[BCH_DATA_user];
62 for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
63 ca = rcu_dereference(c->devs[d]);
67 congested = atomic_read(&ca->congested);
68 last = READ_ONCE(ca->congested_last);
69 if (time_after64(now, last))
70 congested -= (now - last) >> 12;
72 total += max(congested, 0LL);
77 return bch2_rand_range(nr * CONGESTED_MAX) < total;
80 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
84 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
85 /* ideally we'd be taking into account the device's variance here: */
86 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
87 s64 latency_over = io_latency - latency_threshold;
89 if (latency_threshold && latency_over > 0) {
91 * bump up congested by approximately latency_over * 4 /
92 * latency_threshold - we don't need much accuracy here so don't
93 * bother with the divide:
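* For example (illustrative numbers, not from the code): with
* latency_threshold = 64 and latency_over = 32 in the same clock units,
* ilog2(64) - 2 = 4, so we add 32 >> 4 = 2 - which matches
* latency_over * 4 / latency_threshold.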
95 if (atomic_read(&ca->congested) < CONGESTED_MAX)
96 atomic_add(latency_over >>
97 max_t(int, ilog2(latency_threshold) - 2, 0),
100 ca->congested_last = now;
101 } else if (atomic_read(&ca->congested) > 0) {
102 atomic_dec(&ca->congested);
106 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
108 atomic64_t *latency = &ca->cur_latency[rw];
109 u64 now = local_clock();
110 u64 io_latency = time_after64(now, submit_time)
113 u64 old, new, v = atomic64_read(latency);
119 * If the io latency was reasonably close to the current
120 * latency, skip doing the update and atomic operation - most of
123 if (abs((int) (old - io_latency)) < (old >> 1) &&
127 new = ewma_add(old, io_latency, 5);
128 } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
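/*
 * A sketch of the update above, assuming the ewma_add() helper from util.h:
 * with a weight of 5, new = (old * 31 + io_latency) / 32, so each sample
 * nudges the running latency estimate by 1/32nd of the difference.
 */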
130 bch2_congested_acct(ca, io_latency, now, rw);
132 __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
135 /* Allocate, free from mempool: */
137 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
139 struct bvec_iter_all iter;
142 bio_for_each_segment_all(bv, bio, iter)
143 if (bv->bv_page != ZERO_PAGE(0))
144 mempool_free(bv->bv_page, &c->bio_bounce_pages);
148 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
152 if (likely(!*using_mempool)) {
153 page = alloc_page(GFP_NOIO);
154 if (unlikely(!page)) {
155 mutex_lock(&c->bio_bounce_pages_lock);
156 *using_mempool = true;
162 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
168 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
171 bool using_mempool = false;
174 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
175 unsigned len = min_t(size_t, PAGE_SIZE, size);
177 BUG_ON(!bio_add_page(bio, page, len, 0));
182 mutex_unlock(&c->bio_bounce_pages_lock);
185 /* Extent update path: */
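/*
 * Roughly, for the helper below: i_sectors_delta is the change in logical
 * (i_size-accounted) sectors, disk_sectors_delta is the change in on-disk
 * sectors summed across replicas, and usage_increasing is set when the new
 * extent may consume more space than what it overwrites (more replicas, a
 * different snapshot, or losing compression).
 */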
187 int bch2_sum_sector_overwrites(struct btree_trans *trans,
188 struct btree_iter *extent_iter,
190 bool *usage_increasing,
191 s64 *i_sectors_delta,
192 s64 *disk_sectors_delta)
194 struct bch_fs *c = trans->c;
195 struct btree_iter iter;
197 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
198 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
201 *usage_increasing = false;
202 *i_sectors_delta = 0;
203 *disk_sectors_delta = 0;
205 bch2_trans_copy_iter(&iter, extent_iter);
207 for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, old, ret) {
208 s64 sectors = min(new->k.p.offset, old.k->p.offset) -
209 max(bkey_start_offset(&new->k),
210 bkey_start_offset(old.k));
212 *i_sectors_delta += sectors *
213 (bkey_extent_is_allocation(&new->k) -
214 bkey_extent_is_allocation(old.k));
216 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
217 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
218 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
221 if (!*usage_increasing &&
222 (new->k.p.snapshot != old.k->p.snapshot ||
223 new_replicas > bch2_bkey_replicas(c, old) ||
224 (!new_compressed && bch2_bkey_sectors_compressed(old))))
225 *usage_increasing = true;
227 if (bkey_cmp(old.k->p, new->k.p) >= 0)
231 bch2_trans_iter_exit(trans, &iter);
235 int bch2_extent_update(struct btree_trans *trans,
237 struct btree_iter *iter,
239 struct disk_reservation *disk_res,
242 s64 *i_sectors_delta_total,
245 struct btree_iter inode_iter = { NULL };
246 struct bpos next_pos;
247 bool usage_increasing;
248 s64 i_sectors_delta = 0, disk_sectors_delta = 0;
252 * This traverses the iterator for us without changing iter->path->pos to
253 * search_key() (which is pos + 1 for extents): we want there to be a
254 * path already traversed at iter->pos because
255 * bch2_trans_extent_update() will use it to attempt extent merging
257 ret = __bch2_btree_iter_traverse(iter);
261 ret = bch2_extent_trim_atomic(trans, iter, k);
265 new_i_size = min(k->k.p.offset << 9, new_i_size);
268 ret = bch2_sum_sector_overwrites(trans, iter, k,
271 &disk_sectors_delta);
276 disk_sectors_delta > (s64) disk_res->sectors) {
277 ret = bch2_disk_reservation_add(trans->c, disk_res,
278 disk_sectors_delta - disk_res->sectors,
279 !check_enospc || !usage_increasing
280 ? BCH_DISK_RESERVATION_NOFAIL : 0);
285 if (new_i_size || i_sectors_delta) {
287 struct bkey_s_c_inode_v3 inode;
288 struct bkey_i_inode_v3 *new_inode;
291 bch2_trans_iter_init(trans, &inode_iter, BTREE_ID_inodes,
292 SPOS(0, inum.inum, iter->snapshot),
293 BTREE_ITER_INTENT|BTREE_ITER_CACHED);
294 k = bch2_btree_iter_peek_slot(&inode_iter);
299 ret = bkey_is_inode(k.k) ? 0 : -ENOENT;
303 if (unlikely(k.k->type != KEY_TYPE_inode_v3)) {
304 k = bch2_inode_to_v3(trans, k);
310 inode = bkey_s_c_to_inode_v3(k);
311 i_size_update = !(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
312 new_i_size > le64_to_cpu(inode.v->bi_size);
314 if (!i_sectors_delta && !i_size_update)
315 goto no_inode_update;
317 new_inode = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
318 ret = PTR_ERR_OR_ZERO(new_inode);
322 bkey_reassemble(&new_inode->k_i, k);
325 new_inode->v.bi_size = cpu_to_le64(new_i_size);
327 le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
328 ret = bch2_trans_update(trans, &inode_iter, &new_inode->k_i, 0);
333 ret = bch2_trans_update(trans, iter, k, 0) ?:
334 bch2_trans_commit(trans, disk_res, journal_seq,
335 BTREE_INSERT_NOCHECK_RW|
336 BTREE_INSERT_NOFAIL);
340 if (i_sectors_delta_total)
341 *i_sectors_delta_total += i_sectors_delta;
342 bch2_btree_iter_set_pos(iter, next_pos);
344 bch2_trans_iter_exit(trans, &inode_iter);
349 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
351 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
352 subvol_inum inum, u64 end,
353 s64 *i_sectors_delta)
355 struct bch_fs *c = trans->c;
356 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
357 struct bpos end_pos = POS(inum.inum, end);
359 int ret = 0, ret2 = 0;
363 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
364 struct disk_reservation disk_res =
365 bch2_disk_reservation_init(c, 0);
366 struct bkey_i delete;
371 bch2_trans_begin(trans);
373 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
377 bch2_btree_iter_set_snapshot(iter, snapshot);
379 k = bch2_btree_iter_peek(iter);
380 if (bkey_cmp(iter->pos, end_pos) >= 0) {
381 bch2_btree_iter_set_pos(iter, end_pos);
389 bkey_init(&delete.k);
390 delete.k.p = iter->pos;
392 /* create the biggest key we can */
393 bch2_key_resize(&delete.k, max_sectors);
394 bch2_cut_back(end_pos, &delete);
396 ret = bch2_extent_update(trans, inum, iter, &delete,
398 0, i_sectors_delta, false);
399 bch2_disk_reservation_put(c, &disk_res);
405 int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
406 s64 *i_sectors_delta)
408 struct btree_trans trans;
409 struct btree_iter iter;
412 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
413 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
414 POS(inum.inum, start),
417 ret = bch2_fpunch_at(&trans, &iter, inum, end, i_sectors_delta);
419 bch2_trans_iter_exit(&trans, &iter);
420 bch2_trans_exit(&trans);
422 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
428 int bch2_write_index_default(struct bch_write_op *op)
430 struct bch_fs *c = op->c;
432 struct keylist *keys = &op->insert_keys;
433 struct bkey_i *k = bch2_keylist_front(keys);
434 struct btree_trans trans;
435 struct btree_iter iter;
437 .subvol = op->subvol,
438 .inum = k->k.p.inode,
442 BUG_ON(!inum.subvol);
444 bch2_bkey_buf_init(&sk);
445 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
448 bch2_trans_begin(&trans);
450 k = bch2_keylist_front(keys);
451 bch2_bkey_buf_copy(&sk, c, k);
453 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol,
454 &sk.k->k.p.snapshot);
455 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
460 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
461 bkey_start_pos(&sk.k->k),
462 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
464 ret = bch2_extent_update(&trans, inum, &iter, sk.k,
465 &op->res, op_journal_seq(op),
466 op->new_i_size, &op->i_sectors_delta,
467 op->flags & BCH_WRITE_CHECK_ENOSPC);
468 bch2_trans_iter_exit(&trans, &iter);
470 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
475 if (bkey_cmp(iter.pos, k->k.p) >= 0)
476 bch2_keylist_pop_front(&op->insert_keys);
478 bch2_cut_front(iter.pos, k);
479 } while (!bch2_keylist_empty(keys));
481 bch2_trans_exit(&trans);
482 bch2_bkey_buf_exit(&sk, c);
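/*
 * Roughly: submit the write to every device the key has a pointer to,
 * cloning the bio for all but the last pointer; devices we can't get an
 * ioref for are completed immediately with BLK_STS_REMOVED so the error is
 * handled by the normal endio path.
 */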
489 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
490 enum bch_data_type type,
491 const struct bkey_i *k)
493 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
494 const struct bch_extent_ptr *ptr;
495 struct bch_write_bio *n;
498 BUG_ON(c->opts.nochanges);
500 bkey_for_each_ptr(ptrs, ptr) {
501 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
504 ca = bch_dev_bkey_exists(c, ptr->dev);
506 if (to_entry(ptr + 1) < ptrs.end) {
507 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
508 GFP_NOIO, &ca->replica_set));
510 n->bio.bi_end_io = wbio->bio.bi_end_io;
511 n->bio.bi_private = wbio->bio.bi_private;
516 n->bio.bi_opf = wbio->bio.bi_opf;
517 bio_inc_remaining(&wbio->bio);
525 n->have_ioref = bch2_dev_get_ioref(ca,
526 type == BCH_DATA_btree ? READ : WRITE);
527 n->submit_time = local_clock();
528 n->bio.bi_iter.bi_sector = ptr->offset;
530 if (likely(n->have_ioref)) {
531 this_cpu_add(ca->io_done->sectors[WRITE][type],
532 bio_sectors(&n->bio));
534 bio_set_dev(&n->bio, ca->disk_sb.bdev);
537 n->bio.bi_status = BLK_STS_REMOVED;
543 static void __bch2_write(struct closure *);
545 static void bch2_write_done(struct closure *cl)
547 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
548 struct bch_fs *c = op->c;
550 if (!op->error && (op->flags & BCH_WRITE_FLUSH))
551 op->error = bch2_journal_error(&c->journal);
553 bch2_disk_reservation_put(c, &op->res);
554 percpu_ref_put(&c->writes);
555 bch2_keylist_free(&op->insert_keys, op->inline_keys);
557 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
561 closure_debug_destroy(cl);
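/*
 * After an IO error: drop the extent pointers that landed on failed devices,
 * and if a key loses all of its pointers drop the key itself, compacting the
 * keylist in place.
 */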
568 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
570 struct keylist *keys = &op->insert_keys;
571 struct bch_extent_ptr *ptr;
572 struct bkey_i *src, *dst = keys->keys, *n;
574 for (src = keys->keys; src != keys->top; src = n) {
577 if (bkey_extent_is_direct_data(&src->k)) {
578 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
579 test_bit(ptr->dev, op->failed.d));
581 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
586 memmove_u64s_down(dst, src, src->u64s);
587 dst = bkey_next(dst);
595 * __bch2_write_index - after a write, update the index to point to new data
597 static void __bch2_write_index(struct bch_write_op *op)
599 struct bch_fs *c = op->c;
600 struct keylist *keys = &op->insert_keys;
605 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
606 ret = bch2_write_drop_io_error_ptrs(op);
612 * probably not the ideal place to hook this in, but I don't
613 * particularly want to plumb io_opts all the way through the btree
614 * update stack right now
616 for_each_keylist_key(keys, k) {
617 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
619 if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
620 bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
624 if (!bch2_keylist_empty(keys)) {
625 u64 sectors_start = keylist_sectors(keys);
626 int ret = op->index_update_fn(op);
628 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
629 BUG_ON(keylist_sectors(keys) && !ret);
631 op->written += sectors_start - keylist_sectors(keys);
634 bch_err_inum_ratelimited(c, op->pos.inode,
635 "write error while doing btree update: %s", bch2_err_str(ret));
640 /* If a bucket wasn't written, we can't erasure code it: */
641 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
642 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
644 bch2_open_buckets_put(c, &op->open_buckets);
647 keys->top = keys->keys;
652 static void bch2_write_index(struct closure *cl)
654 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
655 struct bch_fs *c = op->c;
657 __bch2_write_index(op);
659 if (!(op->flags & BCH_WRITE_DONE)) {
660 continue_at(cl, __bch2_write, index_update_wq(op));
661 } else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
662 bch2_journal_flush_seq_async(&c->journal,
665 continue_at(cl, bch2_write_done, index_update_wq(op));
667 continue_at_nobarrier(cl, bch2_write_done, NULL);
671 static void bch2_write_endio(struct bio *bio)
673 struct closure *cl = bio->bi_private;
674 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
675 struct bch_write_bio *wbio = to_wbio(bio);
676 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
677 struct bch_fs *c = wbio->c;
678 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
680 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
682 op->pos.offset - bio_sectors(bio), /* XXX definitely wrong */
683 "data write error: %s",
684 bch2_blk_status_to_str(bio->bi_status))) {
685 set_bit(wbio->dev, op->failed.d);
686 op->flags |= BCH_WRITE_IO_ERROR;
689 if (wbio->have_ioref) {
690 bch2_latency_acct(ca, wbio->submit_time, WRITE);
691 percpu_ref_put(&ca->io_ref);
695 bch2_bio_free_pages_pool(c, bio);
701 bio_endio(&parent->bio);
702 else if (!(op->flags & BCH_WRITE_SKIP_CLOSURE_PUT))
705 continue_at_nobarrier(cl, bch2_write_index, index_update_wq(op));
708 static void init_append_extent(struct bch_write_op *op,
709 struct write_point *wp,
710 struct bversion version,
711 struct bch_extent_crc_unpacked crc)
713 struct bch_fs *c = op->c;
714 struct bkey_i_extent *e;
716 op->pos.offset += crc.uncompressed_size;
718 e = bkey_extent_init(op->insert_keys.top);
720 e->k.size = crc.uncompressed_size;
721 e->k.version = version;
724 crc.compression_type ||
726 bch2_extent_crc_append(&e->k_i, crc);
728 bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, crc.compressed_size,
729 op->flags & BCH_WRITE_CACHED);
731 bch2_keylist_push(&op->insert_keys);
734 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
735 struct write_point *wp,
737 bool *page_alloc_failed,
740 struct bch_write_bio *wbio;
742 unsigned output_available =
743 min(wp->sectors_free << 9, src->bi_iter.bi_size);
744 unsigned pages = DIV_ROUND_UP(output_available +
746 ? ((unsigned long) buf & (PAGE_SIZE - 1))
749 pages = min(pages, BIO_MAX_VECS);
751 bio = bio_alloc_bioset(NULL, pages, 0,
752 GFP_NOIO, &c->bio_write);
753 wbio = wbio_init(bio);
754 wbio->put_bio = true;
755 /* copy WRITE_SYNC flag */
756 wbio->bio.bi_opf = src->bi_opf;
759 bch2_bio_map(bio, buf, output_available);
766 * We can't use the mempool for more than c->opts.encoded_extent_max
767 * worth of pages, but we'd like to allocate more if we can:
769 bch2_bio_alloc_pages_pool(c, bio,
770 min_t(unsigned, output_available,
771 c->opts.encoded_extent_max));
773 if (bio->bi_iter.bi_size < output_available)
775 bch2_bio_alloc_pages(bio,
777 bio->bi_iter.bi_size,
783 static int bch2_write_rechecksum(struct bch_fs *c,
784 struct bch_write_op *op,
785 unsigned new_csum_type)
787 struct bio *bio = &op->wbio.bio;
788 struct bch_extent_crc_unpacked new_crc;
791 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
793 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
794 bch2_csum_type_is_encryption(new_csum_type))
795 new_csum_type = op->crc.csum_type;
797 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
799 op->crc.offset, op->crc.live_size,
804 bio_advance(bio, op->crc.offset << 9);
805 bio->bi_iter.bi_size = op->crc.live_size << 9;
810 static int bch2_write_decrypt(struct bch_write_op *op)
812 struct bch_fs *c = op->c;
813 struct nonce nonce = extent_nonce(op->version, op->crc);
814 struct bch_csum csum;
817 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
821 * If we need to decrypt data in the write path, we'll no longer be able
822 * to verify the existing checksum (poly1305 mac, in this case) after
823 * it's decrypted - this is the last point we'll be able to reverify the
826 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
827 if (bch2_crc_cmp(op->crc.csum, csum))
830 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
831 op->crc.csum_type = 0;
832 op->crc.csum = (struct bch_csum) { 0, 0 };
836 static enum prep_encoded_ret {
839 PREP_ENCODED_CHECKSUM_ERR,
840 PREP_ENCODED_DO_WRITE,
841 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
843 struct bch_fs *c = op->c;
844 struct bio *bio = &op->wbio.bio;
846 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
847 return PREP_ENCODED_OK;
849 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
851 /* Can we just write the entire extent as is? */
852 if (op->crc.uncompressed_size == op->crc.live_size &&
853 op->crc.compressed_size <= wp->sectors_free &&
854 (op->crc.compression_type == op->compression_type ||
855 op->incompressible)) {
856 if (!crc_is_compressed(op->crc) &&
857 op->csum_type != op->crc.csum_type &&
858 bch2_write_rechecksum(c, op, op->csum_type))
859 return PREP_ENCODED_CHECKSUM_ERR;
861 return PREP_ENCODED_DO_WRITE;
865 * If the data is compressed and we couldn't write the entire extent as
866 * is, we have to decompress it:
868 if (crc_is_compressed(op->crc)) {
869 struct bch_csum csum;
871 if (bch2_write_decrypt(op))
872 return PREP_ENCODED_CHECKSUM_ERR;
874 /* Last point we can still verify checksum: */
875 csum = bch2_checksum_bio(c, op->crc.csum_type,
876 extent_nonce(op->version, op->crc),
878 if (bch2_crc_cmp(op->crc.csum, csum))
879 return PREP_ENCODED_CHECKSUM_ERR;
881 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
882 return PREP_ENCODED_ERR;
886 * No longer have compressed data after this point - data might be
891 * If the data is checksummed and we're only writing a subset,
892 * rechecksum and adjust bio to point to currently live data:
894 if ((op->crc.live_size != op->crc.uncompressed_size ||
895 op->crc.csum_type != op->csum_type) &&
896 bch2_write_rechecksum(c, op, op->csum_type))
897 return PREP_ENCODED_CHECKSUM_ERR;
900 * If we want to compress the data, it has to be decrypted:
902 if ((op->compression_type ||
903 bch2_csum_type_is_encryption(op->crc.csum_type) !=
904 bch2_csum_type_is_encryption(op->csum_type)) &&
905 bch2_write_decrypt(op))
906 return PREP_ENCODED_CHECKSUM_ERR;
908 return PREP_ENCODED_OK;
911 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
914 struct bch_fs *c = op->c;
915 struct bio *src = &op->wbio.bio, *dst = src;
916 struct bvec_iter saved_iter;
918 unsigned total_output = 0, total_input = 0;
920 bool page_alloc_failed = false;
923 BUG_ON(!bio_sectors(src));
925 ec_buf = bch2_writepoint_ec_buf(c, wp);
927 switch (bch2_write_prep_encoded_data(op, wp)) {
928 case PREP_ENCODED_OK:
930 case PREP_ENCODED_ERR:
933 case PREP_ENCODED_CHECKSUM_ERR:
935 case PREP_ENCODED_DO_WRITE:
936 /* XXX look for bug here */
938 dst = bch2_write_bio_alloc(c, wp, src,
941 bio_copy_data(dst, src);
944 init_append_extent(op, wp, op->version, op->crc);
949 op->compression_type ||
951 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
952 (bch2_csum_type_is_encryption(op->csum_type) &&
953 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
954 dst = bch2_write_bio_alloc(c, wp, src,
960 saved_iter = dst->bi_iter;
963 struct bch_extent_crc_unpacked crc = { 0 };
964 struct bversion version = op->version;
965 size_t dst_len, src_len;
967 if (page_alloc_failed &&
968 dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
969 dst->bi_iter.bi_size < c->opts.encoded_extent_max)
972 BUG_ON(op->compression_type &&
973 (op->flags & BCH_WRITE_DATA_ENCODED) &&
974 bch2_csum_type_is_encryption(op->crc.csum_type));
975 BUG_ON(op->compression_type && !bounce);
977 crc.compression_type = op->incompressible
978 ? BCH_COMPRESSION_TYPE_incompressible
979 : op->compression_type
980 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
981 op->compression_type)
983 if (!crc_is_compressed(crc)) {
984 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
985 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
988 dst_len = min_t(unsigned, dst_len,
989 c->opts.encoded_extent_max);
992 swap(dst->bi_iter.bi_size, dst_len);
993 bio_copy_data(dst, src);
994 swap(dst->bi_iter.bi_size, dst_len);
1000 BUG_ON(!src_len || !dst_len);
1002 if (bch2_csum_type_is_encryption(op->csum_type)) {
1003 if (bversion_zero(version)) {
1004 version.lo = atomic64_inc_return(&c->key_version);
1006 crc.nonce = op->nonce;
1007 op->nonce += src_len >> 9;
1011 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1012 !crc_is_compressed(crc) &&
1013 bch2_csum_type_is_encryption(op->crc.csum_type) ==
1014 bch2_csum_type_is_encryption(op->csum_type)) {
1015 u8 compression_type = crc.compression_type;
1016 u16 nonce = crc.nonce;
1018 * Note: when we're using rechecksum(), we need to be
1019 * checksumming @src because it has all the data our
1020 * existing checksum covers - if we bounced (because we
1021 * were trying to compress), @dst will only have the
1022 * part of the data the new checksum will cover.
1024 * But normally we want to be checksumming post bounce,
1025 * because part of the reason for bouncing is so the
1026 * data can't be modified (by userspace) while it's in
1029 if (bch2_rechecksum_bio(c, src, version, op->crc,
1032 bio_sectors(src) - (src_len >> 9),
1036 * rechecksum_bio sets compression_type on crc from op->crc,
1037 * this isn't always correct as sometimes we're changing
1038 * an extent from uncompressed to incompressible.
1040 crc.compression_type = compression_type;
1043 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1044 bch2_rechecksum_bio(c, src, version, op->crc,
1047 bio_sectors(src) - (src_len >> 9),
1051 crc.compressed_size = dst_len >> 9;
1052 crc.uncompressed_size = src_len >> 9;
1053 crc.live_size = src_len >> 9;
1055 swap(dst->bi_iter.bi_size, dst_len);
1056 ret = bch2_encrypt_bio(c, op->csum_type,
1057 extent_nonce(version, crc), dst);
1061 crc.csum = bch2_checksum_bio(c, op->csum_type,
1062 extent_nonce(version, crc), dst);
1063 crc.csum_type = op->csum_type;
1064 swap(dst->bi_iter.bi_size, dst_len);
1067 init_append_extent(op, wp, version, crc);
1070 bio_advance(dst, dst_len);
1071 bio_advance(src, src_len);
1072 total_output += dst_len;
1073 total_input += src_len;
1074 } while (dst->bi_iter.bi_size &&
1075 src->bi_iter.bi_size &&
1077 !bch2_keylist_realloc(&op->insert_keys,
1079 ARRAY_SIZE(op->inline_keys),
1080 BKEY_EXTENT_U64s_MAX));
1082 more = src->bi_iter.bi_size != 0;
1084 dst->bi_iter = saved_iter;
1086 if (dst == src && more) {
1087 BUG_ON(total_output != total_input);
1089 dst = bio_split(src, total_input >> 9,
1090 GFP_NOIO, &c->bio_write);
1091 wbio_init(dst)->put_bio = true;
1092 /* copy WRITE_SYNC flag */
1093 dst->bi_opf = src->bi_opf;
1096 dst->bi_iter.bi_size = total_output;
1101 bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
1104 if (to_wbio(dst)->bounce)
1105 bch2_bio_free_pages_pool(c, dst);
1106 if (to_wbio(dst)->put_bio)
1112 static void __bch2_write(struct closure *cl)
1114 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1115 struct bch_fs *c = op->c;
1116 struct write_point *wp;
1117 struct bio *bio = NULL;
1118 bool skip_put = true;
1119 unsigned nofs_flags;
1122 nofs_flags = memalloc_nofs_save();
1124 memset(&op->failed, 0, sizeof(op->failed));
1127 struct bkey_i *key_to_write;
1128 unsigned key_to_write_offset = op->insert_keys.top_p -
1129 op->insert_keys.keys_p;
1131 /* +1 for possible cache device: */
1132 if (op->open_buckets.nr + op->nr_replicas + 1 >
1133 ARRAY_SIZE(op->open_buckets.v))
1136 if (bch2_keylist_realloc(&op->insert_keys,
1138 ARRAY_SIZE(op->inline_keys),
1139 BKEY_EXTENT_U64s_MAX))
1143 * The copygc thread is now global, which means it's no longer
1144 * freeing up space on specific disks, which means that
1145 * allocations for specific disks may hang arbitrarily long:
1147 wp = bch2_alloc_sectors_start(c,
1149 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
1153 op->nr_replicas_required,
1156 (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
1157 BCH_WRITE_ONLY_SPECIFIED_DEVS)) ? NULL : cl);
1161 if (unlikely(wp != ERR_PTR(-EAGAIN))) {
1170 * It's possible for the allocator to fail, put us on the
1171 * freelist waitlist, and then succeed in one of various retry
1172 * paths: if that happens, we need to disable the skip_put
1173 * optimization because otherwise there won't necessarily be a
1174 * barrier before we free the bch_write_op:
1176 if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
1179 bch2_open_bucket_get(c, wp, &op->open_buckets);
1180 ret = bch2_write_extent(op, wp, &bio);
1181 bch2_alloc_sectors_done(c, wp);
1190 * for the skip_put optimization this has to be set
1191 * before we submit the bio:
1193 op->flags |= BCH_WRITE_DONE;
1196 bio->bi_end_io = bch2_write_endio;
1197 bio->bi_private = &op->cl;
1198 bio->bi_opf |= REQ_OP_WRITE;
1201 closure_get(bio->bi_private);
1203 op->flags |= BCH_WRITE_SKIP_CLOSURE_PUT;
1205 key_to_write = (void *) (op->insert_keys.keys_p +
1206 key_to_write_offset);
1208 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1213 continue_at(cl, bch2_write_index, index_update_wq(op));
1215 memalloc_nofs_restore(nofs_flags);
1219 op->flags |= BCH_WRITE_DONE;
1221 continue_at(cl, bch2_write_index, index_update_wq(op));
1225 * If the write can't all be submitted at once, we generally want to
1226 * block synchronously as that signals backpressure to the caller.
1228 * However, if we're running out of a workqueue, we can't block here
1229 * because we'll be blocking other work items from completing:
1231 if (current->flags & PF_WQ_WORKER) {
1232 continue_at(cl, bch2_write_index, index_update_wq(op));
1238 if (!bch2_keylist_empty(&op->insert_keys)) {
1239 __bch2_write_index(op);
1242 op->flags |= BCH_WRITE_DONE;
1243 continue_at_nobarrier(cl, bch2_write_done, NULL);
1251 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1253 struct closure *cl = &op->cl;
1254 struct bio *bio = &op->wbio.bio;
1255 struct bvec_iter iter;
1256 struct bkey_i_inline_data *id;
1260 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1262 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1263 ARRAY_SIZE(op->inline_keys),
1264 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1270 sectors = bio_sectors(bio);
1271 op->pos.offset += sectors;
1273 id = bkey_inline_data_init(op->insert_keys.top);
1275 id->k.version = op->version;
1276 id->k.size = sectors;
1278 iter = bio->bi_iter;
1279 iter.bi_size = data_len;
1280 memcpy_from_bio(id->v.data, bio, iter);
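/* bkey values are sized in u64s: zero-pad the inline data up to an 8 byte boundary */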
1282 while (data_len & 7)
1283 id->v.data[data_len++] = '\0';
1284 set_bkey_val_bytes(&id->k, data_len);
1285 bch2_keylist_push(&op->insert_keys);
1287 op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1288 op->flags |= BCH_WRITE_DONE;
1290 continue_at_nobarrier(cl, bch2_write_index, NULL);
1293 bch2_write_done(&op->cl);
1297 * bch2_write - handle a write to a cache device or flash only volume
1299 * This is the starting point for any data to end up in a cache device; it could
1300 * be from a normal write, or a writeback write, or a write to a flash only
1301 * volume - it's also used by the moving garbage collector to compact data in
1302 * mostly empty buckets.
1304 * It first writes the data to the cache, creating a list of keys to be inserted
1305 * (if the data won't fit in a single open bucket, there will be multiple keys);
1306 * after the data is written it calls bch_journal, and after the keys have been
1307 * added to the next journal write they're inserted into the btree.
1309 * If op->discard is true, instead of inserting the data it invalidates the
1310 * region of the cache represented by op->bio and op->inode.
1312 void bch2_write(struct closure *cl)
1314 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1315 struct bio *bio = &op->wbio.bio;
1316 struct bch_fs *c = op->c;
1319 BUG_ON(!op->nr_replicas);
1320 BUG_ON(!op->write_point.v);
1321 BUG_ON(!bkey_cmp(op->pos, POS_MAX));
1323 op->start_time = local_clock();
1324 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1325 wbio_init(bio)->put_bio = false;
1327 if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
1328 bch_err_inum_ratelimited(c, op->pos.inode,
1329 "misaligned write");
1334 if (c->opts.nochanges ||
1335 !percpu_ref_tryget_live(&c->writes)) {
1340 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1341 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1343 data_len = min_t(u64, bio->bi_iter.bi_size,
1344 op->new_i_size - (op->pos.offset << 9));
1346 if (c->opts.inline_data &&
1347 data_len <= min(block_bytes(c) / 2, 1024U)) {
1348 bch2_write_data_inline(op, data_len);
1352 continue_at_nobarrier(cl, __bch2_write, NULL);
1355 bch2_disk_reservation_put(c, &op->res);
1358 EBUG_ON(cl->parent);
1359 closure_debug_destroy(cl);
1366 /* Cache promotion on read */
1370 struct rcu_head rcu;
1373 struct rhash_head hash;
1376 struct data_update write;
1377 struct bio_vec bi_inline_vecs[]; /* must be last */
1380 static const struct rhashtable_params bch_promote_params = {
1381 .head_offset = offsetof(struct promote_op, hash),
1382 .key_offset = offsetof(struct promote_op, pos),
1383 .key_len = sizeof(struct bpos),
1386 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
1388 struct bch_io_opts opts,
1391 if (!(flags & BCH_READ_MAY_PROMOTE))
1394 if (!opts.promote_target)
1397 if (bch2_bkey_has_target(c, k, opts.promote_target))
1400 if (bch2_target_congested(c, opts.promote_target)) {
1401 /* XXX trace this */
1405 if (rhashtable_lookup_fast(&c->promote_table, &pos,
1406 bch_promote_params))
1412 static void promote_free(struct bch_fs *c, struct promote_op *op)
1416 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1417 bch_promote_params);
1419 percpu_ref_put(&c->writes);
1423 static void promote_done(struct closure *cl)
1425 struct promote_op *op =
1426 container_of(cl, struct promote_op, cl);
1427 struct bch_fs *c = op->write.op.c;
1429 bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1432 bch2_data_update_exit(&op->write);
1433 promote_free(c, op);
1436 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1438 struct closure *cl = &op->cl;
1439 struct bio *bio = &op->write.op.wbio.bio;
1441 trace_and_count(op->write.op.c, read_promote, &rbio->bio);
1443 /* we now own pages: */
1444 BUG_ON(!rbio->bounce);
1445 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1447 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1448 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1449 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1451 closure_init(cl, NULL);
1452 bch2_data_update_read_done(&op->write, rbio->pick.crc, cl);
1453 closure_return_with_destructor(cl, promote_done);
1456 static struct promote_op *__promote_alloc(struct bch_fs *c,
1457 enum btree_id btree_id,
1460 struct extent_ptr_decoded *pick,
1461 struct bch_io_opts opts,
1463 struct bch_read_bio **rbio)
1465 struct promote_op *op = NULL;
1467 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1470 if (!percpu_ref_tryget_live(&c->writes))
1473 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
1477 op->start_time = local_clock();
1481 * We don't use the mempool here because extents that aren't
1482 * checksummed or compressed can be too big for the mempool:
1484 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1485 sizeof(struct bio_vec) * pages,
1490 rbio_init(&(*rbio)->bio, opts);
1491 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
1493 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
1497 (*rbio)->bounce = true;
1498 (*rbio)->split = true;
1499 (*rbio)->kmalloc = true;
1501 if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1502 bch_promote_params))
1505 bio = &op->write.op.wbio.bio;
1506 bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
1508 ret = bch2_data_update_init(c, &op->write,
1509 writepoint_hashed((unsigned long) current),
1511 (struct data_update_opts) {
1512 .target = opts.promote_target,
1513 .extra_replicas = 1,
1514 .write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
1522 bio_free_pages(&(*rbio)->bio);
1526 percpu_ref_put(&c->writes);
1531 static struct promote_op *promote_alloc(struct bch_fs *c,
1532 struct bvec_iter iter,
1534 struct extent_ptr_decoded *pick,
1535 struct bch_io_opts opts,
1537 struct bch_read_bio **rbio,
1541 bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1542 /* data might have to be decompressed in the write path: */
1543 unsigned sectors = promote_full
1544 ? max(pick->crc.compressed_size, pick->crc.live_size)
1545 : bvec_iter_sectors(iter);
1546 struct bpos pos = promote_full
1547 ? bkey_start_pos(k.k)
1548 : POS(k.k->p.inode, iter.bi_sector);
1549 struct promote_op *promote;
1551 if (!should_promote(c, k, pos, opts, flags))
1554 promote = __promote_alloc(c,
1555 k.k->type == KEY_TYPE_reflink_v
1558 k, pos, pick, opts, sectors, rbio);
1563 *read_full = promote_full;
1569 #define READ_RETRY_AVOID 1
1570 #define READ_RETRY 2
1575 RBIO_CONTEXT_HIGHPRI,
1576 RBIO_CONTEXT_UNBOUND,
1579 static inline struct bch_read_bio *
1580 bch2_rbio_parent(struct bch_read_bio *rbio)
1582 return rbio->split ? rbio->parent : rbio;
1586 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
1587 enum rbio_context context,
1588 struct workqueue_struct *wq)
1590 if (context <= rbio->context) {
1593 rbio->work.func = fn;
1594 rbio->context = context;
1595 queue_work(wq, &rbio->work);
1599 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
1601 BUG_ON(rbio->bounce && !rbio->split);
1604 promote_free(rbio->c, rbio->promote);
1605 rbio->promote = NULL;
1608 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
1611 struct bch_read_bio *parent = rbio->parent;
1616 bio_put(&rbio->bio);
1625 * Only called on a top level bch_read_bio to complete an entire read request,
1628 static void bch2_rbio_done(struct bch_read_bio *rbio)
1630 if (rbio->start_time)
1631 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
1633 bio_endio(&rbio->bio);
1636 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
1637 struct bvec_iter bvec_iter,
1638 struct bch_io_failures *failed,
1641 struct btree_trans trans;
1642 struct btree_iter iter;
1647 flags &= ~BCH_READ_LAST_FRAGMENT;
1648 flags |= BCH_READ_MUST_CLONE;
1650 bch2_bkey_buf_init(&sk);
1651 bch2_trans_init(&trans, c, 0, 0);
1653 bch2_trans_iter_init(&trans, &iter, rbio->data_btree,
1654 rbio->read_pos, BTREE_ITER_SLOTS);
1656 rbio->bio.bi_status = 0;
1658 k = bch2_btree_iter_peek_slot(&iter);
1662 bch2_bkey_buf_reassemble(&sk, c, k);
1663 k = bkey_i_to_s_c(sk.k);
1664 bch2_trans_unlock(&trans);
1666 if (!bch2_bkey_matches_ptr(c, k,
1668 rbio->data_pos.offset -
1669 rbio->pick.crc.offset)) {
1670 /* extent we wanted to read no longer exists: */
1675 ret = __bch2_read_extent(&trans, rbio, bvec_iter,
1678 k, 0, failed, flags);
1679 if (ret == READ_RETRY)
1684 bch2_rbio_done(rbio);
1685 bch2_trans_iter_exit(&trans, &iter);
1686 bch2_trans_exit(&trans);
1687 bch2_bkey_buf_exit(&sk, c);
1690 rbio->bio.bi_status = BLK_STS_IOERR;
1694 static void bch2_rbio_retry(struct work_struct *work)
1696 struct bch_read_bio *rbio =
1697 container_of(work, struct bch_read_bio, work);
1698 struct bch_fs *c = rbio->c;
1699 struct bvec_iter iter = rbio->bvec_iter;
1700 unsigned flags = rbio->flags;
1701 subvol_inum inum = {
1702 .subvol = rbio->subvol,
1703 .inum = rbio->read_pos.inode,
1705 struct bch_io_failures failed = { .nr = 0 };
1707 trace_and_count(c, read_retry, &rbio->bio);
1709 if (rbio->retry == READ_RETRY_AVOID)
1710 bch2_mark_io_failure(&failed, &rbio->pick);
1712 rbio->bio.bi_status = 0;
1714 rbio = bch2_rbio_free(rbio);
1716 flags |= BCH_READ_IN_RETRY;
1717 flags &= ~BCH_READ_MAY_PROMOTE;
1719 if (flags & BCH_READ_NODECODE) {
1720 bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
1722 flags &= ~BCH_READ_LAST_FRAGMENT;
1723 flags |= BCH_READ_MUST_CLONE;
1725 __bch2_read(c, rbio, iter, inum, &failed, flags);
1729 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
1732 rbio->retry = retry;
1734 if (rbio->flags & BCH_READ_IN_RETRY)
1737 if (retry == READ_ERR) {
1738 rbio = bch2_rbio_free(rbio);
1740 rbio->bio.bi_status = error;
1741 bch2_rbio_done(rbio);
1743 bch2_rbio_punt(rbio, bch2_rbio_retry,
1744 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
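/*
 * Checksum "narrowing", roughly: if we read and verified a checksum covering
 * more data than the part we actually wanted, rewrite the extent with a new
 * checksum covering just the live portion, so future partial reads don't have
 * to read (and checksum) the whole extent.
 */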
1748 static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
1749 struct bch_read_bio *rbio)
1751 struct bch_fs *c = rbio->c;
1752 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
1753 struct bch_extent_crc_unpacked new_crc;
1754 struct btree_iter iter;
1759 if (crc_is_compressed(rbio->pick.crc))
1762 bch2_trans_iter_init(trans, &iter, rbio->data_btree, rbio->data_pos,
1763 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1764 k = bch2_btree_iter_peek_slot(&iter);
1765 if ((ret = bkey_err(k)))
1768 if (bversion_cmp(k.k->version, rbio->version) ||
1769 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
1772 /* Extent was merged? */
1773 if (bkey_start_offset(k.k) < data_offset ||
1774 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
1777 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
1778 rbio->pick.crc, NULL, &new_crc,
1779 bkey_start_offset(k.k) - data_offset, k.k->size,
1780 rbio->pick.crc.csum_type)) {
1781 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
1787 * going to be temporarily appending another checksum entry:
1789 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
1790 sizeof(struct bch_extent_crc128));
1791 if ((ret = PTR_ERR_OR_ZERO(new)))
1794 bkey_reassemble(new, k);
1796 if (!bch2_bkey_narrow_crcs(new, new_crc))
1799 ret = bch2_trans_update(trans, &iter, new,
1800 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1802 bch2_trans_iter_exit(trans, &iter);
1806 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
1808 bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
1809 __bch2_rbio_narrow_crcs(&trans, rbio));
1812 /* Inner part that may run in process context */
1813 static void __bch2_read_endio(struct work_struct *work)
1815 struct bch_read_bio *rbio =
1816 container_of(work, struct bch_read_bio, work);
1817 struct bch_fs *c = rbio->c;
1818 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1819 struct bio *src = &rbio->bio;
1820 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
1821 struct bvec_iter dst_iter = rbio->bvec_iter;
1822 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
1823 struct nonce nonce = extent_nonce(rbio->version, crc);
1824 unsigned nofs_flags;
1825 struct bch_csum csum;
1828 nofs_flags = memalloc_nofs_save();
1830 /* Reset iterator for checksumming and copying bounced data: */
1832 src->bi_iter.bi_size = crc.compressed_size << 9;
1833 src->bi_iter.bi_idx = 0;
1834 src->bi_iter.bi_bvec_done = 0;
1836 src->bi_iter = rbio->bvec_iter;
1839 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
1840 if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
1845 * We need to rework the narrow_crcs path to deliver the read completion
1846 * first, and then punt to a different workqueue, otherwise we're
1847 * holding up reads while doing btree updates which is bad for memory
1850 if (unlikely(rbio->narrow_crcs))
1851 bch2_rbio_narrow_crcs(rbio);
1853 if (rbio->flags & BCH_READ_NODECODE)
1856 /* Adjust crc to point to subset of data we want: */
1857 crc.offset += rbio->offset_into_extent;
1858 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
1860 if (crc_is_compressed(crc)) {
1861 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1865 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
1866 goto decompression_err;
1868 /* don't need to decrypt the entire bio: */
1869 nonce = nonce_add(nonce, crc.offset << 9);
1870 bio_advance(src, crc.offset << 9);
1872 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
1873 src->bi_iter.bi_size = dst_iter.bi_size;
1875 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1880 struct bvec_iter src_iter = src->bi_iter;
1881 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1885 if (rbio->promote) {
1887 * Re encrypt data we decrypted, so it's consistent with
1890 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
1894 promote_start(rbio->promote, rbio);
1895 rbio->promote = NULL;
1898 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
1899 rbio = bch2_rbio_free(rbio);
1900 bch2_rbio_done(rbio);
1903 memalloc_nofs_restore(nofs_flags);
1907 * Checksum error: if the bio wasn't bounced, we may have been
1908 * reading into buffers owned by userspace (that userspace can
1909 * scribble over) - retry the read, bouncing it this time:
1911 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
1912 rbio->flags |= BCH_READ_MUST_BOUNCE;
1913 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
1917 bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector,
1918 "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
1919 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
1920 csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
1921 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
1924 bch_err_inum_ratelimited(c, rbio->read_pos.inode,
1925 "decompression error");
1926 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1929 bch_err_inum_ratelimited(c, rbio->read_pos.inode,
1931 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
1935 static void bch2_read_endio(struct bio *bio)
1937 struct bch_read_bio *rbio =
1938 container_of(bio, struct bch_read_bio, bio);
1939 struct bch_fs *c = rbio->c;
1940 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
1941 struct workqueue_struct *wq = NULL;
1942 enum rbio_context context = RBIO_CONTEXT_NULL;
1944 if (rbio->have_ioref) {
1945 bch2_latency_acct(ca, rbio->submit_time, READ);
1946 percpu_ref_put(&ca->io_ref);
1950 rbio->bio.bi_end_io = rbio->end_io;
1952 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
1953 rbio->read_pos.inode,
1954 rbio->read_pos.offset,
1955 "data read error: %s",
1956 bch2_blk_status_to_str(bio->bi_status))) {
1957 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
1961 if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
1962 ptr_stale(ca, &rbio->pick.ptr)) {
1963 trace_and_count(c, read_reuse_race, &rbio->bio);
1965 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
1966 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
1968 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
1972 if (rbio->narrow_crcs ||
1974 crc_is_compressed(rbio->pick.crc) ||
1975 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
1976 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
1977 else if (rbio->pick.crc.csum_type)
1978 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
1980 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
1983 int __bch2_read_indirect_extent(struct btree_trans *trans,
1984 unsigned *offset_into_extent,
1985 struct bkey_buf *orig_k)
1987 struct btree_iter iter;
1992 reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
1993 *offset_into_extent;
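/* a reflink pointer's idx is a position in the reflink btree (inode 0) */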
1995 bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink,
1996 POS(0, reflink_offset),
1998 k = bch2_btree_iter_peek_slot(&iter);
2003 if (k.k->type != KEY_TYPE_reflink_v &&
2004 k.k->type != KEY_TYPE_indirect_inline_data) {
2005 bch_err_inum_ratelimited(trans->c, orig_k->k->k.p.inode,
2006 "%llu len %u points to nonexistent indirect extent %llu",
2007 orig_k->k->k.p.offset,
2010 bch2_inconsistent_error(trans->c);
2015 *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
2016 bch2_bkey_buf_reassemble(orig_k, trans->c, k);
2018 bch2_trans_iter_exit(trans, &iter);
2022 static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
2024 struct bch_extent_ptr ptr)
2026 struct bch_fs *c = trans->c;
2027 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
2028 struct btree_iter iter;
2029 struct printbuf buf = PRINTBUF;
2032 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
2033 PTR_BUCKET_POS(c, &ptr),
2036 prt_printf(&buf, "Attempting to read from stale dirty pointer:");
2037 printbuf_indent_add(&buf, 2);
2040 bch2_bkey_val_to_text(&buf, c, k);
2043 prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
2045 ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
2048 bch2_bkey_val_to_text(&buf, c, k);
2051 bch2_fs_inconsistent(c, "%s", buf.buf);
2053 bch2_trans_iter_exit(trans, &iter);
2054 printbuf_exit(&buf);
2057 int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
2058 struct bvec_iter iter, struct bpos read_pos,
2059 enum btree_id data_btree, struct bkey_s_c k,
2060 unsigned offset_into_extent,
2061 struct bch_io_failures *failed, unsigned flags)
2063 struct bch_fs *c = trans->c;
2064 struct extent_ptr_decoded pick;
2065 struct bch_read_bio *rbio = NULL;
2066 struct bch_dev *ca = NULL;
2067 struct promote_op *promote = NULL;
2068 bool bounce = false, read_full = false, narrow_crcs = false;
2069 struct bpos data_pos = bkey_start_pos(k.k);
2072 if (bkey_extent_is_inline_data(k.k)) {
2073 unsigned bytes = min_t(unsigned, iter.bi_size,
2074 bkey_inline_data_bytes(k.k));
2076 swap(iter.bi_size, bytes);
2077 memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
2078 swap(iter.bi_size, bytes);
2079 bio_advance_iter(&orig->bio, &iter, bytes);
2080 zero_fill_bio_iter(&orig->bio, iter);
2084 pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
2086 /* hole or reservation - just zero fill: */
2091 bch_err_inum_ratelimited(c, k.k->p.inode,
2092 "no device to read from");
2096 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
2099 * Stale dirty pointers are treated as IO errors, but @failed isn't
2100 * allocated unless we're in the retry path - so if we're not in the
2101 * retry path, don't check here, it'll be caught in bch2_read_endio()
2102 * and we'll end up in the retry path:
2104 if ((flags & BCH_READ_IN_RETRY) &&
2106 unlikely(ptr_stale(ca, &pick.ptr))) {
2107 read_from_stale_dirty_pointer(trans, k, pick.ptr);
2108 bch2_mark_io_failure(failed, &pick);
2113 * Unlock the iterator while the btree node's lock is still in
2114 * cache, before doing the IO:
2116 bch2_trans_unlock(trans);
2118 if (flags & BCH_READ_NODECODE) {
2120 * can happen if we retry, and the extent we were going to read
2121 * has been merged in the meantime:
2123 if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
2126 iter.bi_size = pick.crc.compressed_size << 9;
2130 if (!(flags & BCH_READ_LAST_FRAGMENT) ||
2131 bio_flagged(&orig->bio, BIO_CHAIN))
2132 flags |= BCH_READ_MUST_CLONE;
2134 narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
2135 bch2_can_narrow_extent_crcs(k, pick.crc);
2137 if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
2138 flags |= BCH_READ_MUST_BOUNCE;
2140 EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
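/*
 * Roughly: we have to bounce (read into our own buffer rather than directly
 * into the caller's pages) if the data is compressed, if a checksummed extent
 * is only partially being read, or if checksummed/encrypted data would land
 * in user-mapped memory that could be modified under us:
 */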
2142 if (crc_is_compressed(pick.crc) ||
2143 (pick.crc.csum_type != BCH_CSUM_none &&
2144 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
2145 (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
2146 (flags & BCH_READ_USER_MAPPED)) ||
2147 (flags & BCH_READ_MUST_BOUNCE)))) {
2152 if (orig->opts.promote_target)
2153 promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
2154 &rbio, &bounce, &read_full);
2157 EBUG_ON(crc_is_compressed(pick.crc));
2158 EBUG_ON(pick.crc.csum_type &&
2159 (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
2160 bvec_iter_sectors(iter) != pick.crc.live_size ||
2162 offset_into_extent));
2164 data_pos.offset += offset_into_extent;
2165 pick.ptr.offset += pick.crc.offset +
2167 offset_into_extent = 0;
2168 pick.crc.compressed_size = bvec_iter_sectors(iter);
2169 pick.crc.uncompressed_size = bvec_iter_sectors(iter);
2170 pick.crc.offset = 0;
2171 pick.crc.live_size = bvec_iter_sectors(iter);
2172 offset_into_extent = 0;
2177 * promote already allocated bounce rbio:
2178 * promote needs to allocate a bio big enough for uncompressing
2179 * data in the write path, but we're not going to use it all
2182 EBUG_ON(rbio->bio.bi_iter.bi_size <
2183 pick.crc.compressed_size << 9);
2184 rbio->bio.bi_iter.bi_size =
2185 pick.crc.compressed_size << 9;
2186 } else if (bounce) {
2187 unsigned sectors = pick.crc.compressed_size;
2189 rbio = rbio_init(bio_alloc_bioset(NULL,
2190 DIV_ROUND_UP(sectors, PAGE_SECTORS),
2193 &c->bio_read_split),
2196 bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
2197 rbio->bounce = true;
2199 } else if (flags & BCH_READ_MUST_CLONE) {
2201 * Have to clone if there were any splits, due to error
2202 * reporting issues (if a split errored, and retrying didn't
2203 * work, when it reports the error to its parent (us) we don't
2204 * know if the error was from our bio, and we should retry, or
2205 * from the whole bio, in which case we don't want to retry and
2208 rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
2209 &c->bio_read_split),
2211 rbio->bio.bi_iter = iter;
2215 rbio->bio.bi_iter = iter;
2216 EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
2219 EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
2222 rbio->submit_time = local_clock();
2224 rbio->parent = orig;
2226 rbio->end_io = orig->bio.bi_end_io;
2227 rbio->bvec_iter = iter;
2228 rbio->offset_into_extent= offset_into_extent;
2229 rbio->flags = flags;
2230 rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
2231 rbio->narrow_crcs = narrow_crcs;
2235 /* XXX: only initialize this if needed */
2236 rbio->devs_have = bch2_bkey_devs(k);
2238 rbio->subvol = orig->subvol;
2239 rbio->read_pos = read_pos;
2240 rbio->data_btree = data_btree;
2241 rbio->data_pos = data_pos;
2242 rbio->version = k.k->version;
2243 rbio->promote = promote;
2244 INIT_WORK(&rbio->work, NULL);
2246 rbio->bio.bi_opf = orig->bio.bi_opf;
2247 rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
2248 rbio->bio.bi_end_io = bch2_read_endio;
2251 trace_and_count(c, read_bounce, &rbio->bio);
2253 this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
2254 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
2257 * If it's being moved internally, we don't want to flag it as a cache
2260 if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
2261 bch2_bucket_io_time_reset(trans, pick.ptr.dev,
2262 PTR_BUCKET_NR(ca, &pick.ptr), READ);
2264 if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
2265 bio_inc_remaining(&orig->bio);
2266 trace_and_count(c, read_split, &orig->bio);
2269 if (!rbio->pick.idx) {
2270 if (!rbio->have_ioref) {
2271 bch_err_inum_ratelimited(c, k.k->p.inode,
2272 "no device to read from");
2273 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2277 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
2278 bio_sectors(&rbio->bio));
2279 bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
2281 if (likely(!(flags & BCH_READ_IN_RETRY)))
2282 submit_bio(&rbio->bio);
2284 submit_bio_wait(&rbio->bio);
2286 /* Attempting reconstruct read: */
2287 if (bch2_ec_read_extent(c, rbio)) {
2288 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2292 if (likely(!(flags & BCH_READ_IN_RETRY)))
2293 bio_endio(&rbio->bio);
2296 if (likely(!(flags & BCH_READ_IN_RETRY))) {
2301 rbio->context = RBIO_CONTEXT_UNBOUND;
2302 bch2_read_endio(&rbio->bio);
2305 rbio = bch2_rbio_free(rbio);
2307 if (ret == READ_RETRY_AVOID) {
2308 bch2_mark_io_failure(failed, &pick);
2319 if (flags & BCH_READ_IN_RETRY)
2322 orig->bio.bi_status = BLK_STS_IOERR;
2327 * won't normally happen in the BCH_READ_NODECODE
2328 * (bch2_move_extent()) path, but if we retry and the extent we wanted
2329 * to read no longer exists we have to signal that:
2331 if (flags & BCH_READ_NODECODE)
2334 zero_fill_bio_iter(&orig->bio, iter);
2336 if (flags & BCH_READ_LAST_FRAGMENT)
2337 bch2_rbio_done(orig);
2341 void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
2342 struct bvec_iter bvec_iter, subvol_inum inum,
2343 struct bch_io_failures *failed, unsigned flags)
2345 struct btree_trans trans;
2346 struct btree_iter iter;
2352 BUG_ON(flags & BCH_READ_NODECODE);
2354 bch2_bkey_buf_init(&sk);
2355 bch2_trans_init(&trans, c, 0, 0);
2357 bch2_trans_begin(&trans);
2358 iter = (struct btree_iter) { NULL };
2360 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2364 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
2365 SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
2368 unsigned bytes, sectors, offset_into_extent;
2369 enum btree_id data_btree = BTREE_ID_extents;
2372 * read_extent -> io_time_reset may cause a transaction restart
2373 * without returning an error; we need to check for that here:
2375 ret = bch2_trans_relock(&trans);
2379 bch2_btree_iter_set_pos(&iter,
2380 POS(inum.inum, bvec_iter.bi_sector));
2382 k = bch2_btree_iter_peek_slot(&iter);
2387 offset_into_extent = iter.pos.offset -
2388 bkey_start_offset(k.k);
2389 sectors = k.k->size - offset_into_extent;
2391 bch2_bkey_buf_reassemble(&sk, c, k);
2393 ret = bch2_read_indirect_extent(&trans, &data_btree,
2394 &offset_into_extent, &sk);
2398 k = bkey_i_to_s_c(sk.k);
2401 * With indirect extents, the amount of data to read is the min
2402 * of the original extent and the indirect extent:
2404 sectors = min(sectors, k.k->size - offset_into_extent);
2406 bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
2407 swap(bvec_iter.bi_size, bytes);
2409 if (bvec_iter.bi_size == bytes)
2410 flags |= BCH_READ_LAST_FRAGMENT;
2412 ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter.pos,
2414 offset_into_extent, failed, flags);
2418 if (flags & BCH_READ_LAST_FRAGMENT)
2421 swap(bvec_iter.bi_size, bytes);
2422 bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
2424 ret = btree_trans_too_many_iters(&trans);
2429 bch2_trans_iter_exit(&trans, &iter);
2431 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
2432 ret == READ_RETRY ||
2433 ret == READ_RETRY_AVOID)
2436 bch2_trans_exit(&trans);
2437 bch2_bkey_buf_exit(&sk, c);
2440 bch_err_inum_ratelimited(c, inum.inum,
2441 "read error %i from btree lookup", ret);
2442 rbio->bio.bi_status = BLK_STS_IOERR;
2443 bch2_rbio_done(rbio);
2447 void bch2_fs_io_exit(struct bch_fs *c)
2449 if (c->promote_table.tbl)
2450 rhashtable_destroy(&c->promote_table);
2451 mempool_exit(&c->bio_bounce_pages);
2452 bioset_exit(&c->bio_write);
2453 bioset_exit(&c->bio_read_split);
2454 bioset_exit(&c->bio_read);
2457 int bch2_fs_io_init(struct bch_fs *c)
2459 if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
2460 BIOSET_NEED_BVECS) ||
2461 bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
2462 BIOSET_NEED_BVECS) ||
2463 bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
2464 BIOSET_NEED_BVECS) ||
2465 mempool_init_page_pool(&c->bio_bounce_pages,
2467 c->opts.btree_node_size,
2468 c->opts.encoded_extent_max) /
2470 rhashtable_init(&c->promote_table, &bch_promote_params))