1 // SPDX-License-Identifier: GPL-2.0
3 * Some low level IO code, and hacks for various block layer limitations
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
10 #include "alloc_background.h"
11 #include "alloc_foreground.h"
14 #include "btree_update.h"
19 #include "data_update.h"
21 #include "disk_groups.h"
24 #include "extent_update.h"
30 #include "rebalance.h"
31 #include "subvolume.h"
35 #include <linux/blkdev.h>
36 #include <linux/random.h>
37 #include <linux/sched/mm.h>
39 #include <trace/events/bcachefs.h>
41 const char *bch2_blk_status_to_str(blk_status_t status)
43 if (status == BLK_STS_REMOVED)
44 return "device removed";
45 return blk_status_to_str(status);
48 static bool bch2_target_congested(struct bch_fs *c, u16 target)
50 const struct bch_devs_mask *devs;
51 unsigned d, nr = 0, total = 0;
52 u64 now = local_clock(), last;
60 devs = bch2_target_to_mask(c, target) ?:
61 &c->rw_devs[BCH_DATA_user];
63 for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
64 ca = rcu_dereference(c->devs[d]);
68 congested = atomic_read(&ca->congested);
69 last = READ_ONCE(ca->congested_last);
70 if (time_after64(now, last))
71 congested -= (now - last) >> 12;
73 total += max(congested, 0LL);
78 return bch2_rand_range(nr * CONGESTED_MAX) < total;
81 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
85 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
86 /* ideally we'd be taking into account the device's variance here: */
87 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
88 s64 latency_over = io_latency - latency_threshold;
90 if (latency_threshold && latency_over > 0) {
92 * bump up congested by approximately latency_over * 4 /
93 * latency_threshold - we don't need much accuracy here so don't
94 * bother with the divide:
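*
* e.g. (hypothetical numbers): with latency_threshold = 8192 = 2^13,
* ilog2(latency_threshold) - 2 = 11, and latency_over >> 11 is exactly
* latency_over * 4 / 8192, i.e. latency_over * 4 / latency_threshold.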
96 if (atomic_read(&ca->congested) < CONGESTED_MAX)
97 atomic_add(latency_over >>
98 max_t(int, ilog2(latency_threshold) - 2, 0),
101 ca->congested_last = now;
102 } else if (atomic_read(&ca->congested) > 0) {
103 atomic_dec(&ca->congested);
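/*
* Per-device IO latency accounting: track latency for each of READ/WRITE as an
* exponentially weighted moving average (assuming the usual ewma_add()
* semantics, weight 5 gives roughly new = old + (sample - old) / 32), and feed
* the result into the congestion accounting above.
*/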
107 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
109 atomic64_t *latency = &ca->cur_latency[rw];
110 u64 now = local_clock();
111 u64 io_latency = time_after64(now, submit_time)
114 u64 old, new, v = atomic64_read(latency);
* If the io latency was reasonably close to the current
* latency, skip doing the update and atomic operation - most of
* the time:
*/
124 if (abs((int) (old - io_latency)) < (old >> 1) &&
128 new = ewma_add(old, io_latency, 5);
129 } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
131 bch2_congested_acct(ca, io_latency, now, rw);
133 __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
136 /* Allocate, free from mempool: */
138 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
140 struct bvec_iter_all iter;
143 bio_for_each_segment_all(bv, bio, iter)
144 if (bv->bv_page != ZERO_PAGE(0))
145 mempool_free(bv->bv_page, &c->bio_bounce_pages);
149 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
153 if (likely(!*using_mempool)) {
154 page = alloc_page(GFP_NOIO);
155 if (unlikely(!page)) {
156 mutex_lock(&c->bio_bounce_pages_lock);
157 *using_mempool = true;
163 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
169 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
172 bool using_mempool = false;
175 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
176 unsigned len = min_t(size_t, PAGE_SIZE, size);
178 BUG_ON(!bio_add_page(bio, page, len, 0));
183 mutex_unlock(&c->bio_bounce_pages_lock);
186 /* Extent update path: */
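/*
* bch2_sum_sector_overwrites() - walk the existing extents that inserting @new
* would overwrite, and report:
*
*   @usage_increasing:    would the insert reference more data than before?
*   @i_sectors_delta:     resulting change to the inode's i_sectors
*   @disk_sectors_delta:  resulting change to on-disk space used, for the
*                         caller's disk reservation
*/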
188 int bch2_sum_sector_overwrites(struct btree_trans *trans,
189 struct btree_iter *extent_iter,
191 bool *usage_increasing,
192 s64 *i_sectors_delta,
193 s64 *disk_sectors_delta)
195 struct bch_fs *c = trans->c;
196 struct btree_iter iter;
198 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
199 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
202 *usage_increasing = false;
203 *i_sectors_delta = 0;
204 *disk_sectors_delta = 0;
206 bch2_trans_copy_iter(&iter, extent_iter);
208 for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, old, ret) {
209 s64 sectors = min(new->k.p.offset, old.k->p.offset) -
210 max(bkey_start_offset(&new->k),
211 bkey_start_offset(old.k));
213 *i_sectors_delta += sectors *
214 (bkey_extent_is_allocation(&new->k) -
215 bkey_extent_is_allocation(old.k));
217 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
218 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
219 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
222 if (!*usage_increasing &&
223 (new->k.p.snapshot != old.k->p.snapshot ||
224 new_replicas > bch2_bkey_replicas(c, old) ||
225 (!new_compressed && bch2_bkey_sectors_compressed(old))))
226 *usage_increasing = true;
228 if (bkey_cmp(old.k->p, new->k.p) >= 0)
232 bch2_trans_iter_exit(trans, &iter);
236 static int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
237 struct btree_iter *extent_iter,
241 struct btree_iter iter;
242 struct bkey_s_c inode_k;
243 struct bkey_s_c_inode_v3 inode;
244 struct bkey_i_inode_v3 *new_inode;
247 bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
249 extent_iter->pos.inode,
250 extent_iter->snapshot),
251 BTREE_ITER_INTENT|BTREE_ITER_CACHED);
252 inode_k = bch2_btree_iter_peek_slot(&iter);
253 ret = bkey_err(inode_k);
257 ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT;
261 if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) {
262 inode_k = bch2_inode_to_v3(trans, inode_k);
263 ret = bkey_err(inode_k);
268 inode = bkey_s_c_to_inode_v3(inode_k);
270 new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k));
271 ret = PTR_ERR_OR_ZERO(new_inode);
275 bkey_reassemble(&new_inode->k_i, inode.s_c);
277 if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
278 new_i_size > le64_to_cpu(inode.v->bi_size))
279 new_inode->v.bi_size = cpu_to_le64(new_i_size);
281 le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
283 new_inode->k.p.snapshot = iter.snapshot;
285 ret = bch2_trans_update(trans, &iter, &new_inode->k_i,
286 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
288 bch2_trans_iter_exit(trans, &iter);
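/*
* bch2_extent_update() - insert a new extent @k at @iter: trim it to something
* we can commit atomically, take any additional disk reservation needed, do
* the accompanying inode i_size/i_sectors update, commit, and advance @iter
* past what was inserted.
*/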
292 int bch2_extent_update(struct btree_trans *trans,
294 struct btree_iter *iter,
296 struct disk_reservation *disk_res,
298 s64 *i_sectors_delta_total,
301 struct bpos next_pos;
302 bool usage_increasing;
303 s64 i_sectors_delta = 0, disk_sectors_delta = 0;
* This traverses the iterator without changing iter->path->pos to
308 * search_key() (which is pos + 1 for extents): we want there to be a
309 * path already traversed at iter->pos because
310 * bch2_trans_extent_update() will use it to attempt extent merging
312 ret = __bch2_btree_iter_traverse(iter);
316 ret = bch2_extent_trim_atomic(trans, iter, k);
322 ret = bch2_sum_sector_overwrites(trans, iter, k,
325 &disk_sectors_delta);
330 disk_sectors_delta > (s64) disk_res->sectors) {
331 ret = bch2_disk_reservation_add(trans->c, disk_res,
332 disk_sectors_delta - disk_res->sectors,
333 !check_enospc || !usage_increasing
334 ? BCH_DISK_RESERVATION_NOFAIL : 0);
341 * We always have to do an inode update - even when i_size/i_sectors
342 * aren't changing - for fsync to work properly; fsync relies on
343 * inode->bi_journal_seq which is updated by the trigger code:
345 ret = bch2_extent_update_i_size_sectors(trans, iter,
346 min(k->k.p.offset << 9, new_i_size),
348 bch2_trans_update(trans, iter, k, 0) ?:
349 bch2_trans_commit(trans, disk_res, NULL,
350 BTREE_INSERT_NOCHECK_RW|
351 BTREE_INSERT_NOFAIL);
355 if (i_sectors_delta_total)
356 *i_sectors_delta_total += i_sectors_delta;
357 bch2_btree_iter_set_pos(iter, next_pos);
361 /* Overwrites whatever was present with zeroes: */
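/*
* Depending on filesystem version and options this either inserts
* KEY_TYPE_reservation keys, or (when opts.nocow is set and the on-disk format
* supports it) allocates real unwritten extents, with ptr->unwritten set.
*/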
362 int bch2_extent_fallocate(struct btree_trans *trans,
364 struct btree_iter *iter,
366 struct bch_io_opts opts,
367 s64 *i_sectors_delta,
368 struct write_point_specifier write_point)
370 struct bch_fs *c = trans->c;
371 struct disk_reservation disk_res = { 0 };
373 struct open_buckets open_buckets;
375 struct bkey_buf old, new;
376 bool have_reservation = false;
377 bool unwritten = opts.nocow &&
378 c->sb.version >= bcachefs_metadata_version_unwritten_extents;
381 bch2_bkey_buf_init(&old);
382 bch2_bkey_buf_init(&new);
383 closure_init_stack(&cl);
386 k = bch2_btree_iter_peek_slot(iter);
391 sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
393 if (!have_reservation) {
394 unsigned new_replicas =
395 max(0, (int) opts.data_replicas -
396 (int) bch2_bkey_nr_ptrs_fully_allocated(k));
398 * Get a disk reservation before (in the nocow case) calling
399 * into the allocator:
401 ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
405 bch2_bkey_buf_reassemble(&old, c, k);
408 if (have_reservation) {
409 if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
412 bch2_key_resize(&new.k->k, sectors);
413 } else if (!unwritten) {
414 struct bkey_i_reservation *reservation;
416 bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
417 reservation = bkey_reservation_init(new.k);
418 reservation->k.p = iter->pos;
419 bch2_key_resize(&reservation->k, sectors);
420 reservation->v.nr_replicas = opts.data_replicas;
422 struct bkey_i_extent *e;
423 struct bch_devs_list devs_have;
424 struct write_point *wp;
425 struct bch_extent_ptr *ptr;
429 bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
431 e = bkey_extent_init(new.k);
434 ret = bch2_alloc_sectors_start_trans(trans,
435 opts.foreground_target,
441 RESERVE_none, 0, &cl, &wp);
442 if (ret == -EAGAIN) {
443 bch2_trans_unlock(trans);
450 sectors = min(sectors, wp->sectors_free);
452 bch2_key_resize(&e->k, sectors);
454 bch2_open_bucket_get(c, wp, &open_buckets);
455 bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
456 bch2_alloc_sectors_done(c, wp);
458 extent_for_each_ptr(extent_i_to_s(e), ptr)
459 ptr->unwritten = true;
462 have_reservation = true;
464 ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
465 0, i_sectors_delta, true);
467 if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
468 bch2_trans_unlock(trans);
472 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
473 bch2_trans_begin(trans);
477 bch2_open_buckets_put(c, &open_buckets);
478 bch2_disk_reservation_put(c, &disk_res);
479 bch2_bkey_buf_exit(&new, c);
480 bch2_bkey_buf_exit(&old, c);
* Returns -BCH_ERR_transaction_restart if we had to drop locks:
488 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
489 subvol_inum inum, u64 end,
490 s64 *i_sectors_delta)
492 struct bch_fs *c = trans->c;
493 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
494 struct bpos end_pos = POS(inum.inum, end);
496 int ret = 0, ret2 = 0;
500 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
501 struct disk_reservation disk_res =
502 bch2_disk_reservation_init(c, 0);
503 struct bkey_i delete;
508 bch2_trans_begin(trans);
510 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
514 bch2_btree_iter_set_snapshot(iter, snapshot);
516 k = bch2_btree_iter_peek(iter);
517 if (bkey_cmp(iter->pos, end_pos) >= 0) {
518 bch2_btree_iter_set_pos(iter, end_pos);
526 bkey_init(&delete.k);
527 delete.k.p = iter->pos;
529 /* create the biggest key we can */
530 bch2_key_resize(&delete.k, max_sectors);
531 bch2_cut_back(end_pos, &delete);
533 ret = bch2_extent_update(trans, inum, iter, &delete,
534 &disk_res, 0, i_sectors_delta, false);
535 bch2_disk_reservation_put(c, &disk_res);
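/*
* bch2_fpunch() - convenience wrapper that runs bch2_fpunch_at() in its own
* btree transaction.
*
* A hypothetical caller sketch (offsets are in 512-byte sectors):
*
*	s64 i_sectors_delta = 0;
*	int ret = bch2_fpunch(c, inum, start_sector, end_sector, &i_sectors_delta);
*/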
541 int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
542 s64 *i_sectors_delta)
544 struct btree_trans trans;
545 struct btree_iter iter;
548 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
549 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
550 POS(inum.inum, start),
553 ret = bch2_fpunch_at(&trans, &iter, inum, end, i_sectors_delta);
555 bch2_trans_iter_exit(&trans, &iter);
556 bch2_trans_exit(&trans);
558 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
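/*
* bch2_write_index_default() - the regular (non-move) index update path: walk
* op->insert_keys, look up the snapshot ID for op->subvol, and insert each new
* extent with bch2_extent_update(), restarting the transaction as needed.
*/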
564 static int bch2_write_index_default(struct bch_write_op *op)
566 struct bch_fs *c = op->c;
568 struct keylist *keys = &op->insert_keys;
569 struct bkey_i *k = bch2_keylist_front(keys);
570 struct btree_trans trans;
571 struct btree_iter iter;
573 .subvol = op->subvol,
574 .inum = k->k.p.inode,
578 BUG_ON(!inum.subvol);
580 bch2_bkey_buf_init(&sk);
581 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
584 bch2_trans_begin(&trans);
586 k = bch2_keylist_front(keys);
587 bch2_bkey_buf_copy(&sk, c, k);
589 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol,
590 &sk.k->k.p.snapshot);
591 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
596 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
597 bkey_start_pos(&sk.k->k),
598 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
600 ret = bch2_extent_update(&trans, inum, &iter, sk.k,
602 op->new_i_size, &op->i_sectors_delta,
603 op->flags & BCH_WRITE_CHECK_ENOSPC);
604 bch2_trans_iter_exit(&trans, &iter);
606 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
611 if (bkey_cmp(iter.pos, k->k.p) >= 0)
612 bch2_keylist_pop_front(&op->insert_keys);
614 bch2_cut_front(iter.pos, k);
615 } while (!bch2_keylist_empty(keys));
617 bch2_trans_exit(&trans);
618 bch2_bkey_buf_exit(&sk, c);
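/*
* bch2_submit_wbio_replicas() - submit one copy of @wbio's data to every
* device pointed to by @k. All but the last pointer get a clone of the
* original bio; pointers whose device we can't get an ioref for are completed
* with BLK_STS_REMOVED so the write path can drop them later.
*/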
625 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
626 enum bch_data_type type,
627 const struct bkey_i *k,
630 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
631 const struct bch_extent_ptr *ptr;
632 struct bch_write_bio *n;
635 BUG_ON(c->opts.nochanges);
637 bkey_for_each_ptr(ptrs, ptr) {
638 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
641 ca = bch_dev_bkey_exists(c, ptr->dev);
643 if (to_entry(ptr + 1) < ptrs.end) {
644 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
645 GFP_NOIO, &ca->replica_set));
647 n->bio.bi_end_io = wbio->bio.bi_end_io;
648 n->bio.bi_private = wbio->bio.bi_private;
653 n->bio.bi_opf = wbio->bio.bi_opf;
654 bio_inc_remaining(&wbio->bio);
662 n->have_ioref = nocow || bch2_dev_get_ioref(ca,
663 type == BCH_DATA_btree ? READ : WRITE);
665 n->submit_time = local_clock();
666 n->inode_offset = bkey_start_offset(&k->k);
667 n->bio.bi_iter.bi_sector = ptr->offset;
669 if (likely(n->have_ioref)) {
670 this_cpu_add(ca->io_done->sectors[WRITE][type],
671 bio_sectors(&n->bio));
673 bio_set_dev(&n->bio, ca->disk_sb.bdev);
676 n->bio.bi_status = BLK_STS_REMOVED;
682 static void __bch2_write(struct bch_write_op *);
684 static void bch2_write_done(struct closure *cl)
686 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
687 struct bch_fs *c = op->c;
689 bch2_disk_reservation_put(c, &op->res);
690 percpu_ref_put(&c->writes);
691 bch2_keylist_free(&op->insert_keys, op->inline_keys);
693 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
695 closure_debug_destroy(cl);
700 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
702 struct keylist *keys = &op->insert_keys;
703 struct bch_extent_ptr *ptr;
704 struct bkey_i *src, *dst = keys->keys, *n;
706 for (src = keys->keys; src != keys->top; src = n) {
709 if (bkey_extent_is_direct_data(&src->k)) {
710 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
711 test_bit(ptr->dev, op->failed.d));
713 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
718 memmove_u64s_down(dst, src, src->u64s);
719 dst = bkey_next(dst);
* __bch2_write_index - after a write, update the index to point to the new data
729 static void __bch2_write_index(struct bch_write_op *op)
731 struct bch_fs *c = op->c;
732 struct keylist *keys = &op->insert_keys;
737 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
738 ret = bch2_write_drop_io_error_ptrs(op);
744 * probably not the ideal place to hook this in, but I don't
745 * particularly want to plumb io_opts all the way through the btree
746 * update stack right now
748 for_each_keylist_key(keys, k) {
749 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
751 if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
752 bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
756 if (!bch2_keylist_empty(keys)) {
757 u64 sectors_start = keylist_sectors(keys);
759 ret = !(op->flags & BCH_WRITE_MOVE)
760 ? bch2_write_index_default(op)
761 : bch2_data_update_index_update(op);
763 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
764 BUG_ON(keylist_sectors(keys) && !ret);
766 op->written += sectors_start - keylist_sectors(keys);
769 struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
771 bch_err_inum_offset_ratelimited(c,
772 k->k.p.inode, k->k.p.offset << 9,
773 "write error while doing btree update: %s",
/* If a bucket wasn't written, we can't erasure code it: */
780 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
781 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
783 bch2_open_buckets_put(c, &op->open_buckets);
786 keys->top = keys->keys;
788 op->flags |= BCH_WRITE_DONE;
792 static void bch2_write_index(struct closure *cl)
794 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
795 struct write_point *wp = op->wp;
796 struct workqueue_struct *wq = index_update_wq(op);
799 op->btree_update_ready = true;
800 queue_work(wq, &wp->index_update_work);
803 void bch2_write_point_do_index_updates(struct work_struct *work)
805 struct write_point *wp =
806 container_of(work, struct write_point, index_update_work);
807 struct bch_write_op *op;
810 spin_lock(&wp->writes_lock);
811 op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
812 if (op && !op->btree_update_ready)
815 list_del(&op->wp_list);
816 spin_unlock(&wp->writes_lock);
821 __bch2_write_index(op);
823 if (!(op->flags & BCH_WRITE_DONE))
826 bch2_write_done(&op->cl);
830 static void bch2_write_endio(struct bio *bio)
832 struct closure *cl = bio->bi_private;
833 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
834 struct bch_write_bio *wbio = to_wbio(bio);
835 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
836 struct bch_fs *c = wbio->c;
837 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
839 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
841 wbio->inode_offset << 9,
842 "data write error: %s",
843 bch2_blk_status_to_str(bio->bi_status))) {
844 set_bit(wbio->dev, op->failed.d);
845 op->flags |= BCH_WRITE_IO_ERROR;
849 set_bit(wbio->dev, op->devs_need_flush->d);
851 if (wbio->have_ioref) {
852 bch2_latency_acct(ca, wbio->submit_time, WRITE);
853 percpu_ref_put(&ca->io_ref);
857 bch2_bio_free_pages_pool(c, bio);
863 bio_endio(&parent->bio);
870 static void init_append_extent(struct bch_write_op *op,
871 struct write_point *wp,
872 struct bversion version,
873 struct bch_extent_crc_unpacked crc)
875 struct bch_fs *c = op->c;
876 struct bkey_i_extent *e;
878 op->pos.offset += crc.uncompressed_size;
880 e = bkey_extent_init(op->insert_keys.top);
882 e->k.size = crc.uncompressed_size;
883 e->k.version = version;
886 crc.compression_type ||
888 bch2_extent_crc_append(&e->k_i, crc);
890 bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, crc.compressed_size,
891 op->flags & BCH_WRITE_CACHED);
893 bch2_keylist_push(&op->insert_keys);
896 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
897 struct write_point *wp,
899 bool *page_alloc_failed,
902 struct bch_write_bio *wbio;
904 unsigned output_available =
905 min(wp->sectors_free << 9, src->bi_iter.bi_size);
906 unsigned pages = DIV_ROUND_UP(output_available +
908 ? ((unsigned long) buf & (PAGE_SIZE - 1))
911 pages = min(pages, BIO_MAX_VECS);
913 bio = bio_alloc_bioset(NULL, pages, 0,
914 GFP_NOIO, &c->bio_write);
915 wbio = wbio_init(bio);
916 wbio->put_bio = true;
917 /* copy WRITE_SYNC flag */
918 wbio->bio.bi_opf = src->bi_opf;
921 bch2_bio_map(bio, buf, output_available);
* We can't use the mempool for more than c->opts.encoded_extent_max
* worth of pages, but we'd like to allocate more if we can:
931 bch2_bio_alloc_pages_pool(c, bio,
932 min_t(unsigned, output_available,
933 c->opts.encoded_extent_max));
935 if (bio->bi_iter.bi_size < output_available)
937 bch2_bio_alloc_pages(bio,
939 bio->bi_iter.bi_size,
945 static int bch2_write_rechecksum(struct bch_fs *c,
946 struct bch_write_op *op,
947 unsigned new_csum_type)
949 struct bio *bio = &op->wbio.bio;
950 struct bch_extent_crc_unpacked new_crc;
953 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
955 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
956 bch2_csum_type_is_encryption(new_csum_type))
957 new_csum_type = op->crc.csum_type;
959 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
961 op->crc.offset, op->crc.live_size,
966 bio_advance(bio, op->crc.offset << 9);
967 bio->bi_iter.bi_size = op->crc.live_size << 9;
972 static int bch2_write_decrypt(struct bch_write_op *op)
974 struct bch_fs *c = op->c;
975 struct nonce nonce = extent_nonce(op->version, op->crc);
976 struct bch_csum csum;
979 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
983 * If we need to decrypt data in the write path, we'll no longer be able
984 * to verify the existing checksum (poly1305 mac, in this case) after
* it's decrypted - this is the last point we'll be able to reverify the
* checksum:
*/
988 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
989 if (bch2_crc_cmp(op->crc.csum, csum))
992 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
993 op->crc.csum_type = 0;
994 op->crc.csum = (struct bch_csum) { 0, 0 };
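/*
* For writes of already-encoded (checksummed/compressed) data - the move path,
* BCH_WRITE_DATA_ENCODED - decide whether we can write the existing encoding
* straight through, have to re-encode it first, or have to fail the write
* because a checksum didn't verify:
*/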
998 static enum prep_encoded_ret {
1001 PREP_ENCODED_CHECKSUM_ERR,
1002 PREP_ENCODED_DO_WRITE,
1003 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
1005 struct bch_fs *c = op->c;
1006 struct bio *bio = &op->wbio.bio;
1008 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
1009 return PREP_ENCODED_OK;
1011 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
1013 /* Can we just write the entire extent as is? */
1014 if (op->crc.uncompressed_size == op->crc.live_size &&
1015 op->crc.compressed_size <= wp->sectors_free &&
1016 (op->crc.compression_type == op->compression_type ||
1017 op->incompressible)) {
1018 if (!crc_is_compressed(op->crc) &&
1019 op->csum_type != op->crc.csum_type &&
1020 bch2_write_rechecksum(c, op, op->csum_type))
1021 return PREP_ENCODED_CHECKSUM_ERR;
1023 return PREP_ENCODED_DO_WRITE;
1027 * If the data is compressed and we couldn't write the entire extent as
1028 * is, we have to decompress it:
1030 if (crc_is_compressed(op->crc)) {
1031 struct bch_csum csum;
1033 if (bch2_write_decrypt(op))
1034 return PREP_ENCODED_CHECKSUM_ERR;
1036 /* Last point we can still verify checksum: */
1037 csum = bch2_checksum_bio(c, op->crc.csum_type,
1038 extent_nonce(op->version, op->crc),
1040 if (bch2_crc_cmp(op->crc.csum, csum))
1041 return PREP_ENCODED_CHECKSUM_ERR;
1043 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
1044 return PREP_ENCODED_ERR;
* No longer have compressed data after this point - data might be
* encrypted:
*/

/*
1053 * If the data is checksummed and we're only writing a subset,
1054 * rechecksum and adjust bio to point to currently live data:
1056 if ((op->crc.live_size != op->crc.uncompressed_size ||
1057 op->crc.csum_type != op->csum_type) &&
1058 bch2_write_rechecksum(c, op, op->csum_type))
1059 return PREP_ENCODED_CHECKSUM_ERR;
1062 * If we want to compress the data, it has to be decrypted:
1064 if ((op->compression_type ||
1065 bch2_csum_type_is_encryption(op->crc.csum_type) !=
1066 bch2_csum_type_is_encryption(op->csum_type)) &&
1067 bch2_write_decrypt(op))
1068 return PREP_ENCODED_CHECKSUM_ERR;
1070 return PREP_ENCODED_OK;
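/*
* bch2_write_extent() - carve the current write up into extents that fit in
* the space we got from @wp: each chunk is (optionally) compressed, encrypted
* and checksummed into the output bio, and a key for it is appended to
* op->insert_keys via init_append_extent().
*/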
1073 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
1076 struct bch_fs *c = op->c;
1077 struct bio *src = &op->wbio.bio, *dst = src;
1078 struct bvec_iter saved_iter;
1080 unsigned total_output = 0, total_input = 0;
1081 bool bounce = false;
1082 bool page_alloc_failed = false;
1085 BUG_ON(!bio_sectors(src));
1087 ec_buf = bch2_writepoint_ec_buf(c, wp);
1089 switch (bch2_write_prep_encoded_data(op, wp)) {
1090 case PREP_ENCODED_OK:
1092 case PREP_ENCODED_ERR:
1095 case PREP_ENCODED_CHECKSUM_ERR:
1097 case PREP_ENCODED_DO_WRITE:
1098 /* XXX look for bug here */
1100 dst = bch2_write_bio_alloc(c, wp, src,
1103 bio_copy_data(dst, src);
1106 init_append_extent(op, wp, op->version, op->crc);
1111 op->compression_type ||
1113 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
1114 (bch2_csum_type_is_encryption(op->csum_type) &&
1115 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
1116 dst = bch2_write_bio_alloc(c, wp, src,
1122 saved_iter = dst->bi_iter;
1125 struct bch_extent_crc_unpacked crc = { 0 };
1126 struct bversion version = op->version;
1127 size_t dst_len, src_len;
1129 if (page_alloc_failed &&
1130 dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
1131 dst->bi_iter.bi_size < c->opts.encoded_extent_max)
1134 BUG_ON(op->compression_type &&
1135 (op->flags & BCH_WRITE_DATA_ENCODED) &&
1136 bch2_csum_type_is_encryption(op->crc.csum_type));
1137 BUG_ON(op->compression_type && !bounce);
1139 crc.compression_type = op->incompressible
1140 ? BCH_COMPRESSION_TYPE_incompressible
1141 : op->compression_type
1142 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
1143 op->compression_type)
1145 if (!crc_is_compressed(crc)) {
1146 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
1147 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
1150 dst_len = min_t(unsigned, dst_len,
1151 c->opts.encoded_extent_max);
1154 swap(dst->bi_iter.bi_size, dst_len);
1155 bio_copy_data(dst, src);
1156 swap(dst->bi_iter.bi_size, dst_len);
1162 BUG_ON(!src_len || !dst_len);
1164 if (bch2_csum_type_is_encryption(op->csum_type)) {
1165 if (bversion_zero(version)) {
1166 version.lo = atomic64_inc_return(&c->key_version);
1168 crc.nonce = op->nonce;
1169 op->nonce += src_len >> 9;
1173 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1174 !crc_is_compressed(crc) &&
1175 bch2_csum_type_is_encryption(op->crc.csum_type) ==
1176 bch2_csum_type_is_encryption(op->csum_type)) {
1177 u8 compression_type = crc.compression_type;
1178 u16 nonce = crc.nonce;
1180 * Note: when we're using rechecksum(), we need to be
1181 * checksumming @src because it has all the data our
1182 * existing checksum covers - if we bounced (because we
1183 * were trying to compress), @dst will only have the
1184 * part of the data the new checksum will cover.
1186 * But normally we want to be checksumming post bounce,
1187 * because part of the reason for bouncing is so the
* data can't be modified (by userspace) while it's in
* flight:
*/
1191 if (bch2_rechecksum_bio(c, src, version, op->crc,
1194 bio_sectors(src) - (src_len >> 9),
* bch2_rechecksum_bio() sets compression_type on crc from op->crc;
* this isn't always correct, as sometimes we're changing
1200 * an extent from uncompressed to incompressible.
1202 crc.compression_type = compression_type;
1205 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1206 bch2_rechecksum_bio(c, src, version, op->crc,
1209 bio_sectors(src) - (src_len >> 9),
1213 crc.compressed_size = dst_len >> 9;
1214 crc.uncompressed_size = src_len >> 9;
1215 crc.live_size = src_len >> 9;
1217 swap(dst->bi_iter.bi_size, dst_len);
1218 ret = bch2_encrypt_bio(c, op->csum_type,
1219 extent_nonce(version, crc), dst);
1223 crc.csum = bch2_checksum_bio(c, op->csum_type,
1224 extent_nonce(version, crc), dst);
1225 crc.csum_type = op->csum_type;
1226 swap(dst->bi_iter.bi_size, dst_len);
1229 init_append_extent(op, wp, version, crc);
1232 bio_advance(dst, dst_len);
1233 bio_advance(src, src_len);
1234 total_output += dst_len;
1235 total_input += src_len;
1236 } while (dst->bi_iter.bi_size &&
1237 src->bi_iter.bi_size &&
1239 !bch2_keylist_realloc(&op->insert_keys,
1241 ARRAY_SIZE(op->inline_keys),
1242 BKEY_EXTENT_U64s_MAX));
1244 more = src->bi_iter.bi_size != 0;
1246 dst->bi_iter = saved_iter;
1248 if (dst == src && more) {
1249 BUG_ON(total_output != total_input);
1251 dst = bio_split(src, total_input >> 9,
1252 GFP_NOIO, &c->bio_write);
1253 wbio_init(dst)->put_bio = true;
1254 /* copy WRITE_SYNC flag */
1255 dst->bi_opf = src->bi_opf;
1258 dst->bi_iter.bi_size = total_output;
1263 bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
1266 if (to_wbio(dst)->bounce)
1267 bch2_bio_free_pages_pool(c, dst);
1268 if (to_wbio(dst)->put_bio)
1274 static bool bch2_extent_is_writeable(struct bch_write_op *op,
1277 struct bch_fs *c = op->c;
1278 struct bkey_s_c_extent e;
1279 struct extent_ptr_decoded p;
1280 const union bch_extent_entry *entry;
1281 unsigned replicas = 0;
1283 if (k.k->type != KEY_TYPE_extent)
1286 e = bkey_s_c_to_extent(k);
1287 extent_for_each_ptr_decode(e, p, entry) {
1288 if (p.crc.csum_type ||
1289 crc_is_compressed(p.crc) ||
1293 replicas += bch2_extent_ptr_durability(c, &p);
1296 return replicas >= op->opts.data_replicas;
1299 static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
1301 struct bch_fs *c = op->c;
1302 const struct bch_extent_ptr *ptr;
1305 for_each_keylist_key(&op->insert_keys, k) {
1306 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
1308 bkey_for_each_ptr(ptrs, ptr)
1309 bch2_bucket_nocow_unlock(&c->nocow_locks,
1310 PTR_BUCKET_POS(c, ptr),
1311 BUCKET_NOCOW_LOCK_UPDATE);
1315 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
1316 struct btree_iter *iter,
1317 struct bkey_i *orig,
1322 struct bkey_ptrs ptrs;
1323 struct bch_extent_ptr *ptr;
1326 if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
1331 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1332 ret = PTR_ERR_OR_ZERO(new);
1336 bkey_reassemble(new, k);
1338 bch2_cut_front(bkey_start_pos(&orig->k), new);
1339 bch2_cut_back(orig->k.p, new);
1341 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
1342 bkey_for_each_ptr(ptrs, ptr)
1346 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
1347 * that was done when we kicked off the write, and here it's important
1348 * that we update the extent that we wrote to - even if a snapshot has
1349 * since been created. The write is still outstanding, so we're ok
1350 * w.r.t. snapshot atomicity:
1352 return bch2_extent_update_i_size_sectors(trans, iter,
1353 min(new->k.p.offset << 9, new_i_size), 0) ?:
1354 bch2_trans_update(trans, iter, new,
1355 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1358 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
1360 struct bch_fs *c = op->c;
1361 struct btree_trans trans;
1362 struct btree_iter iter;
1363 struct bkey_i *orig;
1367 bch2_trans_init(&trans, c, 0, 0);
1369 for_each_keylist_key(&op->insert_keys, orig) {
1370 ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
1371 bkey_start_pos(&orig->k),
1372 BTREE_ITER_INTENT, k,
1373 NULL, NULL, BTREE_INSERT_NOFAIL, ({
1374 if (bkey_cmp(bkey_start_pos(k.k), orig->k.p) >= 0)
1377 bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
1381 struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
1383 bch_err_inum_offset_ratelimited(c,
1384 k->k.p.inode, k->k.p.offset << 9,
1385 "write error while doing btree update: %s",
1392 bch2_trans_exit(&trans);
1395 static void __bch2_nocow_write_done(struct bch_write_op *op)
1397 bch2_nocow_write_unlock(op);
1399 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
1401 } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
1402 bch2_nocow_write_convert_unwritten(op);
1405 static void bch2_nocow_write_done(struct closure *cl)
1407 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1409 __bch2_nocow_write_done(op);
1410 bch2_write_done(cl);
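/*
* The nocow write path: overwrite existing extents in place instead of
* allocating new space. We take per-bucket nocow locks, write directly to the
* buckets the extent already points to, and fall back to the normal COW path
* if the extent isn't writeable in place or a pointer turns out to be stale.
*/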
1413 static void bch2_nocow_write(struct bch_write_op *op)
1415 struct bch_fs *c = op->c;
1416 struct btree_trans trans;
1417 struct btree_iter iter;
1419 struct bkey_ptrs_c ptrs;
1420 const struct bch_extent_ptr *ptr, *ptr2;
1424 if (op->flags & BCH_WRITE_MOVE)
1427 bch2_trans_init(&trans, c, 0, 0);
1429 bch2_trans_begin(&trans);
1431 ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot);
1435 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
1436 SPOS(op->pos.inode, op->pos.offset, snapshot),
1439 struct bio *bio = &op->wbio.bio;
1441 k = bch2_btree_iter_peek_slot(&iter);
1446 /* fall back to normal cow write path? */
1447 if (unlikely(k.k->p.snapshot != snapshot ||
1448 !bch2_extent_is_writeable(op, k)))
1451 if (bch2_keylist_realloc(&op->insert_keys,
1453 ARRAY_SIZE(op->inline_keys),
1457 /* Get iorefs before dropping btree locks: */
1458 ptrs = bch2_bkey_ptrs_c(k);
1459 bkey_for_each_ptr(ptrs, ptr)
1460 if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
1463 /* Unlock before taking nocow locks, doing IO: */
1464 bkey_reassemble(op->insert_keys.top, k);
1465 bch2_trans_unlock(&trans);
1467 bch2_cut_front(op->pos, op->insert_keys.top);
1468 bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
1470 ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(op->insert_keys.top));
1471 bkey_for_each_ptr(ptrs, ptr) {
1472 bch2_bucket_nocow_lock(&c->nocow_locks,
1473 PTR_BUCKET_POS(c, ptr),
1474 BUCKET_NOCOW_LOCK_UPDATE);
1475 if (unlikely(ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
1476 goto err_bucket_stale;
1479 op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
1482 bio = &op->wbio.bio;
1483 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
1484 bio = bio_split(bio, k.k->p.offset - op->pos.offset,
1485 GFP_KERNEL, &c->bio_write);
1486 wbio_init(bio)->put_bio = true;
1487 bio->bi_opf = op->wbio.bio.bi_opf;
1489 op->flags |= BCH_WRITE_DONE;
1492 op->pos.offset += bio_sectors(bio);
1493 op->written += bio_sectors(bio);
1495 bio->bi_end_io = bch2_write_endio;
1496 bio->bi_private = &op->cl;
1497 bio->bi_opf |= REQ_OP_WRITE;
1498 closure_get(&op->cl);
1499 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1500 op->insert_keys.top, true);
1502 bch2_keylist_push(&op->insert_keys);
1503 if (op->flags & BCH_WRITE_DONE)
1505 bch2_btree_iter_advance(&iter);
1508 bch2_trans_iter_exit(&trans, &iter);
1510 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1514 bch_err_inum_offset_ratelimited(c,
1516 op->pos.offset << 9,
1517 "%s: btree lookup error %s",
1518 __func__, bch2_err_str(ret));
1520 op->flags |= BCH_WRITE_DONE;
1523 bch2_trans_exit(&trans);
1525 /* fallback to cow write path? */
1526 if (!(op->flags & BCH_WRITE_DONE)) {
1527 closure_sync(&op->cl);
1528 __bch2_nocow_write_done(op);
1529 op->insert_keys.top = op->insert_keys.keys;
1530 } else if (op->flags & BCH_WRITE_SYNC) {
1531 closure_sync(&op->cl);
1532 bch2_nocow_write_done(&op->cl);
* needs to run out of process context because ei_quota_lock is
* a mutex
*/
1539 continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
1543 bkey_for_each_ptr(ptrs, ptr2) {
1547 percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
1550 /* Fall back to COW path: */
1553 bkey_for_each_ptr(ptrs, ptr2) {
1554 bch2_bucket_nocow_unlock(&c->nocow_locks,
1555 PTR_BUCKET_POS(c, ptr2),
1556 BUCKET_NOCOW_LOCK_UPDATE);
1561 bkey_for_each_ptr(ptrs, ptr2)
1562 percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
1564 /* We can retry this: */
1565 ret = BCH_ERR_transaction_restart;
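/*
* __bch2_write() - the main write loop: try the nocow path if applicable,
* otherwise repeatedly allocate space from the write point, carve off extents
* with bch2_write_extent(), and submit them; the index update either happens
* synchronously here (BCH_WRITE_SYNC, or if we couldn't submit everything at
* once) or is punted to the write point's index_update_work.
*/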
1569 static void __bch2_write(struct bch_write_op *op)
1571 struct bch_fs *c = op->c;
1572 struct write_point *wp = NULL;
1573 struct bio *bio = NULL;
1574 unsigned nofs_flags;
1577 nofs_flags = memalloc_nofs_save();
1579 if (unlikely(op->opts.nocow)) {
1580 bch2_nocow_write(op);
1581 if (op->flags & BCH_WRITE_DONE)
1582 goto out_nofs_restore;
1585 memset(&op->failed, 0, sizeof(op->failed));
1586 op->btree_update_ready = false;
1589 struct bkey_i *key_to_write;
1590 unsigned key_to_write_offset = op->insert_keys.top_p -
1591 op->insert_keys.keys_p;
1593 /* +1 for possible cache device: */
1594 if (op->open_buckets.nr + op->nr_replicas + 1 >
1595 ARRAY_SIZE(op->open_buckets.v))
1598 if (bch2_keylist_realloc(&op->insert_keys,
1600 ARRAY_SIZE(op->inline_keys),
1601 BKEY_EXTENT_U64s_MAX))
1605 * The copygc thread is now global, which means it's no longer
1606 * freeing up space on specific disks, which means that
1607 * allocations for specific disks may hang arbitrarily long:
1609 ret = bch2_trans_do(c, NULL, NULL, 0,
1610 bch2_alloc_sectors_start_trans(&trans,
1612 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
1616 op->nr_replicas_required,
1619 (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
1620 BCH_WRITE_ONLY_SPECIFIED_DEVS))
1621 ? NULL : &op->cl, &wp));
1622 if (unlikely(ret)) {
1623 if (unlikely(ret != -EAGAIN)) {
1625 op->flags |= BCH_WRITE_DONE;
1631 bch2_open_bucket_get(c, wp, &op->open_buckets);
1632 ret = bch2_write_extent(op, wp, &bio);
1634 bch2_alloc_sectors_done(c, wp);
1638 op->flags |= BCH_WRITE_DONE;
1643 op->flags |= BCH_WRITE_DONE;
1645 bio->bi_end_io = bch2_write_endio;
1646 bio->bi_private = &op->cl;
1647 bio->bi_opf |= REQ_OP_WRITE;
1649 closure_get(bio->bi_private);
1651 key_to_write = (void *) (op->insert_keys.keys_p +
1652 key_to_write_offset);
1654 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1655 key_to_write, false);
* If we're running asynchronously, we may still want to block
1662 * synchronously here if we weren't able to submit all of the IO at
1663 * once, as that signals backpressure to the caller.
1665 if ((op->flags & BCH_WRITE_SYNC) || !(op->flags & BCH_WRITE_DONE)) {
1666 closure_sync(&op->cl);
1667 __bch2_write_index(op);
1669 if (!(op->flags & BCH_WRITE_DONE))
1671 bch2_write_done(&op->cl);
1673 spin_lock(&wp->writes_lock);
1675 list_add_tail(&op->wp_list, &wp->writes);
1676 spin_unlock(&wp->writes_lock);
1678 continue_at(&op->cl, bch2_write_index, NULL);
1681 memalloc_nofs_restore(nofs_flags);
1684 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1686 struct bio *bio = &op->wbio.bio;
1687 struct bvec_iter iter;
1688 struct bkey_i_inline_data *id;
1692 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1694 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1695 ARRAY_SIZE(op->inline_keys),
1696 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1702 sectors = bio_sectors(bio);
1703 op->pos.offset += sectors;
1705 id = bkey_inline_data_init(op->insert_keys.top);
1707 id->k.version = op->version;
1708 id->k.size = sectors;
1710 iter = bio->bi_iter;
1711 iter.bi_size = data_len;
1712 memcpy_from_bio(id->v.data, bio, iter);
1714 while (data_len & 7)
1715 id->v.data[data_len++] = '\0';
1716 set_bkey_val_bytes(&id->k, data_len);
1717 bch2_keylist_push(&op->insert_keys);
1719 op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1720 op->flags |= BCH_WRITE_DONE;
1722 __bch2_write_index(op);
1724 bch2_write_done(&op->cl);
* bch2_write - handle a write to a cache device or flash only volume
1730 * This is the starting point for any data to end up in a cache device; it could
1731 * be from a normal write, or a writeback write, or a write to a flash only
1732 * volume - it's also used by the moving garbage collector to compact data in
1733 * mostly empty buckets.
* It first writes the data, creating a list of keys to be inserted (if the
* data won't fit in a single open bucket, there will be multiple keys); after
* the data is written the keys are journalled, and once they've been added to
* the next journal write they're inserted into the btree.
1740 * If op->discard is true, instead of inserting the data it invalidates the
1741 * region of the cache represented by op->bio and op->inode.
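*
* A hypothetical caller sketch (field and helper names assumed from io.h and
* alloc_foreground.h, not shown here):
*
*	bch2_write_op_init(&op, c, io_opts);
*	op.pos          = POS(inum, sector);
*	op.subvol       = subvol;
*	op.write_point  = writepoint_hashed((unsigned long) current);
*	op.nr_replicas  = io_opts.data_replicas;
*	closure_call(&op.cl, bch2_write, NULL, &parent_cl);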
1743 void bch2_write(struct closure *cl)
1745 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1746 struct bio *bio = &op->wbio.bio;
1747 struct bch_fs *c = op->c;
1750 EBUG_ON(op->cl.parent);
1751 BUG_ON(!op->nr_replicas);
1752 BUG_ON(!op->write_point.v);
1753 BUG_ON(!bkey_cmp(op->pos, POS_MAX));
1755 op->start_time = local_clock();
1756 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1757 wbio_init(bio)->put_bio = false;
1759 if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
1760 bch_err_inum_offset_ratelimited(c,
1762 op->pos.offset << 9,
1763 "misaligned write");
1768 if (c->opts.nochanges ||
1769 !percpu_ref_tryget_live(&c->writes)) {
1774 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1775 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1777 data_len = min_t(u64, bio->bi_iter.bi_size,
1778 op->new_i_size - (op->pos.offset << 9));
1780 if (c->opts.inline_data &&
1781 data_len <= min(block_bytes(c) / 2, 1024U)) {
1782 bch2_write_data_inline(op, data_len);
1789 bch2_disk_reservation_put(c, &op->res);
1791 closure_debug_destroy(&op->cl);
1796 /* Cache promotion on read */
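/*
* When a read hits an extent that isn't already on the promote target, we can
* kick off a background copy of it to the promote target: the promote_op below
* wraps a data_update that does a cached write of the data we just read.
*/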
1799 struct rcu_head rcu;
1802 struct rhash_head hash;
1805 struct data_update write;
1806 struct bio_vec bi_inline_vecs[0]; /* must be last */
1809 static const struct rhashtable_params bch_promote_params = {
1810 .head_offset = offsetof(struct promote_op, hash),
1811 .key_offset = offsetof(struct promote_op, pos),
1812 .key_len = sizeof(struct bpos),
1815 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
1817 struct bch_io_opts opts,
1820 if (!(flags & BCH_READ_MAY_PROMOTE))
1823 if (!opts.promote_target)
1826 if (bch2_bkey_has_target(c, k, opts.promote_target))
1829 if (bkey_extent_is_unwritten(k))
1832 if (bch2_target_congested(c, opts.promote_target)) {
1833 /* XXX trace this */
1837 if (rhashtable_lookup_fast(&c->promote_table, &pos,
1838 bch_promote_params))
1844 static void promote_free(struct bch_fs *c, struct promote_op *op)
1848 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1849 bch_promote_params);
1851 percpu_ref_put(&c->writes);
1855 static void promote_done(struct bch_write_op *wop)
1857 struct promote_op *op =
1858 container_of(wop, struct promote_op, write.op);
1859 struct bch_fs *c = op->write.op.c;
1861 bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1864 bch2_data_update_exit(&op->write);
1865 promote_free(c, op);
1868 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1870 struct bio *bio = &op->write.op.wbio.bio;
1872 trace_and_count(op->write.op.c, read_promote, &rbio->bio);
1874 /* we now own pages: */
1875 BUG_ON(!rbio->bounce);
1876 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1878 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1879 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1880 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1882 bch2_data_update_read_done(&op->write, rbio->pick.crc);
1885 static struct promote_op *__promote_alloc(struct bch_fs *c,
1886 enum btree_id btree_id,
1889 struct extent_ptr_decoded *pick,
1890 struct bch_io_opts opts,
1892 struct bch_read_bio **rbio)
1894 struct promote_op *op = NULL;
1896 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
1899 if (!percpu_ref_tryget_live(&c->writes))
1902 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
1906 op->start_time = local_clock();
1910 * We don't use the mempool here because extents that aren't
1911 * checksummed or compressed can be too big for the mempool:
1913 *rbio = kzalloc(sizeof(struct bch_read_bio) +
1914 sizeof(struct bio_vec) * pages,
1919 rbio_init(&(*rbio)->bio, opts);
1920 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
1922 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
1926 (*rbio)->bounce = true;
1927 (*rbio)->split = true;
1928 (*rbio)->kmalloc = true;
1930 if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
1931 bch_promote_params))
1934 bio = &op->write.op.wbio.bio;
1935 bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
1937 ret = bch2_data_update_init(c, &op->write,
1938 writepoint_hashed((unsigned long) current),
1940 (struct data_update_opts) {
1941 .target = opts.promote_target,
1942 .extra_replicas = 1,
1943 .write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
1947 op->write.op.end_io = promote_done;
1952 bio_free_pages(&(*rbio)->bio);
1956 percpu_ref_put(&c->writes);
1961 static struct promote_op *promote_alloc(struct bch_fs *c,
1962 struct bvec_iter iter,
1964 struct extent_ptr_decoded *pick,
1965 struct bch_io_opts opts,
1967 struct bch_read_bio **rbio,
1971 bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
1972 /* data might have to be decompressed in the write path: */
1973 unsigned sectors = promote_full
1974 ? max(pick->crc.compressed_size, pick->crc.live_size)
1975 : bvec_iter_sectors(iter);
1976 struct bpos pos = promote_full
1977 ? bkey_start_pos(k.k)
1978 : POS(k.k->p.inode, iter.bi_sector);
1979 struct promote_op *promote;
1981 if (!should_promote(c, k, pos, opts, flags))
1984 promote = __promote_alloc(c,
1985 k.k->type == KEY_TYPE_reflink_v
1988 k, pos, pick, opts, sectors, rbio);
1993 *read_full = promote_full;
1999 #define READ_RETRY_AVOID 1
2000 #define READ_RETRY 2
2005 RBIO_CONTEXT_HIGHPRI,
2006 RBIO_CONTEXT_UNBOUND,
2009 static inline struct bch_read_bio *
2010 bch2_rbio_parent(struct bch_read_bio *rbio)
2012 return rbio->split ? rbio->parent : rbio;
2016 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
2017 enum rbio_context context,
2018 struct workqueue_struct *wq)
2020 if (context <= rbio->context) {
2023 rbio->work.func = fn;
2024 rbio->context = context;
2025 queue_work(wq, &rbio->work);
2029 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
2031 BUG_ON(rbio->bounce && !rbio->split);
2034 promote_free(rbio->c, rbio->promote);
2035 rbio->promote = NULL;
2038 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
2041 struct bch_read_bio *parent = rbio->parent;
2046 bio_put(&rbio->bio);
* Only called on a top level bch_read_bio to complete an entire read request,
* not a split:
*/
2058 static void bch2_rbio_done(struct bch_read_bio *rbio)
2060 if (rbio->start_time)
2061 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
2063 bio_endio(&rbio->bio);
2066 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
2067 struct bvec_iter bvec_iter,
2068 struct bch_io_failures *failed,
2071 struct btree_trans trans;
2072 struct btree_iter iter;
2077 flags &= ~BCH_READ_LAST_FRAGMENT;
2078 flags |= BCH_READ_MUST_CLONE;
2080 bch2_bkey_buf_init(&sk);
2081 bch2_trans_init(&trans, c, 0, 0);
2083 bch2_trans_iter_init(&trans, &iter, rbio->data_btree,
2084 rbio->read_pos, BTREE_ITER_SLOTS);
2086 rbio->bio.bi_status = 0;
2088 k = bch2_btree_iter_peek_slot(&iter);
2092 bch2_bkey_buf_reassemble(&sk, c, k);
2093 k = bkey_i_to_s_c(sk.k);
2094 bch2_trans_unlock(&trans);
2096 if (!bch2_bkey_matches_ptr(c, k,
2098 rbio->data_pos.offset -
2099 rbio->pick.crc.offset)) {
2100 /* extent we wanted to read no longer exists: */
2105 ret = __bch2_read_extent(&trans, rbio, bvec_iter,
2108 k, 0, failed, flags);
2109 if (ret == READ_RETRY)
2114 bch2_rbio_done(rbio);
2115 bch2_trans_iter_exit(&trans, &iter);
2116 bch2_trans_exit(&trans);
2117 bch2_bkey_buf_exit(&sk, c);
2120 rbio->bio.bi_status = BLK_STS_IOERR;
2124 static void bch2_rbio_retry(struct work_struct *work)
2126 struct bch_read_bio *rbio =
2127 container_of(work, struct bch_read_bio, work);
2128 struct bch_fs *c = rbio->c;
2129 struct bvec_iter iter = rbio->bvec_iter;
2130 unsigned flags = rbio->flags;
2131 subvol_inum inum = {
2132 .subvol = rbio->subvol,
2133 .inum = rbio->read_pos.inode,
2135 struct bch_io_failures failed = { .nr = 0 };
2137 trace_and_count(c, read_retry, &rbio->bio);
2139 if (rbio->retry == READ_RETRY_AVOID)
2140 bch2_mark_io_failure(&failed, &rbio->pick);
2142 rbio->bio.bi_status = 0;
2144 rbio = bch2_rbio_free(rbio);
2146 flags |= BCH_READ_IN_RETRY;
2147 flags &= ~BCH_READ_MAY_PROMOTE;
2149 if (flags & BCH_READ_NODECODE) {
2150 bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
2152 flags &= ~BCH_READ_LAST_FRAGMENT;
2153 flags |= BCH_READ_MUST_CLONE;
2155 __bch2_read(c, rbio, iter, inum, &failed, flags);
2159 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
2162 rbio->retry = retry;
2164 if (rbio->flags & BCH_READ_IN_RETRY)
2167 if (retry == READ_ERR) {
2168 rbio = bch2_rbio_free(rbio);
2170 rbio->bio.bi_status = error;
2171 bch2_rbio_done(rbio);
2173 bch2_rbio_punt(rbio, bch2_rbio_retry,
2174 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
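/*
* Narrowing crcs: if we just read and verified a checksum covering more data
* than the live portion of the extent, opportunistically rewrite the extent
* with a checksum covering only the live range, so future reads don't have to
* read and checksum the whole original region (see bch2_can_narrow_extent_crcs()).
*/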
2178 static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
2179 struct bch_read_bio *rbio)
2181 struct bch_fs *c = rbio->c;
2182 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
2183 struct bch_extent_crc_unpacked new_crc;
2184 struct btree_iter iter;
2189 if (crc_is_compressed(rbio->pick.crc))
2192 bch2_trans_iter_init(trans, &iter, rbio->data_btree, rbio->data_pos,
2193 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2194 k = bch2_btree_iter_peek_slot(&iter);
2195 if ((ret = bkey_err(k)))
2198 if (bversion_cmp(k.k->version, rbio->version) ||
2199 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
2202 /* Extent was merged? */
2203 if (bkey_start_offset(k.k) < data_offset ||
2204 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
2207 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
2208 rbio->pick.crc, NULL, &new_crc,
2209 bkey_start_offset(k.k) - data_offset, k.k->size,
2210 rbio->pick.crc.csum_type)) {
2211 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
2217 * going to be temporarily appending another checksum entry:
2219 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
2220 sizeof(struct bch_extent_crc128));
2221 if ((ret = PTR_ERR_OR_ZERO(new)))
2224 bkey_reassemble(new, k);
2226 if (!bch2_bkey_narrow_crcs(new, new_crc))
2229 ret = bch2_trans_update(trans, &iter, new,
2230 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
2232 bch2_trans_iter_exit(trans, &iter);
2236 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
2238 bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
2239 __bch2_rbio_narrow_crcs(&trans, rbio));
2242 /* Inner part that may run in process context */
2243 static void __bch2_read_endio(struct work_struct *work)
2245 struct bch_read_bio *rbio =
2246 container_of(work, struct bch_read_bio, work);
2247 struct bch_fs *c = rbio->c;
2248 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
2249 struct bio *src = &rbio->bio;
2250 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
2251 struct bvec_iter dst_iter = rbio->bvec_iter;
2252 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
2253 struct nonce nonce = extent_nonce(rbio->version, crc);
2254 unsigned nofs_flags;
2255 struct bch_csum csum;
2258 nofs_flags = memalloc_nofs_save();
2260 /* Reset iterator for checksumming and copying bounced data: */
2262 src->bi_iter.bi_size = crc.compressed_size << 9;
2263 src->bi_iter.bi_idx = 0;
2264 src->bi_iter.bi_bvec_done = 0;
2266 src->bi_iter = rbio->bvec_iter;
2269 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
2270 if (bch2_crc_cmp(csum, rbio->pick.crc.csum))
2275 * We need to rework the narrow_crcs path to deliver the read completion
2276 * first, and then punt to a different workqueue, otherwise we're
* holding up reads while doing btree updates which is bad for memory
* reclaim.
*/
2280 if (unlikely(rbio->narrow_crcs))
2281 bch2_rbio_narrow_crcs(rbio);
2283 if (rbio->flags & BCH_READ_NODECODE)
2286 /* Adjust crc to point to subset of data we want: */
2287 crc.offset += rbio->offset_into_extent;
2288 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
2290 if (crc_is_compressed(crc)) {
2291 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2295 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
2296 goto decompression_err;
2298 /* don't need to decrypt the entire bio: */
2299 nonce = nonce_add(nonce, crc.offset << 9);
2300 bio_advance(src, crc.offset << 9);
2302 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
2303 src->bi_iter.bi_size = dst_iter.bi_size;
2305 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2310 struct bvec_iter src_iter = src->bi_iter;
2311 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
2315 if (rbio->promote) {
* Re-encrypt data we decrypted, so it's consistent with
* rbio->crc:
*/
2320 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2324 promote_start(rbio->promote, rbio);
2325 rbio->promote = NULL;
2328 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
2329 rbio = bch2_rbio_free(rbio);
2330 bch2_rbio_done(rbio);
2333 memalloc_nofs_restore(nofs_flags);
2337 * Checksum error: if the bio wasn't bounced, we may have been
2338 * reading into buffers owned by userspace (that userspace can
2339 * scribble over) - retry the read, bouncing it this time:
2341 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
2342 rbio->flags |= BCH_READ_MUST_BOUNCE;
2343 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
2347 bch_err_inum_offset_ratelimited(ca,
2348 rbio->read_pos.inode,
2349 rbio->read_pos.offset << 9,
2350 "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
2351 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
2352 csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
2354 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2357 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
2358 rbio->read_pos.offset << 9,
2359 "decompression error");
2360 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
2363 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
2364 rbio->read_pos.offset << 9,
2366 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
2370 static void bch2_read_endio(struct bio *bio)
2372 struct bch_read_bio *rbio =
2373 container_of(bio, struct bch_read_bio, bio);
2374 struct bch_fs *c = rbio->c;
2375 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
2376 struct workqueue_struct *wq = NULL;
2377 enum rbio_context context = RBIO_CONTEXT_NULL;
2379 if (rbio->have_ioref) {
2380 bch2_latency_acct(ca, rbio->submit_time, READ);
2381 percpu_ref_put(&ca->io_ref);
2385 rbio->bio.bi_end_io = rbio->end_io;
2387 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
2388 rbio->read_pos.inode,
2389 rbio->read_pos.offset,
2390 "data read error: %s",
2391 bch2_blk_status_to_str(bio->bi_status))) {
2392 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
2396 if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
2397 ptr_stale(ca, &rbio->pick.ptr)) {
2398 trace_and_count(c, read_reuse_race, &rbio->bio);
2400 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
2401 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
2403 bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
2407 if (rbio->narrow_crcs ||
2409 crc_is_compressed(rbio->pick.crc) ||
2410 bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
2411 context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
2412 else if (rbio->pick.crc.csum_type)
2413 context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
2415 bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
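/*
* __bch2_read_indirect_extent() - given a reflink pointer in @orig_k, look up
* the indirect extent it points to in the reflink btree and replace @orig_k
* with it, adjusting *offset_into_extent to be relative to the indirect extent.
*/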
2418 int __bch2_read_indirect_extent(struct btree_trans *trans,
2419 unsigned *offset_into_extent,
2420 struct bkey_buf *orig_k)
2422 struct btree_iter iter;
2427 reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
2428 *offset_into_extent;
2430 bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink,
2431 POS(0, reflink_offset),
2433 k = bch2_btree_iter_peek_slot(&iter);
2438 if (k.k->type != KEY_TYPE_reflink_v &&
2439 k.k->type != KEY_TYPE_indirect_inline_data) {
2440 bch_err_inum_offset_ratelimited(trans->c,
2441 orig_k->k->k.p.inode,
2442 orig_k->k->k.p.offset << 9,
2443 "%llu len %u points to nonexistent indirect extent %llu",
2444 orig_k->k->k.p.offset,
2447 bch2_inconsistent_error(trans->c);
2452 *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
2453 bch2_bkey_buf_reassemble(orig_k, trans->c, k);
2455 bch2_trans_iter_exit(trans, &iter);
2459 static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
2461 struct bch_extent_ptr ptr)
2463 struct bch_fs *c = trans->c;
2464 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
2465 struct btree_iter iter;
2466 struct printbuf buf = PRINTBUF;
2469 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
2470 PTR_BUCKET_POS(c, &ptr),
2473 prt_printf(&buf, "Attempting to read from stale dirty pointer:");
2474 printbuf_indent_add(&buf, 2);
2477 bch2_bkey_val_to_text(&buf, c, k);
2480 prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
2482 ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
2485 bch2_bkey_val_to_text(&buf, c, k);
2488 bch2_fs_inconsistent(c, "%s", buf.buf);
2490 bch2_trans_iter_exit(trans, &iter);
2491 printbuf_exit(&buf);
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bpos read_pos,
		       enum btree_id data_btree, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct bch_dev *ca = NULL;
	struct promote_op *promote = NULL;
	bool bounce = false, read_full = false, narrow_crcs = false;
	struct bpos data_pos = bkey_start_pos(k.k);
	int pick_ret;

	if (bkey_extent_is_inline_data(k.k)) {
		unsigned bytes = min_t(unsigned, iter.bi_size,
				       bkey_inline_data_bytes(k.k));

		swap(iter.bi_size, bytes);
		memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
		swap(iter.bi_size, bytes);
		bio_advance_iter(&orig->bio, &iter, bytes);
		zero_fill_bio_iter(&orig->bio, iter);
		goto out_read_done;
	}
retry_pick:
	pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

	/* hole or reservation - just zero fill: */
	if (!pick_ret)
		goto hole;

	if (pick_ret < 0) {
		bch_err_inum_offset_ratelimited(c,
				read_pos.inode, read_pos.offset << 9,
				"no device to read from");
		goto err;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	/*
	 * Stale dirty pointers are treated as IO errors, but @failed isn't
	 * allocated unless we're in the retry path - so if we're not in the
	 * retry path, don't check here, it'll be caught in bch2_read_endio()
	 * and we'll end up in the retry path:
	 */
	if ((flags & BCH_READ_IN_RETRY) &&
	    !pick.ptr.cached &&
	    unlikely(ptr_stale(ca, &pick.ptr))) {
		read_from_stale_dirty_pointer(trans, k, pick.ptr);
		bch2_mark_io_failure(failed, &pick);
		goto retry_pick;
	}

	/*
	 * Unlock the iterator while the btree node's lock is still in
	 * cache, before doing the IO:
	 */
	bch2_trans_unlock(trans);
	if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
			goto hole;

		iter.bi_size = pick.crc.compressed_size << 9;
		goto get_bio;
	}

	if (!(flags & BCH_READ_LAST_FRAGMENT) ||
	    bio_flagged(&orig->bio, BIO_CHAIN))
		flags |= BCH_READ_MUST_CLONE;

	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(k, pick.crc);

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);

	if (crc_is_compressed(pick.crc) ||
	    (pick.crc.csum_type != BCH_CSUM_none &&
	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (orig->opts.promote_target)
		promote = promote_alloc(c, iter, k, &pick, orig->opts, flags,
					&rbio, &bounce, &read_full);

	if (!read_full) {
		EBUG_ON(crc_is_compressed(pick.crc));
		EBUG_ON(pick.crc.csum_type &&
			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick.crc.live_size ||
			 pick.crc.offset ||
			 offset_into_extent));

		data_pos.offset += offset_into_extent;
		pick.ptr.offset += pick.crc.offset +
			offset_into_extent;
		offset_into_extent = 0;
		pick.crc.compressed_size = bvec_iter_sectors(iter);
		pick.crc.uncompressed_size = bvec_iter_sectors(iter);
		pick.crc.offset = 0;
		pick.crc.live_size = bvec_iter_sectors(iter);
		offset_into_extent = 0;
	}
get_bio:
	if (rbio) {
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
		EBUG_ON(rbio->bio.bi_iter.bi_size <
			pick.crc.compressed_size << 9);
		rbio->bio.bi_iter.bi_size =
			pick.crc.compressed_size << 9;
	} else if (bounce) {
		unsigned sectors = pick.crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(NULL,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  0,
						  GFP_NOIO,
						  &c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		rbio->bounce = true;
		rbio->split = true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
						 &c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		rbio->split = true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}

	EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

	rbio->c			= c;
	rbio->submit_time	= local_clock();
	if (rbio->split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->offset_into_extent = offset_into_extent;
	rbio->flags		= flags;
	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
	rbio->narrow_crcs	= narrow_crcs;
	rbio->hole		= 0;
	rbio->retry		= 0;
	rbio->context		= 0;
	/* XXX: only initialize this if needed */
	rbio->devs_have		= bch2_bkey_devs(k);
	rbio->pick		= pick;
	rbio->subvol		= orig->subvol;
	rbio->read_pos		= read_pos;
	rbio->data_btree	= data_btree;
	rbio->data_pos		= data_pos;
	rbio->version		= k.k->version;
	rbio->promote		= promote;
	INIT_WORK(&rbio->work, NULL);

	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (rbio->bounce)
		trace_and_count(c, read_bounce, &rbio->bio);

	this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);

	/*
	 * If it's being moved internally, we don't want to flag it as a cache
	 * hit:
	 */
	if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
			PTR_BUCKET_NR(ca, &pick.ptr), READ);
	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
		bio_inc_remaining(&orig->bio);
		trace_and_count(c, read_split, &orig->bio);
	}

	if (!rbio->pick.idx) {
		if (!rbio->have_ioref) {
			bch_err_inum_offset_ratelimited(c,
					read_pos.inode,
					read_pos.offset << 9,
					"no device to read from");
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
			     bio_sectors(&rbio->bio));
		bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			submit_bio(&rbio->bio);
		else
			submit_bio_wait(&rbio->bio);
	} else {
		/* Attempting reconstruct read: */
		if (bch2_ec_read_extent(c, rbio)) {
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			bio_endio(&rbio->bio);
	}
out:
	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		return 0;
	} else {
		int ret;

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		rbio = bch2_rbio_free(rbio);

		if (ret == READ_RETRY_AVOID) {
			bch2_mark_io_failure(failed, &pick);
			ret = READ_RETRY;
		}

		if (!ret)
			goto out_read_done;

		return ret;
	}

err:
	if (flags & BCH_READ_IN_RETRY)
		return READ_ERR;

	orig->bio.bi_status = BLK_STS_IOERR;
	goto out_read_done;

hole:
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
	if (flags & BCH_READ_NODECODE)
		orig->hole = true;

	zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
	if (flags & BCH_READ_LAST_FRAGMENT)
		bch2_rbio_done(orig);
	return 0;
}
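
/*
 * Top level read path: walk the extents btree for @inum, issuing one read
 * fragment per extent (resolving reflink pointers as we go) until the whole
 * bvec_iter has been consumed or an error occurs.
 */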
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
		 struct bvec_iter bvec_iter, subvol_inum inum,
		 struct bch_io_failures *failed, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	u32 snapshot;
	int ret;

	BUG_ON(flags & BCH_READ_NODECODE);

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(&trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, bvec_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(&trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
		sectors = min(sectors, k.k->size - offset_into_extent);

		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);

		if (bvec_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter.pos,
					 data_btree, k,
					 offset_into_extent, failed, flags);
		if (ret)
			break;

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);

		ret = btree_trans_too_many_iters(&trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    ret == READ_RETRY ||
	    ret == READ_RETRY_AVOID)
		goto retry;

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	if (ret) {
		bch_err_inum_offset_ratelimited(c, inum.inum,
						bvec_iter.bi_sector << 9,
						"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bch2_rbio_done(rbio);
	}
}
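
/* Setup/teardown for the IO path: biosets, bounce page pool, promote table */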
void bch2_fs_io_exit(struct bch_fs *c)
{
	if (c->promote_table.tbl)
		rhashtable_destroy(&c->promote_table);
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
}
int bch2_fs_io_init(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
		two_state_lock_init(&c->nocow_locks.l[i]);

	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
			BIOSET_NEED_BVECS) ||
	    mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0) ||
	    rhashtable_init(&c->promote_table, &bch_promote_params))
		return -ENOMEM;

	return 0;
}