1 // SPDX-License-Identifier: GPL-2.0
3 * Some low-level IO code, and hacks for various block layer limitations
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
10 #include "alloc_background.h"
11 #include "alloc_foreground.h"
14 #include "btree_update.h"
19 #include "data_update.h"
21 #include "disk_groups.h"
24 #include "extent_update.h"
30 #include "nocow_locking.h"
31 #include "rebalance.h"
32 #include "subvolume.h"
36 #include <linux/blkdev.h>
37 #include <linux/prefetch.h>
38 #include <linux/random.h>
39 #include <linux/sched/mm.h>
41 #include <trace/events/bcachefs.h>
43 const char *bch2_blk_status_to_str(blk_status_t status)
45 if (status == BLK_STS_REMOVED)
46 return "device removed";
47 return blk_status_to_str(status);
50 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
52 static bool bch2_target_congested(struct bch_fs *c, u16 target)
54 const struct bch_devs_mask *devs;
55 unsigned d, nr = 0, total = 0;
56 u64 now = local_clock(), last;
64 devs = bch2_target_to_mask(c, target) ?:
65 &c->rw_devs[BCH_DATA_user];
67 for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
68 ca = rcu_dereference(c->devs[d]);
72 congested = atomic_read(&ca->congested);
73 last = READ_ONCE(ca->congested_last);
74 if (time_after64(now, last))
75 congested -= (now - last) >> 12;
77 total += max(congested, 0LL);
82 return bch2_rand_range(nr * CONGESTED_MAX) < total;
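/*
 * Roughly: if the target's devices are on average half congested,
 * total is about half of nr * CONGESTED_MAX, so a uniform draw from
 * [0, nr * CONGESTED_MAX) lands below it about half the time -
 * callers back off probabilistically in proportion to congestion.
 */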
85 static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
89 ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
90 /* ideally we'd be taking into account the device's variance here: */
91 u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
92 s64 latency_over = io_latency - latency_threshold;
94 if (latency_threshold && latency_over > 0) {
96 * bump up congested by approximately latency_over * 4 /
97 * latency_threshold - we don't need much accuracy here so don't
98 * bother with the divide:
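* E.g. with latency_threshold = 4096 and latency_over = 8192:
* ilog2(4096) - 2 = 10, so this adds 8192 >> 10 = 8, which here is
* exactly latency_over * 4 / latency_threshold; for values that
* aren't powers of two the shift is only an approximation.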
100 if (atomic_read(&ca->congested) < CONGESTED_MAX)
101 atomic_add(latency_over >>
102 max_t(int, ilog2(latency_threshold) - 2, 0),
105 ca->congested_last = now;
106 } else if (atomic_read(&ca->congested) > 0) {
107 atomic_dec(&ca->congested);
111 void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
113 atomic64_t *latency = &ca->cur_latency[rw];
114 u64 now = local_clock();
115 u64 io_latency = time_after64(now, submit_time)
118 u64 old, new, v = atomic64_read(latency);
124 * If the io latency was reasonably close to the current
125 * latency, skip doing the update and atomic operation - most of
128 if (abs((int) (old - io_latency)) < (old >> 1) &&
132 new = ewma_add(old, io_latency, 5);
133 } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
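/*
 * The loop above is a lock-free EWMA update; a hedged sketch of what
 * ewma_add(old, io_latency, 5) computes, assuming the usual
 * shift-based form:
 *
 *	new = old + ((s64) (io_latency - old) >> 5);
 *
 * i.e. one sample moves the running average by ~1/32 of its error.
 */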
135 bch2_congested_acct(ca, io_latency, now, rw);
137 __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
142 static bool bch2_target_congested(struct bch_fs *c, u16 target)
149 /* Allocate, free from mempool: */
151 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
153 struct bvec_iter_all iter;
156 bio_for_each_segment_all(bv, bio, iter)
157 if (bv.bv_page != ZERO_PAGE(0))
158 mempool_free(bv.bv_page, &c->bio_bounce_pages);
162 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
166 if (likely(!*using_mempool)) {
167 page = alloc_page(GFP_NOIO);
168 if (unlikely(!page)) {
169 mutex_lock(&c->bio_bounce_pages_lock);
170 *using_mempool = true;
176 page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
182 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
185 bool using_mempool = false;
188 struct page *page = __bio_alloc_page_pool(c, &using_mempool);
189 unsigned len = min_t(size_t, PAGE_SIZE, size);
191 BUG_ON(!bio_add_page(bio, page, len, 0));
196 mutex_unlock(&c->bio_bounce_pages_lock);
199 /* Extent update path: */
201 int bch2_sum_sector_overwrites(struct btree_trans *trans,
202 struct btree_iter *extent_iter,
204 bool *usage_increasing,
205 s64 *i_sectors_delta,
206 s64 *disk_sectors_delta)
208 struct bch_fs *c = trans->c;
209 struct btree_iter iter;
211 unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
212 bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
215 *usage_increasing = false;
216 *i_sectors_delta = 0;
217 *disk_sectors_delta = 0;
219 bch2_trans_copy_iter(&iter, extent_iter);
221 for_each_btree_key_upto_continue_norestart(iter,
222 new->k.p, BTREE_ITER_SLOTS, old, ret) {
223 s64 sectors = min(new->k.p.offset, old.k->p.offset) -
224 max(bkey_start_offset(&new->k),
225 bkey_start_offset(old.k));
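/*
 * e.g. if new spans sectors [16, 64) and old spans [0, 32), the
 * overlap is min(64, 32) - max(16, 0) = 16 sectors.
 */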
227 *i_sectors_delta += sectors *
228 (bkey_extent_is_allocation(&new->k) -
229 bkey_extent_is_allocation(old.k));
231 *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
232 *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
233 ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
236 if (!*usage_increasing &&
237 (new->k.p.snapshot != old.k->p.snapshot ||
238 new_replicas > bch2_bkey_replicas(c, old) ||
239 (!new_compressed && bch2_bkey_sectors_compressed(old))))
240 *usage_increasing = true;
242 if (bkey_ge(old.k->p, new->k.p))
246 bch2_trans_iter_exit(trans, &iter);
250 static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
251 struct btree_iter *extent_iter,
255 struct btree_iter iter;
257 struct bkey_i_inode_v3 *inode;
258 unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
261 bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
263 extent_iter->pos.inode,
264 extent_iter->snapshot),
265 BTREE_ITER_INTENT|BTREE_ITER_CACHED);
266 k = bch2_bkey_get_mut(trans, &iter);
267 ret = PTR_ERR_OR_ZERO(k);
271 if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
272 k = bch2_inode_to_v3(trans, k);
273 ret = PTR_ERR_OR_ZERO(k);
278 inode = bkey_i_to_inode_v3(k);
280 if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
281 new_i_size > le64_to_cpu(inode->v.bi_size)) {
282 inode->v.bi_size = cpu_to_le64(new_i_size);
283 inode_update_flags = 0;
286 if (i_sectors_delta) {
287 le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
288 inode_update_flags = 0;
291 if (inode->k.p.snapshot != iter.snapshot) {
292 inode->k.p.snapshot = iter.snapshot;
293 inode_update_flags = 0;
296 ret = bch2_trans_update(trans, &iter, &inode->k_i,
297 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
300 bch2_trans_iter_exit(trans, &iter);
304 int bch2_extent_update(struct btree_trans *trans,
306 struct btree_iter *iter,
308 struct disk_reservation *disk_res,
310 s64 *i_sectors_delta_total,
313 struct bpos next_pos;
314 bool usage_increasing;
315 s64 i_sectors_delta = 0, disk_sectors_delta = 0;
319 * This traverses the iterator without changing iter->path->pos to
320 * search_key() (which is pos + 1 for extents): we want there to be a
321 * path already traversed at iter->pos because
322 * bch2_trans_extent_update() will use it to attempt extent merging
324 ret = __bch2_btree_iter_traverse(iter);
328 ret = bch2_extent_trim_atomic(trans, iter, k);
334 ret = bch2_sum_sector_overwrites(trans, iter, k,
337 &disk_sectors_delta);
342 disk_sectors_delta > (s64) disk_res->sectors) {
343 ret = bch2_disk_reservation_add(trans->c, disk_res,
344 disk_sectors_delta - disk_res->sectors,
345 !check_enospc || !usage_increasing
346 ? BCH_DISK_RESERVATION_NOFAIL : 0);
353 * We always have to do an inode update - even when i_size/i_sectors
354 * aren't changing - for fsync to work properly; fsync relies on
355 * inode->bi_journal_seq which is updated by the trigger code:
357 ret = bch2_extent_update_i_size_sectors(trans, iter,
358 min(k->k.p.offset << 9, new_i_size),
360 bch2_trans_update(trans, iter, k, 0) ?:
361 bch2_trans_commit(trans, disk_res, NULL,
362 BTREE_INSERT_NOCHECK_RW|
363 BTREE_INSERT_NOFAIL);
367 if (i_sectors_delta_total)
368 *i_sectors_delta_total += i_sectors_delta;
369 bch2_btree_iter_set_pos(iter, next_pos);
373 /* Overwrites whatever was present with zeroes: */
374 int bch2_extent_fallocate(struct btree_trans *trans,
376 struct btree_iter *iter,
378 struct bch_io_opts opts,
379 s64 *i_sectors_delta,
380 struct write_point_specifier write_point)
382 struct bch_fs *c = trans->c;
383 struct disk_reservation disk_res = { 0 };
385 struct open_buckets open_buckets;
387 struct bkey_buf old, new;
388 unsigned sectors_allocated;
389 bool have_reservation = false;
390 bool unwritten = opts.nocow &&
391 c->sb.version >= bcachefs_metadata_version_unwritten_extents;
394 bch2_bkey_buf_init(&old);
395 bch2_bkey_buf_init(&new);
396 closure_init_stack(&cl);
399 sectors_allocated = 0;
401 k = bch2_btree_iter_peek_slot(iter);
406 sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
408 if (!have_reservation) {
409 unsigned new_replicas =
410 max(0, (int) opts.data_replicas -
411 (int) bch2_bkey_nr_ptrs_fully_allocated(k));
413 * Get a disk reservation before (in the nocow case) calling
414 * into the allocator:
416 ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
420 bch2_bkey_buf_reassemble(&old, c, k);
423 if (have_reservation) {
424 if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
427 bch2_key_resize(&new.k->k, sectors);
428 } else if (!unwritten) {
429 struct bkey_i_reservation *reservation;
431 bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
432 reservation = bkey_reservation_init(new.k);
433 reservation->k.p = iter->pos;
434 bch2_key_resize(&reservation->k, sectors);
435 reservation->v.nr_replicas = opts.data_replicas;
437 struct bkey_i_extent *e;
438 struct bch_devs_list devs_have;
439 struct write_point *wp;
440 struct bch_extent_ptr *ptr;
444 bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
446 e = bkey_extent_init(new.k);
449 ret = bch2_alloc_sectors_start_trans(trans,
450 opts.foreground_target,
456 RESERVE_none, 0, &cl, &wp);
458 bch2_trans_unlock(trans);
460 if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
465 sectors = min(sectors, wp->sectors_free);
466 sectors_allocated = sectors;
468 bch2_key_resize(&e->k, sectors);
470 bch2_open_bucket_get(c, wp, &open_buckets);
471 bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
472 bch2_alloc_sectors_done(c, wp);
474 extent_for_each_ptr(extent_i_to_s(e), ptr)
475 ptr->unwritten = true;
478 have_reservation = true;
480 ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
481 0, i_sectors_delta, true);
483 if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
484 bch2_trans_unlock(trans);
488 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
489 bch2_trans_begin(trans);
493 if (!ret && sectors_allocated)
494 bch2_increment_clock(c, sectors_allocated, WRITE);
496 bch2_open_buckets_put(c, &open_buckets);
497 bch2_disk_reservation_put(c, &disk_res);
498 bch2_bkey_buf_exit(&new, c);
499 bch2_bkey_buf_exit(&old, c);
505 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
507 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
508 subvol_inum inum, u64 end,
509 s64 *i_sectors_delta)
511 struct bch_fs *c = trans->c;
512 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
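/*
 * KEY_SIZE_MAX rounded down to a block boundary - e.g. if
 * c->block_bits is 3 (4096 byte blocks of 512 byte sectors), the low
 * three bits are cleared so the delete keys created below stay
 * block aligned.
 */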
513 struct bpos end_pos = POS(inum.inum, end);
515 int ret = 0, ret2 = 0;
519 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
520 struct disk_reservation disk_res =
521 bch2_disk_reservation_init(c, 0);
522 struct bkey_i delete;
527 bch2_trans_begin(trans);
529 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
533 bch2_btree_iter_set_snapshot(iter, snapshot);
536 * peek_upto() doesn't have ideal semantics for extents:
538 k = bch2_btree_iter_peek_upto(iter, end_pos);
546 bkey_init(&delete.k);
547 delete.k.p = iter->pos;
549 /* create the biggest key we can */
550 bch2_key_resize(&delete.k, max_sectors);
551 bch2_cut_back(end_pos, &delete);
553 ret = bch2_extent_update(trans, inum, iter, &delete,
554 &disk_res, 0, i_sectors_delta, false);
555 bch2_disk_reservation_put(c, &disk_res);
561 int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
562 s64 *i_sectors_delta)
564 struct btree_trans trans;
565 struct btree_iter iter;
568 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
569 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
570 POS(inum.inum, start),
573 ret = bch2_fpunch_at(&trans, &iter, inum, end, i_sectors_delta);
575 bch2_trans_iter_exit(&trans, &iter);
576 bch2_trans_exit(&trans);
578 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
584 static int bch2_write_index_default(struct bch_write_op *op)
586 struct bch_fs *c = op->c;
588 struct keylist *keys = &op->insert_keys;
589 struct bkey_i *k = bch2_keylist_front(keys);
590 struct btree_trans trans;
591 struct btree_iter iter;
593 .subvol = op->subvol,
594 .inum = k->k.p.inode,
598 BUG_ON(!inum.subvol);
600 bch2_bkey_buf_init(&sk);
601 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
604 bch2_trans_begin(&trans);
606 k = bch2_keylist_front(keys);
607 bch2_bkey_buf_copy(&sk, c, k);
609 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol,
610 &sk.k->k.p.snapshot);
611 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
616 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
617 bkey_start_pos(&sk.k->k),
618 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
620 ret = bch2_extent_update(&trans, inum, &iter, sk.k,
622 op->new_i_size, &op->i_sectors_delta,
623 op->flags & BCH_WRITE_CHECK_ENOSPC);
624 bch2_trans_iter_exit(&trans, &iter);
626 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
631 if (bkey_ge(iter.pos, k->k.p))
632 bch2_keylist_pop_front(&op->insert_keys);
634 bch2_cut_front(iter.pos, k);
635 } while (!bch2_keylist_empty(keys));
637 bch2_trans_exit(&trans);
638 bch2_bkey_buf_exit(&sk, c);
645 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
646 enum bch_data_type type,
647 const struct bkey_i *k,
650 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
651 const struct bch_extent_ptr *ptr;
652 struct bch_write_bio *n;
655 BUG_ON(c->opts.nochanges);
657 bkey_for_each_ptr(ptrs, ptr) {
658 BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
661 ca = bch_dev_bkey_exists(c, ptr->dev);
663 if (to_entry(ptr + 1) < ptrs.end) {
664 n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
665 GFP_NOIO, &ca->replica_set));
667 n->bio.bi_end_io = wbio->bio.bi_end_io;
668 n->bio.bi_private = wbio->bio.bi_private;
673 n->bio.bi_opf = wbio->bio.bi_opf;
674 bio_inc_remaining(&wbio->bio);
682 n->have_ioref = nocow || bch2_dev_get_ioref(ca,
683 type == BCH_DATA_btree ? READ : WRITE);
685 n->submit_time = local_clock();
686 n->inode_offset = bkey_start_offset(&k->k);
687 n->bio.bi_iter.bi_sector = ptr->offset;
689 if (likely(n->have_ioref)) {
690 this_cpu_add(ca->io_done->sectors[WRITE][type],
691 bio_sectors(&n->bio));
693 bio_set_dev(&n->bio, ca->disk_sb.bdev);
695 if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
702 n->bio.bi_status = BLK_STS_REMOVED;
708 static void __bch2_write(struct bch_write_op *);
710 static void bch2_write_done(struct closure *cl)
712 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
713 struct bch_fs *c = op->c;
715 bch2_disk_reservation_put(c, &op->res);
716 if (!(op->flags & BCH_WRITE_MOVE))
717 bch2_write_ref_put(c, BCH_WRITE_REF_write);
718 bch2_keylist_free(&op->insert_keys, op->inline_keys);
720 bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
723 closure_debug_destroy(cl);
728 static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
730 struct keylist *keys = &op->insert_keys;
731 struct bch_extent_ptr *ptr;
732 struct bkey_i *src, *dst = keys->keys, *n;
734 for (src = keys->keys; src != keys->top; src = n) {
737 if (bkey_extent_is_direct_data(&src->k)) {
738 bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
739 test_bit(ptr->dev, op->failed.d));
741 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
746 memmove_u64s_down(dst, src, src->k.u64s);
747 dst = bkey_next(dst);
755 * __bch2_write_index - after a write, update the index to point to the new data
757 static void __bch2_write_index(struct bch_write_op *op)
759 struct bch_fs *c = op->c;
760 struct keylist *keys = &op->insert_keys;
765 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
766 ret = bch2_write_drop_io_error_ptrs(op);
772 * probably not the ideal place to hook this in, but I don't
773 * particularly want to plumb io_opts all the way through the btree
774 * update stack right now
776 for_each_keylist_key(keys, k)
777 bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
779 if (!bch2_keylist_empty(keys)) {
780 u64 sectors_start = keylist_sectors(keys);
782 ret = !(op->flags & BCH_WRITE_MOVE)
783 ? bch2_write_index_default(op)
784 : bch2_data_update_index_update(op);
786 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
787 BUG_ON(keylist_sectors(keys) && !ret);
789 op->written += sectors_start - keylist_sectors(keys);
791 if (ret && !bch2_err_matches(ret, EROFS)) {
792 struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
794 bch_err_inum_offset_ratelimited(c,
795 k->k.p.inode, k->k.p.offset << 9,
796 "write error while doing btree update: %s",
804 /* If a bucket wasn't written, we can't erasure code it: */
805 for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
806 bch2_open_bucket_write_error(c, &op->open_buckets, dev);
808 bch2_open_buckets_put(c, &op->open_buckets);
811 keys->top = keys->keys;
813 op->flags |= BCH_WRITE_DONE;
817 static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
819 if (state != wp->state) {
820 u64 now = ktime_get_ns();
822 if (wp->last_state_change &&
823 time_after64(now, wp->last_state_change))
824 wp->time[wp->state] += now - wp->last_state_change;
826 wp->last_state_change = now;
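/*
 * wp->time[] thus accumulates the nanoseconds this write point spent
 * in each state between transitions.
 */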
830 static inline void wp_update_state(struct write_point *wp, bool running)
832 enum write_point_state state;
834 state = running ? WRITE_POINT_running :
835 !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
836 : WRITE_POINT_stopped;
838 __wp_update_state(wp, state);
841 static void bch2_write_index(struct closure *cl)
843 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
844 struct write_point *wp = op->wp;
845 struct workqueue_struct *wq = index_update_wq(op);
848 if ((op->flags & BCH_WRITE_DONE) &&
849 (op->flags & BCH_WRITE_MOVE))
850 bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
852 spin_lock_irqsave(&wp->writes_lock, flags);
853 if (wp->state == WRITE_POINT_waiting_io)
854 __wp_update_state(wp, WRITE_POINT_waiting_work);
855 list_add_tail(&op->wp_list, &wp->writes);
856 spin_unlock_irqrestore(&wp->writes_lock, flags);
858 queue_work(wq, &wp->index_update_work);
861 static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
865 if (wp->state == WRITE_POINT_stopped) {
866 spin_lock_irq(&wp->writes_lock);
867 __wp_update_state(wp, WRITE_POINT_waiting_io);
868 spin_unlock_irq(&wp->writes_lock);
872 void bch2_write_point_do_index_updates(struct work_struct *work)
874 struct write_point *wp =
875 container_of(work, struct write_point, index_update_work);
876 struct bch_write_op *op;
879 spin_lock_irq(&wp->writes_lock);
880 op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
882 list_del(&op->wp_list);
883 wp_update_state(wp, op != NULL);
884 spin_unlock_irq(&wp->writes_lock);
889 op->flags |= BCH_WRITE_IN_WORKER;
891 __bch2_write_index(op);
893 if (!(op->flags & BCH_WRITE_DONE))
896 bch2_write_done(&op->cl);
900 static void bch2_write_endio(struct bio *bio)
902 struct closure *cl = bio->bi_private;
903 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
904 struct bch_write_bio *wbio = to_wbio(bio);
905 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
906 struct bch_fs *c = wbio->c;
907 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
909 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
911 wbio->inode_offset << 9,
912 "data write error: %s",
913 bch2_blk_status_to_str(bio->bi_status))) {
914 set_bit(wbio->dev, op->failed.d);
915 op->flags |= BCH_WRITE_IO_ERROR;
919 set_bit(wbio->dev, op->devs_need_flush->d);
921 if (wbio->have_ioref) {
922 bch2_latency_acct(ca, wbio->submit_time, WRITE);
923 percpu_ref_put(&ca->io_ref);
927 bch2_bio_free_pages_pool(c, bio);
933 bio_endio(&parent->bio);
938 static void init_append_extent(struct bch_write_op *op,
939 struct write_point *wp,
940 struct bversion version,
941 struct bch_extent_crc_unpacked crc)
943 struct bkey_i_extent *e;
945 op->pos.offset += crc.uncompressed_size;
947 e = bkey_extent_init(op->insert_keys.top);
949 e->k.size = crc.uncompressed_size;
950 e->k.version = version;
953 crc.compression_type ||
955 bch2_extent_crc_append(&e->k_i, crc);
957 bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
958 op->flags & BCH_WRITE_CACHED);
960 bch2_keylist_push(&op->insert_keys);
963 static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
964 struct write_point *wp,
966 bool *page_alloc_failed,
969 struct bch_write_bio *wbio;
971 unsigned output_available =
972 min(wp->sectors_free << 9, src->bi_iter.bi_size);
973 unsigned pages = DIV_ROUND_UP(output_available +
975 ? ((unsigned long) buf & (PAGE_SIZE - 1))
978 pages = min(pages, BIO_MAX_VECS);
980 bio = bio_alloc_bioset(NULL, pages, 0,
981 GFP_NOIO, &c->bio_write);
982 wbio = wbio_init(bio);
983 wbio->put_bio = true;
984 /* copy WRITE_SYNC flag */
985 wbio->bio.bi_opf = src->bi_opf;
988 bch2_bio_map(bio, buf, output_available);
995 * We can't use the mempool for more than c->opts.encoded_extent_max
996 * worth of pages, but we'd like to allocate more if we can:
998 bch2_bio_alloc_pages_pool(c, bio,
999 min_t(unsigned, output_available,
1000 c->opts.encoded_extent_max));
1002 if (bio->bi_iter.bi_size < output_available)
1003 *page_alloc_failed =
1004 bch2_bio_alloc_pages(bio,
1006 bio->bi_iter.bi_size,
1012 static int bch2_write_rechecksum(struct bch_fs *c,
1013 struct bch_write_op *op,
1014 unsigned new_csum_type)
1016 struct bio *bio = &op->wbio.bio;
1017 struct bch_extent_crc_unpacked new_crc;
1020 /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
1022 if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
1023 bch2_csum_type_is_encryption(new_csum_type))
1024 new_csum_type = op->crc.csum_type;
1026 ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
1028 op->crc.offset, op->crc.live_size,
1033 bio_advance(bio, op->crc.offset << 9);
1034 bio->bi_iter.bi_size = op->crc.live_size << 9;
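/*
 * Trim the bio down to the live region: e.g. with crc.offset = 8 and
 * crc.live_size = 16, skip the first 4KB and cap the bio at the 8KB
 * that's actually live.
 */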
1039 static int bch2_write_decrypt(struct bch_write_op *op)
1041 struct bch_fs *c = op->c;
1042 struct nonce nonce = extent_nonce(op->version, op->crc);
1043 struct bch_csum csum;
1046 if (!bch2_csum_type_is_encryption(op->crc.csum_type))
1050 * If we need to decrypt data in the write path, we'll no longer be able
1051 * to verify the existing checksum (poly1305 mac, in this case) after
1052 * it's decrypted - this is the last point we'll be able to reverify the
1055 csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
1056 if (bch2_crc_cmp(op->crc.csum, csum))
1059 ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
1060 op->crc.csum_type = 0;
1061 op->crc.csum = (struct bch_csum) { 0, 0 };
1065 static enum prep_encoded_ret {
1068 PREP_ENCODED_CHECKSUM_ERR,
1069 PREP_ENCODED_DO_WRITE,
1070 } bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
1072 struct bch_fs *c = op->c;
1073 struct bio *bio = &op->wbio.bio;
1075 if (!(op->flags & BCH_WRITE_DATA_ENCODED))
1076 return PREP_ENCODED_OK;
1078 BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
1080 /* Can we just write the entire extent as is? */
1081 if (op->crc.uncompressed_size == op->crc.live_size &&
1082 op->crc.compressed_size <= wp->sectors_free &&
1083 (op->crc.compression_type == op->compression_type ||
1084 op->incompressible)) {
1085 if (!crc_is_compressed(op->crc) &&
1086 op->csum_type != op->crc.csum_type &&
1087 bch2_write_rechecksum(c, op, op->csum_type))
1088 return PREP_ENCODED_CHECKSUM_ERR;
1090 return PREP_ENCODED_DO_WRITE;
1094 * If the data is compressed and we couldn't write the entire extent as
1095 * is, we have to decompress it:
1097 if (crc_is_compressed(op->crc)) {
1098 struct bch_csum csum;
1100 if (bch2_write_decrypt(op))
1101 return PREP_ENCODED_CHECKSUM_ERR;
1103 /* Last point we can still verify checksum: */
1104 csum = bch2_checksum_bio(c, op->crc.csum_type,
1105 extent_nonce(op->version, op->crc),
1107 if (bch2_crc_cmp(op->crc.csum, csum))
1108 return PREP_ENCODED_CHECKSUM_ERR;
1110 if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
1111 return PREP_ENCODED_ERR;
1115 * No longer have compressed data after this point - data might be
1120 * If the data is checksummed and we're only writing a subset,
1121 * rechecksum and adjust bio to point to currently live data:
1123 if ((op->crc.live_size != op->crc.uncompressed_size ||
1124 op->crc.csum_type != op->csum_type) &&
1125 bch2_write_rechecksum(c, op, op->csum_type))
1126 return PREP_ENCODED_CHECKSUM_ERR;
1129 * If we want to compress the data, it has to be decrypted:
1131 if ((op->compression_type ||
1132 bch2_csum_type_is_encryption(op->crc.csum_type) !=
1133 bch2_csum_type_is_encryption(op->csum_type)) &&
1134 bch2_write_decrypt(op))
1135 return PREP_ENCODED_CHECKSUM_ERR;
1137 return PREP_ENCODED_OK;
1140 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
1143 struct bch_fs *c = op->c;
1144 struct bio *src = &op->wbio.bio, *dst = src;
1145 struct bvec_iter saved_iter;
1147 unsigned total_output = 0, total_input = 0;
1148 bool bounce = false;
1149 bool page_alloc_failed = false;
1152 BUG_ON(!bio_sectors(src));
1154 ec_buf = bch2_writepoint_ec_buf(c, wp);
1156 switch (bch2_write_prep_encoded_data(op, wp)) {
1157 case PREP_ENCODED_OK:
1159 case PREP_ENCODED_ERR:
1162 case PREP_ENCODED_CHECKSUM_ERR:
1164 case PREP_ENCODED_DO_WRITE:
1165 /* XXX look for bug here */
1167 dst = bch2_write_bio_alloc(c, wp, src,
1170 bio_copy_data(dst, src);
1173 init_append_extent(op, wp, op->version, op->crc);
1178 op->compression_type ||
1180 !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
1181 (bch2_csum_type_is_encryption(op->csum_type) &&
1182 !(op->flags & BCH_WRITE_PAGES_OWNED))) {
1183 dst = bch2_write_bio_alloc(c, wp, src,
1189 saved_iter = dst->bi_iter;
1192 struct bch_extent_crc_unpacked crc = { 0 };
1193 struct bversion version = op->version;
1194 size_t dst_len, src_len;
1196 if (page_alloc_failed &&
1197 dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
1198 dst->bi_iter.bi_size < c->opts.encoded_extent_max)
1201 BUG_ON(op->compression_type &&
1202 (op->flags & BCH_WRITE_DATA_ENCODED) &&
1203 bch2_csum_type_is_encryption(op->crc.csum_type));
1204 BUG_ON(op->compression_type && !bounce);
1206 crc.compression_type = op->incompressible
1207 ? BCH_COMPRESSION_TYPE_incompressible
1208 : op->compression_type
1209 ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
1210 op->compression_type)
1212 if (!crc_is_compressed(crc)) {
1213 dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
1214 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
1217 dst_len = min_t(unsigned, dst_len,
1218 c->opts.encoded_extent_max);
1221 swap(dst->bi_iter.bi_size, dst_len);
1222 bio_copy_data(dst, src);
1223 swap(dst->bi_iter.bi_size, dst_len);
1229 BUG_ON(!src_len || !dst_len);
1231 if (bch2_csum_type_is_encryption(op->csum_type)) {
1232 if (bversion_zero(version)) {
1233 version.lo = atomic64_inc_return(&c->key_version);
1235 crc.nonce = op->nonce;
1236 op->nonce += src_len >> 9;
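/*
 * Each encrypted extent thus gets a distinct (version, nonce) pair:
 * the version comes from a filesystem-wide counter, and op->nonce
 * advances by the sectors consumed, so the keystream is never reused
 * across extents within one write op.
 */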
1240 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1241 !crc_is_compressed(crc) &&
1242 bch2_csum_type_is_encryption(op->crc.csum_type) ==
1243 bch2_csum_type_is_encryption(op->csum_type)) {
1244 u8 compression_type = crc.compression_type;
1245 u16 nonce = crc.nonce;
1247 * Note: when we're using rechecksum(), we need to be
1248 * checksumming @src because it has all the data our
1249 * existing checksum covers - if we bounced (because we
1250 * were trying to compress), @dst will only have the
1251 * part of the data the new checksum will cover.
1253 * But normally we want to be checksumming post bounce,
1254 * because part of the reason for bouncing is so the
1255 * data can't be modified (by userspace) while it's in
1258 if (bch2_rechecksum_bio(c, src, version, op->crc,
1261 bio_sectors(src) - (src_len >> 9),
1265 * bch2_rechecksum_bio() sets compression_type on crc from op->crc;
1266 * this isn't always correct, as sometimes we're changing an extent
1267 * from uncompressed to incompressible.
1269 crc.compression_type = compression_type;
1272 if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
1273 bch2_rechecksum_bio(c, src, version, op->crc,
1276 bio_sectors(src) - (src_len >> 9),
1280 crc.compressed_size = dst_len >> 9;
1281 crc.uncompressed_size = src_len >> 9;
1282 crc.live_size = src_len >> 9;
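/*
 * e.g. 64KB of input that compressed down to 16KB yields, in 512 byte
 * sectors: compressed_size = 32, uncompressed_size = live_size = 128.
 */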
1284 swap(dst->bi_iter.bi_size, dst_len);
1285 ret = bch2_encrypt_bio(c, op->csum_type,
1286 extent_nonce(version, crc), dst);
1290 crc.csum = bch2_checksum_bio(c, op->csum_type,
1291 extent_nonce(version, crc), dst);
1292 crc.csum_type = op->csum_type;
1293 swap(dst->bi_iter.bi_size, dst_len);
1296 init_append_extent(op, wp, version, crc);
1299 bio_advance(dst, dst_len);
1300 bio_advance(src, src_len);
1301 total_output += dst_len;
1302 total_input += src_len;
1303 } while (dst->bi_iter.bi_size &&
1304 src->bi_iter.bi_size &&
1306 !bch2_keylist_realloc(&op->insert_keys,
1308 ARRAY_SIZE(op->inline_keys),
1309 BKEY_EXTENT_U64s_MAX));
1311 more = src->bi_iter.bi_size != 0;
1313 dst->bi_iter = saved_iter;
1315 if (dst == src && more) {
1316 BUG_ON(total_output != total_input);
1318 dst = bio_split(src, total_input >> 9,
1319 GFP_NOIO, &c->bio_write);
1320 wbio_init(dst)->put_bio = true;
1321 /* copy WRITE_SYNC flag */
1322 dst->bi_opf = src->bi_opf;
1325 dst->bi_iter.bi_size = total_output;
1330 bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
1333 if (to_wbio(dst)->bounce)
1334 bch2_bio_free_pages_pool(c, dst);
1335 if (to_wbio(dst)->put_bio)
1341 static bool bch2_extent_is_writeable(struct bch_write_op *op,
1344 struct bch_fs *c = op->c;
1345 struct bkey_s_c_extent e;
1346 struct extent_ptr_decoded p;
1347 const union bch_extent_entry *entry;
1348 unsigned replicas = 0;
1350 if (k.k->type != KEY_TYPE_extent)
1353 e = bkey_s_c_to_extent(k);
1354 extent_for_each_ptr_decode(e, p, entry) {
1355 if (p.crc.csum_type ||
1356 crc_is_compressed(p.crc) ||
1360 replicas += bch2_extent_ptr_durability(c, &p);
1363 return replicas >= op->opts.data_replicas;
1366 static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
1368 struct bch_fs *c = op->c;
1369 const struct bch_extent_ptr *ptr;
1372 for_each_keylist_key(&op->insert_keys, k) {
1373 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
1375 bkey_for_each_ptr(ptrs, ptr)
1376 bch2_bucket_nocow_unlock(&c->nocow_locks,
1377 PTR_BUCKET_POS(c, ptr),
1378 BUCKET_NOCOW_LOCK_UPDATE);
1382 static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
1383 struct btree_iter *iter,
1384 struct bkey_i *orig,
1389 struct bkey_ptrs ptrs;
1390 struct bch_extent_ptr *ptr;
1393 if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
1398 new = bch2_bkey_make_mut(trans, k);
1399 ret = PTR_ERR_OR_ZERO(new);
1403 bch2_cut_front(bkey_start_pos(&orig->k), new);
1404 bch2_cut_back(orig->k.p, new);
1406 ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
1407 bkey_for_each_ptr(ptrs, ptr)
1411 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
1412 * that was done when we kicked off the write, and here it's important
1413 * that we update the extent that we wrote to - even if a snapshot has
1414 * since been created. The write is still outstanding, so we're ok
1415 * w.r.t. snapshot atomicity:
1417 return bch2_extent_update_i_size_sectors(trans, iter,
1418 min(new->k.p.offset << 9, new_i_size), 0) ?:
1419 bch2_trans_update(trans, iter, new,
1420 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1423 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
1425 struct bch_fs *c = op->c;
1426 struct btree_trans trans;
1427 struct btree_iter iter;
1428 struct bkey_i *orig;
1432 bch2_trans_init(&trans, c, 0, 0);
1434 for_each_keylist_key(&op->insert_keys, orig) {
1435 ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents,
1436 bkey_start_pos(&orig->k), orig->k.p,
1437 BTREE_ITER_INTENT, k,
1438 NULL, NULL, BTREE_INSERT_NOFAIL, ({
1439 bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
1442 if (ret && !bch2_err_matches(ret, EROFS)) {
1443 struct bkey_i *k = bch2_keylist_front(&op->insert_keys);
1445 bch_err_inum_offset_ratelimited(c,
1446 k->k.p.inode, k->k.p.offset << 9,
1447 "write error while doing btree update: %s",
1457 bch2_trans_exit(&trans);
1460 static void __bch2_nocow_write_done(struct bch_write_op *op)
1462 bch2_nocow_write_unlock(op);
1464 if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
1466 } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
1467 bch2_nocow_write_convert_unwritten(op);
1470 static void bch2_nocow_write_done(struct closure *cl)
1472 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1474 __bch2_nocow_write_done(op);
1475 bch2_write_done(cl);
1478 static void bch2_nocow_write(struct bch_write_op *op)
1480 struct bch_fs *c = op->c;
1481 struct btree_trans trans;
1482 struct btree_iter iter;
1484 struct bkey_ptrs_c ptrs;
1485 const struct bch_extent_ptr *ptr;
1489 struct nocow_lock_bucket *l;
1490 } buckets[BCH_REPLICAS_MAX];
1491 unsigned nr_buckets = 0;
1495 if (op->flags & BCH_WRITE_MOVE)
1498 bch2_trans_init(&trans, c, 0, 0);
1500 bch2_trans_begin(&trans);
1502 ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot);
1506 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
1507 SPOS(op->pos.inode, op->pos.offset, snapshot),
1510 struct bio *bio = &op->wbio.bio;
1514 k = bch2_btree_iter_peek_slot(&iter);
1519 /* fall back to normal cow write path? */
1520 if (unlikely(k.k->p.snapshot != snapshot ||
1521 !bch2_extent_is_writeable(op, k)))
1524 if (bch2_keylist_realloc(&op->insert_keys,
1526 ARRAY_SIZE(op->inline_keys),
1530 /* Get iorefs before dropping btree locks: */
1531 ptrs = bch2_bkey_ptrs_c(k);
1532 bkey_for_each_ptr(ptrs, ptr) {
1533 buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
1534 buckets[nr_buckets].gen = ptr->gen;
1535 buckets[nr_buckets].l =
1536 bucket_nocow_lock(&c->nocow_locks,
1537 bucket_to_u64(buckets[nr_buckets].b));
1539 prefetch(buckets[nr_buckets].l);
1541 if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
1547 op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
1550 /* Unlock before taking nocow locks, doing IO: */
1551 bkey_reassemble(op->insert_keys.top, k);
1552 bch2_trans_unlock(&trans);
1554 bch2_cut_front(op->pos, op->insert_keys.top);
1555 if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
1556 bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
1558 for (i = 0; i < nr_buckets; i++) {
1559 struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
1560 struct nocow_lock_bucket *l = buckets[i].l;
1563 __bch2_bucket_nocow_lock(&c->nocow_locks, l,
1564 bucket_to_u64(buckets[i].b),
1565 BUCKET_NOCOW_LOCK_UPDATE);
1568 stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
1571 if (unlikely(stale))
1572 goto err_bucket_stale;
1575 bio = &op->wbio.bio;
1576 if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
1577 bio = bio_split(bio, k.k->p.offset - op->pos.offset,
1578 GFP_KERNEL, &c->bio_write);
1579 wbio_init(bio)->put_bio = true;
1580 bio->bi_opf = op->wbio.bio.bi_opf;
1582 op->flags |= BCH_WRITE_DONE;
1585 op->pos.offset += bio_sectors(bio);
1586 op->written += bio_sectors(bio);
1588 bio->bi_end_io = bch2_write_endio;
1589 bio->bi_private = &op->cl;
1590 bio->bi_opf |= REQ_OP_WRITE;
1591 closure_get(&op->cl);
1592 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1593 op->insert_keys.top, true);
1595 bch2_keylist_push(&op->insert_keys);
1596 if (op->flags & BCH_WRITE_DONE)
1598 bch2_btree_iter_advance(&iter);
1601 bch2_trans_iter_exit(&trans, &iter);
1603 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1607 bch_err_inum_offset_ratelimited(c,
1609 op->pos.offset << 9,
1610 "%s: btree lookup error %s",
1611 __func__, bch2_err_str(ret));
1613 op->flags |= BCH_WRITE_DONE;
1616 bch2_trans_exit(&trans);
1618 /* fall back to the cow write path? */
1619 if (!(op->flags & BCH_WRITE_DONE)) {
1620 closure_sync(&op->cl);
1621 __bch2_nocow_write_done(op);
1622 op->insert_keys.top = op->insert_keys.keys;
1623 } else if (op->flags & BCH_WRITE_SYNC) {
1624 closure_sync(&op->cl);
1625 bch2_nocow_write_done(&op->cl);
1629 * needs to run out of process context because ei_quota_lock is
1632 continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
1636 for (i = 0; i < nr_buckets; i++)
1637 percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
1639 /* Fall back to COW path: */
1643 bch2_bucket_nocow_unlock(&c->nocow_locks,
1645 BUCKET_NOCOW_LOCK_UPDATE);
1646 for (i = 0; i < nr_buckets; i++)
1647 percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
1649 /* We can retry this: */
1650 ret = BCH_ERR_transaction_restart;
1654 static void __bch2_write(struct bch_write_op *op)
1656 struct bch_fs *c = op->c;
1657 struct write_point *wp = NULL;
1658 struct bio *bio = NULL;
1659 unsigned nofs_flags;
1662 nofs_flags = memalloc_nofs_save();
1664 if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
1665 bch2_nocow_write(op);
1666 if (op->flags & BCH_WRITE_DONE)
1667 goto out_nofs_restore;
1670 memset(&op->failed, 0, sizeof(op->failed));
1673 struct bkey_i *key_to_write;
1674 unsigned key_to_write_offset = op->insert_keys.top_p -
1675 op->insert_keys.keys_p;
1677 /* +1 for possible cache device: */
1678 if (op->open_buckets.nr + op->nr_replicas + 1 >
1679 ARRAY_SIZE(op->open_buckets.v))
1682 if (bch2_keylist_realloc(&op->insert_keys,
1684 ARRAY_SIZE(op->inline_keys),
1685 BKEY_EXTENT_U64s_MAX))
1689 * The copygc thread is now global, which means it's no longer
1690 * freeing up space on specific disks, which means that
1691 * allocations for specific disks may hang arbitrarily long:
1693 ret = bch2_trans_do(c, NULL, NULL, 0,
1694 bch2_alloc_sectors_start_trans(&trans,
1696 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
1700 op->nr_replicas_required,
1703 (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
1704 BCH_WRITE_ONLY_SPECIFIED_DEVS))
1705 ? NULL : &op->cl, &wp));
1706 if (unlikely(ret)) {
1707 if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
1715 bch2_open_bucket_get(c, wp, &op->open_buckets);
1716 ret = bch2_write_extent(op, wp, &bio);
1718 bch2_alloc_sectors_done_inlined(c, wp);
1721 op->flags |= BCH_WRITE_DONE;
1729 bio->bi_end_io = bch2_write_endio;
1730 bio->bi_private = &op->cl;
1731 bio->bi_opf |= REQ_OP_WRITE;
1733 closure_get(bio->bi_private);
1735 key_to_write = (void *) (op->insert_keys.keys_p +
1736 key_to_write_offset);
1738 bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
1739 key_to_write, false);
1745 * If we're running asynchronously, we may still want to block
1746 * synchronously here if we weren't able to submit all of the IO at
1747 * once, as that signals backpressure to the caller.
1749 if ((op->flags & BCH_WRITE_SYNC) ||
1750 (!(op->flags & BCH_WRITE_DONE) &&
1751 !(op->flags & BCH_WRITE_IN_WORKER))) {
1752 closure_sync(&op->cl);
1753 __bch2_write_index(op);
1755 if (!(op->flags & BCH_WRITE_DONE))
1757 bch2_write_done(&op->cl);
1759 bch2_write_queue(op, wp);
1760 continue_at(&op->cl, bch2_write_index, NULL);
1763 memalloc_nofs_restore(nofs_flags);
1766 static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
1768 struct bio *bio = &op->wbio.bio;
1769 struct bvec_iter iter;
1770 struct bkey_i_inline_data *id;
1774 op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
1775 op->flags |= BCH_WRITE_DONE;
1777 bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
1779 ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
1780 ARRAY_SIZE(op->inline_keys),
1781 BKEY_U64s + DIV_ROUND_UP(data_len, 8));
1787 sectors = bio_sectors(bio);
1788 op->pos.offset += sectors;
1790 id = bkey_inline_data_init(op->insert_keys.top);
1792 id->k.version = op->version;
1793 id->k.size = sectors;
1795 iter = bio->bi_iter;
1796 iter.bi_size = data_len;
1797 memcpy_from_bio(id->v.data, bio, iter);
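/*
 * Pad the value out to a u64 boundary - bkey values are sized in
 * u64s, so e.g. data_len = 13 is zero-padded to 16 bytes before
 * set_bkey_val_bytes():
 */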
1799 while (data_len & 7)
1800 id->v.data[data_len++] = '\0';
1801 set_bkey_val_bytes(&id->k, data_len);
1802 bch2_keylist_push(&op->insert_keys);
1804 __bch2_write_index(op);
1806 bch2_write_done(&op->cl);
1810 * bch2_write - handle a write to a cache device or flash-only volume
1812 * This is the starting point for any data to end up in a cache device; it could
1813 * be from a normal write, or a writeback write, or a write to a flash-only
1814 * volume - it's also used by the moving garbage collector to compact data in
1815 * mostly empty buckets.
1817 * It first writes the data to the cache, creating a list of keys to be inserted
1818 * (if the data won't fit in a single open bucket, there will be multiple keys);
1819 * after the data is written it calls bch_journal, and after the keys have been
1820 * added to the next journal write they're inserted into the btree.
1822 * If op->discard is true, instead of inserting the data it invalidates the
1823 * region of the cache represented by op->bio and op->inode.
1825 void bch2_write(struct closure *cl)
1827 struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
1828 struct bio *bio = &op->wbio.bio;
1829 struct bch_fs *c = op->c;
1832 EBUG_ON(op->cl.parent);
1833 BUG_ON(!op->nr_replicas);
1834 BUG_ON(!op->write_point.v);
1835 BUG_ON(bkey_eq(op->pos, POS_MAX));
1837 op->start_time = local_clock();
1838 bch2_keylist_init(&op->insert_keys, op->inline_keys);
1839 wbio_init(bio)->put_bio = false;
1841 if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
1842 bch_err_inum_offset_ratelimited(c,
1844 op->pos.offset << 9,
1845 "misaligned write");
1850 if (c->opts.nochanges) {
1851 op->error = -BCH_ERR_erofs_no_writes;
1855 if (!(op->flags & BCH_WRITE_MOVE) &&
1856 !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
1857 op->error = -BCH_ERR_erofs_no_writes;
1861 this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
1862 bch2_increment_clock(c, bio_sectors(bio), WRITE);
1864 data_len = min_t(u64, bio->bi_iter.bi_size,
1865 op->new_i_size - (op->pos.offset << 9));
1867 if (c->opts.inline_data &&
1868 data_len <= min(block_bytes(c) / 2, 1024U)) {
1869 bch2_write_data_inline(op, data_len);
1876 bch2_disk_reservation_put(c, &op->res);
1878 closure_debug_destroy(&op->cl);
1883 const char * const bch2_write_flags[] = {
1890 void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
1892 prt_str(out, "pos: ");
1893 bch2_bpos_to_text(out, op->pos);
1895 printbuf_indent_add(out, 2);
1897 prt_str(out, "started: ");
1898 bch2_pr_time_units(out, local_clock() - op->start_time);
1901 prt_str(out, "flags: ");
1902 prt_bitflags(out, bch2_write_flags, op->flags);
1905 prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
1908 printbuf_indent_sub(out, 2);
1911 /* Cache promotion on read */
1914 struct rcu_head rcu;
1917 struct rhash_head hash;
1920 struct data_update write;
1921 struct bio_vec bi_inline_vecs[0]; /* must be last */
1924 static const struct rhashtable_params bch_promote_params = {
1925 .head_offset = offsetof(struct promote_op, hash),
1926 .key_offset = offsetof(struct promote_op, pos),
1927 .key_len = sizeof(struct bpos),
1930 static inline bool should_promote(struct bch_fs *c, struct bkey_s_c k,
1932 struct bch_io_opts opts,
1935 if (!(flags & BCH_READ_MAY_PROMOTE))
1938 if (!opts.promote_target)
1941 if (bch2_bkey_has_target(c, k, opts.promote_target))
1944 if (bkey_extent_is_unwritten(k))
1947 if (bch2_target_congested(c, opts.promote_target)) {
1948 /* XXX trace this */
1952 if (rhashtable_lookup_fast(&c->promote_table, &pos,
1953 bch_promote_params))
1959 static void promote_free(struct bch_fs *c, struct promote_op *op)
1963 bch2_data_update_exit(&op->write);
1965 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
1966 bch_promote_params);
1968 bch2_write_ref_put(c, BCH_WRITE_REF_promote);
1972 static void promote_done(struct bch_write_op *wop)
1974 struct promote_op *op =
1975 container_of(wop, struct promote_op, write.op);
1976 struct bch_fs *c = op->write.op.c;
1978 bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
1980 promote_free(c, op);
1983 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
1985 struct bio *bio = &op->write.op.wbio.bio;
1987 trace_and_count(op->write.op.c, read_promote, &rbio->bio);
1989 /* we now own pages: */
1990 BUG_ON(!rbio->bounce);
1991 BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
1993 memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
1994 sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
1995 swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
1997 bch2_data_update_read_done(&op->write, rbio->pick.crc);
2000 static struct promote_op *__promote_alloc(struct btree_trans *trans,
2001 enum btree_id btree_id,
2004 struct extent_ptr_decoded *pick,
2005 struct bch_io_opts opts,
2007 struct bch_read_bio **rbio)
2009 struct bch_fs *c = trans->c;
2010 struct promote_op *op = NULL;
2012 unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
2015 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
2018 op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
2022 op->start_time = local_clock();
2026 * We don't use the mempool here because extents that aren't
2027 * checksummed or compressed can be too big for the mempool:
2029 *rbio = kzalloc(sizeof(struct bch_read_bio) +
2030 sizeof(struct bio_vec) * pages,
2035 rbio_init(&(*rbio)->bio, opts);
2036 bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
2038 if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
2042 (*rbio)->bounce = true;
2043 (*rbio)->split = true;
2044 (*rbio)->kmalloc = true;
2046 if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
2047 bch_promote_params))
2050 bio = &op->write.op.wbio.bio;
2051 bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
2053 ret = bch2_data_update_init(trans, NULL, &op->write,
2054 writepoint_hashed((unsigned long) current),
2056 (struct data_update_opts) {
2057 .target = opts.promote_target,
2058 .extra_replicas = 1,
2059 .write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
2062 if (ret == -BCH_ERR_nocow_lock_blocked) {
2063 ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
2064 bch_promote_params);
2070 op->write.op.end_io = promote_done;
2075 bio_free_pages(&(*rbio)->bio);
2079 bch2_write_ref_put(c, BCH_WRITE_REF_promote);
2084 static struct promote_op *promote_alloc(struct btree_trans *trans,
2085 struct bvec_iter iter,
2087 struct extent_ptr_decoded *pick,
2088 struct bch_io_opts opts,
2090 struct bch_read_bio **rbio,
2094 struct bch_fs *c = trans->c;
2095 bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
2096 /* data might have to be decompressed in the write path: */
2097 unsigned sectors = promote_full
2098 ? max(pick->crc.compressed_size, pick->crc.live_size)
2099 : bvec_iter_sectors(iter);
2100 struct bpos pos = promote_full
2101 ? bkey_start_pos(k.k)
2102 : POS(k.k->p.inode, iter.bi_sector);
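/*
 * promote_full copies the whole extent starting from its start pos;
 * otherwise we only promote the range this read actually touched,
 * keyed at the current sector.
 */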
2103 struct promote_op *promote;
2105 if (!should_promote(c, k, pos, opts, flags))
2108 promote = __promote_alloc(trans,
2109 k.k->type == KEY_TYPE_reflink_v
2112 k, pos, pick, opts, sectors, rbio);
2117 *read_full = promote_full;
2123 #define READ_RETRY_AVOID 1
2124 #define READ_RETRY 2
2129 RBIO_CONTEXT_HIGHPRI,
2130 RBIO_CONTEXT_UNBOUND,
2133 static inline struct bch_read_bio *
2134 bch2_rbio_parent(struct bch_read_bio *rbio)
2136 return rbio->split ? rbio->parent : rbio;
2140 static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
2141 enum rbio_context context,
2142 struct workqueue_struct *wq)
2144 if (context <= rbio->context) {
2147 rbio->work.func = fn;
2148 rbio->context = context;
2149 queue_work(wq, &rbio->work);
2153 static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
2155 BUG_ON(rbio->bounce && !rbio->split);
2158 promote_free(rbio->c, rbio->promote);
2159 rbio->promote = NULL;
2162 bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
2165 struct bch_read_bio *parent = rbio->parent;
2170 bio_put(&rbio->bio);
2179 * Only called on a top-level bch_read_bio to complete an entire read request,
2182 static void bch2_rbio_done(struct bch_read_bio *rbio)
2184 if (rbio->start_time)
2185 bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
2187 bio_endio(&rbio->bio);
2190 static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
2191 struct bvec_iter bvec_iter,
2192 struct bch_io_failures *failed,
2195 struct btree_trans trans;
2196 struct btree_iter iter;
2201 flags &= ~BCH_READ_LAST_FRAGMENT;
2202 flags |= BCH_READ_MUST_CLONE;
2204 bch2_bkey_buf_init(&sk);
2205 bch2_trans_init(&trans, c, 0, 0);
2207 bch2_trans_iter_init(&trans, &iter, rbio->data_btree,
2208 rbio->read_pos, BTREE_ITER_SLOTS);
2210 rbio->bio.bi_status = 0;
2212 k = bch2_btree_iter_peek_slot(&iter);
2216 bch2_bkey_buf_reassemble(&sk, c, k);
2217 k = bkey_i_to_s_c(sk.k);
2218 bch2_trans_unlock(&trans);
2220 if (!bch2_bkey_matches_ptr(c, k,
2222 rbio->data_pos.offset -
2223 rbio->pick.crc.offset)) {
2224 /* extent we wanted to read no longer exists: */
2229 ret = __bch2_read_extent(&trans, rbio, bvec_iter,
2232 k, 0, failed, flags);
2233 if (ret == READ_RETRY)
2238 bch2_rbio_done(rbio);
2239 bch2_trans_iter_exit(&trans, &iter);
2240 bch2_trans_exit(&trans);
2241 bch2_bkey_buf_exit(&sk, c);
2244 rbio->bio.bi_status = BLK_STS_IOERR;
2248 static void bch2_rbio_retry(struct work_struct *work)
2250 struct bch_read_bio *rbio =
2251 container_of(work, struct bch_read_bio, work);
2252 struct bch_fs *c = rbio->c;
2253 struct bvec_iter iter = rbio->bvec_iter;
2254 unsigned flags = rbio->flags;
2255 subvol_inum inum = {
2256 .subvol = rbio->subvol,
2257 .inum = rbio->read_pos.inode,
2259 struct bch_io_failures failed = { .nr = 0 };
2261 trace_and_count(c, read_retry, &rbio->bio);
2263 if (rbio->retry == READ_RETRY_AVOID)
2264 bch2_mark_io_failure(&failed, &rbio->pick);
2266 rbio->bio.bi_status = 0;
2268 rbio = bch2_rbio_free(rbio);
2270 flags |= BCH_READ_IN_RETRY;
2271 flags &= ~BCH_READ_MAY_PROMOTE;
2273 if (flags & BCH_READ_NODECODE) {
2274 bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
2276 flags &= ~BCH_READ_LAST_FRAGMENT;
2277 flags |= BCH_READ_MUST_CLONE;
2279 __bch2_read(c, rbio, iter, inum, &failed, flags);
2283 static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
2286 rbio->retry = retry;
2288 if (rbio->flags & BCH_READ_IN_RETRY)
2291 if (retry == READ_ERR) {
2292 rbio = bch2_rbio_free(rbio);
2294 rbio->bio.bi_status = error;
2295 bch2_rbio_done(rbio);
2297 bch2_rbio_punt(rbio, bch2_rbio_retry,
2298 RBIO_CONTEXT_UNBOUND, system_unbound_wq);
2302 static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
2303 struct bch_read_bio *rbio)
2305 struct bch_fs *c = rbio->c;
2306 u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
2307 struct bch_extent_crc_unpacked new_crc;
2308 struct btree_iter iter;
2313 if (crc_is_compressed(rbio->pick.crc))
2316 bch2_trans_iter_init(trans, &iter, rbio->data_btree, rbio->data_pos,
2317 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2318 k = bch2_btree_iter_peek_slot(&iter);
2319 if ((ret = bkey_err(k)))
2322 if (bversion_cmp(k.k->version, rbio->version) ||
2323 !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
2326 /* Extent was merged? */
2327 if (bkey_start_offset(k.k) < data_offset ||
2328 k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
2331 if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
2332 rbio->pick.crc, NULL, &new_crc,
2333 bkey_start_offset(k.k) - data_offset, k.k->size,
2334 rbio->pick.crc.csum_type)) {
2335 bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
2341 * going to be temporarily appending another checksum entry:
2343 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
2344 sizeof(struct bch_extent_crc128));
2345 if ((ret = PTR_ERR_OR_ZERO(new)))
2348 bkey_reassemble(new, k);
2350 if (!bch2_bkey_narrow_crcs(new, new_crc))
2353 ret = bch2_trans_update(trans, &iter, new,
2354 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
2356 bch2_trans_iter_exit(trans, &iter);
2360 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
2362 bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
2363 __bch2_rbio_narrow_crcs(&trans, rbio));
2366 /* Inner part that may run in process context */
2367 static void __bch2_read_endio(struct work_struct *work)
2369 struct bch_read_bio *rbio =
2370 container_of(work, struct bch_read_bio, work);
2371 struct bch_fs *c = rbio->c;
2372 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
2373 struct bio *src = &rbio->bio;
2374 struct bio *dst = &bch2_rbio_parent(rbio)->bio;
2375 struct bvec_iter dst_iter = rbio->bvec_iter;
2376 struct bch_extent_crc_unpacked crc = rbio->pick.crc;
2377 struct nonce nonce = extent_nonce(rbio->version, crc);
2378 unsigned nofs_flags;
2379 struct bch_csum csum;
2382 nofs_flags = memalloc_nofs_save();
2384 /* Reset iterator for checksumming and copying bounced data: */
2386 src->bi_iter.bi_size = crc.compressed_size << 9;
2387 src->bi_iter.bi_idx = 0;
2388 src->bi_iter.bi_bvec_done = 0;
2390 src->bi_iter = rbio->bvec_iter;
2393 csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
2394 if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
2399 * We need to rework the narrow_crcs path to deliver the read completion
2400 * first, and then punt to a different workqueue, otherwise we're
2401 * holding up reads while doing btree updates, which is bad for memory
2404 if (unlikely(rbio->narrow_crcs))
2405 bch2_rbio_narrow_crcs(rbio);
2407 if (rbio->flags & BCH_READ_NODECODE)
2410 /* Adjust crc to point to subset of data we want: */
2411 crc.offset += rbio->offset_into_extent;
2412 crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
2414 if (crc_is_compressed(crc)) {
2415 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2419 if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
2420 goto decompression_err;
2422 /* don't need to decrypt the entire bio: */
2423 nonce = nonce_add(nonce, crc.offset << 9);
2424 bio_advance(src, crc.offset << 9);
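/*
 * nonce_add() advanced the nonce by the same crc.offset the bio was
 * advanced by, so decryption of the live region lines up with how
 * those sectors were originally encrypted.
 */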
2426 BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
2427 src->bi_iter.bi_size = dst_iter.bi_size;
2429 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2434 struct bvec_iter src_iter = src->bi_iter;
2435 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
2439 if (rbio->promote) {
2441 * Re-encrypt the data we decrypted, so it's consistent with
2444 ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
2448 promote_start(rbio->promote, rbio);
2449 rbio->promote = NULL;
2452 if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
2453 rbio = bch2_rbio_free(rbio);
2454 bch2_rbio_done(rbio);
2457 memalloc_nofs_restore(nofs_flags);
2461 * Checksum error: if the bio wasn't bounced, we may have been
2462 * reading into buffers owned by userspace (that userspace can
2463 * scribble over) - retry the read, bouncing it this time:
2465 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
2466 rbio->flags |= BCH_READ_MUST_BOUNCE;
2467 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
2471 bch_err_inum_offset_ratelimited(ca,
2472 rbio->read_pos.inode,
2473 rbio->read_pos.offset << 9,
2474 "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
2475 rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
2476 csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
2478 bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
2481 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
2482 rbio->read_pos.offset << 9,
2483 "decompression error");
2484 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
2487 bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
2488 rbio->read_pos.offset << 9,
2490 bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
2494 static void bch2_read_endio(struct bio *bio)
2496 struct bch_read_bio *rbio =
2497 container_of(bio, struct bch_read_bio, bio);
2498 struct bch_fs *c = rbio->c;
2499 struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
2500 struct workqueue_struct *wq = NULL;
2501 enum rbio_context context = RBIO_CONTEXT_NULL;
2503 if (rbio->have_ioref) {
2504 bch2_latency_acct(ca, rbio->submit_time, READ);
2505 percpu_ref_put(&ca->io_ref);
2509 rbio->bio.bi_end_io = rbio->end_io;
2511 if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
2512 rbio->read_pos.inode,
2513 rbio->read_pos.offset,
2514 "data read error: %s",
2515 bch2_blk_status_to_str(bio->bi_status))) {
2516 bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
	if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
	    ptr_stale(ca, &rbio->pick.ptr)) {
		trace_and_count(c, read_reuse_race, &rbio->bio);

		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
		else
			bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
		return;
	}
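	/*
	 * Decide where __bch2_read_endio() runs: anything that might block or
	 * take locks (crc narrowing, decompression, decryption, promotes)
	 * gets punted to process context on the unbound workqueue; plain
	 * checksum verification goes to the highpri workqueue; unchecksummed
	 * reads are completed directly by bch2_rbio_punt():
	 */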
	if (rbio->narrow_crcs ||
	    rbio->promote ||
	    crc_is_compressed(rbio->pick.crc) ||
	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
	else if (rbio->pick.crc.csum_type)
		context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
	bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}
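/*
 * Reflink pointers don't contain data themselves - they index into the
 * reflink btree, where the actual extent (a reflink_v or indirect inline
 * data key) lives. Look up the indirect extent, swap it into @orig_k, and
 * adjust @offset_into_extent to be relative to the new key:
 */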
int __bch2_read_indirect_extent(struct btree_trans *trans,
				unsigned *offset_into_extent,
				struct bkey_buf *orig_k)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 reflink_offset;
	int ret;
	reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
		*offset_into_extent;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink,
			     POS(0, reflink_offset),
			     BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;
	if (k.k->type != KEY_TYPE_reflink_v &&
	    k.k->type != KEY_TYPE_indirect_inline_data) {
		bch_err_inum_offset_ratelimited(trans->c,
			orig_k->k->k.p.inode,
			orig_k->k->k.p.offset << 9,
			"%llu len %u points to nonexistent indirect extent %llu",
			orig_k->k->k.p.offset,
			orig_k->k->k.size,
			reflink_offset);
		bch2_inconsistent_error(trans->c);
		ret = -EIO;
		goto err;
	}
	*offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
	bch2_bkey_buf_reassemble(orig_k, trans->c, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
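/*
 * A dirty pointer should never be stale: the bucket it points into can't
 * have been reused while the pointer still references it. If we see one,
 * print the extent, the corresponding alloc key and the in-memory bucket
 * gen to aid debugging, and mark the filesystem inconsistent:
 */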
static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
						   struct bkey_s_c k,
						   struct bch_extent_ptr ptr)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
	struct btree_iter iter;
	struct printbuf buf = PRINTBUF;
	int ret;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     PTR_BUCKET_POS(c, &ptr),
			     BTREE_ITER_CACHED);

	prt_printf(&buf, "Attempting to read from stale dirty pointer:");
	printbuf_indent_add(&buf, 2);
	prt_newline(&buf);

	bch2_bkey_val_to_text(&buf, c, k);
	prt_newline(&buf);

	prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
	if (!ret) {
		prt_newline(&buf);
		bch2_bkey_val_to_text(&buf, c, k);
	}

	bch2_fs_inconsistent(c, "%s", buf.buf);

	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
}
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
		       struct bvec_iter iter, struct bpos read_pos,
		       enum btree_id data_btree, struct bkey_s_c k,
		       unsigned offset_into_extent,
		       struct bch_io_failures *failed, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct bch_read_bio *rbio = NULL;
	struct bch_dev *ca = NULL;
	struct promote_op *promote = NULL;
	bool bounce = false, read_full = false, narrow_crcs = false;
	struct bpos data_pos = bkey_start_pos(k.k);
	int pick_ret;
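	/*
	 * Inline extents store their data in the bkey itself: copy it out
	 * directly and zero-fill whatever the key doesn't cover - no device
	 * IO required:
	 */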
	if (bkey_extent_is_inline_data(k.k)) {
		unsigned bytes = min_t(unsigned, iter.bi_size,
				       bkey_inline_data_bytes(k.k));

		swap(iter.bi_size, bytes);
		memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
		swap(iter.bi_size, bytes);
		bio_advance_iter(&orig->bio, &iter, bytes);
		zero_fill_bio_iter(&orig->bio, iter);
		goto out_read_done;
	}
retry_pick:
	pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
	/* hole or reservation - just zero fill: */
	if (!pick_ret)
		goto hole;

	if (pick_ret < 0) {
		bch_err_inum_offset_ratelimited(c,
				read_pos.inode, read_pos.offset << 9,
				"no device to read from");
		goto err;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
	/*
	 * Stale dirty pointers are treated as IO errors, but @failed isn't
	 * allocated unless we're in the retry path - so if we're not in the
	 * retry path, don't check here, it'll be caught in bch2_read_endio()
	 * and we'll end up in the retry path:
	 */
	if ((flags & BCH_READ_IN_RETRY) &&
	    !pick.ptr.cached &&
	    unlikely(ptr_stale(ca, &pick.ptr))) {
		read_from_stale_dirty_pointer(trans, k, pick.ptr);
		bch2_mark_io_failure(failed, &pick);
		goto retry_pick;
	}
	/*
	 * Unlock the iterator while the btree node's lock is still in
	 * cache, before doing the IO:
	 */
	bch2_trans_unlock(trans);
	if (flags & BCH_READ_NODECODE) {
		/*
		 * can happen if we retry, and the extent we were going to read
		 * has been merged in the meantime:
		 */
		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
			goto hole;

		iter.bi_size = pick.crc.compressed_size << 9;
		goto get_bio;
	}
	if (!(flags & BCH_READ_LAST_FRAGMENT) ||
	    bio_flagged(&orig->bio, BIO_CHAIN))
		flags |= BCH_READ_MUST_CLONE;

	narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
		bch2_can_narrow_extent_crcs(k, pick.crc);

	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
		flags |= BCH_READ_MUST_BOUNCE;

	EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
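	/*
	 * We have to read (and bounce) the full extent when it's compressed,
	 * when it's checksummed and we aren't reading all of it, when it's
	 * encrypted and would otherwise land in user-mapped pages, or when a
	 * bounce was explicitly requested:
	 */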
	if (crc_is_compressed(pick.crc) ||
	    (pick.crc.csum_type != BCH_CSUM_none &&
	     (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
	      (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
	       (flags & BCH_READ_USER_MAPPED)) ||
	      (flags & BCH_READ_MUST_BOUNCE)))) {
		read_full = true;
		bounce = true;
	}

	if (orig->opts.promote_target)
		promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
					&rbio, &bounce, &read_full);

	if (!read_full) {
		EBUG_ON(crc_is_compressed(pick.crc));
		EBUG_ON(pick.crc.csum_type &&
			(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
			 bvec_iter_sectors(iter) != pick.crc.live_size ||
			 pick.crc.offset ||
			 offset_into_extent));
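		/*
		 * We're reading a contiguous subset of the extent directly
		 * into the destination; rewrite pick.crc so it describes
		 * exactly the range being read, starting at offset 0:
		 */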
		data_pos.offset += offset_into_extent;
		pick.ptr.offset += pick.crc.offset +
			offset_into_extent;
		offset_into_extent		= 0;
		pick.crc.compressed_size	= bvec_iter_sectors(iter);
		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
		pick.crc.offset			= 0;
		pick.crc.live_size		= bvec_iter_sectors(iter);
	}
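	/*
	 * Pick which bio we'll actually submit: the bounce rbio the promote
	 * path already allocated, a new bounce bio backed by mempool pages,
	 * a clone of the original (so split errors can be reported sanely),
	 * or the original bio itself:
	 */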
get_bio:
	if (rbio) {
		/*
		 * promote already allocated bounce rbio:
		 * promote needs to allocate a bio big enough for uncompressing
		 * data in the write path, but we're not going to use it all
		 * here:
		 */
		EBUG_ON(rbio->bio.bi_iter.bi_size <
			pick.crc.compressed_size << 9);
		rbio->bio.bi_iter.bi_size =
			pick.crc.compressed_size << 9;
	} else if (bounce) {
		unsigned sectors = pick.crc.compressed_size;

		rbio = rbio_init(bio_alloc_bioset(NULL,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  0,
						  GFP_NOIO,
						  &c->bio_read_split),
				 orig->opts);

		bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
		rbio->bounce	= true;
		rbio->split	= true;
	} else if (flags & BCH_READ_MUST_CLONE) {
		/*
		 * Have to clone if there were any splits, due to error
		 * reporting issues (if a split errored, and retrying didn't
		 * work, when it reports the error to its parent (us) we don't
		 * know if the error was from our bio, and we should retry, or
		 * from the whole bio, in which case we don't want to retry and
		 * lose the error)
		 */
		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
						 &c->bio_read_split),
				 orig->opts);
		rbio->bio.bi_iter = iter;
		rbio->split	= true;
	} else {
		rbio = orig;
		rbio->bio.bi_iter = iter;
		EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
	}
	EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

	rbio->c			= c;
	rbio->submit_time	= local_clock();
	if (rbio->split)
		rbio->parent	= orig;
	else
		rbio->end_io	= orig->bio.bi_end_io;
	rbio->bvec_iter		= iter;
	rbio->offset_into_extent = offset_into_extent;
	rbio->flags		= flags;
	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
	rbio->narrow_crcs	= narrow_crcs;
	rbio->hole		= 0;
	rbio->retry		= 0;
	rbio->context		= 0;
	/* XXX: only initialize this if needed */
	rbio->devs_have		= bch2_bkey_devs(k);
	rbio->pick		= pick;
	rbio->subvol		= orig->subvol;
	rbio->read_pos		= read_pos;
	rbio->data_btree	= data_btree;
	rbio->data_pos		= data_pos;
	rbio->version		= k.k->version;
	rbio->promote		= promote;
	INIT_WORK(&rbio->work, NULL);

	rbio->bio.bi_opf	= orig->bio.bi_opf;
	rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
	rbio->bio.bi_end_io	= bch2_read_endio;

	if (rbio->bounce)
		trace_and_count(c, read_bounce, &rbio->bio);
	this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
	/*
	 * If it's being moved internally, we don't want to flag it as a cache
	 * hit:
	 */
	if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
			PTR_BUCKET_NR(ca, &pick.ptr), READ);
	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
		bio_inc_remaining(&orig->bio);
		trace_and_count(c, read_split, &orig->bio);
	}
	if (!rbio->pick.idx) {
		if (!rbio->have_ioref) {
			bch_err_inum_offset_ratelimited(c,
					read_pos.inode,
					read_pos.offset << 9,
					"no device to read from");
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
			     bio_sectors(&rbio->bio));
		bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
		if (unlikely(c->opts.no_data_io)) {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				bio_endio(&rbio->bio);
		} else {
			if (likely(!(flags & BCH_READ_IN_RETRY)))
				submit_bio(&rbio->bio);
			else
				submit_bio_wait(&rbio->bio);
		}

		/*
		 * We just submitted IO which may block, we expect relock fail
		 * events and shouldn't count them:
		 */
		trans->notrace_relock_fail = true;
	} else {
		/* Attempting reconstruct read: */
		if (bch2_ec_read_extent(c, rbio)) {
			bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
			goto out;
		}

		if (likely(!(flags & BCH_READ_IN_RETRY)))
			bio_endio(&rbio->bio);
	}
out:
	if (likely(!(flags & BCH_READ_IN_RETRY))) {
		return 0;
	} else {
		int ret;

		rbio->context = RBIO_CONTEXT_UNBOUND;
		bch2_read_endio(&rbio->bio);

		ret = rbio->retry;
		rbio = bch2_rbio_free(rbio);

		if (ret == READ_RETRY_AVOID) {
			bch2_mark_io_failure(failed, &pick);
			ret = READ_RETRY;
		}

		if (!ret)
			goto out_read_done;

		return ret;
	}

err:
	if (flags & BCH_READ_IN_RETRY)
		return READ_ERR;

	orig->bio.bi_status = BLK_STS_IOERR;
	goto out_read_done;

hole:
	/*
	 * won't normally happen in the BCH_READ_NODECODE
	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
	 * to read no longer exists we have to signal that:
	 */
	if (flags & BCH_READ_NODECODE)
		orig->hole = true;

	zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
	if (flags & BCH_READ_LAST_FRAGMENT)
		bch2_rbio_done(orig);

	return 0;
}
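/*
 * Outer read loop: walk the extents btree over the range being read,
 * resolving reflink indirection and issuing a read for each extent the
 * request spans; restarts from the top on transaction restarts and read
 * retries:
 */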
void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
		 struct bvec_iter bvec_iter, subvol_inum inum,
		 struct bch_io_failures *failed, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_buf sk;
	struct bkey_s_c k;
	u32 snapshot;
	int ret;

	BUG_ON(flags & BCH_READ_NODECODE);
	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;
		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(&trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, bvec_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;
		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(&trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;
		k = bkey_i_to_s_c(sk.k);

		/*
		 * With indirect extents, the amount of data to read is the min
		 * of the original extent and the indirect extent:
		 */
		sectors = min(sectors, k.k->size - offset_into_extent);
		bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
		swap(bvec_iter.bi_size, bytes);

		if (bvec_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter.pos,
					 data_btree, k,
					 offset_into_extent, failed, flags);
		if (ret)
			break;
		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(bvec_iter.bi_size, bytes);
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);

		ret = btree_trans_too_many_iters(&trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(&trans, &iter);
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
	    ret == READ_RETRY ||
	    ret == READ_RETRY_AVOID)
		goto retry;

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	if (ret) {
		bch_err_inum_offset_ratelimited(c, inum.inum,
						bvec_iter.bi_sector << 9,
						"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bch2_rbio_done(rbio);
	}
}
void bch2_fs_io_exit(struct bch_fs *c)
{
	if (c->promote_table.tbl)
		rhashtable_destroy(&c->promote_table);
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
	bioset_exit(&c->bio_read_split);
	bioset_exit(&c->bio_read);
}
int bch2_fs_io_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_init;

	if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_read_split_init;

	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_write_init;

	if (mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0))
		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

	if (rhashtable_init(&c->promote_table, &bch_promote_params))
		return -BCH_ERR_ENOMEM_promote_table_init;

	return 0;
}
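/*
 * Note: the BCH_ERR_ENOMEM_* codes above are filesystem-private; they're
 * expected to be mapped back to plain -ENOMEM (via bch2_err_class()) before
 * escaping to the rest of the kernel.
 */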