// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}

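/*
 * Device latency is tracked as an exponentially weighted moving average;
 * ewma_add(old, io_latency, 5) (see util.h) weights each new sample by
 * roughly 1/2^5, i.e. approximately:
 *
 *	new = old + (io_latency - old) / 32
 *
 * so a device averaging 1ms that sees a single 33ms outlier only moves to
 * ~2ms. (Sketch of the math, not the macro verbatim.)
 */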
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new, v = atomic64_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
}

#endif

/* Allocate, free from mempool: */

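/*
 * Bounce pages come from plain alloc_page() when possible; only on
 * allocation failure do we fall back to the bio_bounce_pages mempool,
 * taking bio_bounce_pages_lock so a single user drains the pool at a time
 * and forward progress is guaranteed (the usual mempool discipline).
 */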
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOFS);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
	}

	return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

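/*
 * bch2_sum_sector_overwrites() walks the existing extents the new key
 * overwrites and computes, for the caller:
 *
 *   i_sectors_delta    - change to the inode's bi_sectors (allocated vs
 *			  unallocated sectors)
 *   disk_sectors_delta - change to actual disk usage, counting replicas
 *   usage_increasing   - true if the update can consume more space than it
 *			  frees (more replicas, newly uncompressed, or an
 *			  overwrite in a different snapshot), i.e. a disk
 *			  reservation may be needed
 */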
int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing	= false;
	*i_sectors_delta	= 0;
	*disk_sectors_delta	= 0;

	bch2_trans_copy_iter(&iter, extent_iter);

	for_each_btree_key_upto_continue_norestart(iter,
				new->k.p, BTREE_ITER_SLOTS, old, ret) {
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

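/*
 * Every extent update must also update the inode, because fsync relies on
 * the inode's bi_journal_seq, which the inode trigger updates. When neither
 * bi_size nor bi_sectors actually changes, though, the inode update itself
 * doesn't need to be journalled - hence inode_update_flags below defaults
 * to BTREE_UPDATE_NOJOURNAL and is cleared as soon as we modify a field
 * that must survive a crash.
 */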
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	struct btree_iter iter;
	struct bkey_i *k;
	struct bkey_i_inode_v3 *inode;
	unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_CACHED);
	ret = PTR_ERR_OR_ZERO(k);
	if (unlikely(ret))
		return ret;

	if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
		k = bch2_inode_to_v3(trans, k);
		ret = PTR_ERR_OR_ZERO(k);
		if (unlikely(ret))
			goto err;
	}

	inode = bkey_i_to_inode_v3(k);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses us the iterator without changing iter->path->pos to
	 * search_key() (which is pos + 1 for extents): we want there to be a
	 * path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(iter, next_pos);

	return 0;
}

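/*
 * Index update loop for the normal (non-move) write path: each key on
 * op->insert_keys is inserted in its own transaction; on a transaction
 * restart we retry with a fresh snapshot ID, and if only part of a key was
 * inserted atomically we cut off the inserted prefix and go around again.
 */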
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum	= k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		ret = bch2_extent_update(trans, inum, &iter, sk.k,
					&op->res,
					op->new_i_size, &op->i_sectors_delta,
					op->flags & BCH_WRITE_CHECK_ENOSPC);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

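/*
 * Replica fan-out: one bio is submitted per pointer in the key. All but the
 * last pointer get a clone of the original bio (bio_inc_remaining() keeps
 * the parent's completion count in step); the last pointer reuses the
 * parent bio itself, so a single-replica write allocates nothing extra.
 */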
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k,
			       bool nocow)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	bkey_for_each_ptr(ptrs, ptr) {
		BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
		       !c->devs[ptr->dev]);

		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
						GFP_NOFS, &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= nocow || bch2_dev_get_ioref(ca,
					type == BCH_DATA_btree ? READ : WRITE);
		n->nocow		= nocow;
		n->submit_time		= local_clock();
		n->inode_offset		= bkey_start_offset(&k->k);
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);

			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
				bio_endio(&n->bio);
				continue;
			}

			submit_bio(&n->bio);
		} else {
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}

static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	if (!(op->flags & BCH_WRITE_MOVE))
		bch2_write_ref_put(c, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	if (op->end_io)
		op->end_io(op);
}

static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct bch_extent_ptr *ptr;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return -EIO;
		}

		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k;
	unsigned dev;
	int ret = 0;

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	/*
	 * probably not the ideal place to hook this in, but I don't
	 * particularly want to plumb io_opts all the way through the btree
	 * update stack right now
	 */
	for_each_keylist_key(keys, k)
		bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_MOVE)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_DONE;
	goto out;
}

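/*
 * Write point state machine, used only for time-stats accounting: a write
 * point is "running" while a write is being submitted, "waiting_io" while
 * IO is in flight, "waiting_work" once completed IO is queued for index
 * updates, and "stopped" when idle. __wp_update_state() charges the elapsed
 * time to the state being left.
 */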
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
	if (state != wp->state) {
		u64 now = ktime_get_ns();

		if (wp->last_state_change &&
		    time_after64(now, wp->last_state_change))
			wp->time[wp->state] += now - wp->last_state_change;
		wp->state = state;
		wp->last_state_change = now;
	}
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state state;

	state = running ? WRITE_POINT_running :
		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
					 : WRITE_POINT_stopped;

	__wp_update_state(wp, state);
}

static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	if ((op->flags & BCH_WRITE_DONE) &&
	    (op->flags & BCH_WRITE_MOVE))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}

void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
		if (op)
			list_del(&op->wp_list);
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		op->flags |= BCH_WRITE_IN_WORKER;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_DONE))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}

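/*
 * Completion path for a (possibly split) replica bio: record IO errors
 * against the device so the indexing step can drop the bad pointer, do
 * latency accounting, release the device ioref, and complete the parent
 * bio (or put the op's closure) only once this split is accounted for.
 */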
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
				    op->pos.inode,
				    wbio->inode_offset << 9,
				    "data write error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_IO_ERROR;
	}

	if (wbio->nocow)
		set_bit(wbio->dev, op->devs_need_flush->d);

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}

static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.version	= version;

	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
				       op->flags & BCH_WRITE_CACHED);

	bch2_keylist_push(&op->insert_keys);
}

static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}

static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;
	int ret;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum))
		return -EIO;

	ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return ret;
}

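/*
 * Writes of data that's already encoded (checksummed/compressed - e.g. from
 * the move path) get classified before the main write loop: the extent may
 * be writable as-is (DO_WRITE), may need decompressing or rechecksumming
 * first (OK), or may fail checksum verification (CHECKSUM_ERR) or
 * decompression (ERR).
 */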
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type) &&
		    !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type) &&
	    !c->opts.no_data_io)
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_opt ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}

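/*
 * Main data path: each iteration of the loop below carves one extent off
 * the source bio - compressing, encrypting and checksumming it as
 * configured - and appends the matching key to op->insert_keys. Extent size
 * is bounded by the space left at the write point (wp->sectors_free) and,
 * when checksummed, by opts.encoded_extent_max.
 */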
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		/* XXX look for bug here */
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		if (page_alloc_failed &&
		    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc_is_compressed(crc) &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			u8 compression_type = crc.compression_type;
			u16 nonce = crc.nonce;
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
			/*
			 * rchecksum_bio sets compression_type on crc from op->crc,
			 * this isn't always correct as sometimes we're changing
			 * an extent from uncompressed to incompressible.
			 */
			crc.compression_type = compression_type;
			crc.nonce = nonce;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			ret = bch2_encrypt_bio(c, op->csum_type,
					       extent_nonce(version, crc), dst);
			if (ret)
				goto err;

			crc.csum = bch2_checksum_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				      op->inline_keys,
				      ARRAY_SIZE(op->inline_keys),
				      BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOFS, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;
do_write:
	*_dst = dst;
	return more;
csum_err:
	bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
	ret = -EIO;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}

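/*
 * Nocow write path: overwrite data in place instead of copy-on-write.
 * Only extents that are plain replicated data - no checksums, compression
 * or erasure coding - and that still meet the durability target are
 * eligible:
 */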
static bool bch2_extent_is_writeable(struct bch_write_op *op,
				     struct bkey_s_c k)
{
	struct bch_fs *c = op->c;
	struct bkey_s_c_extent e;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	unsigned replicas = 0;

	if (k.k->type != KEY_TYPE_extent)
		return false;

	e = bkey_s_c_to_extent(k);
	extent_for_each_ptr_decode(e, p, entry) {
		if (p.crc.csum_type ||
		    crc_is_compressed(p.crc) ||
		    p.has_ec)
			return false;

		replicas += bch2_extent_ptr_durability(c, &p);
	}

	return replicas >= op->opts.data_replicas;
}

static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	const struct bch_extent_ptr *ptr;
	struct bkey_i *k;

	for_each_keylist_key(&op->insert_keys, k) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

		bkey_for_each_ptr(ptrs, ptr)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr),
						 BUCKET_NOCOW_LOCK_UPDATE);
	}
}

static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	struct bkey_i *new;
	struct bkey_ptrs ptrs;
	struct bch_extent_ptr *ptr;
	int ret;

	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
		/* trace this */
		return 0;
	}

	new = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return  bch2_extent_update_i_size_sectors(trans, iter,
					min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i *orig;
	struct bkey_s_c k;
	int ret;

	for_each_keylist_key(&op->insert_keys, orig) {
		ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_INTENT, k,
				     NULL, NULL, BTREE_INSERT_NOFAIL, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret) {
			op->error = ret;
			break;
		}
	}

	bch2_trans_put(trans);
}

static void __bch2_nocow_write_done(struct bch_write_op *op)
{
	bch2_nocow_write_unlock(op);

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		op->error = -EIO;
	} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
		bch2_nocow_write_convert_unwritten(op);
}

static void bch2_nocow_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}

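/*
 * Overall sequence for a nocow write: look up the existing extent, take
 * device iorefs and per-bucket nocow locks, double check the bucket gens
 * haven't changed (a stale gen means the data may have been moved, so we
 * retry), then write in place; unwritten extents are converted to written
 * in the btree after the IO completes. Any failure simply falls back to
 * the normal COW path.
 */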
static void bch2_nocow_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_ptrs_c ptrs;
	const struct bch_extent_ptr *ptr;
	struct {
		struct bpos	b;
		unsigned	gen;
		struct nocow_lock_bucket *l;
	} buckets[BCH_REPLICAS_MAX];
	unsigned nr_buckets = 0;
	u32 snapshot;
	int ret, i;

	if (op->flags & BCH_WRITE_MOVE)
		return;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
	if (unlikely(ret))
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(op->pos.inode, op->pos.offset, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bio *bio = &op->wbio.bio;

		nr_buckets = 0;

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		/* fall back to normal cow write path? */
		if (unlikely(k.k->p.snapshot != snapshot ||
			     !bch2_extent_is_writeable(op, k)))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					op->inline_keys,
					ARRAY_SIZE(op->inline_keys),
					k.k->u64s))
			break;

		/* Get iorefs before dropping btree locks: */
		ptrs = bch2_bkey_ptrs_c(k);
		bkey_for_each_ptr(ptrs, ptr) {
			buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
			buckets[nr_buckets].gen = ptr->gen;
			buckets[nr_buckets].l =
				bucket_nocow_lock(&c->nocow_locks,
						  bucket_to_u64(buckets[nr_buckets].b));

			prefetch(buckets[nr_buckets].l);

			if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
				goto err_get_ioref;

			nr_buckets++;

			if (ptr->unwritten)
				op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
		}

		/* Unlock before taking nocow locks, doing IO: */
		bkey_reassemble(op->insert_keys.top, k);
		bch2_trans_unlock(trans);

		bch2_cut_front(op->pos, op->insert_keys.top);
		if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

		for (i = 0; i < nr_buckets; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
			struct nocow_lock_bucket *l = buckets[i].l;
			bool stale;

			__bch2_bucket_nocow_lock(&c->nocow_locks, l,
						 bucket_to_u64(buckets[i].b),
						 BUCKET_NOCOW_LOCK_UPDATE);

			rcu_read_lock();
			stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
			rcu_read_unlock();

			if (unlikely(stale))
				goto err_bucket_stale;
		}

		bio = &op->wbio.bio;
		if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
			bio = bio_split(bio, k.k->p.offset - op->pos.offset,
					GFP_KERNEL, &c->bio_write);
			wbio_init(bio)->put_bio = true;
			bio->bi_opf = op->wbio.bio.bi_opf;
		} else {
			op->flags |= BCH_WRITE_DONE;
		}

		op->pos.offset += bio_sectors(bio);
		op->written += bio_sectors(bio);

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;
		closure_get(&op->cl);
		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  op->insert_keys.top, true);

		bch2_keylist_push(&op->insert_keys);
		if (op->flags & BCH_WRITE_DONE)
			break;
		bch2_btree_iter_advance(&iter);
	}
out:
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
				op->pos.inode,
				op->pos.offset << 9,
				"%s: btree lookup error %s",
				__func__, bch2_err_str(ret));
		op->error = ret;
		op->flags |= BCH_WRITE_DONE;
	}

	bch2_trans_put(trans);

	/* fallback to cow write path? */
	if (!(op->flags & BCH_WRITE_DONE)) {
		closure_sync(&op->cl);
		__bch2_nocow_write_done(op);
		op->insert_keys.top = op->insert_keys.keys;
	} else if (op->flags & BCH_WRITE_SYNC) {
		closure_sync(&op->cl);
		bch2_nocow_write_done(&op->cl);
	} else {
		/*
		 * XXX
		 * needs to run out of process context because ei_quota_lock is
		 * a mutex
		 */
		continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
	}
	return;
err_get_ioref:
	for (i = 0; i < nr_buckets; i++)
		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);

	/* Fall back to COW path: */
	goto out;
err_bucket_stale:
	while (--i >= 0)
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 buckets[i].b,
					 BUCKET_NOCOW_LOCK_UPDATE);
	for (i = 0; i < nr_buckets; i++)
		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);

	/* We can retry this: */
	ret = -BCH_ERR_transaction_restart;
	goto out;
}

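/*
 * Core of the write path: loop allocating space at a write point,
 * generating extents + keys with bch2_write_extent() and submitting the
 * replica bios, until either the whole op is submitted (BCH_WRITE_DONE) or
 * we have to block; synchronous ops then drive the index updates directly,
 * asynchronous ones hand off to the write point's index update worker.
 */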
static void __bch2_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct write_point *wp = NULL;
	struct bio *bio = NULL;
	unsigned nofs_flags;
	int ret;

	nofs_flags = memalloc_nofs_save();

	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
		bch2_nocow_write(op);
		if (op->flags & BCH_WRITE_DONE)
			goto out_nofs_restore;
	}
again:
	memset(&op->failed, 0, sizeof(op->failed));

	do {
		struct bkey_i *key_to_write;
		unsigned key_to_write_offset = op->insert_keys.top_p -
			op->insert_keys.keys_p;

		/* +1 for possible cache device: */
		if (op->open_buckets.nr + op->nr_replicas + 1 >
		    ARRAY_SIZE(op->open_buckets.v))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					op->inline_keys,
					ARRAY_SIZE(op->inline_keys),
					BKEY_EXTENT_U64s_MAX))
			break;

		/*
		 * The copygc thread is now global, which means it's no longer
		 * freeing up space on specific disks, which means that
		 * allocations for specific disks may hang arbitrarily long:
		 */
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_alloc_sectors_start_trans(trans,
				op->target,
				op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
				op->write_point,
				&op->devs_have,
				op->nr_replicas,
				op->nr_replicas_required,
				op->watermark,
				op->flags,
				(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
					      BCH_WRITE_ONLY_SPECIFIED_DEVS))
				? NULL : &op->cl, &wp));
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
				break;

			goto err;
		}

		EBUG_ON(!wp);

		bch2_open_bucket_get(c, wp, &op->open_buckets);
		ret = bch2_write_extent(op, wp, &bio);

		bch2_alloc_sectors_done_inlined(c, wp);
err:
		if (ret <= 0) {
			op->flags |= BCH_WRITE_DONE;

			if (ret < 0) {
				op->error = ret;
				break;
			}
		}

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;

		closure_get(bio->bi_private);

		key_to_write = (void *) (op->insert_keys.keys_p +
					 key_to_write_offset);

		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  key_to_write, false);
	} while (ret);

	/*
	 * Sync or no?
	 *
	 * If we're running asynchronously, we may still want to block
	 * synchronously here if we weren't able to submit all of the IO at
	 * once, as that signals backpressure to the caller.
	 */
	if ((op->flags & BCH_WRITE_SYNC) ||
	    (!(op->flags & BCH_WRITE_DONE) &&
	     !(op->flags & BCH_WRITE_IN_WORKER))) {
		closure_sync(&op->cl);
		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_DONE))
			goto again;
		bch2_write_done(&op->cl);
	} else {
		bch2_write_queue(op, wp);
		continue_at(&op->cl, bch2_write_index, NULL);
	}
out_nofs_restore:
	memalloc_nofs_restore(nofs_flags);
}

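/*
 * Small writes can be stored directly in the btree as inline data keys.
 * The value is padded with NULs to an 8 byte boundary, since bkey values
 * are sized in u64s - e.g. 13 bytes of data occupy DIV_ROUND_UP(13, 8) = 2
 * u64s, with 3 bytes of padding.
 */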
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
	struct bio *bio = &op->wbio.bio;
	struct bvec_iter iter;
	struct bkey_i_inline_data *id;
	unsigned sectors;
	int ret;

	op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
	op->flags |= BCH_WRITE_DONE;

	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
				   ARRAY_SIZE(op->inline_keys),
				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
	if (ret) {
		op->error = ret;
		goto err;
	}

	sectors = bio_sectors(bio);
	op->pos.offset += sectors;

	id = bkey_inline_data_init(op->insert_keys.top);
	id->k.p		= op->pos;
	id->k.version	= op->version;
	id->k.size	= sectors;

	iter = bio->bi_iter;
	iter.bi_size = data_len;
	memcpy_from_bio(id->v.data, bio, iter);

	while (data_len & 7)
		id->v.data[data_len++] = '\0';
	set_bkey_val_bytes(&id->k, data_len);
	bch2_keylist_push(&op->insert_keys);

	__bch2_write_index(op);
err:
	bch2_write_done(&op->cl);
}

/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:		&bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data won't fit in a single open bucket, there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;
	unsigned data_len;

	EBUG_ON(op->cl.parent);
	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(bkey_eq(op->pos, POS_MAX));

	op->start_time = local_clock();
	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
		bch_err_inum_offset_ratelimited(c,
			op->pos.inode,
			op->pos.offset << 9,
			"misaligned write");
		op->error = -EIO;
		goto err;
	}

	if (c->opts.nochanges) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	if (!(op->flags & BCH_WRITE_MOVE) &&
	    !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	data_len = min_t(u64, bio->bi_iter.bi_size,
			 op->new_i_size - (op->pos.offset << 9));

	if (c->opts.inline_data &&
	    data_len <= min(block_bytes(c) / 2, 1024U)) {
		bch2_write_data_inline(op, data_len);
		return;
	}

	__bch2_write(op);
	return;
err:
	bch2_disk_reservation_put(c, &op->res);

	closure_debug_destroy(&op->cl);
	if (op->end_io)
		op->end_io(op);
}

static const char * const bch2_write_flags[] = {
#define x(f)	#f,
	BCH_WRITE_FLAGS()
#undef x
	NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
	prt_str(out, "pos: ");
	bch2_bpos_to_text(out, op->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_str(out, "started: ");
	bch2_pr_time_units(out, local_clock() - op->start_time);
	prt_newline(out);

	prt_str(out, "flags: ");
	prt_bitflags(out, bch2_write_flags, op->flags);
	prt_newline(out);

	prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_write_init;

	if (mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0))
		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

	return 0;
}