// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "keylist.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}

void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new, v = atomic64_read(latency);

	do {
		old = v;

		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while ((v = atomic64_cmpxchg(latency, old, new)) != old);

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
}

#endif
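/*
 * Illustrative notes on the accounting above (added commentary, not from the
 * original source):
 *
 * - The congestion bump approximates latency_over * 4 / latency_threshold
 *   with a shift: latency_over >> (ilog2(latency_threshold) - 2). E.g. with
 *   latency_threshold = 1024us and latency_over = 512us we add
 *   512 >> 8 = 2, exactly matching 512 * 4 / 1024 = 2; for non-power-of-two
 *   thresholds the result is within a factor of two, which is accurate
 *   enough here.
 *
 * - ewma_add(old, io_latency, 5) maintains an exponentially weighted moving
 *   average; assuming the ewma_add() helper from util.h, this is
 *   approximately:
 *
 *	new = old + (io_latency - old) / 32
 *
 *   so e.g. old = 1000us, io_latency = 2000us gives new ~= 1031us.
 */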
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOFS);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
	}

	return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}
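/*
 * Example usage (illustrative commentary, not from the original source):
 * the write path pairs these helpers for bounce buffers:
 *
 *	bch2_bio_alloc_pages_pool(c, bio, len);		// fill bio with pages
 *	...do IO...
 *	bch2_bio_free_pages_pool(c, bio);		// release the pages
 *
 * mempool_free() doesn't care whether a page came from alloc_page() or from
 * the pool: it refills the pool up to its reserve and frees the rest, so
 * every page can be released through the same path.
 */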
/* Extent update path: */

int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing	= false;
	*i_sectors_delta	= 0;
	*disk_sectors_delta	= 0;

	bch2_trans_copy_iter(&iter, extent_iter);

	for_each_btree_key_upto_continue_norestart(iter,
				new->k.p, BTREE_ITER_SLOTS, old, ret) {
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
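/*
 * Worked example (illustrative, not from the original source): writing an
 * 8 sector, 2-pointer extent over a hole in the same snapshot gives
 *
 *	i_sectors_delta    = 8 * (1 - 0) = 8	(file gains 8 sectors)
 *	disk_sectors_delta = 8 * 2 - 0   = 16	(two new copies on disk)
 *
 * Overwriting a fully-allocated 1-pointer extent instead gives
 * i_sectors_delta = 0 and disk_sectors_delta = 16 - 8 = 8.
 */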
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	struct btree_iter iter;
	struct bkey_i *k;
	struct bkey_i_inode_v3 *inode;
	/*
	 * Every extent update needs an inode update, for bi_journal_seq - but
	 * if bi_size/bi_sectors aren't changing, the inode update itself
	 * doesn't need to be journalled:
	 */
	unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_CACHED);
	ret = PTR_ERR_OR_ZERO(k);
	if (unlikely(ret))
		return ret;

	if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
		k = bch2_inode_to_v3(trans, k);
		ret = PTR_ERR_OR_ZERO(k);
		if (unlikely(ret))
			goto err;
	}

	inode = bkey_i_to_inode_v3(k);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses us the iterator without changing iter->path->pos to
	 * search_key() (which is pos + 1 for extents): we want there to be a
	 * path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(iter, next_pos);
	return 0;
}
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum	= k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		ret =   bch2_bkey_set_needs_rebalance(c, sk.k,
					op->opts.background_target,
					op->opts.background_compression) ?:
			bch2_extent_update(trans, inum, &iter, sk.k,
					&op->res,
					op->new_i_size, &op->i_sectors_delta,
					op->flags & BCH_WRITE_CHECK_ENOSPC);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k,
			       bool nocow)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	const struct bch_extent_ptr *ptr;
	struct bch_write_bio *n;
	struct bch_dev *ca;

	BUG_ON(c->opts.nochanges);

	bkey_for_each_ptr(ptrs, ptr) {
		BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
		       !c->devs[ptr->dev]);

		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
						GFP_NOFS, &ca->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= nocow || bch2_dev_get_ioref(ca,
					type == BCH_DATA_btree ? READ : WRITE);
		n->nocow		= nocow;
		n->submit_time		= local_clock();
		n->inode_offset		= bkey_start_offset(&k->k);
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);

			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
				bio_endio(&n->bio);
				continue;
			}

			submit_bio(&n->bio);
		} else {
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}
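/*
 * Completion accounting (illustrative commentary): for a key with n pointers
 * the first n - 1 get clones and the original bio is submitted to the last
 * device. Each clone inherits the parent's bi_end_io/bi_private, and
 * bio_inc_remaining() on the parent means the parent's completion only runs
 * once every clone has called bio_endio() - so the write op observes a
 * single completion after all replicas finish.
 */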
static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	if (!(op->flags & BCH_WRITE_MOVE))
		bch2_write_ref_put(c, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	if (op->end_io)
		op->end_io(op);
}
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct bch_extent_ptr *ptr;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return -EIO;
		}

		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}
/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned dev;
	int ret = 0;

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_MOVE)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_DONE;
	goto out;
}
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
	if (state != wp->state) {
		u64 now = ktime_get_ns();

		if (wp->last_state_change &&
		    time_after64(now, wp->last_state_change))
			wp->time[wp->state] += now - wp->last_state_change;
		wp->state = state;
		wp->last_state_change = now;
	}
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state state;

	state = running			 ? WRITE_POINT_running :
		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
					 : WRITE_POINT_stopped;

	__wp_update_state(wp, state);
}

static void bch2_write_index(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	if ((op->flags & BCH_WRITE_DONE) &&
	    (op->flags & BCH_WRITE_MOVE))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}
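/*
 * Write point state machine (illustrative summary, added commentary): a
 * write point moves through
 *
 *	stopped -> waiting_io -> waiting_work -> running
 *
 * bch2_write_queue() marks it waiting_io when IO is submitted,
 * bch2_write_index() marks it waiting_work once IO completes and the index
 * update is queued, and bch2_write_point_do_index_updates() marks it
 * running while processing the list; wp->time[] accumulates nanoseconds
 * spent in each state, for debugging.
 */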
void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
		if (op)
			list_del(&op->wp_list);
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		op->flags |= BCH_WRITE_IN_WORKER;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_DONE))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}
static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
				    op->pos.inode,
				    wbio->inode_offset << 9,
				    "data write error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_IO_ERROR;
	}

	if (wbio->nocow)
		set_bit(wbio->dev, op->devs_need_flush->d);

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}
static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.version	= version;

	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
				       op->flags & BCH_WRITE_CACHED);

	bch2_keylist_push(&op->insert_keys);
}
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}
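/*
 * Example (illustrative, not from the original source): if the existing crc
 * covers 32 sectors but only sectors [8, 24) are live (crc.offset = 8,
 * crc.live_size = 16), a successful rechecksum leaves op->crc describing
 * just the 16 live sectors, and the bio is advanced and trimmed to match -
 * the data actually written is exactly what the new checksum covers.
 */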
static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;
	int ret;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
		return -EIO;

	ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return ret;
}
static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type) &&
		    !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type) &&
	    !c->opts.no_data_io)
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_opt ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}
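/*
 * Summary of the return values above (added commentary):
 *
 *	PREP_ENCODED_DO_WRITE	  - extent can be written out unchanged
 *	PREP_ENCODED_OK		  - continue through the normal compress/
 *				    checksum path in bch2_write_extent()
 *	PREP_ENCODED_CHECKSUM_ERR - existing checksum didn't verify; abort
 *	PREP_ENCODED_ERR	  - decompression failed; abort
 */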
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		/* XXX look for bug here */
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		if (page_alloc_failed &&
		    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc_is_compressed(crc) &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			u8 compression_type = crc.compression_type;
			u16 nonce = crc.nonce;
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
			/*
			 * rechecksum_bio sets compression_type on crc from
			 * op->crc, this isn't always correct as sometimes we're
			 * changing an extent from uncompressed to
			 * incompressible.
			 */
			crc.compression_type = compression_type;
			crc.nonce = nonce;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			ret = bch2_encrypt_bio(c, op->csum_type,
					       extent_nonce(version, crc), dst);
			if (ret)
				goto err;

			crc.csum = bch2_checksum_bio(c, op->csum_type,
					 extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				      op->inline_keys,
				      ARRAY_SIZE(op->inline_keys),
				      BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOFS, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;
do_write:
	*_dst = dst;
	return more;
csum_err:
	bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
	ret = -EIO;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}
static bool bch2_extent_is_writeable(struct bch_write_op *op,
				     struct bkey_s_c k)
{
	struct bch_fs *c = op->c;
	struct bkey_s_c_extent e;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	unsigned replicas = 0;

	if (k.k->type != KEY_TYPE_extent)
		return false;

	e = bkey_s_c_to_extent(k);
	extent_for_each_ptr_decode(e, p, entry) {
		if (crc_is_encoded(p.crc) || p.has_ec)
			return false;

		replicas += bch2_extent_ptr_durability(c, &p);
	}

	return replicas >= op->opts.data_replicas;
}
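/*
 * Example (illustrative): an extent with two pointers to devices of
 * durability 1 has replicas = 2, so it's nocow-writeable iff
 * opts.data_replicas <= 2; any encoded (checksummed/compressed) or erasure
 * coded pointer disqualifies it, since those can't be updated in place.
 */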
static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	const struct bch_extent_ptr *ptr;
	struct bkey_i *k;

	for_each_keylist_key(&op->insert_keys, k) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

		bkey_for_each_ptr(ptrs, ptr)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr),
						 BUCKET_NOCOW_LOCK_UPDATE);
	}
}
static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	struct bkey_i *new;
	struct bkey_ptrs ptrs;
	struct bch_extent_ptr *ptr;
	int ret;

	if (!bch2_extents_match(bkey_i_to_s_c(orig), k))
		return 0;

	new = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return  bch2_extent_update_i_size_sectors(trans, iter,
					min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}
static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i *orig;
	int ret;

	for_each_keylist_key(&op->insert_keys, orig) {
		ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_INTENT, k,
				     NULL, NULL, BTREE_INSERT_NOFAIL, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			bch_err_inum_offset_ratelimited(c,
				insert->k.p.inode, insert->k.p.offset << 9,
				"write error while doing btree update: %s",
				bch2_err_str(ret));
		}

		if (ret) {
			op->error = ret;
			break;
		}
	}

	bch2_trans_put(trans);
}
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
	bch2_nocow_write_unlock(op);

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		op->error = -EIO;
	} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
		bch2_nocow_write_convert_unwritten(op);
}

static void bch2_nocow_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}
static void bch2_nocow_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_ptrs_c ptrs;
	const struct bch_extent_ptr *ptr;
	struct {
		struct bpos	b;
		unsigned	gen;
		struct nocow_lock_bucket *l;
	} buckets[BCH_REPLICAS_MAX];
	unsigned nr_buckets = 0;
	u32 snapshot;
	int ret, i;

	if (op->flags & BCH_WRITE_MOVE)
		return;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
	if (unlikely(ret))
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(op->pos.inode, op->pos.offset, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bio *bio = &op->wbio.bio;

		nr_buckets = 0;

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		/* fall back to normal cow write path? */
		if (unlikely(k.k->p.snapshot != snapshot ||
			     !bch2_extent_is_writeable(op, k)))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 k.k->u64s))
			break;

		/* Get iorefs before dropping btree locks: */
		ptrs = bch2_bkey_ptrs_c(k);
		bkey_for_each_ptr(ptrs, ptr) {
			buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
			buckets[nr_buckets].gen = ptr->gen;
			buckets[nr_buckets].l =
				bucket_nocow_lock(&c->nocow_locks,
						  bucket_to_u64(buckets[nr_buckets].b));

			prefetch(buckets[nr_buckets].l);

			if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
				goto err_get_ioref;

			nr_buckets++;

			if (ptr->unwritten)
				op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
		}

		/* Unlock before taking nocow locks, doing IO: */
		bkey_reassemble(op->insert_keys.top, k);
		bch2_trans_unlock(trans);

		bch2_cut_front(op->pos, op->insert_keys.top);
		if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

		for (i = 0; i < nr_buckets; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
			struct nocow_lock_bucket *l = buckets[i].l;
			bool stale;

			__bch2_bucket_nocow_lock(&c->nocow_locks, l,
						 bucket_to_u64(buckets[i].b),
						 BUCKET_NOCOW_LOCK_UPDATE);

			rcu_read_lock();
			stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
			rcu_read_unlock();

			if (unlikely(stale))
				goto err_bucket_stale;
		}

		bio = &op->wbio.bio;
		if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
			bio = bio_split(bio, k.k->p.offset - op->pos.offset,
					GFP_KERNEL, &c->bio_write);
			wbio_init(bio)->put_bio = true;
			bio->bi_opf = op->wbio.bio.bi_opf;
		} else {
			op->flags |= BCH_WRITE_DONE;
		}

		op->pos.offset += bio_sectors(bio);
		op->written += bio_sectors(bio);

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;
		closure_get(&op->cl);
		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  op->insert_keys.top, true);

		bch2_keylist_push(&op->insert_keys);
		if (op->flags & BCH_WRITE_DONE)
			break;
		bch2_btree_iter_advance(&iter);
	}
out:
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
			op->pos.inode,
			op->pos.offset << 9,
			"%s: btree lookup error %s",
			__func__, bch2_err_str(ret));
		op->error = ret;
		op->flags |= BCH_WRITE_DONE;
	}

	bch2_trans_put(trans);

	/* fallback to cow write path? */
	if (!(op->flags & BCH_WRITE_DONE)) {
		closure_sync(&op->cl);
		__bch2_nocow_write_done(op);
		op->insert_keys.top = op->insert_keys.keys;
	} else if (op->flags & BCH_WRITE_SYNC) {
		closure_sync(&op->cl);
		bch2_nocow_write_done(&op->cl);
	} else {
		/*
		 * XXX
		 * needs to run out of process context because ei_quota_lock is
		 * a mutex
		 */
		continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
	}
	return;
err_get_ioref:
	for (i = 0; i < nr_buckets; i++)
		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);

	/* Fall back to COW path: */
	goto out;
err_bucket_stale:
	while (i >= 0) {
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 buckets[i].b,
					 BUCKET_NOCOW_LOCK_UPDATE);
		--i;
	}
	for (i = 0; i < nr_buckets; i++)
		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);

	/* We can retry this: */
	ret = -BCH_ERR_transaction_restart;
	goto out;
}
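/*
 * Locking order in the nocow path above (added summary): device iorefs are
 * taken while we still hold btree locks, then btree locks are dropped
 * before taking the per-bucket nocow locks and re-checking bucket gens - a
 * stale gen means the bucket may have been reused, so we unlock and retry
 * the whole transaction rather than overwrite someone else's data.
 */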
static void __bch2_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct write_point *wp = NULL;
	struct bio *bio = NULL;
	unsigned nofs_flags;
	int ret;

	nofs_flags = memalloc_nofs_save();

	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
		bch2_nocow_write(op);
		if (op->flags & BCH_WRITE_DONE)
			goto out_nofs_restore;
	}
again:
	memset(&op->failed, 0, sizeof(op->failed));

	do {
		struct bkey_i *key_to_write;
		unsigned key_to_write_offset = op->insert_keys.top_p -
			op->insert_keys.keys_p;

		/* +1 for possible cache device: */
		if (op->open_buckets.nr + op->nr_replicas + 1 >
		    ARRAY_SIZE(op->open_buckets.v))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					op->inline_keys,
					ARRAY_SIZE(op->inline_keys),
					BKEY_EXTENT_U64s_MAX))
			break;

		/*
		 * The copygc thread is now global, which means it's no longer
		 * freeing up space on specific disks, which means that
		 * allocations for specific disks may hang arbitrarily long:
		 */
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_alloc_sectors_start_trans(trans,
				op->target,
				op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
				op->write_point,
				&op->devs_have,
				op->nr_replicas,
				op->nr_replicas_required,
				op->watermark,
				op->flags,
				(op->flags & (BCH_WRITE_ALLOC_NOWAIT|
					      BCH_WRITE_ONLY_SPECIFIED_DEVS))
				? NULL : &op->cl, &wp));
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
				break;

			goto err;
		}

		bch2_open_bucket_get(c, wp, &op->open_buckets);
		ret = bch2_write_extent(op, wp, &bio);

		bch2_alloc_sectors_done_inlined(c, wp);
err:
		if (ret <= 0) {
			op->flags |= BCH_WRITE_DONE;

			if (ret < 0) {
				op->error = ret;
				break;
			}
		}

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;

		closure_get(bio->bi_private);

		key_to_write = (void *) (op->insert_keys.keys_p +
					 key_to_write_offset);

		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  key_to_write, false);
	} while (ret);

	/*
	 * Sync or no?
	 *
	 * If we're running asynchronously, we may still want to block
	 * synchronously here if we weren't able to submit all of the IO at
	 * once, as that signals backpressure to the caller.
	 */
	if ((op->flags & BCH_WRITE_SYNC) ||
	    (!(op->flags & BCH_WRITE_DONE) &&
	     !(op->flags & BCH_WRITE_IN_WORKER))) {
		closure_sync(&op->cl);
		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_DONE))
			goto again;
		bch2_write_done(&op->cl);
	} else {
		bch2_write_queue(op, wp);
		continue_at(&op->cl, bch2_write_index, NULL);
	}
out_nofs_restore:
	memalloc_nofs_restore(nofs_flags);
}
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
	struct bio *bio = &op->wbio.bio;
	struct bvec_iter iter;
	struct bkey_i_inline_data *id;
	unsigned sectors;
	int ret;

	op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
	op->flags |= BCH_WRITE_DONE;

	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
				   ARRAY_SIZE(op->inline_keys),
				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
	if (ret) {
		op->error = ret;
		goto err;
	}

	sectors = bio_sectors(bio);
	op->pos.offset += sectors;

	id = bkey_inline_data_init(op->insert_keys.top);
	id->k.p		= op->pos;
	id->k.version	= op->version;
	id->k.size	= sectors;

	iter = bio->bi_iter;
	iter.bi_size = data_len;
	memcpy_from_bio(id->v.data, bio, iter);

	while (data_len & 7)
		id->v.data[data_len++] = '\0';
	set_bkey_val_bytes(&id->k, data_len);
	bch2_keylist_push(&op->insert_keys);

	__bch2_write_index(op);
err:
	bch2_write_done(&op->cl);
}
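/*
 * Padding example (illustrative): bkey values are u64 aligned, so e.g.
 * data_len = 5 is NUL padded to 8 bytes and set_bkey_val_bytes() records 8;
 * the earlier keylist reservation used the same rounding,
 * BKEY_U64s + DIV_ROUND_UP(data_len, 8).
 */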
/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:		&bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch2_write(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;
	unsigned data_len;

	EBUG_ON(op->cl.parent);
	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(bkey_eq(op->pos, POS_MAX));

	op->start_time = local_clock();
	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
		bch_err_inum_offset_ratelimited(c,
			op->pos.inode,
			op->pos.offset << 9,
			"misaligned write");
		op->error = -EIO;
		goto err;
	}

	if (c->opts.nochanges) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	if (!(op->flags & BCH_WRITE_MOVE) &&
	    !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	data_len = min_t(u64, bio->bi_iter.bi_size,
			 op->new_i_size - (op->pos.offset << 9));

	if (c->opts.inline_data &&
	    data_len <= min(block_bytes(c) / 2, 1024U)) {
		bch2_write_data_inline(op, data_len);
		return;
	}

	__bch2_write(op);
	return;
err:
	bch2_disk_reservation_put(c, &op->res);

	closure_debug_destroy(&op->cl);
	if (op->end_io)
		op->end_io(op);
}
static const char * const bch2_write_flags[] = {
#define x(f)	#f,
	BCH_WRITE_FLAGS()
#undef x
	NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
	prt_str(out, "pos: ");
	bch2_bpos_to_text(out, op->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_str(out, "started: ");
	bch2_pr_time_units(out, local_clock() - op->start_time);
	prt_newline(out);

	prt_str(out, "flags: ");
	prt_bitflags(out, bch2_write_flags, op->flags);
	prt_newline(out);

	prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_bio_write_init;

	if (mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0))
		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

	return 0;
}
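/*
 * Sizing note (illustrative, assuming typical option values): the bounce
 * pool reserves enough pages for the largest single buffer we bounce -
 * e.g. with 256KiB btree nodes, 64KiB encoded_extent_max and 4KiB pages,
 * max(256KiB, 64KiB) / 4KiB = 64 reserved pages.
 */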