// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

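/*
 * Rough congestion tracking: each device keeps an atomic "congested" count
 * that other paths can consult to bias IO away from slow devices. As a
 * worked example for a read: if the device's typical capable latency is
 * ~1ms, latency_threshold is 1ms << 2 = 4ms; an 8ms IO is then 4ms over
 * threshold, and congested is bumped by ~latency_over * 4 /
 * latency_threshold, with the divide replaced by a shift below.
 */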
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
                                       u64 now, int rw)
{
        u64 latency_capable =
                ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
        /* ideally we'd be taking into account the device's variance here: */
        u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
        s64 latency_over = io_latency - latency_threshold;

        if (latency_threshold && latency_over > 0) {
                /*
                 * bump up congested by approximately latency_over * 4 /
                 * latency_threshold - we don't need much accuracy here so don't
                 * bother with the divide:
                 */
                if (atomic_read(&ca->congested) < CONGESTED_MAX)
                        atomic_add(latency_over >>
                                   max_t(int, ilog2(latency_threshold) - 2, 0),
                                   &ca->congested);

                ca->congested_last = now;
        } else if (atomic_read(&ca->congested) > 0) {
                atomic_dec(&ca->congested);
        }
}

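/*
 * Per-device latency accounting, called on IO completion: maintains a
 * decaying average via ewma_add(), where each sample moves the estimate by
 * roughly 1/2^5 of the difference - a single slow IO barely shifts it,
 * while a sustained change converges over a few dozen IOs. The cmpxchg
 * loop is usually skipped entirely, when the sample is close to the
 * current average.
 */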
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
        atomic64_t *latency = &ca->cur_latency[rw];
        u64 now = local_clock();
        u64 io_latency = time_after64(now, submit_time)
                ? now - submit_time
                : 0;
        u64 old, new, v = atomic64_read(latency);

        do {
                old = v;

                /*
                 * If the io latency was reasonably close to the current
                 * latency, skip doing the update and atomic operation - most of
                 * the time:
                 */
                if (abs((int) (old - io_latency)) < (old >> 1) &&
                    now & ~(~0U << 5))
                        break;

                new = ewma_add(old, io_latency, 5);
        } while ((v = atomic64_cmpxchg(latency, old, new)) != old);

        bch2_congested_acct(ca, io_latency, now, rw);

        __time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}

#endif

/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
        struct bvec_iter_all iter;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, iter)
                if (bv->bv_page != ZERO_PAGE(0))
                        mempool_free(bv->bv_page, &c->bio_bounce_pages);
        bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
        struct page *page;

        if (likely(!*using_mempool)) {
                page = alloc_page(GFP_NOFS);
                if (unlikely(!page)) {
                        mutex_lock(&c->bio_bounce_pages_lock);
                        *using_mempool = true;
                        goto pool_alloc;
                }
        } else {
pool_alloc:
                page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
        }

        return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
                               size_t size)
{
        bool using_mempool = false;

        while (size) {
                struct page *page = __bio_alloc_page_pool(c, &using_mempool);
                unsigned len = min_t(size_t, PAGE_SIZE, size);

                BUG_ON(!bio_add_page(bio, page, len, 0));
                size -= len;
        }

        if (using_mempool)
                mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

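/*
 * Compute how inserting @new will change accounting, by walking the extents
 * it overwrites:
 *
 * @i_sectors_delta:    change to the inode's sector count (e.g. writing
 *                      data over a hole increases it)
 * @disk_sectors_delta: change to on-disk space used, counting all replicas
 * @usage_increasing:   set if @new requires more space than what it
 *                      overwrites - more replicas, uncompressed data over
 *                      compressed, or a different snapshot
 */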
int bch2_sum_sector_overwrites(struct btree_trans *trans,
                               struct btree_iter *extent_iter,
                               struct bkey_i *new,
                               bool *usage_increasing,
                               s64 *i_sectors_delta,
                               s64 *disk_sectors_delta)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c old;
        unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
        bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
        int ret = 0;

        *usage_increasing       = false;
        *i_sectors_delta        = 0;
        *disk_sectors_delta     = 0;

        bch2_trans_copy_iter(&iter, extent_iter);

        for_each_btree_key_upto_continue_norestart(iter,
                                new->k.p, BTREE_ITER_SLOTS, old, ret) {
                s64 sectors = min(new->k.p.offset, old.k->p.offset) -
                        max(bkey_start_offset(&new->k),
                            bkey_start_offset(old.k));

                *i_sectors_delta += sectors *
                        (bkey_extent_is_allocation(&new->k) -
                         bkey_extent_is_allocation(old.k));

                *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
                *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
                        ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
                        : 0;

                if (!*usage_increasing &&
                    (new->k.p.snapshot != old.k->p.snapshot ||
                     new_replicas > bch2_bkey_replicas(c, old) ||
                     (!new_compressed && bch2_bkey_sectors_compressed(old))))
                        *usage_increasing = true;

                if (bkey_ge(old.k->p, new->k.p))
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
                                                    struct btree_iter *extent_iter,
                                                    u64 new_i_size,
                                                    s64 i_sectors_delta)
{
        struct btree_iter iter;
        struct bkey_i *k;
        struct bkey_i_inode_v3 *inode;
        /*
         * Crazy performance optimization:
         * Every extent update needs to also update the inode: the inode trigger
         * will set bi->journal_seq to the journal sequence number of this
         * transaction - for fsync.
         *
         * But if that's the only reason we're updating the inode (we're not
         * updating bi_size or bi_sectors), then we don't need the inode update
         * to be journalled - if we crash, the bi_journal_seq update will be
         * lost, but that's fine.
         */
        unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
        int ret;

        k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
                              SPOS(0,
                                   extent_iter->pos.inode,
                                   extent_iter->snapshot),
                              BTREE_ITER_CACHED);
        ret = PTR_ERR_OR_ZERO(k);
        if (unlikely(ret))
                return ret;

        if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
                k = bch2_inode_to_v3(trans, k);
                ret = PTR_ERR_OR_ZERO(k);
                if (unlikely(ret))
                        goto err;
        }

        inode = bkey_i_to_inode_v3(k);

        if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
            new_i_size > le64_to_cpu(inode->v.bi_size)) {
                inode->v.bi_size = cpu_to_le64(new_i_size);
                inode_update_flags = 0;
        }

        if (i_sectors_delta) {
                le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
                inode_update_flags = 0;
        }

        if (inode->k.p.snapshot != iter.snapshot) {
                inode->k.p.snapshot = iter.snapshot;
                inode_update_flags = 0;
        }

        ret = bch2_trans_update(trans, &iter, &inode->k_i,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                inode_update_flags);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

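/*
 * Insert one extent, handling everything that implies: trimming @k to an
 * atomic boundary, growing the disk reservation if the overwrite needs more
 * space, the accompanying inode update, and the transaction commit. On
 * success the iterator is advanced past the inserted key so callers can
 * simply loop.
 */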
int bch2_extent_update(struct btree_trans *trans,
                       subvol_inum inum,
                       struct btree_iter *iter,
                       struct bkey_i *k,
                       struct disk_reservation *disk_res,
                       u64 new_i_size,
                       s64 *i_sectors_delta_total,
                       bool check_enospc)
{
        struct bpos next_pos;
        bool usage_increasing;
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
        int ret;

        /*
         * This traverses the iterator without changing iter->path->pos to
         * search_key() (which is pos + 1 for extents): we want there to be a
         * path already traversed at iter->pos because
         * bch2_trans_extent_update() will use it to attempt extent merging
         */
        ret = __bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        ret = bch2_extent_trim_atomic(trans, iter, k);
        if (ret)
                return ret;

        next_pos = k->k.p;

        ret = bch2_sum_sector_overwrites(trans, iter, k,
                        &usage_increasing,
                        &i_sectors_delta,
                        &disk_sectors_delta);
        if (ret)
                return ret;

        if (disk_res &&
            disk_sectors_delta > (s64) disk_res->sectors) {
                ret = bch2_disk_reservation_add(trans->c, disk_res,
                                        disk_sectors_delta - disk_res->sectors,
                                        !check_enospc || !usage_increasing
                                        ? BCH_DISK_RESERVATION_NOFAIL : 0);
                if (ret)
                        return ret;
        }

        /*
         * Note:
         * We always have to do an inode update - even when i_size/i_sectors
         * aren't changing - for fsync to work properly; fsync relies on
         * inode->bi_journal_seq which is updated by the trigger code:
         */
        ret =   bch2_extent_update_i_size_sectors(trans, iter,
                                        min(k->k.p.offset << 9, new_i_size),
                                        i_sectors_delta) ?:
                bch2_trans_update(trans, iter, k, 0) ?:
                bch2_trans_commit(trans, disk_res, NULL,
                                BCH_TRANS_COMMIT_no_check_rw|
                                BCH_TRANS_COMMIT_no_enospc);
        if (unlikely(ret))
                return ret;

        if (i_sectors_delta_total)
                *i_sectors_delta_total += i_sectors_delta;
        bch2_btree_iter_set_pos(iter, next_pos);
        return 0;
}

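/*
 * Default index update path for writes (data moves use
 * bch2_data_update_index_update() instead): pops keys off op->insert_keys,
 * committing one extent update per transaction and restarting cleanly on
 * transaction restarts. If an update only went partway
 * (bch2_extent_trim_atomic() may shorten it), the inserted portion is
 * trimmed off with bch2_cut_front() and the remainder retried.
 */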
static int bch2_write_index_default(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct bkey_buf sk;
        struct keylist *keys = &op->insert_keys;
        struct bkey_i *k = bch2_keylist_front(keys);
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        subvol_inum inum = {
                .subvol = op->subvol,
                .inum   = k->k.p.inode,
        };
        int ret;

        BUG_ON(!inum.subvol);

        bch2_bkey_buf_init(&sk);

        do {
                bch2_trans_begin(trans);

                k = bch2_keylist_front(keys);
                bch2_bkey_buf_copy(&sk, c, k);

                ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
                                                  &sk.k->k.p.snapshot);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                                     bkey_start_pos(&sk.k->k),
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

                ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
                        bch2_extent_update(trans, inum, &iter, sk.k,
                                        &op->res,
                                        op->new_i_size, &op->i_sectors_delta,
                                        op->flags & BCH_WRITE_CHECK_ENOSPC);
                bch2_trans_iter_exit(trans, &iter);

                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;

                if (bkey_ge(iter.pos, k->k.p))
                        bch2_keylist_pop_front(&op->insert_keys);
                else
                        bch2_cut_front(iter.pos, k);
        } while (!bch2_keylist_empty(keys));

        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);

        return ret;
}

/* Writes */

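/*
 * Submit a write to every device the key has a pointer to: the bio is
 * cloned for each replica except the last, which reuses the original;
 * bio_inc_remaining() holds the parent's completion until every clone
 * finishes. With the no_data_io option, user data bios complete
 * immediately without touching the device.
 */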
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                               enum bch_data_type type,
                               const struct bkey_i *k,
                               bool nocow)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
        struct bch_write_bio *n;

        BUG_ON(c->opts.nochanges);

        bkey_for_each_ptr(ptrs, ptr) {
                BUG_ON(!bch2_dev_exists2(c, ptr->dev));

                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

                if (to_entry(ptr + 1) < ptrs.end) {
                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
                                                    GFP_NOFS, &ca->replica_set));

                        n->bio.bi_end_io        = wbio->bio.bi_end_io;
                        n->bio.bi_private       = wbio->bio.bi_private;
                        n->parent               = wbio;
                        n->split                = true;
                        n->bounce               = false;
                        n->put_bio              = true;
                        n->bio.bi_opf           = wbio->bio.bi_opf;
                        bio_inc_remaining(&wbio->bio);
                } else {
                        n = wbio;
                        n->split                = false;
                }

                n->c                    = c;
                n->dev                  = ptr->dev;
                n->have_ioref           = nocow || bch2_dev_get_ioref(ca,
                                        type == BCH_DATA_btree ? READ : WRITE);
                n->nocow                = nocow;
                n->submit_time          = local_clock();
                n->inode_offset         = bkey_start_offset(&k->k);
                n->bio.bi_iter.bi_sector = ptr->offset;

                if (likely(n->have_ioref)) {
                        this_cpu_add(ca->io_done->sectors[WRITE][type],
                                     bio_sectors(&n->bio));

                        bio_set_dev(&n->bio, ca->disk_sb.bdev);

                        if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
                                bio_endio(&n->bio);
                                continue;
                        }

                        submit_bio(&n->bio);
                } else {
                        n->bio.bi_status        = BLK_STS_REMOVED;
                        bio_endio(&n->bio);
                }
        }
}

static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
        struct bch_fs *c = op->c;

        EBUG_ON(op->open_buckets.nr);

        time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
        bch2_disk_reservation_put(c, &op->res);

        if (!(op->flags & BCH_WRITE_MOVE))
                bch2_write_ref_put(c, BCH_WRITE_REF_write);
        bch2_keylist_free(&op->insert_keys, op->inline_keys);

        EBUG_ON(cl->parent);
        closure_debug_destroy(cl);
        if (op->end_io)
                op->end_io(op);
}

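/*
 * A write to one or more replicas failed: drop the pointers to those
 * devices from the keys we're about to insert. If a key loses all of its
 * pointers the data is gone and the write fails with -EIO; otherwise the
 * surviving keys are compacted down in place.
 */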
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
        struct keylist *keys = &op->insert_keys;
        struct bch_extent_ptr *ptr;
        struct bkey_i *src, *dst = keys->keys, *n;

        for (src = keys->keys; src != keys->top; src = n) {
                n = bkey_next(src);

                if (bkey_extent_is_direct_data(&src->k)) {
                        bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
                                            test_bit(ptr->dev, op->failed.d));

                        if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
                                return -EIO;
                }

                if (dst != src)
                        memmove_u64s_down(dst, src, src->k.u64s);
                dst = bkey_next(dst);
        }

        keys->top = dst;
        return 0;
}

/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:         bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct keylist *keys = &op->insert_keys;
        unsigned dev;
        int ret = 0;

        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                ret = bch2_write_drop_io_error_ptrs(op);
                if (ret)
                        goto err;
        }

        if (!bch2_keylist_empty(keys)) {
                u64 sectors_start = keylist_sectors(keys);

                ret = !(op->flags & BCH_WRITE_MOVE)
                        ? bch2_write_index_default(op)
                        : bch2_data_update_index_update(op);

                BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
                BUG_ON(keylist_sectors(keys) && !ret);

                op->written += sectors_start - keylist_sectors(keys);

                if (ret && !bch2_err_matches(ret, EROFS)) {
                        struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

                        bch_err_inum_offset_ratelimited(c,
                                insert->k.p.inode, insert->k.p.offset << 9,
                                "write error while doing btree update: %s",
                                bch2_err_str(ret));
                }

                if (ret)
                        goto err;
        }
out:
        /* If a bucket wasn't written, we can't erasure code it: */
        for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
                bch2_open_bucket_write_error(c, &op->open_buckets, dev);

        bch2_open_buckets_put(c, &op->open_buckets);
        return;
err:
        keys->top = keys->keys;
        op->error = ret;
        op->flags |= BCH_WRITE_DONE;
        goto out;
}

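/*
 * Write point state accounting: a write point is running, waiting on IO,
 * waiting for the index update worker, or stopped; the time spent in each
 * state is accumulated in wp->time[] (exposed for debugging/statistics).
 */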
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
        if (state != wp->state) {
                u64 now = ktime_get_ns();

                if (wp->last_state_change &&
                    time_after64(now, wp->last_state_change))
                        wp->time[wp->state] += now - wp->last_state_change;
                wp->state = state;
                wp->last_state_change = now;
        }
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
        enum write_point_state state;

        state = running                  ? WRITE_POINT_running :
                !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
                                         : WRITE_POINT_stopped;

        __wp_update_state(wp, state);
}

static CLOSURE_CALLBACK(bch2_write_index)
{
        closure_type(op, struct bch_write_op, cl);
        struct write_point *wp = op->wp;
        struct workqueue_struct *wq = index_update_wq(op);
        unsigned long flags;

        if ((op->flags & BCH_WRITE_DONE) &&
            (op->flags & BCH_WRITE_MOVE))
                bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

        spin_lock_irqsave(&wp->writes_lock, flags);
        if (wp->state == WRITE_POINT_waiting_io)
                __wp_update_state(wp, WRITE_POINT_waiting_work);
        list_add_tail(&op->wp_list, &wp->writes);
        spin_unlock_irqrestore(&wp->writes_lock, flags);

        queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
        op->wp = wp;

        if (wp->state == WRITE_POINT_stopped) {
                spin_lock_irq(&wp->writes_lock);
                __wp_update_state(wp, WRITE_POINT_waiting_io);
                spin_unlock_irq(&wp->writes_lock);
        }
}

void bch2_write_point_do_index_updates(struct work_struct *work)
{
        struct write_point *wp =
                container_of(work, struct write_point, index_update_work);
        struct bch_write_op *op;

        while (1) {
                spin_lock_irq(&wp->writes_lock);
                op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
                if (op)
                        list_del(&op->wp_list);
                wp_update_state(wp, op != NULL);
                spin_unlock_irq(&wp->writes_lock);

                if (!op)
                        break;

                op->flags |= BCH_WRITE_IN_WORKER;

                __bch2_write_index(op);

                if (!(op->flags & BCH_WRITE_DONE))
                        __bch2_write(op);
                else
                        bch2_write_done(&op->cl);
        }
}

static void bch2_write_endio(struct bio *bio)
{
        struct closure *cl              = bio->bi_private;
        struct bch_write_op *op         = container_of(cl, struct bch_write_op, cl);
        struct bch_write_bio *wbio      = to_wbio(bio);
        struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
        struct bch_fs *c                = wbio->c;
        struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);

        if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
                                    op->pos.inode,
                                    wbio->inode_offset << 9,
                                    "data write error: %s",
                                    bch2_blk_status_to_str(bio->bi_status))) {
                set_bit(wbio->dev, op->failed.d);
                op->flags |= BCH_WRITE_IO_ERROR;
        }

        if (wbio->nocow)
                set_bit(wbio->dev, op->devs_need_flush->d);

        if (wbio->have_ioref) {
                bch2_latency_acct(ca, wbio->submit_time, WRITE);
                percpu_ref_put(&ca->io_ref);
        }

        if (wbio->bounce)
                bch2_bio_free_pages_pool(c, bio);

        if (wbio->put_bio)
                bio_put(bio);

        if (parent)
                bio_endio(&parent->bio);
        else
                closure_put(cl);
}

static void init_append_extent(struct bch_write_op *op,
                               struct write_point *wp,
                               struct bversion version,
                               struct bch_extent_crc_unpacked crc)
{
        struct bkey_i_extent *e;

        op->pos.offset += crc.uncompressed_size;

        e = bkey_extent_init(op->insert_keys.top);
        e->k.p          = op->pos;
        e->k.size       = crc.uncompressed_size;
        e->k.version    = version;

        if (crc.csum_type ||
            crc.compression_type ||
            crc.nonce)
                bch2_extent_crc_append(&e->k_i, crc);

        bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
                                       op->flags & BCH_WRITE_CACHED);

        bch2_keylist_push(&op->insert_keys);
}

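/*
 * Allocate the output bio for compress/checksum/encrypt: sized to what the
 * write point can accept, capped at BIO_MAX_VECS pages. If @buf (the
 * erasure coding buffer) is supplied the bio simply maps it; otherwise
 * pages come from the bounce mempool, falling back to plain page
 * allocation, with *page_alloc_failed recording a short allocation.
 */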
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
                                        struct write_point *wp,
                                        struct bio *src,
                                        bool *page_alloc_failed,
                                        void *buf)
{
        struct bch_write_bio *wbio;
        struct bio *bio;
        unsigned output_available =
                min(wp->sectors_free << 9, src->bi_iter.bi_size);
        unsigned pages = DIV_ROUND_UP(output_available +
                                      (buf
                                       ? ((unsigned long) buf & (PAGE_SIZE - 1))
                                       : 0), PAGE_SIZE);

        pages = min(pages, BIO_MAX_VECS);

        bio = bio_alloc_bioset(NULL, pages, 0,
                               GFP_NOFS, &c->bio_write);
        wbio                    = wbio_init(bio);
        wbio->put_bio           = true;
        /* copy WRITE_SYNC flag */
        wbio->bio.bi_opf        = src->bi_opf;

        if (buf) {
                bch2_bio_map(bio, buf, output_available);
                return bio;
        }

        wbio->bounce            = true;

        /*
         * We can't use mempool for more than c->sb.encoded_extent_max
         * worth of pages, but we'd like to allocate more if we can:
         */
        bch2_bio_alloc_pages_pool(c, bio,
                                  min_t(unsigned, output_available,
                                        c->opts.encoded_extent_max));

        if (bio->bi_iter.bi_size < output_available)
                *page_alloc_failed =
                        bch2_bio_alloc_pages(bio,
                                             output_available -
                                             bio->bi_iter.bi_size,
                                             GFP_NOFS) != 0;

        return bio;
}

static int bch2_write_rechecksum(struct bch_fs *c,
                                 struct bch_write_op *op,
                                 unsigned new_csum_type)
{
        struct bio *bio = &op->wbio.bio;
        struct bch_extent_crc_unpacked new_crc;
        int ret;

        /* bch2_rechecksum_bio() can't encrypt or decrypt data: */

        if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
            bch2_csum_type_is_encryption(new_csum_type))
                new_csum_type = op->crc.csum_type;

        ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
                                  NULL, &new_crc,
                                  op->crc.offset, op->crc.live_size,
                                  new_csum_type);
        if (ret)
                return ret;

        bio_advance(bio, op->crc.offset << 9);
        bio->bi_iter.bi_size = op->crc.live_size << 9;
        op->crc = new_crc;
        return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct nonce nonce = extent_nonce(op->version, op->crc);
        struct bch_csum csum;
        int ret;

        if (!bch2_csum_type_is_encryption(op->crc.csum_type))
                return 0;

        /*
         * If we need to decrypt data in the write path, we'll no longer be able
         * to verify the existing checksum (poly1305 mac, in this case) after
         * it's decrypted - this is the last point we'll be able to reverify the
         * checksum:
         */
        csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                return -EIO;

        ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        op->crc.csum_type = 0;
        op->crc.csum = (struct bch_csum) { 0, 0 };
        return ret;
}

static enum prep_encoded_ret {
        PREP_ENCODED_OK,
        PREP_ENCODED_ERR,
        PREP_ENCODED_CHECKSUM_ERR,
        PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
        struct bch_fs *c = op->c;
        struct bio *bio = &op->wbio.bio;

        if (!(op->flags & BCH_WRITE_DATA_ENCODED))
                return PREP_ENCODED_OK;

        BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

        /* Can we just write the entire extent as is? */
        if (op->crc.uncompressed_size == op->crc.live_size &&
            op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
            op->crc.compressed_size <= wp->sectors_free &&
            (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
             op->incompressible)) {
                if (!crc_is_compressed(op->crc) &&
                    op->csum_type != op->crc.csum_type &&
                    bch2_write_rechecksum(c, op, op->csum_type) &&
                    !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                return PREP_ENCODED_DO_WRITE;
        }

        /*
         * If the data is compressed and we couldn't write the entire extent as
         * is, we have to decompress it:
         */
        if (crc_is_compressed(op->crc)) {
                struct bch_csum csum;

                if (bch2_write_decrypt(op))
                        return PREP_ENCODED_CHECKSUM_ERR;

                /* Last point we can still verify checksum: */
                csum = bch2_checksum_bio(c, op->crc.csum_type,
                                         extent_nonce(op->version, op->crc),
                                         bio);
                if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                        return PREP_ENCODED_CHECKSUM_ERR;

                if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
                        return PREP_ENCODED_ERR;
        }

        /*
         * No longer have compressed data after this point - data might be
         * encrypted:
         */

        /*
         * If the data is checksummed and we're only writing a subset,
         * rechecksum and adjust bio to point to currently live data:
         */
        if ((op->crc.live_size != op->crc.uncompressed_size ||
             op->crc.csum_type != op->csum_type) &&
            bch2_write_rechecksum(c, op, op->csum_type) &&
            !c->opts.no_data_io)
                return PREP_ENCODED_CHECKSUM_ERR;

        /*
         * If we want to compress the data, it has to be decrypted:
         */
        if ((op->compression_opt ||
             bch2_csum_type_is_encryption(op->crc.csum_type) !=
             bch2_csum_type_is_encryption(op->csum_type)) &&
            bch2_write_decrypt(op))
                return PREP_ENCODED_CHECKSUM_ERR;

        return PREP_ENCODED_OK;
}

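/*
 * The main per-extent write loop: carve the source bio into chunks no
 * larger than the write point has room for, bouncing, compressing,
 * encrypting and checksumming each chunk as required, and appending one
 * extent key per chunk. Returns nonzero if source data remains - the
 * caller then allocates more space and calls back in - 0 when the write is
 * fully generated, or a negative error.
 */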
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                             struct bio **_dst)
{
        struct bch_fs *c = op->c;
        struct bio *src = &op->wbio.bio, *dst = src;
        struct bvec_iter saved_iter;
        void *ec_buf;
        unsigned total_output = 0, total_input = 0;
        bool bounce = false;
        bool page_alloc_failed = false;
        int ret, more = 0;

        BUG_ON(!bio_sectors(src));

        ec_buf = bch2_writepoint_ec_buf(c, wp);

        switch (bch2_write_prep_encoded_data(op, wp)) {
        case PREP_ENCODED_OK:
                break;
        case PREP_ENCODED_ERR:
                ret = -EIO;
                goto err;
        case PREP_ENCODED_CHECKSUM_ERR:
                goto csum_err;
        case PREP_ENCODED_DO_WRITE:
                /* XXX look for bug here */
                if (ec_buf) {
                        dst = bch2_write_bio_alloc(c, wp, src,
                                                   &page_alloc_failed,
                                                   ec_buf);
                        bio_copy_data(dst, src);
                        bounce = true;
                }
                init_append_extent(op, wp, op->version, op->crc);
                goto do_write;
        }

        if (ec_buf ||
            op->compression_opt ||
            (op->csum_type &&
             !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
            (bch2_csum_type_is_encryption(op->csum_type) &&
             !(op->flags & BCH_WRITE_PAGES_OWNED))) {
                dst = bch2_write_bio_alloc(c, wp, src,
                                           &page_alloc_failed,
                                           ec_buf);
                bounce = true;
        }

        saved_iter = dst->bi_iter;

        do {
                struct bch_extent_crc_unpacked crc = { 0 };
                struct bversion version = op->version;
                size_t dst_len = 0, src_len = 0;

                if (page_alloc_failed &&
                    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
                    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
                        break;

                BUG_ON(op->compression_opt &&
                       (op->flags & BCH_WRITE_DATA_ENCODED) &&
                       bch2_csum_type_is_encryption(op->crc.csum_type));
                BUG_ON(op->compression_opt && !bounce);

                crc.compression_type = op->incompressible
                        ? BCH_COMPRESSION_TYPE_incompressible
                        : op->compression_opt
                        ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
                                            op->compression_opt)
                        : 0;
                if (!crc_is_compressed(crc)) {
                        dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
                        dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

                        if (op->csum_type)
                                dst_len = min_t(unsigned, dst_len,
                                                c->opts.encoded_extent_max);

                        if (bounce) {
                                swap(dst->bi_iter.bi_size, dst_len);
                                bio_copy_data(dst, src);
                                swap(dst->bi_iter.bi_size, dst_len);
                        }

                        src_len = dst_len;
                }

                BUG_ON(!src_len || !dst_len);

                if (bch2_csum_type_is_encryption(op->csum_type)) {
                        if (bversion_zero(version)) {
                                version.lo = atomic64_inc_return(&c->key_version);
                        } else {
                                crc.nonce = op->nonce;
                                op->nonce += src_len >> 9;
                        }
                }

                if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                    !crc_is_compressed(crc) &&
                    bch2_csum_type_is_encryption(op->crc.csum_type) ==
                    bch2_csum_type_is_encryption(op->csum_type)) {
                        u8 compression_type = crc.compression_type;
                        u16 nonce = crc.nonce;
                        /*
                         * Note: when we're using rechecksum(), we need to be
                         * checksumming @src because it has all the data our
                         * existing checksum covers - if we bounced (because we
                         * were trying to compress), @dst will only have the
                         * part of the data the new checksum will cover.
                         *
                         * But normally we want to be checksumming post bounce,
                         * because part of the reason for bouncing is so the
                         * data can't be modified (by userspace) while it's in
                         * flight.
                         */
                        if (bch2_rechecksum_bio(c, src, version, op->crc,
                                        &crc, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->csum_type))
                                goto csum_err;
                        /*
                         * bch2_rechecksum_bio() sets compression_type on crc
                         * from op->crc, this isn't always correct as sometimes
                         * we're changing an extent from uncompressed to
                         * incompressible.
                         */
                        crc.compression_type = compression_type;
                        crc.nonce = nonce;
                } else {
                        if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
                            bch2_rechecksum_bio(c, src, version, op->crc,
                                        NULL, &op->crc,
                                        src_len >> 9,
                                        bio_sectors(src) - (src_len >> 9),
                                        op->crc.csum_type))
                                goto csum_err;

                        crc.compressed_size     = dst_len >> 9;
                        crc.uncompressed_size   = src_len >> 9;
                        crc.live_size           = src_len >> 9;

                        swap(dst->bi_iter.bi_size, dst_len);
                        ret = bch2_encrypt_bio(c, op->csum_type,
                                               extent_nonce(version, crc), dst);
                        if (ret)
                                goto err;

                        crc.csum = bch2_checksum_bio(c, op->csum_type,
                                         extent_nonce(version, crc), dst);
                        crc.csum_type = op->csum_type;
                        swap(dst->bi_iter.bi_size, dst_len);
                }

                init_append_extent(op, wp, version, crc);

                if (dst != src)
                        bio_advance(dst, dst_len);
                bio_advance(src, src_len);
                total_output    += dst_len;
                total_input     += src_len;
        } while (dst->bi_iter.bi_size &&
                 src->bi_iter.bi_size &&
                 wp->sectors_free &&
                 !bch2_keylist_realloc(&op->insert_keys,
                                       op->inline_keys,
                                       ARRAY_SIZE(op->inline_keys),
                                       BKEY_EXTENT_U64s_MAX));

        more = src->bi_iter.bi_size != 0;

        dst->bi_iter = saved_iter;

        if (dst == src && more) {
                BUG_ON(total_output != total_input);

                dst = bio_split(src, total_input >> 9,
                                GFP_NOFS, &c->bio_write);
                wbio_init(dst)->put_bio = true;
                /* copy WRITE_SYNC flag */
                dst->bi_opf             = src->bi_opf;
        }

        dst->bi_iter.bi_size = total_output;
do_write:
        *_dst = dst;
        return more;
csum_err:
        bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
        ret = -EIO;
err:
        if (to_wbio(dst)->bounce)
                bch2_bio_free_pages_pool(c, dst);
        if (to_wbio(dst)->put_bio)
                bio_put(dst);

        return ret;
}

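/*
 * A nocow write can only overwrite extents in place if it's safe to do so:
 * plain KEY_TYPE_extent keys whose data is neither encoded
 * (checksummed/compressed) nor erasure coded, with enough durability to
 * meet the requested number of replicas.
 */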
static bool bch2_extent_is_writeable(struct bch_write_op *op,
                                     struct bkey_s_c k)
{
        struct bch_fs *c = op->c;
        struct bkey_s_c_extent e;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        unsigned replicas = 0;

        if (k.k->type != KEY_TYPE_extent)
                return false;

        e = bkey_s_c_to_extent(k);
        extent_for_each_ptr_decode(e, p, entry) {
                if (crc_is_encoded(p.crc) || p.has_ec)
                        return false;

                replicas += bch2_extent_ptr_durability(c, &p);
        }

        return replicas >= op->opts.data_replicas;
}

static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;

        for_each_keylist_key(&op->insert_keys, k) {
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

                bkey_for_each_ptr(ptrs, ptr)
                        bch2_bucket_nocow_unlock(&c->nocow_locks,
                                                 PTR_BUCKET_POS(c, ptr),
                                                 BUCKET_NOCOW_LOCK_UPDATE);
        }
}

static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
                                                  struct btree_iter *iter,
                                                  struct bkey_i *orig,
                                                  struct bkey_s_c k,
                                                  u64 new_i_size)
{
        if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
                /* raced with another write; nothing left to convert here */
                return 0;
        }

        struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
        int ret = PTR_ERR_OR_ZERO(new);
        if (ret)
                return ret;

        bch2_cut_front(bkey_start_pos(&orig->k), new);
        bch2_cut_back(orig->k.p, new);

        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
        bkey_for_each_ptr(ptrs, ptr)
                ptr->unwritten = 0;

        /*
         * Note that we're not calling bch2_subvol_get_snapshot() in this path -
         * that was done when we kicked off the write, and here it's important
         * that we update the extent that we wrote to - even if a snapshot has
         * since been created. The write is still outstanding, so we're ok
         * w.r.t. snapshot atomicity:
         */
        return  bch2_extent_update_i_size_sectors(trans, iter,
                                min(new->k.p.offset << 9, new_i_size), 0) ?:
                bch2_trans_update(trans, iter, new,
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_trans *trans = bch2_trans_get(c);

        for_each_keylist_key(&op->insert_keys, orig) {
                int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
                                     bkey_start_pos(&orig->k), orig->k.p,
                                     BTREE_ITER_INTENT, k,
                                     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
                        bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
                }));

                if (ret && !bch2_err_matches(ret, EROFS)) {
                        struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

                        bch_err_inum_offset_ratelimited(c,
                                insert->k.p.inode, insert->k.p.offset << 9,
                                "write error while doing btree update: %s",
                                bch2_err_str(ret));
                }

                if (ret) {
                        op->error = ret;
                        break;
                }
        }

        bch2_trans_put(trans);
}

static void __bch2_nocow_write_done(struct bch_write_op *op)
{
        bch2_nocow_write_unlock(op);

        if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
                op->error = -EIO;
        } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
                bch2_nocow_write_convert_unwritten(op);
}

static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
        closure_type(op, struct bch_write_op, cl);

        __bch2_nocow_write_done(op);
        bch2_write_done(cl);
}

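/*
 * For the duration of a nocow write we hold per-bucket nocow locks,
 * remembering each bucket's generation as of locking it: if the generation
 * changes before the locks are taken, the bucket has been reused and the
 * write is retried.
 */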
struct bucket_to_lock {
        struct bpos             b;
        u8                      gen;
        struct nocow_lock_bucket *l;
};

static void bch2_nocow_write(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
        u32 snapshot;
        struct bucket_to_lock *stale_at;
        int ret;

        if (op->flags & BCH_WRITE_MOVE)
                return;

        darray_init(&buckets);
        trans = bch2_trans_get(c);
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
        if (unlikely(ret))
                goto err;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             SPOS(op->pos.inode, op->pos.offset, snapshot),
                             BTREE_ITER_SLOTS);
        while (1) {
                struct bio *bio = &op->wbio.bio;

                buckets.nr = 0;

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        break;

                /* fall back to normal cow write path? */
                if (unlikely(k.k->p.snapshot != snapshot ||
                             !bch2_extent_is_writeable(op, k)))
                        break;

                if (bch2_keylist_realloc(&op->insert_keys,
                                         op->inline_keys,
                                         ARRAY_SIZE(op->inline_keys),
                                         k.k->u64s))
                        break;

                /* Get iorefs before dropping btree locks: */
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                bkey_for_each_ptr(ptrs, ptr) {
                        struct bpos b = PTR_BUCKET_POS(c, ptr);
                        struct nocow_lock_bucket *l =
                                bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
                        prefetch(l);

                        if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
                                goto err_get_ioref;

                        /* XXX allocating memory with btree locks held - rare */
                        darray_push_gfp(&buckets, ((struct bucket_to_lock) {
                                                   .b = b, .gen = ptr->gen, .l = l,
                                                   }), GFP_KERNEL|__GFP_NOFAIL);

                        if (ptr->unwritten)
                                op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
                }

                /* Unlock before taking nocow locks, doing IO: */
                bkey_reassemble(op->insert_keys.top, k);
                bch2_trans_unlock(trans);

                bch2_cut_front(op->pos, op->insert_keys.top);
                if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
                        bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

                darray_for_each(buckets, i) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);

                        __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
                                                 bucket_to_u64(i->b),
                                                 BUCKET_NOCOW_LOCK_UPDATE);

                        rcu_read_lock();
                        bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
                        rcu_read_unlock();

                        if (unlikely(stale)) {
                                stale_at = i;
                                goto err_bucket_stale;
                        }
                }

                bio = &op->wbio.bio;
                if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
                        bio = bio_split(bio, k.k->p.offset - op->pos.offset,
                                        GFP_KERNEL, &c->bio_write);
                        wbio_init(bio)->put_bio = true;
                        bio->bi_opf = op->wbio.bio.bi_opf;
                } else {
                        op->flags |= BCH_WRITE_DONE;
                }

                op->pos.offset += bio_sectors(bio);
                op->written += bio_sectors(bio);

                bio->bi_end_io  = bch2_write_endio;
                bio->bi_private = &op->cl;
                bio->bi_opf |= REQ_OP_WRITE;
                closure_get(&op->cl);
                bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
                                          op->insert_keys.top, true);

                bch2_keylist_push(&op->insert_keys);
                if (op->flags & BCH_WRITE_DONE)
                        break;
                bch2_btree_iter_advance(&iter);
        }
out:
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        if (ret) {
                bch_err_inum_offset_ratelimited(c,
                        op->pos.inode, op->pos.offset << 9,
                        "%s: btree lookup error %s", __func__, bch2_err_str(ret));
                op->error = ret;
                op->flags |= BCH_WRITE_DONE;
        }

        bch2_trans_put(trans);
        darray_exit(&buckets);

        /* fallback to cow write path? */
        if (!(op->flags & BCH_WRITE_DONE)) {
                closure_sync(&op->cl);
                __bch2_nocow_write_done(op);
                op->insert_keys.top = op->insert_keys.keys;
        } else if (op->flags & BCH_WRITE_SYNC) {
                closure_sync(&op->cl);
                bch2_nocow_write_done(&op->cl.work);
        } else {
                /*
                 * XXX
                 * needs to run out of process context because ei_quota_lock is
                 * a mutex
                 */
                continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
        }
        return;
err_get_ioref:
        darray_for_each(buckets, i)
                percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);

        /* Fall back to COW path: */
        goto out;
err_bucket_stale:
        darray_for_each(buckets, i) {
                bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
                if (i == stale_at)
                        break;
        }

        /* We can retry this: */
        ret = -BCH_ERR_transaction_restart;
        goto err_get_ioref;
}

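/*
 * Core write path: try the nocow (in place) path first if enabled, then
 * loop allocating space from a write point, generating extents and
 * checksums with bch2_write_extent(), and submitting the IO - until the
 * entire source bio is consumed. Index updates then run synchronously here
 * or from the write point's worker, depending on BCH_WRITE_SYNC and on
 * whether all IO could be issued at once.
 */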
static void __bch2_write(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct write_point *wp = NULL;
        struct bio *bio = NULL;
        unsigned nofs_flags;
        int ret;

        nofs_flags = memalloc_nofs_save();

        if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
                bch2_nocow_write(op);
                if (op->flags & BCH_WRITE_DONE)
                        goto out_nofs_restore;
        }
again:
        memset(&op->failed, 0, sizeof(op->failed));

        do {
                struct bkey_i *key_to_write;
                unsigned key_to_write_offset = op->insert_keys.top_p -
                        op->insert_keys.keys_p;

                /* +1 for possible cache device: */
                if (op->open_buckets.nr + op->nr_replicas + 1 >
                    ARRAY_SIZE(op->open_buckets.v))
                        break;

                if (bch2_keylist_realloc(&op->insert_keys,
                                         op->inline_keys,
                                         ARRAY_SIZE(op->inline_keys),
                                         BKEY_EXTENT_U64s_MAX))
                        break;

                /*
                 * The copygc thread is now global, which means it's no longer
                 * freeing up space on specific disks, which means that
                 * allocations for specific disks may hang arbitrarily long:
                 */
                ret = bch2_trans_do(c, NULL, NULL, 0,
                        bch2_alloc_sectors_start_trans(trans,
                                op->target,
                                op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
                                op->write_point,
                                &op->devs_have,
                                op->nr_replicas,
                                op->nr_replicas_required,
                                op->watermark,
                                op->flags,
                                (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
                                              BCH_WRITE_ONLY_SPECIFIED_DEVS))
                                ? NULL : &op->cl, &wp));
                if (unlikely(ret)) {
                        if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
                                break;

                        goto err;
                }

                EBUG_ON(!wp);

                bch2_open_bucket_get(c, wp, &op->open_buckets);
                ret = bch2_write_extent(op, wp, &bio);

                bch2_alloc_sectors_done_inlined(c, wp);
err:
                if (ret <= 0) {
                        op->flags |= BCH_WRITE_DONE;

                        if (ret < 0) {
                                if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
                                        bch_err_inum_offset_ratelimited(c,
                                                op->pos.inode,
                                                op->pos.offset << 9,
                                                "%s(): error: %s", __func__, bch2_err_str(ret));
                                op->error = ret;
                                break;
                        }
                }

                bio->bi_end_io  = bch2_write_endio;
                bio->bi_private = &op->cl;
                bio->bi_opf |= REQ_OP_WRITE;

                closure_get(bio->bi_private);

                key_to_write = (void *) (op->insert_keys.keys_p +
                                         key_to_write_offset);

                bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
                                          key_to_write, false);
        } while (ret);

        /*
         * Sync or no?
         *
         * If we're running asynchronously, we may still want to block
         * synchronously here if we weren't able to submit all of the IO at
         * once, as that signals backpressure to the caller.
         */
        if ((op->flags & BCH_WRITE_SYNC) ||
            (!(op->flags & BCH_WRITE_DONE) &&
             !(op->flags & BCH_WRITE_IN_WORKER))) {
                closure_sync(&op->cl);
                __bch2_write_index(op);

                if (!(op->flags & BCH_WRITE_DONE))
                        goto again;
                bch2_write_done(&op->cl);
        } else {
                bch2_write_queue(op, wp);
                continue_at(&op->cl, bch2_write_index, NULL);
        }
out_nofs_restore:
        memalloc_nofs_restore(nofs_flags);
}

static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
        struct bio *bio = &op->wbio.bio;
        struct bvec_iter iter;
        struct bkey_i_inline_data *id;
        unsigned sectors;
        int ret;

        op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
        op->flags |= BCH_WRITE_DONE;

        bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

        ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
                                   ARRAY_SIZE(op->inline_keys),
                                   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
        if (ret) {
                op->error = ret;
                goto err;
        }

        sectors = bio_sectors(bio);
        op->pos.offset += sectors;

        id = bkey_inline_data_init(op->insert_keys.top);
        id->k.p         = op->pos;
        id->k.version   = op->version;
        id->k.size      = sectors;

        iter = bio->bi_iter;
        iter.bi_size = data_len;
        memcpy_from_bio(id->v.data, bio, iter);

        /* pad the value out to a multiple of 8 bytes (one u64): */
        while (data_len & 7)
                id->v.data[data_len++] = '\0';
        set_bkey_val_bytes(&id->k, data_len);
        bch2_keylist_push(&op->insert_keys);

        __bch2_write_index(op);
err:
        bch2_write_done(&op->cl);
}

/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:         &bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data won't fit in a single open bucket, there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have been
 * added to the next journal write they're inserted into the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch2_write)
{
        closure_type(op, struct bch_write_op, cl);
        struct bio *bio = &op->wbio.bio;
        struct bch_fs *c = op->c;
        unsigned data_len;

        EBUG_ON(op->cl.parent);
        BUG_ON(!op->nr_replicas);
        BUG_ON(!op->write_point.v);
        BUG_ON(bkey_eq(op->pos, POS_MAX));

        op->start_time = local_clock();
        bch2_keylist_init(&op->insert_keys, op->inline_keys);
        wbio_init(bio)->put_bio = false;

        if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
                bch_err_inum_offset_ratelimited(c,
                        op->pos.inode,
                        op->pos.offset << 9,
                        "misaligned write");
                op->error = -EIO;
                goto err;
        }

        if (c->opts.nochanges) {
                op->error = -BCH_ERR_erofs_no_writes;
                goto err;
        }

        if (!(op->flags & BCH_WRITE_MOVE) &&
            !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
                op->error = -BCH_ERR_erofs_no_writes;
                goto err;
        }

        this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
        bch2_increment_clock(c, bio_sectors(bio), WRITE);

        data_len = min_t(u64, bio->bi_iter.bi_size,
                         op->new_i_size - (op->pos.offset << 9));

        if (c->opts.inline_data &&
            data_len <= min(block_bytes(c) / 2, 1024U)) {
                bch2_write_data_inline(op, data_len);
                return;
        }

        __bch2_write(op);
        return;
err:
        bch2_disk_reservation_put(c, &op->res);

        closure_debug_destroy(&op->cl);
        if (op->end_io)
                op->end_io(op);
}

static const char * const bch2_write_flags[] = {
#define x(f)    #f,
        BCH_WRITE_FLAGS()
#undef x
        NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
        prt_str(out, "pos: ");
        bch2_bpos_to_text(out, op->pos);
        prt_newline(out);
        printbuf_indent_add(out, 2);

        prt_str(out, "started: ");
        bch2_pr_time_units(out, local_clock() - op->start_time);
        prt_newline(out);

        prt_str(out, "flags: ");
        prt_bitflags(out, bch2_write_flags, op->flags);
        prt_newline(out);

        prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
        prt_newline(out);

        printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
        mempool_exit(&c->bio_bounce_pages);
        bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
        if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
                        BIOSET_NEED_BVECS))
                return -BCH_ERR_ENOMEM_bio_write_init;

        if (mempool_init_page_pool(&c->bio_bounce_pages,
                                   max_t(unsigned,
                                         c->opts.btree_node_size,
                                         c->opts.encoded_extent_max) /
                                   PAGE_SIZE, 0))
                return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

        return 0;
}