// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */
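/*
 * Read completion for readahead/read_folio bios: mark each folio uptodate on
 * success (or clear uptodate and set the error flag on failure), then unlock
 * it and drop the bio.
 */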
static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		if (!bio->bi_status) {
			folio_mark_uptodate(fi.folio);
		} else {
			folio_clear_uptodate(fi.folio);
			folio_set_error(fi.folio);
		}
		folio_unlock(fi.folio);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};
static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio **fi;
	int ret;

	memset(iter, 0, sizeof(*iter));

	iter->mapping = ractl->mapping;

	ret = bch2_filemap_get_contig_folios_d(iter->mapping,
				ractl->_index << PAGE_SHIFT,
				(ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
				0, mapping_gfp_mask(iter->mapping),
				&iter->folios);
	if (ret)
		return ret;

	darray_for_each(iter->folios, fi) {
		ractl->_nr_pages -= 1U << folio_order(*fi);
		__bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
		folio_put(*fi);
		folio_put(*fi);
	}

	return 0;
}
static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}
static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}
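/*
 * Extend @bio with further folios from the readahead iterator - and, when
 * reading only part of this extent would be expensive (checksummed or
 * compressed data, per extent_partial_reads_expensive()), allocate and insert
 * extra folios past the readahead window so the whole extent can be read at
 * once.
 */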
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}
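/*
 * Core buffered read loop: walk the extents btree from the bio's current
 * sector, issuing a read for each extent the bio overlaps, until the last
 * fragment has been submitted. On transaction restart the walk is retried
 * from the top.
 */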
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	u32 snapshot;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				break;
		}

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
				iter.pos.inode,
				iter.pos.offset << 9,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}
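/*
 * ->readahead address_space op: pin per-folio state for each readahead folio,
 * then repeatedly allocate a read bio seeded with the next folio and let
 * bchfs_read()/readpage_bio_extend() fill it with as many contiguous folios
 * as the extents allow.
 */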
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct btree_trans trans;
	struct folio *folio;
	struct readpages_iter readpages_iter;
	int ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);

	bch2_pagecache_add_get(inode);

	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  opts);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(&trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(&trans);
	}

	bch2_pagecache_add_put(inode);

	bch2_trans_exit(&trans);
	darray_exit(&readpages_iter.folios);
}
static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
			      subvol_inum inum, struct folio *folio)
{
	struct btree_trans trans;

	bch2_folio_create(folio, __GFP_NOFAIL);

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	bch2_trans_init(&trans, c, 0, 0);
	bchfs_read(&trans, rbio, inum, NULL);
	bch2_trans_exit(&trans);
}
static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 opts);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_folio_end_io;

	__bchfs_readfolio(c, rbio, inode_inum(inode), folio);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}
int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */
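/*
 * A bch_writepage_io batches contiguous dirty folios into a single write op;
 * bch_writepage_state carries the current (possibly still-open) io, the
 * inode's io opts, and a scratch copy of per-sector folio state across
 * __bch2_writepage() calls.
 */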
struct bch_writepage_io {
	struct bch_inode_info	*inode;

	/* must be last: */
	struct bch_write_op	op;
};
struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
};
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	struct bch_writepage_state ret = { 0 };

	bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
	return ret;
}
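/*
 * Write completion: on error (and for inline-data writes, which allocate no
 * sectors), clear the affected sectors' replica counts; then apply the
 * i_sectors delta and drop each folio's writeback reference.
 */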
static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			folio_set_error(fi.folio);
			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	 *   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode = inode;
	op = &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target = w->opts.foreground_target;
	op->nr_replicas = nr_replicas;
	op->res.nr_replicas = nr_replicas;
	op->write_point = writepoint_hashed(inode->ei_last_dirtied);
	op->subvol = inode->ei_subvol;
	op->pos = POS(inode->v.i_ino, sector);
	op->end_io = bch2_writepage_io_done;
	op->devs_need_flush = &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
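/*
 * Write out a single folio: snapshot its per-sector state under the folio
 * lock, mark dirty sectors allocated, then scan the snapshot for runs of
 * contiguous dirty sectors, appending each run to the current write bio -
 * flushing the open bch_writepage_io first whenever the run can't be appended
 * (different replication factor, bio full, or discontiguous sector).
 */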
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the folio size. For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, sectors << 9) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_VECS * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				      sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			  round_up(i_size, block_bytes(c)) &&
			  !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
			  "writing past i_size: %llu > %llu (unrounded %llu)\n",
			  bio_end_sector(&w->io->op.wbio.bio) << 9,
			  round_up(i_size, block_bytes(c)),
			  i_size);

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	kfree(w.tmp);
	return bch2_err_class(ret);
}

/* buffered writes: */
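/*
 * .write_begin/.write_end address_space ops: prepare and commit one folio per
 * call, passing the disk-space reservation between the two hooks via *fsdata.
 */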
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
				    mapping_gfp_mask(mapping));
	if (IS_ERR_OR_NULL(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing entire folio, don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = &folio->page;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	struct folio *folio = page_folio(page);
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}
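/*
 * Drop (unlock and release) every folio from *fi onward from the darray -
 * used to shrink the write batch when a folio can't be reserved or copied
 * into.
 */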
static noinline void folios_trunc(folios *folios, struct folio **fi)
{
	while (folios->data + folios->nr > fi) {
		struct folio *f = darray_pop(folios);

		folio_unlock(f);
		folio_put(f);
	}
}
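/*
 * Perform one batched buffered write: lock all folios covering
 * [pos, pos + len), read in any partially-overwritten folios at the edges,
 * get disk-space reservations, copy the data from the iter, then mark
 * everything dirty. Returns bytes copied, which may be less than len if a
 * reservation or usercopy fails partway through.
 */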
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios folios;
	struct folio **fi, *f;
	unsigned copied = 0, f_offset;
	u64 end = pos + len, f_pos;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&folios);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
				   FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
				   mapping_gfp_mask(mapping),
				   &folios);
	if (ret)
		goto out;

	BUG_ON(!folios.nr);

	f = darray_first(folios);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(folios);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
	if (ret)
		goto out;

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(folios));
	darray_for_each(folios, fi) {
		struct folio *f = *fi;
		u64 f_len = min(end, folio_end_pos(f)) - f_pos;

		/*
		 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
		 * supposed to write as much as we have disk space for.
		 *
		 * On failure here we should still write out a partial page if
		 * we aren't completely out of disk space - we don't do that
		 * yet:
		 */
		ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
		if (unlikely(ret)) {
			folios_trunc(&folios, fi);
			if (!folios.nr)
				goto out;

			end = min(end, folio_end_pos(darray_last(folios)));
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(folios, fi)
			flush_dcache_folio(*fi);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(folios));
	darray_for_each(folios, fi) {
		struct folio *f = *fi;
		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
		unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);

		if (!f_copied) {
			folios_trunc(&folios, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&folios, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&folios, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(folios));
	darray_for_each(folios, fi) {
		struct folio *f = *fi;
		u64 f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(folios, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&folios);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}
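/*
 * Outer buffered write loop: fault in the source pages, feed
 * __bch2_buffered_write() one batch at a time, and fall back to a
 * single-segment copy when an atomic usercopy can't make progress.
 */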
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}
void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}
int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}
#endif /* NO_BCACHEFS_FS */