1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_foreground.h"
7 #include "btree_update.h"
12 #include "extent_update.h"
24 #include <linux/aio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/falloc.h>
27 #include <linux/migrate.h>
28 #include <linux/mmu_context.h>
29 #include <linux/pagevec.h>
30 #include <linux/rmap.h>
31 #include <linux/sched/signal.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/uio.h>
34 #include <linux/writeback.h>
36 #include <trace/events/writeback.h>
/*
 * Use u64 for the end pos and sector helpers because if the folio covers the
 * max supported range of the mapping, the start offset of the next folio
 * overflows loff_t. This breaks much of the range based processing in the
 * buffered write path.
 */
44 static inline u64 folio_end_pos(struct folio *folio)
46 return folio_pos(folio) + folio_size(folio);
49 static inline size_t folio_sectors(struct folio *folio)
51 return PAGE_SECTORS << folio_order(folio);
54 static inline loff_t folio_sector(struct folio *folio)
56 return folio_pos(folio) >> 9;
59 static inline u64 folio_end_sector(struct folio *folio)
61 return folio_end_pos(folio) >> 9;
64 typedef DARRAY(struct folio *) folios;
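/*
 * Roughly: grab a run of locked, contiguous folios covering [start, end); we
 * stop at the first hole in the pagecache, and stop creating new folios once
 * we're ~1MB past @start so a single call can't pin an unbounded amount of
 * memory.
 */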
66 static int filemap_get_contig_folios_d(struct address_space *mapping,
67 loff_t start, u64 end,
68 int fgp_flags, gfp_t gfp,
76 if ((u64) pos >= (u64) start + (1ULL << 20))
77 fgp_flags &= ~FGP_CREAT;
79 ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
83 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
87 BUG_ON(folios->nr && folio_pos(f) != pos);
89 pos = folio_end_pos(f);
90 darray_push(folios, f);
93 if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
96 return folios->nr ? 0 : ret;
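/*
 * nocow write flushing: devices that have received nocow writes for this
 * inode are tracked in ei_devs_need_flush; on fsync we then issue a cache
 * flush bio to each of those devices before returning.
 */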
105 static void nocow_flush_endio(struct bio *_bio)
108 struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
110 closure_put(bio->cl);
111 percpu_ref_put(&bio->ca->io_ref);
115 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
116 struct bch_inode_info *inode,
119 struct nocow_flush *bio;
121 struct bch_devs_mask devs;
124 dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
125 if (dev == BCH_SB_MEMBERS_MAX)
128 devs = inode->ei_devs_need_flush;
129 memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
131 for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
133 ca = rcu_dereference(c->devs[dev]);
134 if (ca && !percpu_ref_tryget(&ca->io_ref))
141 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
144 &c->nocow_flush_bioset),
145 struct nocow_flush, bio);
148 bio->bio.bi_end_io = nocow_flush_endio;
149 closure_bio_submit(&bio->bio, cl);
153 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
154 struct bch_inode_info *inode)
158 closure_init_stack(&cl);
159 bch2_inode_flush_nocow_writes_async(c, inode, &cl);
165 static inline bool bio_full(struct bio *bio, unsigned len)
167 if (bio->bi_vcnt >= bio->bi_max_vecs)
169 if (bio->bi_iter.bi_size > UINT_MAX - len)
174 static inline struct address_space *faults_disabled_mapping(void)
176 return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
179 static inline void set_fdm_dropped_locks(void)
181 current->faults_disabled_mapping =
182 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
185 static inline bool fdm_dropped_locks(void)
187 return ((unsigned long) current->faults_disabled_mapping) & 1;
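/*
 * The faults_disabled_mapping() helpers above treat
 * current->faults_disabled_mapping as a tagged pointer: the low bit records
 * that the fault handler dropped and retook locks (see bch2_page_fault() and
 * the dio write path below); the remaining bits are the mapping pointer.
 */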
194 struct bch_writepage_io {
195 struct bch_inode_info *inode;
198 struct bch_write_op op;
203 struct address_space *mapping;
204 struct bch_inode_info *inode;
205 struct mm_struct *mm;
211 struct quota_res quota_res;
214 struct iov_iter iter;
215 struct iovec inline_vecs[2];
218 struct bch_write_op op;
226 struct bch_read_bio rbio;
229 /* pagecache_block must be held */
230 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
231 loff_t start, loff_t end)
/*
 * XXX: the way this is currently implemented, we can spin if a process
 * is continually redirtying a specific page
 */
240 if (!mapping->nrpages)
243 ret = filemap_write_and_wait_range(mapping, start, end);
247 if (!mapping->nrpages)
250 ret = invalidate_inode_pages2_range(mapping,
253 } while (ret == -EBUSY);
260 #ifdef CONFIG_BCACHEFS_QUOTA
262 static void __bch2_quota_reservation_put(struct bch_fs *c,
263 struct bch_inode_info *inode,
264 struct quota_res *res)
266 BUG_ON(res->sectors > inode->ei_quota_reserved);
268 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
269 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
270 inode->ei_quota_reserved -= res->sectors;
274 static void bch2_quota_reservation_put(struct bch_fs *c,
275 struct bch_inode_info *inode,
276 struct quota_res *res)
279 mutex_lock(&inode->ei_quota_lock);
280 __bch2_quota_reservation_put(c, inode, res);
281 mutex_unlock(&inode->ei_quota_lock);
285 static int bch2_quota_reservation_add(struct bch_fs *c,
286 struct bch_inode_info *inode,
287 struct quota_res *res,
293 if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
296 mutex_lock(&inode->ei_quota_lock);
297 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
298 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
300 inode->ei_quota_reserved += sectors;
301 res->sectors += sectors;
303 mutex_unlock(&inode->ei_quota_lock);
310 static void __bch2_quota_reservation_put(struct bch_fs *c,
311 struct bch_inode_info *inode,
312 struct quota_res *res) {}
314 static void bch2_quota_reservation_put(struct bch_fs *c,
315 struct bch_inode_info *inode,
316 struct quota_res *res) {}
318 static int bch2_quota_reservation_add(struct bch_fs *c,
319 struct bch_inode_info *inode,
320 struct quota_res *res,
329 /* i_size updates: */
331 struct inode_new_size {
337 static int inode_set_size(struct bch_inode_info *inode,
338 struct bch_inode_unpacked *bi,
341 struct inode_new_size *s = p;
343 bi->bi_size = s->new_size;
344 if (s->fields & ATTR_ATIME)
345 bi->bi_atime = s->now;
346 if (s->fields & ATTR_MTIME)
347 bi->bi_mtime = s->now;
348 if (s->fields & ATTR_CTIME)
349 bi->bi_ctime = s->now;
354 int __must_check bch2_write_inode_size(struct bch_fs *c,
355 struct bch_inode_info *inode,
356 loff_t new_size, unsigned fields)
358 struct inode_new_size s = {
359 .new_size = new_size,
360 .now = bch2_current_time(c),
364 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
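/*
 * i_blocks accounting: adjust the in-memory block count, consuming the
 * passed-in quota reservation when there is one and otherwise
 * charging/releasing quota directly. Callers of __i_sectors_acct() hold
 * ei_quota_lock; i_sectors_acct() takes it.
 */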
367 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
368 struct quota_res *quota_res, s64 sectors)
370 bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
371 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
372 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
373 inode->ei_inode.bi_sectors);
374 inode->v.i_blocks += sectors;
376 #ifdef CONFIG_BCACHEFS_QUOTA
378 !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
380 BUG_ON(sectors > quota_res->sectors);
381 BUG_ON(sectors > inode->ei_quota_reserved);
383 quota_res->sectors -= sectors;
384 inode->ei_quota_reserved -= sectors;
386 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
391 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
392 struct quota_res *quota_res, s64 sectors)
395 mutex_lock(&inode->ei_quota_lock);
396 __i_sectors_acct(c, inode, quota_res, sectors);
397 mutex_unlock(&inode->ei_quota_lock);
403 /* stored in page->private: */
#define BCH_FOLIO_SECTOR_STATE()	\
	x(unallocated)			\
	x(reserved)			\
	x(dirty)			\
	x(dirty_reserved)		\
	x(allocated)
412 enum bch_folio_sector_state {
413 #define x(n) SECTOR_##n,
414 BCH_FOLIO_SECTOR_STATE()
418 const char * const bch2_folio_sector_states[] = {
420 BCH_FOLIO_SECTOR_STATE()
425 static inline enum bch_folio_sector_state
426 folio_sector_dirty(enum bch_folio_sector_state state)
429 case SECTOR_unallocated:
431 case SECTOR_reserved:
432 return SECTOR_dirty_reserved;
438 static inline enum bch_folio_sector_state
439 folio_sector_undirty(enum bch_folio_sector_state state)
443 return SECTOR_unallocated;
444 case SECTOR_dirty_reserved:
445 return SECTOR_reserved;
451 static inline enum bch_folio_sector_state
452 folio_sector_reserve(enum bch_folio_sector_state state)
455 case SECTOR_unallocated:
456 return SECTOR_reserved;
458 return SECTOR_dirty_reserved;
464 struct bch_folio_sector {
465 /* Uncompressed, fully allocated replicas (or on disk reservation): */
466 unsigned nr_replicas:4;
/* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
469 unsigned replicas_reserved:4;
472 enum bch_folio_sector_state state:8;
477 atomic_t write_count;
/*
 * Is the sector state up to date with the btree?
 * (Not the data itself)
 */
483 struct bch_folio_sector s[];
486 static inline void folio_sector_set(struct folio *folio,
488 unsigned i, unsigned n)
493 /* file offset (to folio offset) to bch_folio_sector index */
494 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
496 u64 f_offset = pos - folio_pos(folio);
497 BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
498 return f_offset >> SECTOR_SHIFT;
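/*
 * e.g. for a 4K folio at file position 8192, pos 9216 maps to sector index
 * (9216 - 8192) >> 9 == 2.
 */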
501 static inline struct bch_folio *__bch2_folio(struct folio *folio)
503 return folio_has_private(folio)
504 ? (struct bch_folio *) folio_get_private(folio)
508 static inline struct bch_folio *bch2_folio(struct folio *folio)
510 EBUG_ON(!folio_test_locked(folio));
512 return __bch2_folio(folio);
515 /* for newly allocated folios: */
516 static void __bch2_folio_release(struct folio *folio)
518 kfree(folio_detach_private(folio));
521 static void bch2_folio_release(struct folio *folio)
523 EBUG_ON(!folio_test_locked(folio));
524 __bch2_folio_release(folio);
527 /* for newly allocated folios: */
528 static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
532 s = kzalloc(sizeof(*s) +
533 sizeof(struct bch_folio_sector) *
534 folio_sectors(folio), GFP_NOFS|gfp);
538 spin_lock_init(&s->lock);
539 folio_attach_private(folio, s);
543 static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
545 return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
548 static unsigned bkey_to_sector_state(struct bkey_s_c k)
550 if (bkey_extent_is_reservation(k))
551 return SECTOR_reserved;
552 if (bkey_extent_is_allocation(k.k))
553 return SECTOR_allocated;
554 return SECTOR_unallocated;
557 static void __bch2_folio_set(struct folio *folio,
558 unsigned pg_offset, unsigned pg_len,
559 unsigned nr_ptrs, unsigned state)
561 struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
562 unsigned i, sectors = folio_sectors(folio);
564 BUG_ON(pg_offset >= sectors);
565 BUG_ON(pg_offset + pg_len > sectors);
569 for (i = pg_offset; i < pg_offset + pg_len; i++) {
570 s->s[i].nr_replicas = nr_ptrs;
571 folio_sector_set(folio, s, i, state);
577 spin_unlock(&s->lock);
/*
 * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
 * extents btree:
 */
584 static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
585 struct folio **folios, unsigned nr_folios)
587 struct btree_trans trans;
588 struct btree_iter iter;
590 u64 offset = folio_sector(folios[0]);
591 unsigned folio_idx = 0;
595 bch2_trans_init(&trans, c, 0, 0);
597 bch2_trans_begin(&trans);
599 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
603 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
604 SPOS(inum.inum, offset, snapshot),
605 BTREE_ITER_SLOTS, k, ret) {
606 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
607 unsigned state = bkey_to_sector_state(k);
609 while (folio_idx < nr_folios) {
610 struct folio *folio = folios[folio_idx];
611 u64 folio_start = folio_sector(folio);
612 u64 folio_end = folio_end_sector(folio);
613 unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
614 unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
616 BUG_ON(k.k->p.offset < folio_start);
617 BUG_ON(bkey_start_offset(k.k) > folio_end);
619 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
620 __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
622 if (k.k->p.offset < folio_end)
627 if (folio_idx == nr_folios)
631 offset = iter.pos.offset;
632 bch2_trans_iter_exit(&trans, &iter);
634 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
636 bch2_trans_exit(&trans);
641 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
643 struct bvec_iter iter;
645 unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
646 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
647 unsigned state = bkey_to_sector_state(k);
649 bio_for_each_folio(fv, bio, iter)
650 __bch2_folio_set(fv.fv_folio,
656 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
659 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
660 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
661 struct folio_batch fbatch;
667 folio_batch_init(&fbatch);
669 while (filemap_get_folios(inode->v.i_mapping,
670 &index, end_index, &fbatch)) {
671 for (i = 0; i < folio_batch_count(&fbatch); i++) {
672 struct folio *folio = fbatch.folios[i];
673 u64 folio_start = folio_sector(folio);
674 u64 folio_end = folio_end_sector(folio);
675 unsigned folio_offset = max(start, folio_start) - folio_start;
676 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
679 BUG_ON(end <= folio_start);
682 s = bch2_folio(folio);
686 for (j = folio_offset; j < folio_offset + folio_len; j++)
687 s->s[j].nr_replicas = 0;
688 spin_unlock(&s->lock);
693 folio_batch_release(&fbatch);
698 static void mark_pagecache_reserved(struct bch_inode_info *inode,
701 struct bch_fs *c = inode->v.i_sb->s_fs_info;
702 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
703 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
704 struct folio_batch fbatch;
705 s64 i_sectors_delta = 0;
711 folio_batch_init(&fbatch);
713 while (filemap_get_folios(inode->v.i_mapping,
714 &index, end_index, &fbatch)) {
715 for (i = 0; i < folio_batch_count(&fbatch); i++) {
716 struct folio *folio = fbatch.folios[i];
717 u64 folio_start = folio_sector(folio);
718 u64 folio_end = folio_end_sector(folio);
719 unsigned folio_offset = max(start, folio_start) - folio_start;
720 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
723 BUG_ON(end <= folio_start);
726 s = bch2_folio(folio);
730 for (j = folio_offset; j < folio_offset + folio_len; j++) {
731 i_sectors_delta -= s->s[j].state == SECTOR_dirty;
732 folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
734 spin_unlock(&s->lock);
739 folio_batch_release(&fbatch);
743 i_sectors_acct(c, inode, NULL, i_sectors_delta);
746 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
748 /* XXX: this should not be open coded */
749 return inode->ei_inode.bi_data_replicas
750 ? inode->ei_inode.bi_data_replicas - 1
751 : c->opts.data_replicas;
754 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
755 unsigned nr_replicas)
return max(0, (int) nr_replicas -
	   s->nr_replicas -
	   s->replicas_reserved);
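/*
 * e.g. with nr_replicas == 2, a sector with one allocated replica and nothing
 * reserved needs a reservation for 1 more replica; a fully allocated or fully
 * reserved sector needs 0.
 */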
762 static int bch2_get_folio_disk_reservation(struct bch_fs *c,
763 struct bch_inode_info *inode,
764 struct folio *folio, bool check_enospc)
766 struct bch_folio *s = bch2_folio_create(folio, 0);
767 unsigned nr_replicas = inode_nr_replicas(c, inode);
768 struct disk_reservation disk_res = { 0 };
769 unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
775 for (i = 0; i < sectors; i++)
776 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
778 if (!disk_res_sectors)
781 ret = bch2_disk_reservation_get(c, &disk_res,
784 ? BCH_DISK_RESERVATION_NOFAIL
789 for (i = 0; i < sectors; i++)
790 s->s[i].replicas_reserved +=
791 sectors_to_reserve(&s->s[i], nr_replicas);
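/*
 * bch2_folio_reservation pairs a disk space reservation with a quota
 * reservation for a range that's about to be dirtied; the two are taken and
 * released together.
 */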
796 struct bch2_folio_reservation {
797 struct disk_reservation disk;
798 struct quota_res quota;
801 static void bch2_folio_reservation_init(struct bch_fs *c,
802 struct bch_inode_info *inode,
803 struct bch2_folio_reservation *res)
805 memset(res, 0, sizeof(*res));
807 res->disk.nr_replicas = inode_nr_replicas(c, inode);
810 static void bch2_folio_reservation_put(struct bch_fs *c,
811 struct bch_inode_info *inode,
812 struct bch2_folio_reservation *res)
814 bch2_disk_reservation_put(c, &res->disk);
815 bch2_quota_reservation_put(c, inode, &res->quota);
818 static int bch2_folio_reservation_get(struct bch_fs *c,
819 struct bch_inode_info *inode,
821 struct bch2_folio_reservation *res,
822 unsigned offset, unsigned len)
824 struct bch_folio *s = bch2_folio_create(folio, 0);
825 unsigned i, disk_sectors = 0, quota_sectors = 0;
831 BUG_ON(!s->uptodate);
833 for (i = round_down(offset, block_bytes(c)) >> 9;
834 i < round_up(offset + len, block_bytes(c)) >> 9;
836 disk_sectors += sectors_to_reserve(&s->s[i],
837 res->disk.nr_replicas);
838 quota_sectors += s->s[i].state == SECTOR_unallocated;
842 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
848 ret = bch2_quota_reservation_add(c, inode, &res->quota,
849 quota_sectors, true);
851 struct disk_reservation tmp = {
852 .sectors = disk_sectors
855 bch2_disk_reservation_put(c, &tmp);
856 res->disk.sectors -= disk_sectors;
864 static void bch2_clear_folio_bits(struct folio *folio)
866 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
867 struct bch_fs *c = inode->v.i_sb->s_fs_info;
868 struct bch_folio *s = bch2_folio(folio);
869 struct disk_reservation disk_res = { 0 };
870 int i, sectors = folio_sectors(folio), dirty_sectors = 0;
875 EBUG_ON(!folio_test_locked(folio));
876 EBUG_ON(folio_test_writeback(folio));
878 for (i = 0; i < sectors; i++) {
879 disk_res.sectors += s->s[i].replicas_reserved;
880 s->s[i].replicas_reserved = 0;
882 dirty_sectors -= s->s[i].state == SECTOR_dirty;
883 folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
886 bch2_disk_reservation_put(c, &disk_res);
888 i_sectors_acct(c, inode, NULL, dirty_sectors);
890 bch2_folio_release(folio);
893 static void bch2_set_folio_dirty(struct bch_fs *c,
894 struct bch_inode_info *inode,
896 struct bch2_folio_reservation *res,
897 unsigned offset, unsigned len)
899 struct bch_folio *s = bch2_folio(folio);
900 unsigned i, dirty_sectors = 0;
902 WARN_ON((u64) folio_pos(folio) + offset + len >
903 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
905 BUG_ON(!s->uptodate);
909 for (i = round_down(offset, block_bytes(c)) >> 9;
910 i < round_up(offset + len, block_bytes(c)) >> 9;
912 unsigned sectors = sectors_to_reserve(&s->s[i],
913 res->disk.nr_replicas);
/*
 * This can happen if we race with the error path in
 * bch2_writepage_io_done():
 */
sectors = min_t(unsigned, sectors, res->disk.sectors);
921 s->s[i].replicas_reserved += sectors;
922 res->disk.sectors -= sectors;
924 dirty_sectors += s->s[i].state == SECTOR_unallocated;
926 folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
929 spin_unlock(&s->lock);
931 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
933 if (!folio_test_dirty(folio))
934 filemap_dirty_folio(inode->v.i_mapping, folio);
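/*
 * mmap write path: bch2_page_mkwrite() takes folio reservations up front, so
 * that by the time the folio is dirtied writeback already has the disk space
 * and quota it needs.
 */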
937 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
939 struct file *file = vmf->vma->vm_file;
940 struct address_space *mapping = file->f_mapping;
941 struct address_space *fdm = faults_disabled_mapping();
942 struct bch_inode_info *inode = file_bch_inode(file);
946 return VM_FAULT_SIGBUS;
950 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
952 if (bch2_pagecache_add_tryget(inode))
955 bch2_pagecache_block_put(fdm_host);
957 bch2_pagecache_add_get(inode);
958 bch2_pagecache_add_put(inode);
960 bch2_pagecache_block_get(fdm_host);
962 /* Signal that lock has been dropped: */
963 set_fdm_dropped_locks();
964 return VM_FAULT_SIGBUS;
967 bch2_pagecache_add_get(inode);
969 ret = filemap_fault(vmf);
970 bch2_pagecache_add_put(inode);
975 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
977 struct folio *folio = page_folio(vmf->page);
978 struct file *file = vmf->vma->vm_file;
979 struct bch_inode_info *inode = file_bch_inode(file);
980 struct address_space *mapping = file->f_mapping;
981 struct bch_fs *c = inode->v.i_sb->s_fs_info;
982 struct bch2_folio_reservation res;
987 bch2_folio_reservation_init(c, inode, &res);
989 sb_start_pagefault(inode->v.i_sb);
990 file_update_time(file);
/*
 * Not strictly necessary, but helps avoid dio writes livelocking in
 * write_invalidate_inode_pages_range() - can drop this if/when we get
 * a write_invalidate_inode_pages_range() that works without dropping
 * page lock before invalidating page
 */
998 bch2_pagecache_add_get(inode);
1001 isize = i_size_read(&inode->v);
1003 if (folio->mapping != mapping || folio_pos(folio) >= isize) {
1004 folio_unlock(folio);
1005 ret = VM_FAULT_NOPAGE;
1009 len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
1011 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
1012 if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
1013 folio_unlock(folio);
1014 ret = VM_FAULT_SIGBUS;
1019 if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
1020 folio_unlock(folio);
1021 ret = VM_FAULT_SIGBUS;
1025 bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
1026 bch2_folio_reservation_put(c, inode, &res);
1028 folio_wait_stable(folio);
1029 ret = VM_FAULT_LOCKED;
1031 bch2_pagecache_add_put(inode);
1032 sb_end_pagefault(inode->v.i_sb);
1037 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1039 if (offset || length < folio_size(folio))
1042 bch2_clear_folio_bits(folio);
1045 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
1047 if (folio_test_dirty(folio) || folio_test_writeback(folio))
1050 bch2_clear_folio_bits(folio);
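/* readpage(s): */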
1056 static void bch2_readpages_end_io(struct bio *bio)
1058 struct bvec_iter_all iter;
1059 struct folio_vec fv;
1061 bio_for_each_folio_all(fv, bio, iter) {
1062 if (!bio->bi_status) {
1063 folio_mark_uptodate(fv.fv_folio);
1065 folio_clear_uptodate(fv.fv_folio);
1066 folio_set_error(fv.fv_folio);
1068 folio_unlock(fv.fv_folio);
1074 struct readpages_iter {
1075 struct address_space *mapping;
1080 static int readpages_iter_init(struct readpages_iter *iter,
1081 struct readahead_control *ractl)
1086 memset(iter, 0, sizeof(*iter));
1088 iter->mapping = ractl->mapping;
1090 ret = filemap_get_contig_folios_d(iter->mapping,
1091 ractl->_index << PAGE_SHIFT,
1092 (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
1093 0, mapping_gfp_mask(iter->mapping),
1098 darray_for_each(iter->folios, fi) {
1099 ractl->_nr_pages -= 1U << folio_order(*fi);
1100 __bch2_folio_create(*fi, __GFP_NOFAIL);
1108 static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
1110 if (iter->idx >= iter->folios.nr)
1112 return iter->folios.data[iter->idx];
1115 static inline void readpage_iter_advance(struct readpages_iter *iter)
1120 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1122 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1123 struct bch_extent_crc_unpacked crc;
1124 const union bch_extent_entry *i;
1126 bkey_for_each_crc(k.k, ptrs, crc, i)
1127 if (crc.csum_type || crc.compression_type)
1132 static void readpage_bio_extend(struct readpages_iter *iter,
1134 unsigned sectors_this_extent,
1137 while (bio_sectors(bio) < sectors_this_extent &&
1138 bio->bi_vcnt < bio->bi_max_vecs) {
1139 struct folio *folio = readpage_iter_peek(iter);
1143 readpage_iter_advance(iter);
1145 pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1150 folio = xa_load(&iter->mapping->i_pages, folio_offset);
1151 if (folio && !xa_is_value(folio))
1154 folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
1158 if (!__bch2_folio_create(folio, 0)) {
1163 ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
1165 __bch2_folio_release(folio);
1173 BUG_ON(folio_sector(folio) != bio_end_sector(bio));
1175 BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
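/*
 * bchfs_read(): walk the extents btree over the range covered by @rbio and
 * issue reads extent by extent; shared by bch2_readahead() and
 * bch2_read_single_folio().
 */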
1179 static void bchfs_read(struct btree_trans *trans,
1180 struct bch_read_bio *rbio,
1182 struct readpages_iter *readpages_iter)
1184 struct bch_fs *c = trans->c;
1185 struct btree_iter iter;
1187 int flags = BCH_READ_RETRY_IF_STALE|
1188 BCH_READ_MAY_PROMOTE;
1193 rbio->start_time = local_clock();
1194 rbio->subvol = inum.subvol;
1196 bch2_bkey_buf_init(&sk);
1198 bch2_trans_begin(trans);
1199 iter = (struct btree_iter) { NULL };
1201 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1205 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1206 SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1210 unsigned bytes, sectors, offset_into_extent;
1211 enum btree_id data_btree = BTREE_ID_extents;
1214 * read_extent -> io_time_reset may cause a transaction restart
1215 * without returning an error, we need to check for that here:
1217 ret = bch2_trans_relock(trans);
1221 bch2_btree_iter_set_pos(&iter,
1222 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1224 k = bch2_btree_iter_peek_slot(&iter);
1229 offset_into_extent = iter.pos.offset -
1230 bkey_start_offset(k.k);
1231 sectors = k.k->size - offset_into_extent;
1233 bch2_bkey_buf_reassemble(&sk, c, k);
1235 ret = bch2_read_indirect_extent(trans, &data_btree,
1236 &offset_into_extent, &sk);
1240 k = bkey_i_to_s_c(sk.k);
1242 sectors = min(sectors, k.k->size - offset_into_extent);
1245 readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1246 extent_partial_reads_expensive(k));
1248 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1249 swap(rbio->bio.bi_iter.bi_size, bytes);
1251 if (rbio->bio.bi_iter.bi_size == bytes)
1252 flags |= BCH_READ_LAST_FRAGMENT;
1254 bch2_bio_page_state_set(&rbio->bio, k);
1256 bch2_read_extent(trans, rbio, iter.pos,
1257 data_btree, k, offset_into_extent, flags);
1259 if (flags & BCH_READ_LAST_FRAGMENT)
1262 swap(rbio->bio.bi_iter.bi_size, bytes);
1263 bio_advance(&rbio->bio, bytes);
1265 ret = btree_trans_too_many_iters(trans);
1270 bch2_trans_iter_exit(trans, &iter);
1272 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1276 bch_err_inum_offset_ratelimited(c,
1278 iter.pos.offset << 9,
1279 "read error %i from btree lookup", ret);
1280 rbio->bio.bi_status = BLK_STS_IOERR;
1281 bio_endio(&rbio->bio);
1284 bch2_bkey_buf_exit(&sk, c);
1287 void bch2_readahead(struct readahead_control *ractl)
1289 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1290 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1291 struct bch_io_opts opts;
1292 struct btree_trans trans;
1293 struct folio *folio;
1294 struct readpages_iter readpages_iter;
1297 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1299 ret = readpages_iter_init(&readpages_iter, ractl);
1302 bch2_trans_init(&trans, c, 0, 0);
1304 bch2_pagecache_add_get(inode);
1306 while ((folio = readpage_iter_peek(&readpages_iter))) {
1307 unsigned n = min_t(unsigned,
1308 readpages_iter.folios.nr -
1311 struct bch_read_bio *rbio =
1312 rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1313 GFP_NOFS, &c->bio_read),
1316 readpage_iter_advance(&readpages_iter);
1318 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1319 rbio->bio.bi_end_io = bch2_readpages_end_io;
1320 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1322 bchfs_read(&trans, rbio, inode_inum(inode),
1326 bch2_pagecache_add_put(inode);
1328 bch2_trans_exit(&trans);
1329 darray_exit(&readpages_iter.folios);
1332 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
1333 subvol_inum inum, struct folio *folio)
1335 struct btree_trans trans;
1337 bch2_folio_create(folio, __GFP_NOFAIL);
1339 rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
1340 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1341 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1343 bch2_trans_init(&trans, c, 0, 0);
1344 bchfs_read(&trans, rbio, inum, NULL);
1345 bch2_trans_exit(&trans);
1348 static void bch2_read_single_folio_end_io(struct bio *bio)
1350 complete(bio->bi_private);
1353 static int bch2_read_single_folio(struct folio *folio,
1354 struct address_space *mapping)
1356 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1357 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1358 struct bch_read_bio *rbio;
1359 struct bch_io_opts opts;
1361 DECLARE_COMPLETION_ONSTACK(done);
1363 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1365 rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
1367 rbio->bio.bi_private = &done;
1368 rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
1370 __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
1371 wait_for_completion(&done);
1373 ret = blk_status_to_errno(rbio->bio.bi_status);
1374 bio_put(&rbio->bio);
1379 folio_mark_uptodate(folio);
1383 int bch2_read_folio(struct file *file, struct folio *folio)
1387 ret = bch2_read_single_folio(folio, folio->mapping);
1388 folio_unlock(folio);
1389 return bch2_err_class(ret);
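/* writepages: */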
1394 struct bch_writepage_state {
1395 struct bch_writepage_io *io;
1396 struct bch_io_opts opts;
1397 struct bch_folio_sector *tmp;
1398 unsigned tmp_sectors;
1401 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1402 struct bch_inode_info *inode)
1404 struct bch_writepage_state ret = { 0 };
1406 bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
1410 static void bch2_writepage_io_done(struct bch_write_op *op)
1412 struct bch_writepage_io *io =
1413 container_of(op, struct bch_writepage_io, op);
1414 struct bch_fs *c = io->op.c;
1415 struct bio *bio = &io->op.wbio.bio;
1416 struct bvec_iter_all iter;
1417 struct folio_vec fv;
1421 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1423 bio_for_each_folio_all(fv, bio, iter) {
1424 struct bch_folio *s;
1426 folio_set_error(fv.fv_folio);
1427 mapping_set_error(fv.fv_folio->mapping, -EIO);
1429 s = __bch2_folio(fv.fv_folio);
1430 spin_lock(&s->lock);
1431 for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1432 s->s[i].nr_replicas = 0;
1433 spin_unlock(&s->lock);
1437 if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1438 bio_for_each_folio_all(fv, bio, iter) {
1439 struct bch_folio *s;
1441 s = __bch2_folio(fv.fv_folio);
1442 spin_lock(&s->lock);
1443 for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1444 s->s[i].nr_replicas = 0;
1445 spin_unlock(&s->lock);
/*
 * racing with fallocate can cause us to add fewer sectors than
 * expected - but we shouldn't add more sectors than expected:
 */
1453 WARN_ON_ONCE(io->op.i_sectors_delta > 0);
/*
 * (error (due to going RO) halfway through a page can screw that up
 * slightly)
BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
 */
/*
 * PageWriteback is effectively our ref on the inode - fixup i_blocks
 * before calling end_page_writeback:
 */
1466 i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1468 bio_for_each_folio_all(fv, bio, iter) {
1469 struct bch_folio *s = __bch2_folio(fv.fv_folio);
1471 if (atomic_dec_and_test(&s->write_count))
1472 folio_end_writeback(fv.fv_folio);
1475 bio_put(&io->op.wbio.bio);
1478 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1480 struct bch_writepage_io *io = w->io;
1483 closure_call(&io->op.cl, bch2_write, NULL, NULL);
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
1490 static void bch2_writepage_io_alloc(struct bch_fs *c,
1491 struct writeback_control *wbc,
1492 struct bch_writepage_state *w,
1493 struct bch_inode_info *inode,
1495 unsigned nr_replicas)
1497 struct bch_write_op *op;
1499 w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1502 &c->writepage_bioset),
1503 struct bch_writepage_io, op.wbio.bio);
1505 w->io->inode = inode;
1507 bch2_write_op_init(op, c, w->opts);
1508 op->target = w->opts.foreground_target;
1509 op->nr_replicas = nr_replicas;
1510 op->res.nr_replicas = nr_replicas;
1511 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1512 op->subvol = inode->ei_subvol;
1513 op->pos = POS(inode->v.i_ino, sector);
1514 op->end_io = bch2_writepage_io_done;
1515 op->devs_need_flush = &inode->ei_devs_need_flush;
1516 op->wbio.bio.bi_iter.bi_sector = sector;
1517 op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
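/*
 * __bch2_writepage(): write back one folio. The per-sector reservation state
 * is copied under the folio lock, then runs of dirty sectors are added to a
 * (possibly shared) bch_writepage_io and submitted via bch2_writepage_do_io().
 */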
1520 static int __bch2_writepage(struct folio *folio,
1521 struct writeback_control *wbc,
1524 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
1525 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1526 struct bch_writepage_state *w = data;
1527 struct bch_folio *s;
1528 unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
1529 loff_t i_size = i_size_read(&inode->v);
1532 EBUG_ON(!folio_test_uptodate(folio));
1534 /* Is the folio fully inside i_size? */
1535 if (folio_end_pos(folio) <= i_size)
1538 /* Is the folio fully outside i_size? (truncate in progress) */
1539 if (folio_pos(folio) >= i_size) {
1540 folio_unlock(folio);
/*
 * The folio straddles i_size. It must be zeroed out on each and every
 * writepage invocation because it may be mmapped. "A file is mapped
 * in multiples of the folio size. For a file that is not a multiple of
 * the folio size, the remaining memory is zeroed when mapped, and
 * writes to that region are not written out to the file."
 */
1551 folio_zero_segment(folio,
1552 i_size - folio_pos(folio),
1555 f_sectors = folio_sectors(folio);
1556 s = bch2_folio_create(folio, __GFP_NOFAIL);
1558 if (f_sectors > w->tmp_sectors) {
1560 w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
1561 f_sectors, __GFP_NOFAIL);
1562 w->tmp_sectors = f_sectors;
/*
 * Things get really hairy with errors during writeback:
 */
1568 ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
/* Before unlocking the page, get a copy of the reservations: */
1572 spin_lock(&s->lock);
1573 memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
1575 for (i = 0; i < f_sectors; i++) {
1576 if (s->s[i].state < SECTOR_dirty)
1579 nr_replicas_this_write =
1580 min_t(unsigned, nr_replicas_this_write,
1581 s->s[i].nr_replicas +
1582 s->s[i].replicas_reserved);
1585 for (i = 0; i < f_sectors; i++) {
1586 if (s->s[i].state < SECTOR_dirty)
1589 s->s[i].nr_replicas = w->opts.compression
1590 ? 0 : nr_replicas_this_write;
1592 s->s[i].replicas_reserved = 0;
1593 folio_sector_set(folio, s, i, SECTOR_allocated);
1595 spin_unlock(&s->lock);
1597 BUG_ON(atomic_read(&s->write_count));
1598 atomic_set(&s->write_count, 1);
1600 BUG_ON(folio_test_writeback(folio));
1601 folio_start_writeback(folio);
1603 folio_unlock(folio);
1607 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1610 while (offset < f_sectors &&
1611 w->tmp[offset].state < SECTOR_dirty)
1614 if (offset == f_sectors)
1617 while (offset + sectors < f_sectors &&
1618 w->tmp[offset + sectors].state >= SECTOR_dirty) {
1619 reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
1620 dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
1625 sector = folio_sector(folio) + offset;
1628 (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1629 bio_full(&w->io->op.wbio.bio, sectors << 9) ||
1630 w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1631 (BIO_MAX_VECS * PAGE_SIZE) ||
1632 bio_end_sector(&w->io->op.wbio.bio) != sector))
1633 bch2_writepage_do_io(w);
1636 bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1637 nr_replicas_this_write);
1639 atomic_inc(&s->write_count);
1641 BUG_ON(inode != w->io->inode);
1642 BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
1643 sectors << 9, offset << 9));
1645 /* Check for writing past i_size: */
1646 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1647 round_up(i_size, block_bytes(c)) &&
1648 !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1649 "writing past i_size: %llu > %llu (unrounded %llu)\n",
1650 bio_end_sector(&w->io->op.wbio.bio) << 9,
1651 round_up(i_size, block_bytes(c)),
1654 w->io->op.res.sectors += reserved_sectors;
1655 w->io->op.i_sectors_delta -= dirty_sectors;
1656 w->io->op.new_i_size = i_size;
1661 if (atomic_dec_and_test(&s->write_count))
1662 folio_end_writeback(folio);
1667 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1669 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1670 struct bch_writepage_state w =
1671 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1672 struct blk_plug plug;
1675 blk_start_plug(&plug);
1676 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1678 bch2_writepage_do_io(&w);
1679 blk_finish_plug(&plug);
1681 return bch2_err_class(ret);
1684 /* buffered writes: */
1686 int bch2_write_begin(struct file *file, struct address_space *mapping,
1687 loff_t pos, unsigned len,
1688 struct page **pagep, void **fsdata)
1690 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1691 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1692 struct bch2_folio_reservation *res;
1693 struct folio *folio;
1697 res = kmalloc(sizeof(*res), GFP_KERNEL);
1701 bch2_folio_reservation_init(c, inode, res);
1704 bch2_pagecache_add_get(inode);
1706 folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
1707 FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
1708 mapping_gfp_mask(mapping));
1712 if (folio_test_uptodate(folio))
1715 offset = pos - folio_pos(folio);
1716 len = min_t(size_t, len, folio_end_pos(folio) - pos);
1718 /* If we're writing entire folio, don't need to read it in first: */
1719 if (!offset && len == folio_size(folio))
1722 if (!offset && pos + len >= inode->v.i_size) {
1723 folio_zero_segment(folio, len, folio_size(folio));
1724 flush_dcache_folio(folio);
1728 if (folio_pos(folio) >= inode->v.i_size) {
1729 folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
1730 flush_dcache_folio(folio);
1734 ret = bch2_read_single_folio(folio, mapping);
1738 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
1739 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
1744 ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
1746 if (!folio_test_uptodate(folio)) {
/*
 * If the folio hasn't been read in, we won't know if we
 * actually need a reservation - we don't actually need
 * to read here, we just need to check if the folio is
 * fully backed by uncompressed data:
 */
1759 *pagep = &folio->page;
1762 folio_unlock(folio);
1766 bch2_pagecache_add_put(inode);
1769 return bch2_err_class(ret);
1772 int bch2_write_end(struct file *file, struct address_space *mapping,
1773 loff_t pos, unsigned len, unsigned copied,
1774 struct page *page, void *fsdata)
1776 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1777 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1778 struct bch2_folio_reservation *res = fsdata;
1779 struct folio *folio = page_folio(page);
1780 unsigned offset = pos - folio_pos(folio);
1782 lockdep_assert_held(&inode->v.i_rwsem);
1783 BUG_ON(offset + copied > folio_size(folio));
1785 if (unlikely(copied < len && !folio_test_uptodate(folio))) {
/*
 * The folio needs to be read in, but that would destroy
 * our partial write - simplest thing is to just force
 * userspace to redo the write:
 */
1791 folio_zero_range(folio, 0, folio_size(folio));
1792 flush_dcache_folio(folio);
1796 spin_lock(&inode->v.i_lock);
1797 if (pos + copied > inode->v.i_size)
1798 i_size_write(&inode->v, pos + copied);
1799 spin_unlock(&inode->v.i_lock);
1802 if (!folio_test_uptodate(folio))
1803 folio_mark_uptodate(folio);
1805 bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
1807 inode->ei_last_dirtied = (unsigned long) current;
1810 folio_unlock(folio);
1812 bch2_pagecache_add_put(inode);
1814 bch2_folio_reservation_put(c, inode, res);
1820 static noinline void folios_trunc(folios *folios, struct folio **fi)
1822 while (folios->data + folios->nr > fi) {
1823 struct folio *f = darray_pop(folios);
1830 static int __bch2_buffered_write(struct bch_inode_info *inode,
1831 struct address_space *mapping,
1832 struct iov_iter *iter,
1833 loff_t pos, unsigned len)
1835 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1836 struct bch2_folio_reservation res;
1838 struct folio **fi, *f;
1839 unsigned copied = 0, f_offset;
1840 u64 end = pos + len, f_pos;
1841 loff_t last_folio_pos = inode->v.i_size;
1846 bch2_folio_reservation_init(c, inode, &res);
1847 darray_init(&folios);
1849 ret = filemap_get_contig_folios_d(mapping, pos, end,
1850 FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
1851 mapping_gfp_mask(mapping),
1858 f = darray_first(folios);
1859 if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
1860 ret = bch2_read_single_folio(f, mapping);
1865 f = darray_last(folios);
1866 end = min(end, folio_end_pos(f));
1867 last_folio_pos = folio_pos(f);
1868 if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
1869 if (end >= inode->v.i_size) {
1870 folio_zero_range(f, 0, folio_size(f));
1872 ret = bch2_read_single_folio(f, mapping);
1879 f_offset = pos - folio_pos(darray_first(folios));
1880 darray_for_each(folios, fi) {
1881 struct folio *f = *fi;
1882 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1884 if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
1885 ret = bch2_folio_set(c, inode_inum(inode), fi,
1886 folios.data + folios.nr - fi);
/*
 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
 * supposed to write as much as we have disk space for.
 *
 * On failure here we should still write out a partial page if
 * we aren't completely out of disk space - we don't do that
 * yet:
 */
1899 ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
1900 if (unlikely(ret)) {
1901 folios_trunc(&folios, fi);
1905 end = min(end, folio_end_pos(darray_last(folios)));
1909 f_pos = folio_end_pos(f);
1913 if (mapping_writably_mapped(mapping))
1914 darray_for_each(folios, fi)
1915 flush_dcache_folio(*fi);
1918 f_offset = pos - folio_pos(darray_first(folios));
1919 darray_for_each(folios, fi) {
1920 struct folio *f = *fi;
1921 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1922 unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
1925 folios_trunc(&folios, fi);
1929 if (!folio_test_uptodate(f) &&
1930 f_copied != folio_size(f) &&
1931 pos + copied + f_copied < inode->v.i_size) {
1932 folio_zero_range(f, 0, folio_size(f));
1933 folios_trunc(&folios, fi);
1937 flush_dcache_folio(f);
1940 if (f_copied != f_len) {
1941 folios_trunc(&folios, fi + 1);
1945 f_pos = folio_end_pos(f);
1954 spin_lock(&inode->v.i_lock);
1955 if (end > inode->v.i_size)
1956 i_size_write(&inode->v, end);
1957 spin_unlock(&inode->v.i_lock);
1960 f_offset = pos - folio_pos(darray_first(folios));
1961 darray_for_each(folios, fi) {
1962 struct folio *f = *fi;
1963 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1965 if (!folio_test_uptodate(f))
1966 folio_mark_uptodate(f);
1968 bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
1970 f_pos = folio_end_pos(f);
1974 inode->ei_last_dirtied = (unsigned long) current;
1976 darray_for_each(folios, fi) {
/*
 * If the last folio added to the mapping starts beyond current EOF, we
 * performed a short write but left around at least one post-EOF folio.
 * Clean up the mapping before we return.
 */
1986 if (last_folio_pos >= inode->v.i_size)
1987 truncate_pagecache(&inode->v, inode->v.i_size);
1989 darray_exit(&folios);
1990 bch2_folio_reservation_put(c, inode, &res);
1992 return copied ?: ret;
1995 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1997 struct file *file = iocb->ki_filp;
1998 struct address_space *mapping = file->f_mapping;
1999 struct bch_inode_info *inode = file_bch_inode(file);
2000 loff_t pos = iocb->ki_pos;
2001 ssize_t written = 0;
2004 bch2_pagecache_add_get(inode);
2007 unsigned offset = pos & (PAGE_SIZE - 1);
2008 unsigned bytes = iov_iter_count(iter);
/*
 * Bring in the user page that we will copy from _first_.
 * Otherwise there's a nasty deadlock on copying from the
 * same page as we're writing to, without it being marked
 * up-to-date.
 *
 * Not only is this an optimisation, but it is also required
 * to check that the address is actually valid, when atomic
 * usercopies are used, below.
 */
2020 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2021 bytes = min_t(unsigned long, iov_iter_count(iter),
2022 PAGE_SIZE - offset);
2024 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2030 if (unlikely(fatal_signal_pending(current))) {
2035 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
2036 if (unlikely(ret < 0))
2041 if (unlikely(ret == 0)) {
/*
 * If we were unable to copy any data at all, we must
 * fall back to a single segment length write.
 *
 * If we didn't fallback here, we could livelock
 * because not all segments in the iov can be copied at
 * once without a pagefault.
 */
2050 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2051 iov_iter_single_seg_count(iter));
2058 balance_dirty_pages_ratelimited(mapping);
2059 } while (iov_iter_count(iter));
2061 bch2_pagecache_add_put(inode);
2063 return written ? written : ret;
2066 /* O_DIRECT reads */
2068 static void bio_check_or_release(struct bio *bio, bool check_dirty)
2071 bio_check_pages_dirty(bio);
2073 bio_release_pages(bio, false);
2078 static void bch2_dio_read_complete(struct closure *cl)
2080 struct dio_read *dio = container_of(cl, struct dio_read, cl);
2082 dio->req->ki_complete(dio->req, dio->ret);
2083 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2086 static void bch2_direct_IO_read_endio(struct bio *bio)
2088 struct dio_read *dio = bio->bi_private;
2091 dio->ret = blk_status_to_errno(bio->bi_status);
2093 closure_put(&dio->cl);
2096 static void bch2_direct_IO_read_split_endio(struct bio *bio)
2098 struct dio_read *dio = bio->bi_private;
2099 bool should_dirty = dio->should_dirty;
2101 bch2_direct_IO_read_endio(bio);
2102 bio_check_or_release(bio, should_dirty);
2105 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
2107 struct file *file = req->ki_filp;
2108 struct bch_inode_info *inode = file_bch_inode(file);
2109 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2110 struct bch_io_opts opts;
2111 struct dio_read *dio;
2113 loff_t offset = req->ki_pos;
2114 bool sync = is_sync_kiocb(req);
2118 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2120 if ((offset|iter->count) & (block_bytes(c) - 1))
2123 ret = min_t(loff_t, iter->count,
2124 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
2129 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
2130 iter->count -= shorten;
2132 bio = bio_alloc_bioset(NULL,
2133 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2136 &c->dio_read_bioset);
2138 bio->bi_end_io = bch2_direct_IO_read_endio;
2140 dio = container_of(bio, struct dio_read, rbio.bio);
2141 closure_init(&dio->cl, NULL);
/*
 * this is a _really_ horrible hack just to avoid an atomic sub at the
 * end:
 */
2148 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
2149 atomic_set(&dio->cl.remaining,
2150 CLOSURE_REMAINING_INITIALIZER -
2152 CLOSURE_DESTRUCTOR);
2154 atomic_set(&dio->cl.remaining,
2155 CLOSURE_REMAINING_INITIALIZER + 1);
/*
 * This is one of the sketchier things I've encountered: we have to skip
 * the dirtying of requests that are internal from the kernel (i.e. from
 * loopback), because we'll deadlock on page_lock.
 */
2165 dio->should_dirty = iter_is_iovec(iter);
2168 while (iter->count) {
2169 bio = bio_alloc_bioset(NULL,
2170 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2174 bio->bi_end_io = bch2_direct_IO_read_split_endio;
2176 bio->bi_opf = REQ_OP_READ|REQ_SYNC;
2177 bio->bi_iter.bi_sector = offset >> 9;
2178 bio->bi_private = dio;
2180 ret = bio_iov_iter_get_pages(bio, iter);
2182 /* XXX: fault inject this path */
2183 bio->bi_status = BLK_STS_RESOURCE;
2188 offset += bio->bi_iter.bi_size;
2190 if (dio->should_dirty)
2191 bio_set_pages_dirty(bio);
2194 closure_get(&dio->cl);
2196 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2199 iter->count += shorten;
2202 closure_sync(&dio->cl);
2203 closure_debug_destroy(&dio->cl);
2205 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2208 return -EIOCBQUEUED;
2212 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2214 struct file *file = iocb->ki_filp;
2215 struct bch_inode_info *inode = file_bch_inode(file);
2216 struct address_space *mapping = file->f_mapping;
2217 size_t count = iov_iter_count(iter);
2221 return 0; /* skip atime */
2223 if (iocb->ki_flags & IOCB_DIRECT) {
2224 struct blk_plug plug;
2226 if (unlikely(mapping->nrpages)) {
2227 ret = filemap_write_and_wait_range(mapping,
2229 iocb->ki_pos + count - 1);
2234 file_accessed(file);
2236 blk_start_plug(&plug);
2237 ret = bch2_direct_IO_read(iocb, iter);
2238 blk_finish_plug(&plug);
2241 iocb->ki_pos += ret;
2243 bch2_pagecache_add_get(inode);
2244 ret = generic_file_read_iter(iocb, iter);
2245 bch2_pagecache_add_put(inode);
2248 return bch2_err_class(ret);
2251 /* O_DIRECT writes */
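/*
 * A dio_write tracks one O_DIRECT write kiocb: bch2_dio_write_loop() issues
 * as many bch_write_ops as needed to consume the iov_iter, either
 * synchronously or, for async kiocbs, continuing from the write op's end_io
 * callback (bch2_dio_write_loop_async()).
 */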
2253 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2254 u64 offset, u64 size,
2255 unsigned nr_replicas, bool compressed)
2257 struct btree_trans trans;
2258 struct btree_iter iter;
2260 u64 end = offset + size;
2265 bch2_trans_init(&trans, c, 0, 0);
2267 bch2_trans_begin(&trans);
2269 err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2273 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2274 SPOS(inum.inum, offset, snapshot),
2275 BTREE_ITER_SLOTS, k, err) {
2276 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
2279 if (k.k->p.snapshot != snapshot ||
2280 nr_replicas > bch2_bkey_replicas(c, k) ||
2281 (!compressed && bch2_bkey_sectors_compressed(k))) {
2287 offset = iter.pos.offset;
2288 bch2_trans_iter_exit(&trans, &iter);
2290 if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2292 bch2_trans_exit(&trans);
2294 return err ? false : ret;
2297 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2299 struct bch_fs *c = dio->op.c;
2300 struct bch_inode_info *inode = dio->inode;
2301 struct bio *bio = &dio->op.wbio.bio;
2303 return bch2_check_range_allocated(c, inode_inum(inode),
2304 dio->op.pos.offset, bio_sectors(bio),
2305 dio->op.opts.data_replicas,
2306 dio->op.opts.compression != 0);
2309 static void bch2_dio_write_loop_async(struct bch_write_op *);
2310 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2312 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2314 struct iovec *iov = dio->inline_vecs;
2316 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2317 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2322 dio->free_iov = true;
2325 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2326 dio->iter.iov = iov;
2330 static void bch2_dio_write_flush_done(struct closure *cl)
2332 struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2333 struct bch_fs *c = dio->op.c;
2335 closure_debug_destroy(cl);
2337 dio->op.error = bch2_journal_error(&c->journal);
2339 bch2_dio_write_done(dio);
2342 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2344 struct bch_fs *c = dio->op.c;
2345 struct bch_inode_unpacked inode;
2350 closure_init(&dio->op.cl, NULL);
2352 if (!dio->op.error) {
2353 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2355 dio->op.error = ret;
2357 bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2358 bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2363 closure_sync(&dio->op.cl);
2364 closure_debug_destroy(&dio->op.cl);
2366 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2370 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2372 struct kiocb *req = dio->req;
2373 struct bch_inode_info *inode = dio->inode;
2374 bool sync = dio->sync;
2377 if (unlikely(dio->flush)) {
2378 bch2_dio_write_flush(dio);
2380 return -EIOCBQUEUED;
2383 bch2_pagecache_block_put(inode);
2386 kfree(dio->iter.iov);
2388 ret = dio->op.error ?: ((long) dio->written << 9);
2389 bio_put(&dio->op.wbio.bio);
2391 /* inode->i_dio_count is our ref on inode and thus bch_fs */
2392 inode_dio_end(&inode->v);
2395 ret = bch2_err_class(ret);
2398 req->ki_complete(req, ret);
2404 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2406 struct bch_fs *c = dio->op.c;
2407 struct kiocb *req = dio->req;
2408 struct bch_inode_info *inode = dio->inode;
2409 struct bio *bio = &dio->op.wbio.bio;
2411 req->ki_pos += (u64) dio->op.written << 9;
2412 dio->written += dio->op.written;
2414 if (dio->extending) {
2415 spin_lock(&inode->v.i_lock);
2416 if (req->ki_pos > inode->v.i_size)
2417 i_size_write(&inode->v, req->ki_pos);
2418 spin_unlock(&inode->v.i_lock);
2421 if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2422 mutex_lock(&inode->ei_quota_lock);
2423 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2424 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2425 mutex_unlock(&inode->ei_quota_lock);
2428 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
2429 struct bvec_iter_all iter;
2430 struct folio_vec fv;
2432 bio_for_each_folio_all(fv, bio, iter)
2433 folio_put(fv.fv_folio);
2436 if (unlikely(dio->op.error))
2437 set_bit(EI_INODE_ERROR, &inode->ei_flags);
2440 static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
2442 struct bch_fs *c = dio->op.c;
2443 struct kiocb *req = dio->req;
2444 struct address_space *mapping = dio->mapping;
2445 struct bch_inode_info *inode = dio->inode;
2446 struct bch_io_opts opts;
2447 struct bio *bio = &dio->op.wbio.bio;
2448 unsigned unaligned, iter_count;
2449 bool sync = dio->sync, dropped_locks;
2452 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2455 iter_count = dio->iter.count;
2457 EBUG_ON(current->faults_disabled_mapping);
2458 current->faults_disabled_mapping = mapping;
2460 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2462 dropped_locks = fdm_dropped_locks();
2464 current->faults_disabled_mapping = NULL;
/*
 * If the fault handler returned an error but also signalled
 * that it dropped & retook ei_pagecache_lock, we just need to
 * re-shoot down the page cache and retry:
 */
2471 if (dropped_locks && ret)
2474 if (unlikely(ret < 0))
2477 if (unlikely(dropped_locks)) {
2478 ret = write_invalidate_inode_pages_range(mapping,
2480 req->ki_pos + iter_count - 1);
2484 if (!bio->bi_iter.bi_size)
2488 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2489 bio->bi_iter.bi_size -= unaligned;
2490 iov_iter_revert(&dio->iter, unaligned);
2492 if (!bio->bi_iter.bi_size) {
/*
 * bio_iov_iter_get_pages was only able to get <
 * blocksize worth of pages:
 */
2501 bch2_write_op_init(&dio->op, c, opts);
2502 dio->op.end_io = sync
2504 : bch2_dio_write_loop_async;
2505 dio->op.target = dio->op.opts.foreground_target;
2506 dio->op.write_point = writepoint_hashed((unsigned long) current);
2507 dio->op.nr_replicas = dio->op.opts.data_replicas;
2508 dio->op.subvol = inode->ei_subvol;
2509 dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2510 dio->op.devs_need_flush = &inode->ei_devs_need_flush;
2513 dio->op.flags |= BCH_WRITE_SYNC;
2514 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2516 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2517 bio_sectors(bio), true);
2521 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2522 dio->op.opts.data_replicas, 0);
2523 if (unlikely(ret) &&
2524 !bch2_dio_write_check_allocated(dio))
2527 task_io_account_write(bio->bi_iter.bi_size);
2529 if (unlikely(dio->iter.count) &&
2532 bch2_dio_write_copy_iov(dio))
2533 dio->sync = sync = true;
2536 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2539 return -EIOCBQUEUED;
2541 bch2_dio_write_end(dio);
2543 if (likely(!dio->iter.count) || dio->op.error)
2546 bio_reset(bio, NULL, REQ_OP_WRITE);
2549 return bch2_dio_write_done(dio);
2551 dio->op.error = ret;
2553 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
2554 struct bvec_iter_all iter;
2555 struct folio_vec fv;
2557 bio_for_each_folio_all(fv, bio, iter)
2558 folio_put(fv.fv_folio);
2561 bch2_quota_reservation_put(c, inode, &dio->quota_res);
2565 static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
2567 struct mm_struct *mm = dio->mm;
2569 bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
2573 bch2_dio_write_loop(dio);
2575 kthread_unuse_mm(mm);
2578 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2580 struct dio_write *dio = container_of(op, struct dio_write, op);
2582 bch2_dio_write_end(dio);
2584 if (likely(!dio->iter.count) || dio->op.error)
2585 bch2_dio_write_done(dio);
2587 bch2_dio_write_continue(dio);
2591 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2593 struct file *file = req->ki_filp;
2594 struct address_space *mapping = file->f_mapping;
2595 struct bch_inode_info *inode = file_bch_inode(file);
2596 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2597 struct dio_write *dio;
2599 bool locked = true, extending;
2603 prefetch((void *) &c->opts + 64);
2604 prefetch(&inode->ei_inode);
2605 prefetch((void *) &inode->ei_inode + 64);
2607 inode_lock(&inode->v);
2609 ret = generic_write_checks(req, iter);
2610 if (unlikely(ret <= 0))
2613 ret = file_remove_privs(file);
2617 ret = file_update_time(file);
2621 if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2624 inode_dio_begin(&inode->v);
2625 bch2_pagecache_block_get(inode);
2627 extending = req->ki_pos + iter->count > inode->v.i_size;
2629 inode_unlock(&inode->v);
2633 bio = bio_alloc_bioset(NULL,
2634 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2637 &c->dio_write_bioset);
2638 dio = container_of(bio, struct dio_write, op.wbio.bio);
2640 dio->mapping = mapping;
2642 dio->mm = current->mm;
2644 dio->extending = extending;
2645 dio->sync = is_sync_kiocb(req) || extending;
2646 dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
2647 dio->free_iov = false;
2648 dio->quota_res.sectors = 0;
2653 if (unlikely(mapping->nrpages)) {
2654 ret = write_invalidate_inode_pages_range(mapping,
2655 req->ki_pos,
2656 req->ki_pos + iter->count - 1);
2661 ret = bch2_dio_write_loop(dio);
2664 inode_unlock(&inode->v);
2667 bch2_pagecache_block_put(inode);
2669 inode_dio_end(&inode->v);
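/*
 * Illustrative userspace sketch (not part of the original source): the
 * block-alignment check in bch2_direct_write() means O_DIRECT writes must
 * have both file offset and length aligned to the filesystem block size,
 * and the buffer itself should satisfy the usual O_DIRECT alignment
 * rules.  The path and the 4096-byte block size below are assumptions for
 * the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/file", O_WRONLY|O_CREAT|O_DIRECT, 0644);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);	// aligned buffer for O_DIRECT
 *	memset(buf, 0xaa, 4096);
 *	pwrite(fd, buf, 4096, 0);		// offset and length block aligned: ok
 *	pwrite(fd, buf, 512, 100);		// misaligned offset: fails with EINVAL
 *	close(fd);
 */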
2673 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2675 struct file *file = iocb->ki_filp;
2676 struct bch_inode_info *inode = file_bch_inode(file);
2679 if (iocb->ki_flags & IOCB_DIRECT) {
2680 ret = bch2_direct_write(iocb, from);
2684 /* We can write back this queue in page reclaim */
2685 current->backing_dev_info = inode_to_bdi(&inode->v);
2686 inode_lock(&inode->v);
2688 ret = generic_write_checks(iocb, from);
2692 ret = file_remove_privs(file);
2696 ret = file_update_time(file);
2700 ret = bch2_buffered_write(iocb, from);
2701 if (likely(ret > 0))
2702 iocb->ki_pos += ret;
2704 inode_unlock(&inode->v);
2705 current->backing_dev_info = NULL;
2708 ret = generic_write_sync(iocb, ret);
2710 return bch2_err_class(ret);
2716 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2717 * insert trigger: look up the btree inode instead
2719 static int bch2_flush_inode(struct bch_fs *c,
2720 struct bch_inode_info *inode)
2722 struct bch_inode_unpacked u;
2725 if (c->opts.journal_flush_disabled)
2728 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
2732 return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
2733 bch2_inode_flush_nocow_writes(c, inode);
2736 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2738 struct bch_inode_info *inode = file_bch_inode(file);
2739 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2740 int ret, ret2, ret3;
2742 ret = file_write_and_wait_range(file, start, end);
2743 ret2 = sync_inode_metadata(&inode->v, 1);
2744 ret3 = bch2_flush_inode(c, inode);
2746 return bch2_err_class(ret ?: ret2 ?: ret3);
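/*
 * Illustrative userspace sketch (not part of the original source): both
 * fsync() and fdatasync() land in bch2_fsync() above, which writes back
 * dirty pages, syncs in-memory inode metadata, and then flushes the
 * journal up to the inode's bi_journal_seq plus any pending nocow device
 * flushes.  The path below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/journal.log", O_WRONLY|O_APPEND);
 *
 *	write(fd, "record\n", 7);
 *	fdatasync(fd);		// durable once this returns 0
 *	close(fd);
 */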
2751 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2755 struct btree_trans trans;
2756 struct btree_iter iter;
2760 bch2_trans_init(&trans, c, 0, 0);
2762 bch2_trans_begin(&trans);
2764 ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2768 for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
2769 if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
2774 bch2_trans_iter_exit(&trans, &iter);
2776 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2779 bch2_trans_exit(&trans);
2783 static int __bch2_truncate_folio(struct bch_inode_info *inode,
2784 pgoff_t index, loff_t start, loff_t end)
2786 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2787 struct address_space *mapping = inode->v.i_mapping;
2788 struct bch_folio *s;
2789 unsigned start_offset = start & (PAGE_SIZE - 1);
2790 unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2792 struct folio *folio;
2793 s64 i_sectors_delta = 0;
2797 folio = filemap_lock_folio(mapping, index);
2800 * XXX: we're doing two index lookups when we end up reading the
2803 ret = range_has_data(c, inode->ei_subvol,
2804 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
2805 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
2809 folio = __filemap_get_folio(mapping, index,
2810 FGP_LOCK|FGP_CREAT, GFP_KERNEL);
2811 if (unlikely(!folio)) {
2817 BUG_ON(start >= folio_end_pos(folio));
2818 BUG_ON(end <= folio_pos(folio));
2820 start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
2821 end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
2823 /* Folio boundary? Nothing to do */
2824 if (start_offset == 0 &&
2825 end_offset == folio_size(folio)) {
2830 s = bch2_folio_create(folio, 0);
2836 if (!folio_test_uptodate(folio)) {
2837 ret = bch2_read_single_folio(folio, mapping);
2843 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
2848 for (i = round_up(start_offset, block_bytes(c)) >> 9;
2849 i < round_down(end_offset, block_bytes(c)) >> 9;
2850 i++) {
2851 s->s[i].nr_replicas = 0;
2853 i_sectors_delta -= s->s[i].state == SECTOR_dirty;
2854 folio_sector_set(folio, s, i, SECTOR_unallocated);
2857 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2860 * Caller needs to know whether this folio will be written out by
2861 * writeback - doing an i_size update if necessary - or whether it will
2862 * be responsible for the i_size update.
2864 * Note that we shouldn't ever see a folio beyond EOF, but check and
2865 * warn if so. This has been observed when folios weren't cleaned up after
2866 * a short write; there's still a chance reclaim will fix things up.
2869 WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
2870 end_pos = folio_end_pos(folio);
2871 if (inode->v.i_size > folio_pos(folio))
2872 end_pos = min_t(u64, inode->v.i_size, end_pos);
2873 ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
2875 folio_zero_segment(folio, start_offset, end_offset);
2878 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2880 * XXX: because we aren't currently tracking whether the folio has actual
2881 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2883 BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
2886 * This removes any writeable userspace mappings; we need to force
2887 * .page_mkwrite to be called again before any mmapped writes, to
2888 * redirty the full page:
2890 folio_mkclean(folio);
2891 filemap_dirty_folio(mapping, folio);
2893 folio_unlock(folio);
2899 static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
2901 return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
2902 from, ANYSINT_MAX(loff_t));
2905 static int bch2_truncate_folios(struct bch_inode_info *inode,
2906 loff_t start, loff_t end)
2908 int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
2912 start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2913 ret = __bch2_truncate_folio(inode,
2914 (end - 1) >> PAGE_SHIFT,
2919 static int bch2_extend(struct mnt_idmap *idmap,
2920 struct bch_inode_info *inode,
2921 struct bch_inode_unpacked *inode_u,
2922 struct iattr *iattr)
2924 struct address_space *mapping = inode->v.i_mapping;
2930 * this has to be done _before_ extending i_size:
2932 ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2936 truncate_setsize(&inode->v, iattr->ia_size);
2938 return bch2_setattr_nonsize(idmap, inode, iattr);
2941 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2942 struct bch_inode_unpacked *bi,
2945 bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2949 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2950 struct bch_inode_unpacked *bi, void *p)
2952 u64 *new_i_size = p;
2954 bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2955 bi->bi_size = *new_i_size;
2959 int bch2_truncate(struct mnt_idmap *idmap,
2960 struct bch_inode_info *inode, struct iattr *iattr)
2962 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2963 struct address_space *mapping = inode->v.i_mapping;
2964 struct bch_inode_unpacked inode_u;
2965 u64 new_i_size = iattr->ia_size;
2966 s64 i_sectors_delta = 0;
2970 * If the truncate call will change the size of the file, the
2971 * cmtimes should be updated. If the size will not change, we
2972 * do not need to update the cmtimes.
2974 if (iattr->ia_size != inode->v.i_size) {
2975 if (!(iattr->ia_valid & ATTR_MTIME))
2976 ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2977 if (!(iattr->ia_valid & ATTR_CTIME))
2978 ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2979 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2982 inode_dio_wait(&inode->v);
2983 bch2_pagecache_block_get(inode);
2985 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2990 * check this before next assertion; on filesystem error our normal
2991 * invariants are a bit broken (truncate has to truncate the page cache
2992 * before the inode).
2994 ret = bch2_journal_error(&c->journal);
2998 WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2999 inode->v.i_size < inode_u.bi_size,
3000 "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
3001 (u64) inode->v.i_size, inode_u.bi_size);
3003 if (iattr->ia_size > inode->v.i_size) {
3004 ret = bch2_extend(idmap, inode, &inode_u, iattr);
3008 iattr->ia_valid &= ~ATTR_SIZE;
3010 ret = bch2_truncate_folio(inode, iattr->ia_size);
3011 if (unlikely(ret < 0))
3015 * When extending, we're going to write the new i_size to disk
3016 * immediately so we need to flush anything above the current on disk i_size.
3019 * Also, when extending we need to flush the page that i_size currently
3020 * straddles - if it's mapped to userspace, we need to ensure that
3021 * userspace redirties it and calls .mkwrite -> set_page_dirty
3022 * again to allocate the part of the page that was extended.
3024 if (iattr->ia_size > inode_u.bi_size)
3025 ret = filemap_write_and_wait_range(mapping,
3026 inode_u.bi_size,
3027 iattr->ia_size - 1);
3028 else if (iattr->ia_size & (PAGE_SIZE - 1))
3029 ret = filemap_write_and_wait_range(mapping,
3030 round_down(iattr->ia_size, PAGE_SIZE),
3031 iattr->ia_size - 1);
3035 mutex_lock(&inode->ei_update_lock);
3036 ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
3038 mutex_unlock(&inode->ei_update_lock);
3043 truncate_setsize(&inode->v, iattr->ia_size);
3045 ret = bch2_fpunch(c, inode_inum(inode),
3046 round_up(iattr->ia_size, block_bytes(c)) >> 9,
3047 U64_MAX, &i_sectors_delta);
3048 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3050 bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
3051 !bch2_journal_error(&c->journal), c,
3052 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
3053 inode->v.i_ino, (u64) inode->v.i_blocks,
3054 inode->ei_inode.bi_sectors);
3058 mutex_lock(&inode->ei_update_lock);
3059 ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
3060 mutex_unlock(&inode->ei_update_lock);
3062 ret = bch2_setattr_nonsize(idmap, inode, iattr);
3064 bch2_pagecache_block_put(inode);
3065 return bch2_err_class(ret);
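/*
 * Illustrative userspace sketch (not part of the original source):
 * shrinking truncates go through bch2_truncate_folio() + bch2_fpunch()
 * above, while growing ones only extend i_size via bch2_extend() and
 * leave an unallocated hole.  The path below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/file", O_RDWR);
 *
 *	ftruncate(fd, 1 << 20);	// extend: no blocks allocated, reads back zeroes
 *	ftruncate(fd, 4096);	// shrink: extents beyond 4096 bytes are dropped
 *	close(fd);
 */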
3070 static int inode_update_times_fn(struct bch_inode_info *inode,
3071 struct bch_inode_unpacked *bi, void *p)
3073 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3075 bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
3079 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
3081 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3082 u64 end = offset + len;
3083 u64 block_start = round_up(offset, block_bytes(c));
3084 u64 block_end = round_down(end, block_bytes(c));
3085 bool truncated_last_page;
3088 ret = bch2_truncate_folios(inode, offset, end);
3089 if (unlikely(ret < 0))
3092 truncated_last_page = ret;
3094 truncate_pagecache_range(&inode->v, offset, end - 1);
3096 if (block_start < block_end) {
3097 s64 i_sectors_delta = 0;
3099 ret = bch2_fpunch(c, inode_inum(inode),
3100 block_start >> 9, block_end >> 9,
3102 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3105 mutex_lock(&inode->ei_update_lock);
3106 if (end >= inode->v.i_size && !truncated_last_page) {
3107 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
3108 ATTR_MTIME|ATTR_CTIME);
3110 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3111 ATTR_MTIME|ATTR_CTIME);
3113 mutex_unlock(&inode->ei_update_lock);
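/*
 * Illustrative userspace sketch (not part of the original source): hole
 * punching is only accepted together with FALLOC_FL_KEEP_SIZE (see
 * bch2_fallocate_dispatch() below); partial folios at either end are
 * zeroed via bch2_truncate_folios() while the block-aligned middle is
 * punched out of the extents btree.  The path is hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/file", O_RDWR);
 *
 *	// free 64KiB starting at offset 4KiB; i_size is unchanged
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, 4096, 65536);
 *	close(fd);
 */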
3118 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
3119 loff_t offset, loff_t len,
3122 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3123 struct address_space *mapping = inode->v.i_mapping;
3124 struct bkey_buf copy;
3125 struct btree_trans trans;
3126 struct btree_iter src, dst, del;
3127 loff_t shift, new_size;
3131 if ((offset | len) & (block_bytes(c) - 1))
3135 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
3138 if (offset >= inode->v.i_size)
3141 src_start = U64_MAX;
3144 if (offset + len >= inode->v.i_size)
3147 src_start = offset + len;
3151 new_size = inode->v.i_size + shift;
3153 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
3158 i_size_write(&inode->v, new_size);
3159 mutex_lock(&inode->ei_update_lock);
3160 ret = bch2_write_inode_size(c, inode, new_size,
3161 ATTR_MTIME|ATTR_CTIME);
3162 mutex_unlock(&inode->ei_update_lock);
3164 s64 i_sectors_delta = 0;
3166 ret = bch2_fpunch(c, inode_inum(inode),
3167 offset >> 9, (offset + len) >> 9,
3169 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3175 bch2_bkey_buf_init(&copy);
3176 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
3177 bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
3178 POS(inode->v.i_ino, src_start >> 9),
3180 bch2_trans_copy_iter(&dst, &src);
3181 bch2_trans_copy_iter(&del, &src);
3184 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
3185 struct disk_reservation disk_res =
3186 bch2_disk_reservation_init(c, 0);
3187 struct bkey_i delete;
3189 struct bpos next_pos;
3190 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
3191 struct bpos atomic_end;
3192 unsigned trigger_flags = 0;
3195 bch2_trans_begin(&trans);
3197 ret = bch2_subvolume_get_snapshot(&trans,
3198 inode->ei_subvol, &snapshot);
3202 bch2_btree_iter_set_snapshot(&src, snapshot);
3203 bch2_btree_iter_set_snapshot(&dst, snapshot);
3204 bch2_btree_iter_set_snapshot(&del, snapshot);
3206 bch2_trans_begin(&trans);
3208 k = insert
3209 ? bch2_btree_iter_peek_prev(&src)
3210 : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
3211 if ((ret = bkey_err(k)))
3214 if (!k.k || k.k->p.inode != inode->v.i_ino)
3218 bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
3221 bch2_bkey_buf_reassemble(&copy, c, k);
3224 bkey_lt(bkey_start_pos(k.k), move_pos))
3225 bch2_cut_front(move_pos, copy.k);
3227 copy.k->k.p.offset += shift >> 9;
3228 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
3230 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
3234 if (!bkey_eq(atomic_end, copy.k->k.p)) {
3236 move_pos = atomic_end;
3237 move_pos.offset -= shift >> 9;
3240 bch2_cut_back(atomic_end, copy.k);
3244 bkey_init(&delete.k);
3245 delete.k.p = copy.k->k.p;
3246 delete.k.size = copy.k->k.size;
3247 delete.k.p.offset -= shift >> 9;
3248 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
3250 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
3252 if (copy.k->k.size != k.k->size) {
3253 /* We might end up splitting compressed extents: */
3254 unsigned nr_ptrs =
3255 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
3257 ret = bch2_disk_reservation_get(c, &disk_res,
3258 copy.k->k.size, nr_ptrs,
3259 BCH_DISK_RESERVATION_NOFAIL);
3263 ret = bch2_btree_iter_traverse(&del) ?:
3264 bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
3265 bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
3266 bch2_trans_commit(&trans, &disk_res, NULL,
3267 BTREE_INSERT_NOFAIL);
3268 bch2_disk_reservation_put(c, &disk_res);
3271 bch2_btree_iter_set_pos(&src, next_pos);
3273 bch2_trans_iter_exit(&trans, &del);
3274 bch2_trans_iter_exit(&trans, &dst);
3275 bch2_trans_iter_exit(&trans, &src);
3276 bch2_trans_exit(&trans);
3277 bch2_bkey_buf_exit(&copy, c);
3282 mutex_lock(&inode->ei_update_lock);
3284 i_size_write(&inode->v, new_size);
3285 ret = bch2_write_inode_size(c, inode, new_size,
3286 ATTR_MTIME|ATTR_CTIME);
3288 /* We need an inode update to update bi_journal_seq for fsync: */
3289 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3290 ATTR_MTIME|ATTR_CTIME);
3292 mutex_unlock(&inode->ei_update_lock);
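/*
 * Illustrative userspace sketch (not part of the original source): both
 * collapse and insert require offset and len to be block aligned (the
 * check at the top of this function) and shift every extent after the
 * affected range by -len/+len, adjusting i_size to match.  The path and
 * sizes are hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/stream.ts", O_RDWR);
 *
 *	// cut 1MiB out of the middle, shifting the tail down and shrinking the file
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 1 << 20, 1 << 20);
 *	// open up 1MiB of new space at the same offset, shifting the tail up
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, 1 << 20, 1 << 20);
 *	close(fd);
 */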
3296 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
3297 u64 start_sector, u64 end_sector)
3299 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3300 struct btree_trans trans;
3301 struct btree_iter iter;
3302 struct bpos end_pos = POS(inode->v.i_ino, end_sector);
3303 struct bch_io_opts opts;
3306 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
3307 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
3309 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3310 POS(inode->v.i_ino, start_sector),
3311 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
3313 while (!ret && bkey_lt(iter.pos, end_pos)) {
3314 s64 i_sectors_delta = 0;
3315 struct quota_res quota_res = { 0 };
3320 bch2_trans_begin(&trans);
3322 ret = bch2_subvolume_get_snapshot(&trans,
3323 inode->ei_subvol, &snapshot);
3327 bch2_btree_iter_set_snapshot(&iter, snapshot);
3329 k = bch2_btree_iter_peek_slot(&iter);
3330 if ((ret = bkey_err(k)))
3333 /* already reserved */
3334 if (bkey_extent_is_reservation(k) &&
3335 bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
3336 bch2_btree_iter_advance(&iter);
3340 if (bkey_extent_is_data(k.k) &&
3341 !(mode & FALLOC_FL_ZERO_RANGE)) {
3342 bch2_btree_iter_advance(&iter);
3347 * XXX: for nocow mode, we should promote shared extents to
3351 sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
3353 if (!bkey_extent_is_allocation(k.k)) {
3354 ret = bch2_quota_reservation_add(c, inode,
3361 ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
3362 sectors, opts, &i_sectors_delta,
3363 writepoint_hashed((unsigned long) current));
3367 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3369 bch2_quota_reservation_put(c, inode, &quota_res);
3370 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3374 bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3375 mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3377 if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
3378 struct quota_res quota_res = { 0 };
3379 s64 i_sectors_delta = 0;
3381 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3382 end_sector, &i_sectors_delta);
3383 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3384 bch2_quota_reservation_put(c, inode, &quota_res);
3387 bch2_trans_iter_exit(&trans, &iter);
3388 bch2_trans_exit(&trans);
3392 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3393 loff_t offset, loff_t len)
3395 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3396 u64 end = offset + len;
3397 u64 block_start = round_down(offset, block_bytes(c));
3398 u64 block_end = round_up(end, block_bytes(c));
3399 bool truncated_last_page = false;
3402 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3403 ret = inode_newsize_ok(&inode->v, end);
3408 if (mode & FALLOC_FL_ZERO_RANGE) {
3409 ret = bch2_truncate_folios(inode, offset, end);
3410 if (unlikely(ret < 0))
3413 truncated_last_page = ret;
3415 truncate_pagecache_range(&inode->v, offset, end - 1);
3417 block_start = round_up(offset, block_bytes(c));
3418 block_end = round_down(end, block_bytes(c));
3421 ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3424 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3425 * so that the VFS cache i_size is consistent with the btree i_size:
3428 !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
3431 if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3432 end = inode->v.i_size;
3434 if (end >= inode->v.i_size &&
3435 (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3436 !(mode & FALLOC_FL_KEEP_SIZE))) {
3437 spin_lock(&inode->v.i_lock);
3438 i_size_write(&inode->v, end);
3439 spin_unlock(&inode->v.i_lock);
3441 mutex_lock(&inode->ei_update_lock);
3442 ret2 = bch2_write_inode_size(c, inode, end, 0);
3443 mutex_unlock(&inode->ei_update_lock);
3449 long bch2_fallocate_dispatch(struct file *file, int mode,
3450 loff_t offset, loff_t len)
3452 struct bch_inode_info *inode = file_bch_inode(file);
3453 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3456 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
3459 inode_lock(&inode->v);
3460 inode_dio_wait(&inode->v);
3461 bch2_pagecache_block_get(inode);
3463 ret = file_modified(file);
3467 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3468 ret = bchfs_fallocate(inode, mode, offset, len);
3469 else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3470 ret = bchfs_fpunch(inode, offset, len);
3471 else if (mode == FALLOC_FL_INSERT_RANGE)
3472 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3473 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3474 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3478 bch2_pagecache_block_put(inode);
3479 inode_unlock(&inode->v);
3480 bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
3482 return bch2_err_class(ret);
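/*
 * Illustrative userspace sketch (not part of the original source): plain
 * preallocation (mode 0, optionally with FALLOC_FL_KEEP_SIZE) takes the
 * bchfs_fallocate() path above and writes reservation extents, so later
 * writes into the range have space reserved up front.  The path is
 * hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/db.sqlite", O_RDWR|O_CREAT, 0644);
 *
 *	fallocate(fd, 0, 0, 1 << 30);			// reserve 1GiB, extends i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 30, 1 << 30); // reserve without extending
 *	close(fd);
 */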
3486 * Take a quota reservation for unallocated blocks in a given file range
3487 * Does not check pagecache
3489 static int quota_reserve_range(struct bch_inode_info *inode,
3490 struct quota_res *res,
3493 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3494 struct btree_trans trans;
3495 struct btree_iter iter;
3498 u64 sectors = end - start;
3502 bch2_trans_init(&trans, c, 0, 0);
3504 bch2_trans_begin(&trans);
3506 ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
3510 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3511 SPOS(inode->v.i_ino, pos, snapshot), 0);
3513 while (!(ret = btree_trans_too_many_iters(&trans)) &&
3514 (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
3515 !(ret = bkey_err(k))) {
3516 if (bkey_extent_is_allocation(k.k)) {
3517 u64 s = min(end, k.k->p.offset) -
3518 max(start, bkey_start_offset(k.k));
3519 BUG_ON(s > sectors);
3522 bch2_btree_iter_advance(&iter);
3524 pos = iter.pos.offset;
3525 bch2_trans_iter_exit(&trans, &iter);
3527 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3530 bch2_trans_exit(&trans);
3535 return bch2_quota_reservation_add(c, inode, res, sectors, true);
3538 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3539 struct file *file_dst, loff_t pos_dst,
3540 loff_t len, unsigned remap_flags)
3542 struct bch_inode_info *src = file_bch_inode(file_src);
3543 struct bch_inode_info *dst = file_bch_inode(file_dst);
3544 struct bch_fs *c = src->v.i_sb->s_fs_info;
3545 struct quota_res quota_res = { 0 };
3546 s64 i_sectors_delta = 0;
3550 if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3553 if (remap_flags & REMAP_FILE_DEDUP)
3556 if ((pos_src & (block_bytes(c) - 1)) ||
3557 (pos_dst & (block_bytes(c) - 1)))
3561 abs(pos_src - pos_dst) < len)
3564 bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3566 inode_dio_wait(&src->v);
3567 inode_dio_wait(&dst->v);
3569 ret = generic_remap_file_range_prep(file_src, pos_src,
3572 if (ret < 0 || len == 0)
3575 aligned_len = round_up((u64) len, block_bytes(c));
3577 ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3578 pos_dst, pos_dst + len - 1);
3582 ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
3583 (pos_dst + aligned_len) >> 9);
3587 file_update_time(file_dst);
3589 mark_pagecache_unallocated(src, pos_src >> 9,
3590 (pos_src + aligned_len) >> 9);
3592 ret = bch2_remap_range(c,
3593 inode_inum(dst), pos_dst >> 9,
3594 inode_inum(src), pos_src >> 9,
3596 pos_dst + len, &i_sectors_delta);
3601 * due to alignment, we might have remapped slightly more than requested
3603 ret = min((u64) ret << 9, (u64) len);
3605 i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
3607 spin_lock(&dst->v.i_lock);
3608 if (pos_dst + ret > dst->v.i_size)
3609 i_size_write(&dst->v, pos_dst + ret);
3610 spin_unlock(&dst->v.i_lock);
3612 if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3613 IS_SYNC(file_inode(file_dst)))
3614 ret = bch2_flush_inode(c, dst);
3616 bch2_quota_reservation_put(c, dst, &quota_res);
3617 bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3619 return bch2_err_class(ret);
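/*
 * Illustrative userspace sketch (not part of the original source):
 * bch2_remap_file_range() is what the FICLONE/FICLONERANGE ioctls (and,
 * via the VFS, same-filesystem copy_file_range()) end up calling, and it
 * requires block-aligned source/destination offsets.  The paths are
 * hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <unistd.h>
 *
 *	int src = open("/mnt/bcachefs/base.img", O_RDONLY);
 *	int dst = open("/mnt/bcachefs/clone.img", O_RDWR|O_CREAT, 0644);
 *	struct file_clone_range fcr = {
 *		.src_fd		= src,
 *		.src_offset	= 0,
 *		.src_length	= 0,	// 0 means "clone to EOF"
 *		.dest_offset	= 0,
 *	};
 *
 *	ioctl(dst, FICLONERANGE, &fcr);	// shares extents, no data is copied
 *	close(src);
 *	close(dst);
 */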
3624 static int folio_data_offset(struct folio *folio, loff_t pos)
3626 struct bch_folio *s = bch2_folio(folio);
3627 unsigned i, sectors = folio_sectors(folio);
3630 for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
3631 if (s->s[i].state >= SECTOR_dirty)
3632 return i << SECTOR_SHIFT;
3637 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3638 loff_t start_offset,
3641 struct folio_batch fbatch;
3642 pgoff_t start_index = start_offset >> PAGE_SHIFT;
3643 pgoff_t end_index = end_offset >> PAGE_SHIFT;
3644 pgoff_t index = start_index;
3649 folio_batch_init(&fbatch);
3651 while (filemap_get_folios(vinode->i_mapping,
3652 &index, end_index, &fbatch)) {
3653 for (i = 0; i < folio_batch_count(&fbatch); i++) {
3654 struct folio *folio = fbatch.folios[i];
3657 offset = folio_data_offset(folio,
3658 max(folio_pos(folio), start_offset));
3660 ret = clamp(folio_pos(folio) + offset,
3661 start_offset, end_offset);
3662 folio_unlock(folio);
3663 folio_batch_release(&fbatch);
3666 folio_unlock(folio);
3668 folio_batch_release(&fbatch);
3675 static loff_t bch2_seek_data(struct file *file, u64 offset)
3677 struct bch_inode_info *inode = file_bch_inode(file);
3678 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3679 struct btree_trans trans;
3680 struct btree_iter iter;
3682 subvol_inum inum = inode_inum(inode);
3683 u64 isize, next_data = MAX_LFS_FILESIZE;
3687 isize = i_size_read(&inode->v);
3688 if (offset >= isize)
3691 bch2_trans_init(&trans, c, 0, 0);
3693 bch2_trans_begin(&trans);
3695 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3699 for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
3700 SPOS(inode->v.i_ino, offset >> 9, snapshot),
3701 POS(inode->v.i_ino, U64_MAX),
3703 if (bkey_extent_is_data(k.k)) {
3704 next_data = max(offset, bkey_start_offset(k.k) << 9);
3706 } else if (k.k->p.offset >> 9 > isize)
3709 bch2_trans_iter_exit(&trans, &iter);
3711 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3714 bch2_trans_exit(&trans);
3718 if (next_data > offset)
3719 next_data = bch2_seek_pagecache_data(&inode->v,
3722 if (next_data >= isize)
3725 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3728 static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
3730 struct folio *folio;
3731 struct bch_folio *s;
3732 unsigned i, sectors;
3735 folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
3739 s = bch2_folio(folio);
3743 sectors = folio_sectors(folio);
3744 for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
3745 if (s->s[i].state < SECTOR_dirty) {
3746 *offset = max(*offset,
3747 folio_pos(folio) + (i << SECTOR_SHIFT));
3751 *offset = folio_end_pos(folio);
3754 folio_unlock(folio);
3758 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3759 loff_t start_offset,
3762 struct address_space *mapping = vinode->i_mapping;
3763 loff_t offset = start_offset;
3765 while (offset < end_offset &&
3766 !folio_hole_offset(mapping, &offset))
3769 return min(offset, end_offset);
3772 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3774 struct bch_inode_info *inode = file_bch_inode(file);
3775 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3776 struct btree_trans trans;
3777 struct btree_iter iter;
3779 subvol_inum inum = inode_inum(inode);
3780 u64 isize, next_hole = MAX_LFS_FILESIZE;
3784 isize = i_size_read(&inode->v);
3785 if (offset >= isize)
3788 bch2_trans_init(&trans, c, 0, 0);
3790 bch2_trans_begin(&trans);
3792 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3796 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3797 SPOS(inode->v.i_ino, offset >> 9, snapshot),
3798 BTREE_ITER_SLOTS, k, ret) {
3799 if (k.k->p.inode != inode->v.i_ino) {
3800 next_hole = bch2_seek_pagecache_hole(&inode->v,
3801 offset, MAX_LFS_FILESIZE);
3803 } else if (!bkey_extent_is_data(k.k)) {
3804 next_hole = bch2_seek_pagecache_hole(&inode->v,
3805 max(offset, bkey_start_offset(k.k) << 9),
3806 k.k->p.offset << 9);
3808 if (next_hole < k.k->p.offset << 9)
3811 offset = max(offset, bkey_start_offset(k.k) << 9);
3814 bch2_trans_iter_exit(&trans, &iter);
3816 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3819 bch2_trans_exit(&trans);
3823 if (next_hole > isize)
3826 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3829 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3837 ret = generic_file_llseek(file, offset, whence);
3840 ret = bch2_seek_data(file, offset);
3843 ret = bch2_seek_hole(file, offset);
3850 return bch2_err_class(ret);
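/*
 * Illustrative userspace sketch (not part of the original source):
 * SEEK_DATA/SEEK_HOLE consult both the extents btree and the page cache
 * (bch2_seek_pagecache_data()/_hole() above), so dirty data that hasn't
 * been written back yet is still reported as data.  The path is
 * hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/bcachefs/sparse", O_RDONLY);
 *	off_t off = 0, end;
 *
 *	while ((off = lseek(fd, off, SEEK_DATA)) >= 0) {
 *		end = lseek(fd, off, SEEK_HOLE);
 *		printf("data: %lld..%lld\n", (long long) off, (long long) end);
 *		off = end;
 *	}
 *	close(fd);
 */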
3853 void bch2_fs_fsio_exit(struct bch_fs *c)
3855 bioset_exit(&c->nocow_flush_bioset);
3856 bioset_exit(&c->dio_write_bioset);
3857 bioset_exit(&c->dio_read_bioset);
3858 bioset_exit(&c->writepage_bioset);
3861 int bch2_fs_fsio_init(struct bch_fs *c)
3865 pr_verbose_init(c->opts, "");
3867 if (bioset_init(&c->writepage_bioset,
3868 4, offsetof(struct bch_writepage_io, op.wbio.bio),
3870 return -BCH_ERR_ENOMEM_writepage_bioset_init;
3872 if (bioset_init(&c->dio_read_bioset,
3873 4, offsetof(struct dio_read, rbio.bio),
3875 return -BCH_ERR_ENOMEM_dio_read_bioset_init;
3877 if (bioset_init(&c->dio_write_bioset,
3878 4, offsetof(struct dio_write, op.wbio.bio),
3880 return -BCH_ERR_ENOMEM_dio_write_bioset_init;
3882 if (bioset_init(&c->nocow_flush_bioset,
3883 1, offsetof(struct nocow_flush, bio), 0))
3884 return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
3886 pr_verbose_init(c->opts, "ret %i", ret);
3890 #endif /* NO_BCACHEFS_FS */