1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_foreground.h"
7 #include "btree_update.h"
12 #include "extent_update.h"
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
38 static inline loff_t folio_end_pos(struct folio *folio)
40 return folio_pos(folio) + folio_size(folio);
43 static inline size_t folio_sectors(struct folio *folio)
45 return PAGE_SECTORS << folio_order(folio);
48 static inline loff_t folio_sector(struct folio *folio)
50 return folio_pos(folio) >> 9;
53 static inline loff_t folio_end_sector(struct folio *folio)
55 return folio_end_pos(folio) >> 9;
58 typedef DARRAY(struct folio *) folios;
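/*
 * Grab a contiguous run of folios covering [start, end) from the pagecache,
 * pushing them onto @folios. FGP_CREAT is dropped once we're more than 1MB
 * past @start, so new folios are only created near the beginning of the
 * range, and we stop at the first gap. Returns 0 if at least one folio was
 * obtained.
 */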
60 static int filemap_get_contig_folios_d(struct address_space *mapping,
61 loff_t start, loff_t end,
62 int fgp_flags, gfp_t gfp,
70 if ((u64) pos >= (u64) start + (1ULL << 20))
71 fgp_flags &= ~FGP_CREAT;
73 ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
77 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
81 BUG_ON(folios->nr && folio_pos(f) != pos);
83 pos = folio_end_pos(f);
84 darray_push(folios, f);
87 if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
90 return folios->nr ? 0 : ret;
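/*
 * Nocow writes update existing extents in place, so making them durable
 * means flushing the volatile caches of the devices that were written to.
 * Those devices are tracked in inode->ei_devs_need_flush; the helpers below
 * issue a flush bio to each of them and, in the synchronous variant, wait on
 * a closure for completion. fsync uses this via bch2_flush_inode().
 */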
99 static void nocow_flush_endio(struct bio *_bio)
102 struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
104 closure_put(bio->cl);
105 percpu_ref_put(&bio->ca->io_ref);
109 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
110 struct bch_inode_info *inode,
113 struct nocow_flush *bio;
115 struct bch_devs_mask devs;
118 dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
119 if (dev == BCH_SB_MEMBERS_MAX)
122 devs = inode->ei_devs_need_flush;
123 memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
125 for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
127 ca = rcu_dereference(c->devs[dev]);
128 if (ca && !percpu_ref_tryget(&ca->io_ref))
135 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
138 &c->nocow_flush_bioset),
139 struct nocow_flush, bio);
142 bio->bio.bi_end_io = nocow_flush_endio;
143 closure_bio_submit(&bio->bio, cl);
147 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
148 struct bch_inode_info *inode)
152 closure_init_stack(&cl);
153 bch2_inode_flush_nocow_writes_async(c, inode, &cl);
159 static inline bool bio_full(struct bio *bio, unsigned len)
161 if (bio->bi_vcnt >= bio->bi_max_vecs)
163 if (bio->bi_iter.bi_size > UINT_MAX - len)
168 static inline struct address_space *faults_disabled_mapping(void)
170 return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
173 static inline void set_fdm_dropped_locks(void)
175 current->faults_disabled_mapping =
176 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
179 static inline bool fdm_dropped_locks(void)
181 return ((unsigned long) current->faults_disabled_mapping) & 1;
188 struct bch_writepage_io {
189 struct bch_inode_info *inode;
192 struct bch_write_op op;
197 struct address_space *mapping;
198 struct bch_inode_info *inode;
199 struct mm_struct *mm;
205 struct quota_res quota_res;
208 struct iov_iter iter;
209 struct iovec inline_vecs[2];
212 struct bch_write_op op;
220 struct bch_read_bio rbio;
223 /* pagecache_block must be held */
224 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
225 loff_t start, loff_t end)
230 * XXX: the way this is currently implemented, we can spin if a process
231 * is continually redirtying a specific page
234 if (!mapping->nrpages)
237 ret = filemap_write_and_wait_range(mapping, start, end);
241 if (!mapping->nrpages)
244 ret = invalidate_inode_pages2_range(mapping,
247 } while (ret == -EBUSY);
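/*
 * Quota reservations: space is charged to the inode's quota up front with
 * KEY_TYPE_QUOTA_PREALLOC (optionally failing on ENOSPC), tracked in
 * ei_quota_reserved, and either consumed later by i_sectors_acct() when the
 * sectors actually become dirty/allocated or returned via the _put()
 * helpers. When CONFIG_BCACHEFS_QUOTA is disabled these compile to no-ops.
 */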
254 #ifdef CONFIG_BCACHEFS_QUOTA
256 static void __bch2_quota_reservation_put(struct bch_fs *c,
257 struct bch_inode_info *inode,
258 struct quota_res *res)
260 BUG_ON(res->sectors > inode->ei_quota_reserved);
262 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
263 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
264 inode->ei_quota_reserved -= res->sectors;
268 static void bch2_quota_reservation_put(struct bch_fs *c,
269 struct bch_inode_info *inode,
270 struct quota_res *res)
273 mutex_lock(&inode->ei_quota_lock);
274 __bch2_quota_reservation_put(c, inode, res);
275 mutex_unlock(&inode->ei_quota_lock);
279 static int bch2_quota_reservation_add(struct bch_fs *c,
280 struct bch_inode_info *inode,
281 struct quota_res *res,
287 mutex_lock(&inode->ei_quota_lock);
288 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
289 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
291 inode->ei_quota_reserved += sectors;
292 res->sectors += sectors;
294 mutex_unlock(&inode->ei_quota_lock);
301 static void __bch2_quota_reservation_put(struct bch_fs *c,
302 struct bch_inode_info *inode,
303 struct quota_res *res) {}
305 static void bch2_quota_reservation_put(struct bch_fs *c,
306 struct bch_inode_info *inode,
307 struct quota_res *res) {}
309 static int bch2_quota_reservation_add(struct bch_fs *c,
310 struct bch_inode_info *inode,
311 struct quota_res *res,
320 /* i_size updates: */
322 struct inode_new_size {
328 static int inode_set_size(struct bch_inode_info *inode,
329 struct bch_inode_unpacked *bi,
332 struct inode_new_size *s = p;
334 bi->bi_size = s->new_size;
335 if (s->fields & ATTR_ATIME)
336 bi->bi_atime = s->now;
337 if (s->fields & ATTR_MTIME)
338 bi->bi_mtime = s->now;
339 if (s->fields & ATTR_CTIME)
340 bi->bi_ctime = s->now;
345 int __must_check bch2_write_inode_size(struct bch_fs *c,
346 struct bch_inode_info *inode,
347 loff_t new_size, unsigned fields)
349 struct inode_new_size s = {
350 .new_size = new_size,
351 .now = bch2_current_time(c),
355 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
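/*
 * i_sectors accounting: keeps the VFS inode's i_blocks in sync with how many
 * sectors we've dirtied or allocated. If a quota reservation is supplied it
 * is consumed here (transferred from "reserved" to actual usage); otherwise
 * the delta is charged directly with KEY_TYPE_QUOTA_WARN.
 */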
358 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
359 struct quota_res *quota_res, s64 sectors)
361 bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
362 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
363 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
364 inode->ei_inode.bi_sectors);
365 inode->v.i_blocks += sectors;
367 #ifdef CONFIG_BCACHEFS_QUOTA
368 if (quota_res && sectors > 0) {
369 BUG_ON(sectors > quota_res->sectors);
370 BUG_ON(sectors > inode->ei_quota_reserved);
372 quota_res->sectors -= sectors;
373 inode->ei_quota_reserved -= sectors;
375 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
380 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
381 struct quota_res *quota_res, s64 sectors)
384 mutex_lock(&inode->ei_quota_lock);
385 __i_sectors_acct(c, inode, quota_res, sectors);
386 mutex_unlock(&inode->ei_quota_lock);
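/*
 * Per-folio state, hung off folio->private as a struct bch_folio: for each
 * sector we track how many replicas are known to be allocated on disk, how
 * much disk reservation the folio owns, and a small state machine
 * (unallocated / reserved / dirty / dirty_reserved / allocated) driven by
 * the folio_sector_dirty()/undirty()/reserve() helpers below. This is what
 * lets buffered writes take accurate disk and quota reservations per sector.
 */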
392 /* stored in page->private: */
394 #define BCH_FOLIO_SECTOR_STATE() \
401 enum bch_folio_sector_state {
402 #define x(n) SECTOR_##n,
403 BCH_FOLIO_SECTOR_STATE()
407 const char * const bch2_folio_sector_states[] = {
409 BCH_FOLIO_SECTOR_STATE()
414 static inline enum bch_folio_sector_state
415 folio_sector_dirty(enum bch_folio_sector_state state)
418 case SECTOR_unallocated:
420 case SECTOR_reserved:
421 return SECTOR_dirty_reserved;
427 static inline enum bch_folio_sector_state
428 folio_sector_undirty(enum bch_folio_sector_state state)
432 return SECTOR_unallocated;
433 case SECTOR_dirty_reserved:
434 return SECTOR_reserved;
440 static inline enum bch_folio_sector_state
441 folio_sector_reserve(enum bch_folio_sector_state state)
444 case SECTOR_unallocated:
445 return SECTOR_reserved;
447 return SECTOR_dirty_reserved;
453 struct bch_folio_sector {
454 /* Uncompressed, fully allocated replicas (or on disk reservation): */
455 unsigned nr_replicas:4;
457	/* Owns PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
458 unsigned replicas_reserved:4;
461 enum bch_folio_sector_state state:8;
466 atomic_t write_count;
468 * Is the sector state up to date with the btree?
469 * (Not the data itself)
472 struct bch_folio_sector s[];
475 static inline void folio_sector_set(struct folio *folio,
477 unsigned i, unsigned n)
482 static inline struct bch_folio *__bch2_folio(struct folio *folio)
484 return folio_has_private(folio)
485 ? (struct bch_folio *) folio_get_private(folio)
489 static inline struct bch_folio *bch2_folio(struct folio *folio)
491 EBUG_ON(!folio_test_locked(folio));
493 return __bch2_folio(folio);
496 /* for newly allocated folios: */
497 static void __bch2_folio_release(struct folio *folio)
499 kfree(folio_detach_private(folio));
502 static void bch2_folio_release(struct folio *folio)
504 EBUG_ON(!folio_test_locked(folio));
505 __bch2_folio_release(folio);
508 /* for newly allocated folios: */
509 static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
513 s = kzalloc(sizeof(*s) +
514 sizeof(struct bch_folio_sector) *
515 folio_sectors(folio), GFP_NOFS|gfp);
519 spin_lock_init(&s->lock);
520 folio_attach_private(folio, s);
524 static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
526 return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
529 static unsigned bkey_to_sector_state(struct bkey_s_c k)
531 if (bkey_extent_is_reservation(k))
532 return SECTOR_reserved;
533 if (bkey_extent_is_allocation(k.k))
534 return SECTOR_allocated;
535 return SECTOR_unallocated;
538 static void __bch2_folio_set(struct folio *folio,
539 unsigned pg_offset, unsigned pg_len,
540 unsigned nr_ptrs, unsigned state)
542 struct bch_folio *s = bch2_folio_create(folio, __GFP_NOFAIL);
543 unsigned i, sectors = folio_sectors(folio);
545 BUG_ON(pg_offset >= sectors);
546 BUG_ON(pg_offset + pg_len > sectors);
550 for (i = pg_offset; i < pg_offset + pg_len; i++) {
551 s->s[i].nr_replicas = nr_ptrs;
552 folio_sector_set(folio, s, i, state);
558 spin_unlock(&s->lock);
562 * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the btree:
565 static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
566 struct folio **folios, unsigned nr_folios)
568 struct btree_trans trans;
569 struct btree_iter iter;
571 u64 offset = folio_sector(folios[0]);
572 unsigned folio_idx = 0;
576 bch2_trans_init(&trans, c, 0, 0);
578 bch2_trans_begin(&trans);
580 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
584 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
585 SPOS(inum.inum, offset, snapshot),
586 BTREE_ITER_SLOTS, k, ret) {
587 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
588 unsigned state = bkey_to_sector_state(k);
590 while (folio_idx < nr_folios) {
591 struct folio *folio = folios[folio_idx];
592 u64 folio_start = folio_sector(folio);
593 u64 folio_end = folio_end_sector(folio);
594 unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
595 unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
597 BUG_ON(k.k->p.offset < folio_start);
598 BUG_ON(bkey_start_offset(k.k) > folio_end);
600 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate)
601 __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
603 if (k.k->p.offset < folio_end)
608 if (folio_idx == nr_folios)
612 offset = iter.pos.offset;
613 bch2_trans_iter_exit(&trans, &iter);
615 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
617 bch2_trans_exit(&trans);
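/*
 * The helpers below keep that per-sector state in sync with the btree:
 * bch2_bio_page_state_set() initializes it from the extents a read bio
 * covers, mark_pagecache_unallocated() clears nr_replicas over a range when
 * the cached allocation info is no longer valid, and
 * mark_pagecache_reserved() moves sectors to the reserved states,
 * compensating i_sectors for dirty sectors now covered by an on-disk
 * reservation.
 */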
622 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
624 struct bvec_iter iter;
626 unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
627 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
628 unsigned state = bkey_to_sector_state(k);
630 bio_for_each_folio(fv, bio, iter)
631 __bch2_folio_set(fv.fv_folio,
637 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
640 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
641 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
642 struct folio_batch fbatch;
648 folio_batch_init(&fbatch);
650 while (filemap_get_folios(inode->v.i_mapping,
651 &index, end_index, &fbatch)) {
652 for (i = 0; i < folio_batch_count(&fbatch); i++) {
653 struct folio *folio = fbatch.folios[i];
654 u64 folio_start = folio_sector(folio);
655 u64 folio_end = folio_end_sector(folio);
656 unsigned folio_offset = max(start, folio_start) - folio_start;
657 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
660 BUG_ON(end <= folio_start);
663 s = bch2_folio(folio);
667 for (j = folio_offset; j < folio_offset + folio_len; j++)
668 s->s[j].nr_replicas = 0;
669 spin_unlock(&s->lock);
674 folio_batch_release(&fbatch);
679 static void mark_pagecache_reserved(struct bch_inode_info *inode,
682 struct bch_fs *c = inode->v.i_sb->s_fs_info;
683 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
684 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
685 struct folio_batch fbatch;
686 s64 i_sectors_delta = 0;
692 folio_batch_init(&fbatch);
694 while (filemap_get_folios(inode->v.i_mapping,
695 &index, end_index, &fbatch)) {
696 for (i = 0; i < folio_batch_count(&fbatch); i++) {
697 struct folio *folio = fbatch.folios[i];
698 u64 folio_start = folio_sector(folio);
699 u64 folio_end = folio_end_sector(folio);
700 unsigned folio_offset = max(start, folio_start) - folio_start;
701 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
704 BUG_ON(end <= folio_start);
707 s = bch2_folio(folio);
711 for (j = folio_offset; j < folio_offset + folio_len; j++) {
712 i_sectors_delta -= s->s[j].state == SECTOR_dirty;
713 folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
715 spin_unlock(&s->lock);
720 folio_batch_release(&fbatch);
724 i_sectors_acct(c, inode, NULL, i_sectors_delta);
727 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
729 /* XXX: this should not be open coded */
730 return inode->ei_inode.bi_data_replicas
731 ? inode->ei_inode.bi_data_replicas - 1
732 : c->opts.data_replicas;
735 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
736 unsigned nr_replicas)
738 return max(0, (int) nr_replicas -
740 s->replicas_reserved);
743 static int bch2_get_folio_disk_reservation(struct bch_fs *c,
744 struct bch_inode_info *inode,
745 struct folio *folio, bool check_enospc)
747 struct bch_folio *s = bch2_folio_create(folio, 0);
748 unsigned nr_replicas = inode_nr_replicas(c, inode);
749 struct disk_reservation disk_res = { 0 };
750 unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
756 for (i = 0; i < sectors; i++)
757 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
759 if (!disk_res_sectors)
762 ret = bch2_disk_reservation_get(c, &disk_res,
765 ? BCH_DISK_RESERVATION_NOFAIL
770 for (i = 0; i < sectors; i++)
771 s->s[i].replicas_reserved +=
772 sectors_to_reserve(&s->s[i], nr_replicas);
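/*
 * A folio reservation pairs a disk reservation with a quota reservation.
 * bch2_folio_reservation_get() walks the sectors covered by a write and only
 * reserves what's actually needed: disk space for sectors that don't already
 * have enough allocated/reserved replicas (sectors_to_reserve()), and quota
 * only for sectors that are currently unallocated.
 */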
777 struct bch2_folio_reservation {
778 struct disk_reservation disk;
779 struct quota_res quota;
782 static void bch2_folio_reservation_init(struct bch_fs *c,
783 struct bch_inode_info *inode,
784 struct bch2_folio_reservation *res)
786 memset(res, 0, sizeof(*res));
788 res->disk.nr_replicas = inode_nr_replicas(c, inode);
791 static void bch2_folio_reservation_put(struct bch_fs *c,
792 struct bch_inode_info *inode,
793 struct bch2_folio_reservation *res)
795 bch2_disk_reservation_put(c, &res->disk);
796 bch2_quota_reservation_put(c, inode, &res->quota);
799 static int bch2_folio_reservation_get(struct bch_fs *c,
800 struct bch_inode_info *inode,
802 struct bch2_folio_reservation *res,
803 unsigned offset, unsigned len)
805 struct bch_folio *s = bch2_folio_create(folio, 0);
806 unsigned i, disk_sectors = 0, quota_sectors = 0;
812 BUG_ON(!s->uptodate);
814 for (i = round_down(offset, block_bytes(c)) >> 9;
815 i < round_up(offset + len, block_bytes(c)) >> 9;
817 disk_sectors += sectors_to_reserve(&s->s[i],
818 res->disk.nr_replicas);
819 quota_sectors += s->s[i].state == SECTOR_unallocated;
823 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
829 ret = bch2_quota_reservation_add(c, inode, &res->quota,
830 quota_sectors, true);
832 struct disk_reservation tmp = {
833 .sectors = disk_sectors
836 bch2_disk_reservation_put(c, &tmp);
837 res->disk.sectors -= disk_sectors;
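/*
 * Called when a folio is being torn out of the pagecache
 * (invalidate/release folio): give back any disk reservation the folio's
 * sectors still own, undo i_blocks accounting for dirty sectors, and free
 * the bch_folio itself.
 */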
845 static void bch2_clear_folio_bits(struct folio *folio)
847 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
848 struct bch_fs *c = inode->v.i_sb->s_fs_info;
849 struct bch_folio *s = bch2_folio(folio);
850 struct disk_reservation disk_res = { 0 };
851 int i, sectors = folio_sectors(folio), dirty_sectors = 0;
856 EBUG_ON(!folio_test_locked(folio));
857 EBUG_ON(folio_test_writeback(folio));
859 for (i = 0; i < sectors; i++) {
860 disk_res.sectors += s->s[i].replicas_reserved;
861 s->s[i].replicas_reserved = 0;
863 dirty_sectors -= s->s[i].state == SECTOR_dirty;
864 folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
867 bch2_disk_reservation_put(c, &disk_res);
869 i_sectors_acct(c, inode, NULL, dirty_sectors);
871 bch2_folio_release(folio);
874 static void bch2_set_folio_dirty(struct bch_fs *c,
875 struct bch_inode_info *inode,
877 struct bch2_folio_reservation *res,
878 unsigned offset, unsigned len)
880 struct bch_folio *s = bch2_folio(folio);
881 unsigned i, dirty_sectors = 0;
883 WARN_ON((u64) folio_pos(folio) + offset + len >
884 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
886 BUG_ON(!s->uptodate);
890 for (i = round_down(offset, block_bytes(c)) >> 9;
891 i < round_up(offset + len, block_bytes(c)) >> 9;
893 unsigned sectors = sectors_to_reserve(&s->s[i],
894 res->disk.nr_replicas);
897 * This can happen if we race with the error path in
898 * bch2_writepage_io_done():
900 sectors = min_t(unsigned, sectors, res->disk.sectors);
902 s->s[i].replicas_reserved += sectors;
903 res->disk.sectors -= sectors;
905 dirty_sectors += s->s[i].state == SECTOR_unallocated;
907 folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
910 spin_unlock(&s->lock);
912 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
914 if (!folio_test_dirty(folio))
915 filemap_dirty_folio(inode->v.i_mapping, folio);
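/*
 * Page faults vs. O_DIRECT writes: a dio write faults in its userspace
 * buffer while holding the pagecache_block lock on its own mapping
 * (faults_disabled_mapping() records which one). If the buffer is mmapped
 * from that very same mapping we can't make progress and return SIGBUS; if
 * it's mmapped from a different bcachefs file and the add lock can't be
 * taken without blocking, we drop the dio write's pagecache_block lock,
 * cycle pagecache_add on the faulting inode, retake pagecache_block, and use
 * set_fdm_dropped_locks() to tell the write path it must invalidate the
 * pagecache and retry.
 */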
918 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
920 struct file *file = vmf->vma->vm_file;
921 struct address_space *mapping = file->f_mapping;
922 struct address_space *fdm = faults_disabled_mapping();
923 struct bch_inode_info *inode = file_bch_inode(file);
927 return VM_FAULT_SIGBUS;
931 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
933 if (bch2_pagecache_add_tryget(inode))
936 bch2_pagecache_block_put(fdm_host);
938 bch2_pagecache_add_get(inode);
939 bch2_pagecache_add_put(inode);
941 bch2_pagecache_block_get(fdm_host);
943 /* Signal that lock has been dropped: */
944 set_fdm_dropped_locks();
945 return VM_FAULT_SIGBUS;
948 bch2_pagecache_add_get(inode);
950 ret = filemap_fault(vmf);
951 bch2_pagecache_add_put(inode);
956 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
958 struct folio *folio = page_folio(vmf->page);
959 struct file *file = vmf->vma->vm_file;
960 struct bch_inode_info *inode = file_bch_inode(file);
961 struct address_space *mapping = file->f_mapping;
962 struct bch_fs *c = inode->v.i_sb->s_fs_info;
963 struct bch2_folio_reservation res;
968 bch2_folio_reservation_init(c, inode, &res);
970 sb_start_pagefault(inode->v.i_sb);
971 file_update_time(file);
974 * Not strictly necessary, but helps avoid dio writes livelocking in
975 * write_invalidate_inode_pages_range() - can drop this if/when we get
976 * a write_invalidate_inode_pages_range() that works without dropping
977 * page lock before invalidating page
979 bch2_pagecache_add_get(inode);
982 isize = i_size_read(&inode->v);
984 if (folio->mapping != mapping || folio_pos(folio) >= isize) {
986 ret = VM_FAULT_NOPAGE;
990 len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
992 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
993 if (bch2_folio_set(c, inode_inum(inode), &folio, 1)) {
995 ret = VM_FAULT_SIGBUS;
1000 if (bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
1001 folio_unlock(folio);
1002 ret = VM_FAULT_SIGBUS;
1006 bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
1007 bch2_folio_reservation_put(c, inode, &res);
1009 folio_wait_stable(folio);
1010 ret = VM_FAULT_LOCKED;
1012 bch2_pagecache_add_put(inode);
1013 sb_end_pagefault(inode->v.i_sb);
1018 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1020 if (offset || length < folio_size(folio))
1023 bch2_clear_folio_bits(folio);
1026 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
1028 if (folio_test_dirty(folio) || folio_test_writeback(folio))
1031 bch2_clear_folio_bits(folio);
1037 static void bch2_readpages_end_io(struct bio *bio)
1039 struct bvec_iter_all iter;
1040 struct folio_vec fv;
1042 bio_for_each_folio_all(fv, bio, iter) {
1043 if (!bio->bi_status) {
1044 folio_mark_uptodate(fv.fv_folio);
1046 folio_clear_uptodate(fv.fv_folio);
1047 folio_set_error(fv.fv_folio);
1049 folio_unlock(fv.fv_folio);
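/*
 * Readahead: readpages_iter holds the folios that were grabbed (or created)
 * up front for the readahead window. While building each read bio,
 * readpage_bio_extend() can pull in additional folios past the requested
 * range when splitting the read would be expensive (checksummed or
 * compressed extents), allocating and inserting new folios as needed.
 */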
1055 struct readpages_iter {
1056 struct address_space *mapping;
1061 static int readpages_iter_init(struct readpages_iter *iter,
1062 struct readahead_control *ractl)
1067 memset(iter, 0, sizeof(*iter));
1069 iter->mapping = ractl->mapping;
1071 ret = filemap_get_contig_folios_d(iter->mapping,
1072 ractl->_index << PAGE_SHIFT,
1073 (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
1074 0, mapping_gfp_mask(iter->mapping),
1079 darray_for_each(iter->folios, fi) {
1080 ractl->_nr_pages -= 1U << folio_order(*fi);
1081 __bch2_folio_create(*fi, __GFP_NOFAIL);
1089 static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
1091 if (iter->idx >= iter->folios.nr)
1093 return iter->folios.data[iter->idx];
1096 static inline void readpage_iter_advance(struct readpages_iter *iter)
1101 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1103 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1104 struct bch_extent_crc_unpacked crc;
1105 const union bch_extent_entry *i;
1107 bkey_for_each_crc(k.k, ptrs, crc, i)
1108 if (crc.csum_type || crc.compression_type)
1113 static void readpage_bio_extend(struct readpages_iter *iter,
1115 unsigned sectors_this_extent,
1118 while (bio_sectors(bio) < sectors_this_extent &&
1119 bio->bi_vcnt < bio->bi_max_vecs) {
1120 struct folio *folio = readpage_iter_peek(iter);
1124 readpage_iter_advance(iter);
1126 pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1131 folio = xa_load(&iter->mapping->i_pages, folio_offset);
1132 if (folio && !xa_is_value(folio))
1135 folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
1139 if (!__bch2_folio_create(folio, 0)) {
1144 ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_NOFS);
1146 __bch2_folio_release(folio);
1154 BUG_ON(folio_sector(folio) != bio_end_sector(bio));
1156 BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
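/*
 * The core buffered read path: walk the extents btree starting at the bio's
 * current sector, resolving reflink pointers via
 * bch2_read_indirect_extent(), opportunistically extend the bio into the
 * following folios when splitting the extent would be expensive, record
 * per-folio sector state, and hand each fragment to bch2_read_extent().
 * Transaction restarts loop back to the top; hard errors fail the whole bio
 * with BLK_STS_IOERR.
 */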
1160 static void bchfs_read(struct btree_trans *trans,
1161 struct bch_read_bio *rbio,
1163 struct readpages_iter *readpages_iter)
1165 struct bch_fs *c = trans->c;
1166 struct btree_iter iter;
1168 int flags = BCH_READ_RETRY_IF_STALE|
1169 BCH_READ_MAY_PROMOTE;
1174 rbio->start_time = local_clock();
1175 rbio->subvol = inum.subvol;
1177 bch2_bkey_buf_init(&sk);
1179 bch2_trans_begin(trans);
1180 iter = (struct btree_iter) { NULL };
1182 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1186 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1187 SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1191 unsigned bytes, sectors, offset_into_extent;
1192 enum btree_id data_btree = BTREE_ID_extents;
1195 * read_extent -> io_time_reset may cause a transaction restart
1196 * without returning an error, we need to check for that here:
1198 ret = bch2_trans_relock(trans);
1202 bch2_btree_iter_set_pos(&iter,
1203 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1205 k = bch2_btree_iter_peek_slot(&iter);
1210 offset_into_extent = iter.pos.offset -
1211 bkey_start_offset(k.k);
1212 sectors = k.k->size - offset_into_extent;
1214 bch2_bkey_buf_reassemble(&sk, c, k);
1216 ret = bch2_read_indirect_extent(trans, &data_btree,
1217 &offset_into_extent, &sk);
1221 k = bkey_i_to_s_c(sk.k);
1223 sectors = min(sectors, k.k->size - offset_into_extent);
1226 readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1227 extent_partial_reads_expensive(k));
1229 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1230 swap(rbio->bio.bi_iter.bi_size, bytes);
1232 if (rbio->bio.bi_iter.bi_size == bytes)
1233 flags |= BCH_READ_LAST_FRAGMENT;
1235 bch2_bio_page_state_set(&rbio->bio, k);
1237 bch2_read_extent(trans, rbio, iter.pos,
1238 data_btree, k, offset_into_extent, flags);
1240 if (flags & BCH_READ_LAST_FRAGMENT)
1243 swap(rbio->bio.bi_iter.bi_size, bytes);
1244 bio_advance(&rbio->bio, bytes);
1246 ret = btree_trans_too_many_iters(trans);
1251 bch2_trans_iter_exit(trans, &iter);
1253 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1257 bch_err_inum_offset_ratelimited(c,
1259 iter.pos.offset << 9,
1260 "read error %i from btree lookup", ret);
1261 rbio->bio.bi_status = BLK_STS_IOERR;
1262 bio_endio(&rbio->bio);
1265 bch2_bkey_buf_exit(&sk, c);
1268 void bch2_readahead(struct readahead_control *ractl)
1270 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1271 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1272 struct bch_io_opts opts;
1273 struct btree_trans trans;
1274 struct folio *folio;
1275 struct readpages_iter readpages_iter;
1278 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1280 ret = readpages_iter_init(&readpages_iter, ractl);
1283 bch2_trans_init(&trans, c, 0, 0);
1285 bch2_pagecache_add_get(inode);
1287 while ((folio = readpage_iter_peek(&readpages_iter))) {
1288 unsigned n = min_t(unsigned,
1289 readpages_iter.folios.nr -
1292 struct bch_read_bio *rbio =
1293 rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1294 GFP_NOFS, &c->bio_read),
1297 readpage_iter_advance(&readpages_iter);
1299 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1300 rbio->bio.bi_end_io = bch2_readpages_end_io;
1301 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1303 bchfs_read(&trans, rbio, inode_inum(inode),
1307 bch2_pagecache_add_put(inode);
1309 bch2_trans_exit(&trans);
1310 darray_exit(&readpages_iter.folios);
1313 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
1314 subvol_inum inum, struct folio *folio)
1316 struct btree_trans trans;
1318 bch2_folio_create(folio, __GFP_NOFAIL);
1320 rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
1321 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1322 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1324 bch2_trans_init(&trans, c, 0, 0);
1325 bchfs_read(&trans, rbio, inum, NULL);
1326 bch2_trans_exit(&trans);
1329 static void bch2_read_single_folio_end_io(struct bio *bio)
1331 complete(bio->bi_private);
1334 static int bch2_read_single_folio(struct folio *folio,
1335 struct address_space *mapping)
1337 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1338 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1339 struct bch_read_bio *rbio;
1340 struct bch_io_opts opts;
1342 DECLARE_COMPLETION_ONSTACK(done);
1344 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1346 rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
1348 rbio->bio.bi_private = &done;
1349 rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
1351 __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
1352 wait_for_completion(&done);
1354 ret = blk_status_to_errno(rbio->bio.bi_status);
1355 bio_put(&rbio->bio);
1360 folio_mark_uptodate(folio);
1364 int bch2_read_folio(struct file *file, struct folio *folio)
1368 ret = bch2_read_single_folio(folio, folio->mapping);
1369 folio_unlock(folio);
1370 return bch2_err_class(ret);
1375 struct bch_writepage_state {
1376 struct bch_writepage_io *io;
1377 struct bch_io_opts opts;
1378 struct bch_folio_sector *tmp;
1379 unsigned tmp_sectors;
1382 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1383 struct bch_inode_info *inode)
1385 struct bch_writepage_state ret = { 0 };
1387 bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
1391 static void bch2_writepage_io_done(struct bch_write_op *op)
1393 struct bch_writepage_io *io =
1394 container_of(op, struct bch_writepage_io, op);
1395 struct bch_fs *c = io->op.c;
1396 struct bio *bio = &io->op.wbio.bio;
1397 struct bvec_iter_all iter;
1398 struct folio_vec fv;
1402 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1404 bio_for_each_folio_all(fv, bio, iter) {
1405 struct bch_folio *s;
1407 folio_set_error(fv.fv_folio);
1408 mapping_set_error(fv.fv_folio->mapping, -EIO);
1410 s = __bch2_folio(fv.fv_folio);
1411 spin_lock(&s->lock);
1412 for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1413 s->s[i].nr_replicas = 0;
1414 spin_unlock(&s->lock);
1418 if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1419 bio_for_each_folio_all(fv, bio, iter) {
1420 struct bch_folio *s;
1422 s = __bch2_folio(fv.fv_folio);
1423 spin_lock(&s->lock);
1424 for (i = 0; i < folio_sectors(fv.fv_folio); i++)
1425 s->s[i].nr_replicas = 0;
1426 spin_unlock(&s->lock);
1431 * racing with fallocate can cause us to add fewer sectors than
1432 * expected - but we shouldn't add more sectors than expected:
1434 WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1437 * (error (due to going RO) halfway through a page can screw that up
1440	BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
1444 * PageWriteback is effectively our ref on the inode - fixup i_blocks
1445 * before calling end_page_writeback:
1447 i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1449 bio_for_each_folio_all(fv, bio, iter) {
1450 struct bch_folio *s = __bch2_folio(fv.fv_folio);
1452 if (atomic_dec_and_test(&s->write_count))
1453 folio_end_writeback(fv.fv_folio);
1456 bio_put(&io->op.wbio.bio);
1459 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1461 struct bch_writepage_io *io = w->io;
1464 closure_call(&io->op.cl, bch2_write, NULL, NULL);
1468 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1469 * possible, else allocating a new one:
1471 static void bch2_writepage_io_alloc(struct bch_fs *c,
1472 struct writeback_control *wbc,
1473 struct bch_writepage_state *w,
1474 struct bch_inode_info *inode,
1476 unsigned nr_replicas)
1478 struct bch_write_op *op;
1480 w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1483 &c->writepage_bioset),
1484 struct bch_writepage_io, op.wbio.bio);
1486 w->io->inode = inode;
1488 bch2_write_op_init(op, c, w->opts);
1489 op->target = w->opts.foreground_target;
1490 op->nr_replicas = nr_replicas;
1491 op->res.nr_replicas = nr_replicas;
1492 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1493 op->subvol = inode->ei_subvol;
1494 op->pos = POS(inode->v.i_ino, sector);
1495 op->end_io = bch2_writepage_io_done;
1496 op->devs_need_flush = &inode->ei_devs_need_flush;
1497 op->wbio.bio.bi_iter.bi_sector = sector;
1498 op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
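/*
 * Writeback of one folio: copy the per-sector state under the folio's lock,
 * decide how many replicas this write will have (the minimum over the dirty
 * sectors of what's already allocated plus reserved), mark the sectors
 * allocated, then carve the folio into runs of contiguous dirty sectors and
 * append each run to a bch_writepage_io, starting a new one whenever the
 * replica count, bio size, or disk offset no longer matches.
 */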
1501 static int __bch2_writepage(struct page *_page,
1502 struct writeback_control *wbc,
1505 struct folio *folio = page_folio(_page);
1506 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
1507 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1508 struct bch_writepage_state *w = data;
1509 struct bch_folio *s;
1510 unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
1511 loff_t i_size = i_size_read(&inode->v);
1514 EBUG_ON(!folio_test_uptodate(folio));
1516 /* Is the folio fully inside i_size? */
1517 if (folio_end_pos(folio) <= i_size)
1520 /* Is the folio fully outside i_size? (truncate in progress) */
1521 if (folio_pos(folio) >= i_size) {
1522 folio_unlock(folio);
1527 * The folio straddles i_size. It must be zeroed out on each and every
1528 * writepage invocation because it may be mmapped. "A file is mapped
1529 * in multiples of the folio size. For a file that is not a multiple of
1530 * the folio size, the remaining memory is zeroed when mapped, and
1531 * writes to that region are not written out to the file."
1533 folio_zero_segment(folio,
1534 i_size - folio_pos(folio),
1537 f_sectors = folio_sectors(folio);
1538 s = bch2_folio_create(folio, __GFP_NOFAIL);
1540 if (f_sectors > w->tmp_sectors) {
1542 w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
1543 f_sectors, __GFP_NOFAIL);
1544 w->tmp_sectors = f_sectors;
1548 * Things get really hairy with errors during writeback:
1550 ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
1553 /* Before unlocking the page, get copy of reservations: */
1554 spin_lock(&s->lock);
1555 memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
1557 for (i = 0; i < f_sectors; i++) {
1558 if (s->s[i].state < SECTOR_dirty)
1561 nr_replicas_this_write =
1562 min_t(unsigned, nr_replicas_this_write,
1563 s->s[i].nr_replicas +
1564 s->s[i].replicas_reserved);
1567 for (i = 0; i < f_sectors; i++) {
1568 if (s->s[i].state < SECTOR_dirty)
1571 s->s[i].nr_replicas = w->opts.compression
1572 ? 0 : nr_replicas_this_write;
1574 s->s[i].replicas_reserved = 0;
1575 folio_sector_set(folio, s, i, SECTOR_allocated);
1577 spin_unlock(&s->lock);
1579 BUG_ON(atomic_read(&s->write_count));
1580 atomic_set(&s->write_count, 1);
1582 BUG_ON(folio_test_writeback(folio));
1583 folio_start_writeback(folio);
1585 folio_unlock(folio);
1589 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1592 while (offset < f_sectors &&
1593 w->tmp[offset].state < SECTOR_dirty)
1596 if (offset == f_sectors)
1599 while (offset + sectors < f_sectors &&
1600 w->tmp[offset + sectors].state >= SECTOR_dirty) {
1601 reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
1602 dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
1607 sector = folio_sector(folio) + offset;
1610 (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1611 bio_full(&w->io->op.wbio.bio, sectors << 9) ||
1612 w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1613 (BIO_MAX_VECS * PAGE_SIZE) ||
1614 bio_end_sector(&w->io->op.wbio.bio) != sector))
1615 bch2_writepage_do_io(w);
1618 bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1619 nr_replicas_this_write);
1621 atomic_inc(&s->write_count);
1623 BUG_ON(inode != w->io->inode);
1624 BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
1625 sectors << 9, offset << 9));
1627 /* Check for writing past i_size: */
1628 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1629 round_up(i_size, block_bytes(c)) &&
1630 !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1631 "writing past i_size: %llu > %llu (unrounded %llu)\n",
1632 bio_end_sector(&w->io->op.wbio.bio) << 9,
1633 round_up(i_size, block_bytes(c)),
1636 w->io->op.res.sectors += reserved_sectors;
1637 w->io->op.i_sectors_delta -= dirty_sectors;
1638 w->io->op.new_i_size = i_size;
1643 if (atomic_dec_and_test(&s->write_count))
1644 folio_end_writeback(folio);
1649 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1651 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1652 struct bch_writepage_state w =
1653 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1654 struct blk_plug plug;
1657 blk_start_plug(&plug);
1658 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1660 bch2_writepage_do_io(&w);
1661 blk_finish_plug(&plug);
1663 return bch2_err_class(ret);
1666 /* buffered writes: */
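/*
 * .write_begin/.write_end for the generic buffered write path: write_begin
 * pins the folio, reads it in (or zeroes it) unless the write covers the
 * whole thing or lands entirely past i_size, makes sure its bch_folio state
 * is initialized, and takes the disk + quota reservation that write_end will
 * consume when it marks the copied range dirty and updates i_size.
 */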
1668 int bch2_write_begin(struct file *file, struct address_space *mapping,
1669 loff_t pos, unsigned len,
1670 struct page **pagep, void **fsdata)
1672 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1673 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1674 struct bch2_folio_reservation *res;
1675 struct folio *folio;
1679 res = kmalloc(sizeof(*res), GFP_KERNEL);
1683 bch2_folio_reservation_init(c, inode, res);
1686 bch2_pagecache_add_get(inode);
1688 folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
1689 FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
1690 mapping_gfp_mask(mapping));
1694 if (folio_test_uptodate(folio))
1697 offset = pos - folio_pos(folio);
1698 len = min_t(size_t, len, folio_end_pos(folio) - pos);
1700	/* If we're writing the entire folio, we don't need to read it in first: */
1701 if (!offset && len == folio_size(folio))
1704 if (!offset && pos + len >= inode->v.i_size) {
1705 folio_zero_segment(folio, len, folio_size(folio));
1706 flush_dcache_folio(folio);
1710 if (folio_pos(folio) >= inode->v.i_size) {
1711 folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
1712 flush_dcache_folio(folio);
1716 ret = bch2_read_single_folio(folio, mapping);
1720 if (!bch2_folio_create(folio, __GFP_NOFAIL)->uptodate) {
1721 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
1726 ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
1728 if (!folio_test_uptodate(folio)) {
1730 * If the folio hasn't been read in, we won't know if we
1731 * actually need a reservation - we don't actually need
1732 * to read here, we just need to check if the folio is
1733 * fully backed by uncompressed data:
1741 *pagep = &folio->page;
1744 folio_unlock(folio);
1748 bch2_pagecache_add_put(inode);
1751 return bch2_err_class(ret);
1754 int bch2_write_end(struct file *file, struct address_space *mapping,
1755 loff_t pos, unsigned len, unsigned copied,
1756 struct page *page, void *fsdata)
1758 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1759 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1760 struct bch2_folio_reservation *res = fsdata;
1761 struct folio *folio = page_folio(page);
1762 unsigned offset = pos - folio_pos(folio);
1764 lockdep_assert_held(&inode->v.i_rwsem);
1765 BUG_ON(offset + copied > folio_size(folio));
1767 if (unlikely(copied < len && !folio_test_uptodate(folio))) {
1769 * The folio needs to be read in, but that would destroy
1770 * our partial write - simplest thing is to just force
1771 * userspace to redo the write:
1773 folio_zero_range(folio, 0, folio_size(folio));
1774 flush_dcache_folio(folio);
1778 spin_lock(&inode->v.i_lock);
1779 if (pos + copied > inode->v.i_size)
1780 i_size_write(&inode->v, pos + copied);
1781 spin_unlock(&inode->v.i_lock);
1784 if (!folio_test_uptodate(folio))
1785 folio_mark_uptodate(folio);
1787 bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
1789 inode->ei_last_dirtied = (unsigned long) current;
1792 folio_unlock(folio);
1794 bch2_pagecache_add_put(inode);
1796 bch2_folio_reservation_put(c, inode, res);
1802 static noinline void folios_trunc(folios *folios, struct folio **fi)
1804 while (folios->data + folios->nr > fi) {
1805 struct folio *f = darray_pop(folios);
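/*
 * The main buffered write path (bypassing generic_perform_write): grab all
 * the folios covering the write at once, read in the first/last if the write
 * is misaligned, take reservations for the whole range, copy from the iov,
 * then mark everything dirty and update i_size. On a short copy or a failed
 * reservation the folio array is truncated with folios_trunc() and we report
 * however much was copied.
 */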
1812 static int __bch2_buffered_write(struct bch_inode_info *inode,
1813 struct address_space *mapping,
1814 struct iov_iter *iter,
1815 loff_t pos, unsigned len)
1817 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1818 struct bch2_folio_reservation res;
1820 struct folio **fi, *f;
1821 unsigned copied = 0, f_offset;
1822 loff_t end = pos + len, f_pos;
1823 loff_t last_folio_pos = inode->v.i_size;
1828 bch2_folio_reservation_init(c, inode, &res);
1829 darray_init(&folios);
1831 ret = filemap_get_contig_folios_d(mapping, pos, end,
1832 FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
1833 mapping_gfp_mask(mapping),
1840 f = darray_first(folios);
1841 if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
1842 ret = bch2_read_single_folio(f, mapping);
1847 f = darray_last(folios);
1848 end = min(end, folio_end_pos(f));
1849 last_folio_pos = folio_pos(f);
1850 if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
1851 if (end >= inode->v.i_size) {
1852 folio_zero_range(f, 0, folio_size(f));
1854 ret = bch2_read_single_folio(f, mapping);
1861 f_offset = pos - folio_pos(darray_first(folios));
1862 darray_for_each(folios, fi) {
1863 struct folio *f = *fi;
1864 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1866 if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
1867 ret = bch2_folio_set(c, inode_inum(inode), fi,
1868 folios.data + folios.nr - fi);
1874 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
1875 * supposed to write as much as we have disk space for.
1877 * On failure here we should still write out a partial page if
1878 * we aren't completely out of disk space - we don't do that yet:
1881 ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
1882 if (unlikely(ret)) {
1883 folios_trunc(&folios, fi);
1887 end = min(end, folio_end_pos(darray_last(folios)));
1891 f_pos = folio_end_pos(f);
1895 if (mapping_writably_mapped(mapping))
1896 darray_for_each(folios, fi)
1897 flush_dcache_folio(*fi);
1900 f_offset = pos - folio_pos(darray_first(folios));
1901 darray_for_each(folios, fi) {
1902 struct folio *f = *fi;
1903 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1904 unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
1907 folios_trunc(&folios, fi);
1911 if (!folio_test_uptodate(f) &&
1912 f_copied != folio_size(f) &&
1913 pos + copied + f_copied < inode->v.i_size) {
1914 folio_zero_range(f, 0, folio_size(f));
1915 folios_trunc(&folios, fi);
1919 flush_dcache_folio(f);
1922 if (f_copied != f_len) {
1923 folios_trunc(&folios, fi + 1);
1927 f_pos = folio_end_pos(f);
1936 spin_lock(&inode->v.i_lock);
1937 if (end > inode->v.i_size)
1938 i_size_write(&inode->v, end);
1939 spin_unlock(&inode->v.i_lock);
1942 f_offset = pos - folio_pos(darray_first(folios));
1943 darray_for_each(folios, fi) {
1944 struct folio *f = *fi;
1945 unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
1947 if (!folio_test_uptodate(f))
1948 folio_mark_uptodate(f);
1950 bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
1952 f_pos = folio_end_pos(f);
1956 inode->ei_last_dirtied = (unsigned long) current;
1958 darray_for_each(folios, fi) {
1964 * If the last folio added to the mapping starts beyond current EOF, we
1965 * performed a short write but left around at least one post-EOF folio.
1966 * Clean up the mapping before we return.
1968 if (last_folio_pos >= inode->v.i_size)
1969 truncate_pagecache(&inode->v, inode->v.i_size);
1971 darray_exit(&folios);
1972 bch2_folio_reservation_put(c, inode, &res);
1974 return copied ?: ret;
1977 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1979 struct file *file = iocb->ki_filp;
1980 struct address_space *mapping = file->f_mapping;
1981 struct bch_inode_info *inode = file_bch_inode(file);
1982 loff_t pos = iocb->ki_pos;
1983 ssize_t written = 0;
1986 bch2_pagecache_add_get(inode);
1989 unsigned offset = pos & (PAGE_SIZE - 1);
1990 unsigned bytes = iov_iter_count(iter);
1993 * Bring in the user page that we will copy from _first_.
1994 * Otherwise there's a nasty deadlock on copying from the
1995 * same page as we're writing to, without it being marked up-to-date.
1998 * Not only is this an optimisation, but it is also required
1999 * to check that the address is actually valid, when atomic
2000 * usercopies are used, below.
2002 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2003 bytes = min_t(unsigned long, iov_iter_count(iter),
2004 PAGE_SIZE - offset);
2006 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2012 if (unlikely(fatal_signal_pending(current))) {
2017 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
2018 if (unlikely(ret < 0))
2023 if (unlikely(ret == 0)) {
2025 * If we were unable to copy any data at all, we must
2026 * fall back to a single segment length write.
2028 * If we didn't fallback here, we could livelock
2029 * because not all segments in the iov can be copied at
2030 * once without a pagefault.
2032 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2033 iov_iter_single_seg_count(iter));
2040 balance_dirty_pages_ratelimited(mapping);
2041 } while (iov_iter_count(iter));
2043 bch2_pagecache_add_put(inode);
2045 return written ? written : ret;
2048 /* O_DIRECT reads */
2050 static void bio_check_or_release(struct bio *bio, bool check_dirty)
2053 bio_check_pages_dirty(bio);
2055 bio_release_pages(bio, false);
2060 static void bch2_dio_read_complete(struct closure *cl)
2062 struct dio_read *dio = container_of(cl, struct dio_read, cl);
2064 dio->req->ki_complete(dio->req, dio->ret);
2065 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2068 static void bch2_direct_IO_read_endio(struct bio *bio)
2070 struct dio_read *dio = bio->bi_private;
2073 dio->ret = blk_status_to_errno(bio->bi_status);
2075 closure_put(&dio->cl);
2078 static void bch2_direct_IO_read_split_endio(struct bio *bio)
2080 struct dio_read *dio = bio->bi_private;
2081 bool should_dirty = dio->should_dirty;
2083 bch2_direct_IO_read_endio(bio);
2084 bio_check_or_release(bio, should_dirty);
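/*
 * O_DIRECT reads: the request is shortened to i_size and to a filesystem
 * block size multiple, then split into as many bios as the iov requires, all
 * hanging off one dio_read whose closure completes the kiocb (or, for
 * synchronous requests, is waited on directly). Pages are only dirtied for
 * user iovecs; kernel-internal requests (e.g. loopback) skip that to avoid
 * deadlocking on the page lock.
 */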
2087 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
2089 struct file *file = req->ki_filp;
2090 struct bch_inode_info *inode = file_bch_inode(file);
2091 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2092 struct bch_io_opts opts;
2093 struct dio_read *dio;
2095 loff_t offset = req->ki_pos;
2096 bool sync = is_sync_kiocb(req);
2100 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2102 if ((offset|iter->count) & (block_bytes(c) - 1))
2105 ret = min_t(loff_t, iter->count,
2106 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
2111 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
2112 iter->count -= shorten;
2114 bio = bio_alloc_bioset(NULL,
2115 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2118 &c->dio_read_bioset);
2120 bio->bi_end_io = bch2_direct_IO_read_endio;
2122 dio = container_of(bio, struct dio_read, rbio.bio);
2123 closure_init(&dio->cl, NULL);
2126 * this is a _really_ horrible hack just to avoid an atomic sub at the end:
2130 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
2131 atomic_set(&dio->cl.remaining,
2132 CLOSURE_REMAINING_INITIALIZER -
2134 CLOSURE_DESTRUCTOR);
2136 atomic_set(&dio->cl.remaining,
2137 CLOSURE_REMAINING_INITIALIZER + 1);
2143 * This is one of the sketchier things I've encountered: we have to skip
2144 * the dirtying of requests that are internal from the kernel (i.e. from
2145 * loopback), because we'll deadlock on page_lock.
2147 dio->should_dirty = iter_is_iovec(iter);
2150 while (iter->count) {
2151 bio = bio_alloc_bioset(NULL,
2152 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2156 bio->bi_end_io = bch2_direct_IO_read_split_endio;
2158 bio->bi_opf = REQ_OP_READ|REQ_SYNC;
2159 bio->bi_iter.bi_sector = offset >> 9;
2160 bio->bi_private = dio;
2162 ret = bio_iov_iter_get_pages(bio, iter);
2164 /* XXX: fault inject this path */
2165 bio->bi_status = BLK_STS_RESOURCE;
2170 offset += bio->bi_iter.bi_size;
2172 if (dio->should_dirty)
2173 bio_set_pages_dirty(bio);
2176 closure_get(&dio->cl);
2178 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2181 iter->count += shorten;
2184 closure_sync(&dio->cl);
2185 closure_debug_destroy(&dio->cl);
2187 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2190 return -EIOCBQUEUED;
2194 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2196 struct file *file = iocb->ki_filp;
2197 struct bch_inode_info *inode = file_bch_inode(file);
2198 struct address_space *mapping = file->f_mapping;
2199 size_t count = iov_iter_count(iter);
2203 return 0; /* skip atime */
2205 if (iocb->ki_flags & IOCB_DIRECT) {
2206 struct blk_plug plug;
2208 if (unlikely(mapping->nrpages)) {
2209 ret = filemap_write_and_wait_range(mapping,
2211 iocb->ki_pos + count - 1);
2216 file_accessed(file);
2218 blk_start_plug(&plug);
2219 ret = bch2_direct_IO_read(iocb, iter);
2220 blk_finish_plug(&plug);
2223 iocb->ki_pos += ret;
2225 bch2_pagecache_add_get(inode);
2226 ret = generic_file_read_iter(iocb, iter);
2227 bch2_pagecache_add_put(inode);
2230 return bch2_err_class(ret);
2233 /* O_DIRECT writes */
2235 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2236 u64 offset, u64 size,
2237 unsigned nr_replicas, bool compressed)
2239 struct btree_trans trans;
2240 struct btree_iter iter;
2242 u64 end = offset + size;
2247 bch2_trans_init(&trans, c, 0, 0);
2249 bch2_trans_begin(&trans);
2251 err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2255 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2256 SPOS(inum.inum, offset, snapshot),
2257 BTREE_ITER_SLOTS, k, err) {
2258 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
2261 if (k.k->p.snapshot != snapshot ||
2262 nr_replicas > bch2_bkey_replicas(c, k) ||
2263 (!compressed && bch2_bkey_sectors_compressed(k))) {
2269 offset = iter.pos.offset;
2270 bch2_trans_iter_exit(&trans, &iter);
2272 if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2274 bch2_trans_exit(&trans);
2276 return err ? false : ret;
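/*
 * Used when a dio write couldn't get a disk reservation: if the range being
 * written is already fully allocated with enough replicas and nothing in it
 * is compressed, the write can safely proceed as an overwrite without a
 * reservation, so only genuinely out-of-space writes fail with -ENOSPC.
 */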
2279 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2281 struct bch_fs *c = dio->op.c;
2282 struct bch_inode_info *inode = dio->inode;
2283 struct bio *bio = &dio->op.wbio.bio;
2285 return bch2_check_range_allocated(c, inode_inum(inode),
2286 dio->op.pos.offset, bio_sectors(bio),
2287 dio->op.opts.data_replicas,
2288 dio->op.opts.compression != 0);
2291 static void bch2_dio_write_loop_async(struct bch_write_op *);
2292 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2294 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2296 struct iovec *iov = dio->inline_vecs;
2298 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2299 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2304 dio->free_iov = true;
2307 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2308 dio->iter.iov = iov;
2312 static void bch2_dio_write_flush_done(struct closure *cl)
2314 struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2315 struct bch_fs *c = dio->op.c;
2317 closure_debug_destroy(cl);
2319 dio->op.error = bch2_journal_error(&c->journal);
2321 bch2_dio_write_done(dio);
2324 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2326 struct bch_fs *c = dio->op.c;
2327 struct bch_inode_unpacked inode;
2332 closure_init(&dio->op.cl, NULL);
2334 if (!dio->op.error) {
2335 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2337 dio->op.error = ret;
2339 bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2340 bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2345 closure_sync(&dio->op.cl);
2346 closure_debug_destroy(&dio->op.cl);
2348 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2352 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2354 struct kiocb *req = dio->req;
2355 struct bch_inode_info *inode = dio->inode;
2356 bool sync = dio->sync;
2359 if (unlikely(dio->flush)) {
2360 bch2_dio_write_flush(dio);
2362 return -EIOCBQUEUED;
2365 bch2_pagecache_block_put(inode);
2368 kfree(dio->iter.iov);
2370 ret = dio->op.error ?: ((long) dio->written << 9);
2371 bio_put(&dio->op.wbio.bio);
2373 /* inode->i_dio_count is our ref on inode and thus bch_fs */
2374 inode_dio_end(&inode->v);
2377 ret = bch2_err_class(ret);
2380 req->ki_complete(req, ret);
2386 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2388 struct bch_fs *c = dio->op.c;
2389 struct kiocb *req = dio->req;
2390 struct bch_inode_info *inode = dio->inode;
2391 struct bio *bio = &dio->op.wbio.bio;
2393 req->ki_pos += (u64) dio->op.written << 9;
2394 dio->written += dio->op.written;
2396 if (dio->extending) {
2397 spin_lock(&inode->v.i_lock);
2398 if (req->ki_pos > inode->v.i_size)
2399 i_size_write(&inode->v, req->ki_pos);
2400 spin_unlock(&inode->v.i_lock);
2403 if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2404 mutex_lock(&inode->ei_quota_lock);
2405 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2406 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2407 mutex_unlock(&inode->ei_quota_lock);
2410 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
2411 struct bvec_iter_all iter;
2412 struct folio_vec fv;
2414 bio_for_each_folio_all(fv, bio, iter)
2415 folio_put(fv.fv_folio);
2418 if (unlikely(dio->op.error))
2419 set_bit(EI_INODE_ERROR, &inode->ei_flags);
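/*
 * One pass of the dio write state machine: pin the user pages (with
 * faults_disabled_mapping set so page faults on our own mapping are caught),
 * trim the bio to a block-size multiple, take quota and disk reservations
 * (falling back to the already-allocated check above), then submit via
 * bch2_write. Synchronous writes loop here; asynchronous ones continue from
 * the write op's end_io callback.
 */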
2422 static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
2424 struct bch_fs *c = dio->op.c;
2425 struct kiocb *req = dio->req;
2426 struct address_space *mapping = dio->mapping;
2427 struct bch_inode_info *inode = dio->inode;
2428 struct bch_io_opts opts;
2429 struct bio *bio = &dio->op.wbio.bio;
2430 unsigned unaligned, iter_count;
2431 bool sync = dio->sync, dropped_locks;
2434 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2437 iter_count = dio->iter.count;
2439 EBUG_ON(current->faults_disabled_mapping);
2440 current->faults_disabled_mapping = mapping;
2442 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2444 dropped_locks = fdm_dropped_locks();
2446 current->faults_disabled_mapping = NULL;
2449 * If the fault handler returned an error but also signalled
2450 * that it dropped & retook ei_pagecache_lock, we just need to
2451 * re-shoot down the page cache and retry:
2453 if (dropped_locks && ret)
2456 if (unlikely(ret < 0))
2459 if (unlikely(dropped_locks)) {
2460 ret = write_invalidate_inode_pages_range(mapping,
2462 req->ki_pos + iter_count - 1);
2466 if (!bio->bi_iter.bi_size)
2470 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2471 bio->bi_iter.bi_size -= unaligned;
2472 iov_iter_revert(&dio->iter, unaligned);
2474 if (!bio->bi_iter.bi_size) {
2476 * bio_iov_iter_get_pages was only able to get <
2477 * blocksize worth of pages:
2483 bch2_write_op_init(&dio->op, c, opts);
2484 dio->op.end_io = sync
2486 : bch2_dio_write_loop_async;
2487 dio->op.target = dio->op.opts.foreground_target;
2488 dio->op.write_point = writepoint_hashed((unsigned long) current);
2489 dio->op.nr_replicas = dio->op.opts.data_replicas;
2490 dio->op.subvol = inode->ei_subvol;
2491 dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2492 dio->op.devs_need_flush = &inode->ei_devs_need_flush;
2495 dio->op.flags |= BCH_WRITE_SYNC;
2496 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2498 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2499 bio_sectors(bio), true);
2503 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2504 dio->op.opts.data_replicas, 0);
2505 if (unlikely(ret) &&
2506 !bch2_dio_write_check_allocated(dio))
2509 task_io_account_write(bio->bi_iter.bi_size);
2511 if (unlikely(dio->iter.count) &&
2514 bch2_dio_write_copy_iov(dio))
2515 dio->sync = sync = true;
2518 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2521 return -EIOCBQUEUED;
2523 bch2_dio_write_end(dio);
2525 if (likely(!dio->iter.count) || dio->op.error)
2528 bio_reset(bio, NULL, REQ_OP_WRITE);
2531 return bch2_dio_write_done(dio);
2533 dio->op.error = ret;
2535 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
2536 struct bvec_iter_all iter;
2537 struct folio_vec fv;
2539 bio_for_each_folio_all(fv, bio, iter)
2540 folio_put(fv.fv_folio);
2543 bch2_quota_reservation_put(c, inode, &dio->quota_res);
2547 static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
2549 struct mm_struct *mm = dio->mm;
2551 bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
2555 bch2_dio_write_loop(dio);
2557 kthread_unuse_mm(mm);
2560 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2562 struct dio_write *dio = container_of(op, struct dio_write, op);
2564 bch2_dio_write_end(dio);
2566 if (likely(!dio->iter.count) || dio->op.error)
2567 bch2_dio_write_done(dio);
2569 bch2_dio_write_continue(dio);
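/*
 * Entry point for O_DIRECT writes: checks alignment against the filesystem
 * block size, takes pagecache_block to keep buffered I/O out of the range,
 * and keeps the inode lock held and forces synchronous completion when the
 * write extends i_size.
 */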
2573 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2575 struct file *file = req->ki_filp;
2576 struct address_space *mapping = file->f_mapping;
2577 struct bch_inode_info *inode = file_bch_inode(file);
2578 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2579 struct dio_write *dio;
2581 bool locked = true, extending;
2585 prefetch((void *) &c->opts + 64);
2586 prefetch(&inode->ei_inode);
2587 prefetch((void *) &inode->ei_inode + 64);
2589 inode_lock(&inode->v);
2591 ret = generic_write_checks(req, iter);
2592 if (unlikely(ret <= 0))
2595 ret = file_remove_privs(file);
2599 ret = file_update_time(file);
2603 if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2606 inode_dio_begin(&inode->v);
2607 bch2_pagecache_block_get(inode);
2609 extending = req->ki_pos + iter->count > inode->v.i_size;
2611 inode_unlock(&inode->v);
2615 bio = bio_alloc_bioset(NULL,
2616 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2619 &c->dio_write_bioset);
2620 dio = container_of(bio, struct dio_write, op.wbio.bio);
2622 dio->mapping = mapping;
2624 dio->mm = current->mm;
2626 dio->extending = extending;
2627 dio->sync = is_sync_kiocb(req) || extending;
2628 dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
2629 dio->free_iov = false;
2630 dio->quota_res.sectors = 0;
2635 if (unlikely(mapping->nrpages)) {
2636 ret = write_invalidate_inode_pages_range(mapping,
2638 req->ki_pos + iter->count - 1);
2643 ret = bch2_dio_write_loop(dio);
2646 inode_unlock(&inode->v);
2649 bch2_pagecache_block_put(inode);
2651 inode_dio_end(&inode->v);
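/*
 * .write_iter: hand O_DIRECT writes to bch2_direct_write(), otherwise take
 * the inode lock and do a buffered write.
 */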
2655 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2657 struct file *file = iocb->ki_filp;
2658 struct bch_inode_info *inode = file_bch_inode(file);
2661 if (iocb->ki_flags & IOCB_DIRECT) {
2662 ret = bch2_direct_write(iocb, from);
2666 /* We can write back this queue in page reclaim */
2667 current->backing_dev_info = inode_to_bdi(&inode->v);
2668 inode_lock(&inode->v);
2670 ret = generic_write_checks(iocb, from);
2674 ret = file_remove_privs(file);
2678 ret = file_update_time(file);
2682 ret = bch2_buffered_write(iocb, from);
2683 if (likely(ret > 0))
2684 iocb->ki_pos += ret;
2686 inode_unlock(&inode->v);
2687 current->backing_dev_info = NULL;
2690 ret = generic_write_sync(iocb, ret);
2692 return bch2_err_class(ret);
2698 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2699 * insert trigger: look up the btree inode instead
2701 static int bch2_flush_inode(struct bch_fs *c,
2702 struct bch_inode_info *inode)
2704 struct bch_inode_unpacked u;
2707 if (c->opts.journal_flush_disabled)
2710 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
2714 return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
2715 bch2_inode_flush_nocow_writes(c, inode);
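/*
 * fsync: write back and wait on dirty pagecache, sync VFS inode metadata,
 * then flush the journal up to this inode's journal sequence number (and any
 * outstanding nocow writes); all three are attempted and the first error is
 * returned.
 */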
2718 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2720 struct bch_inode_info *inode = file_bch_inode(file);
2721 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2722 int ret, ret2, ret3;
2724 ret = file_write_and_wait_range(file, start, end);
2725 ret2 = sync_inode_metadata(&inode->v, 1);
2726 ret3 = bch2_flush_inode(c, inode);
2728 return bch2_err_class(ret ?: ret2 ?: ret3);
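/*
 * Does any extent in [start, end) in this subvolume contain written data?
 * Used when truncating a partial folio to decide whether it needs to be read
 * in and zeroed.
 */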
2733 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2737 struct btree_trans trans;
2738 struct btree_iter iter;
2742 bch2_trans_init(&trans, c, 0, 0);
2744 bch2_trans_begin(&trans);
2746 ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2750 for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
2751 if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
2756 bch2_trans_iter_exit(&trans, &iter);
2758 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2761 bch2_trans_exit(&trans);
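/*
 * Zero the part of a folio that falls within [start, end), marking those
 * blocks unallocated and adjusting i_sectors; returns > 0 if the folio
 * straddling i_size is dirty and will be written out by writeback (so the
 * caller doesn't need to do the i_size update itself).
 */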
2765 static int __bch2_truncate_folio(struct bch_inode_info *inode,
2766 pgoff_t index, loff_t start, loff_t end)
2768 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2769 struct address_space *mapping = inode->v.i_mapping;
2770 struct bch_folio *s;
2771 unsigned start_offset = start & (PAGE_SIZE - 1);
2772 unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2774 struct folio *folio;
2775 s64 i_sectors_delta = 0;
2779 folio = filemap_lock_folio(mapping, index);
2782 * XXX: we're doing two index lookups when we end up reading the
2785 ret = range_has_data(c, inode->ei_subvol,
2786 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
2787 POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
2791 folio = __filemap_get_folio(mapping, index,
2792 FGP_LOCK|FGP_CREAT, GFP_KERNEL);
2793 if (unlikely(!folio)) {
2799 BUG_ON(start >= folio_end_pos(folio));
2800 BUG_ON(end <= folio_pos(folio));
2802 start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
2803 end_offset = min(end, folio_end_pos(folio)) - folio_pos(folio);
2805 /* Folio boundary? Nothing to do */
2806 if (start_offset == 0 &&
2807 end_offset == folio_size(folio)) {
2812 s = bch2_folio_create(folio, 0);
2818 if (!folio_test_uptodate(folio)) {
2819 ret = bch2_read_single_folio(folio, mapping);
2825 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
2830 for (i = round_up(start_offset, block_bytes(c)) >> 9;
2831 i < round_down(end_offset, block_bytes(c)) >> 9;
2833 s->s[i].nr_replicas = 0;
2835 i_sectors_delta -= s->s[i].state == SECTOR_dirty;
2836 folio_sector_set(folio, s, i, SECTOR_unallocated);
2839 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2842 * Caller needs to know whether this folio will be written out by
2843 * writeback - doing an i_size update if necessary - or whether it will
2844 * be responsible for the i_size update.
2846 * Note that we shouldn't ever see a folio beyond EOF, but check and
2847 * warn if so. This has been observed by failure to clean up folios
2848 * after a short write and there's still a chance reclaim will fix
2851 WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
2852 end_pos = folio_end_pos(folio);
2853 if (inode->v.i_size > folio_pos(folio))
2854 end_pos = min(inode->v.i_size, end_pos);
2855 ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
2857 folio_zero_segment(folio, start_offset, end_offset);
2860 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2862 * XXX: because we aren't currently tracking whether the folio has actual
2863 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2865 BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
2868 * This removes any writeable userspace mappings; we need to force
2869 * .page_mkwrite to be called again before any mmapped writes, to
2870 * redirty the full page:
2872 folio_mkclean(folio);
2873 filemap_dirty_folio(mapping, folio);
2875 folio_unlock(folio);
2881 static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
2883 return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
2884 from, ANYSINT_MAX(loff_t));
2887 static int bch2_truncate_folios(struct bch_inode_info *inode,
2888 loff_t start, loff_t end)
2890 int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
2894 start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2895 ret = __bch2_truncate_folio(inode,
2896 (end - 1) >> PAGE_SHIFT,
2901 static int bch2_extend(struct user_namespace *mnt_userns,
2902 struct bch_inode_info *inode,
2903 struct bch_inode_unpacked *inode_u,
2904 struct iattr *iattr)
2906 struct address_space *mapping = inode->v.i_mapping;
2912 * this has to be done _before_ extending i_size:
2914 ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2918 truncate_setsize(&inode->v, iattr->ia_size);
2920 return bch2_setattr_nonsize(mnt_userns, inode, iattr);
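/*
 * bch2_write_inode() callbacks used by truncate: the start fn sets
 * BCH_INODE_I_SIZE_DIRTY along with the new size, the finish fn clears the
 * flag once the extents past the new EOF have been deleted.
 */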
2923 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2924 struct bch_inode_unpacked *bi,
2927 bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2931 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2932 struct bch_inode_unpacked *bi, void *p)
2934 u64 *new_i_size = p;
2936 bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2937 bi->bi_size = *new_i_size;
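/*
 * Truncate proper: write the new i_size to the btree with I_SIZE_DIRTY set,
 * shrink the pagecache, punch extents past the new EOF, then clear
 * I_SIZE_DIRTY and do the remaining (non-size) attribute updates.
 */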
2941 int bch2_truncate(struct user_namespace *mnt_userns,
2942 struct bch_inode_info *inode, struct iattr *iattr)
2944 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2945 struct address_space *mapping = inode->v.i_mapping;
2946 struct bch_inode_unpacked inode_u;
2947 u64 new_i_size = iattr->ia_size;
2948 s64 i_sectors_delta = 0;
2952 * If the truncate call will change the size of the file, the
2953 * cmtimes should be updated. If the size will not change, we
2954 * do not need to update the cmtimes.
2956 if (iattr->ia_size != inode->v.i_size) {
2957 if (!(iattr->ia_valid & ATTR_MTIME))
2958 ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2959 if (!(iattr->ia_valid & ATTR_CTIME))
2960 ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2961 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2964 inode_dio_wait(&inode->v);
2965 bch2_pagecache_block_get(inode);
2967 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2972 * check this before next assertion; on filesystem error our normal
2973 * invariants are a bit broken (truncate has to truncate the page cache
2974 * before the inode).
2976 ret = bch2_journal_error(&c->journal);
2980 WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2981 inode->v.i_size < inode_u.bi_size,
2982 "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
2983 (u64) inode->v.i_size, inode_u.bi_size);
2985 if (iattr->ia_size > inode->v.i_size) {
2986 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2990 iattr->ia_valid &= ~ATTR_SIZE;
2992 ret = bch2_truncate_folio(inode, iattr->ia_size);
2993 if (unlikely(ret < 0))
2997 * When extending, we're going to write the new i_size to disk
2998 * immediately so we need to flush anything above the current on disk
3001 * Also, when extending we need to flush the page that i_size currently
3002 * straddles - if it's mapped to userspace, we need to ensure that
3003 * userspace has to redirty it and call .mkwrite -> set_page_dirty
3004 * again to allocate the part of the page that was extended.
3006 if (iattr->ia_size > inode_u.bi_size)
3007 ret = filemap_write_and_wait_range(mapping,
3009 iattr->ia_size - 1);
3010 else if (iattr->ia_size & (PAGE_SIZE - 1))
3011 ret = filemap_write_and_wait_range(mapping,
3012 round_down(iattr->ia_size, PAGE_SIZE),
3013 iattr->ia_size - 1);
3017 mutex_lock(&inode->ei_update_lock);
3018 ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
3020 mutex_unlock(&inode->ei_update_lock);
3025 truncate_setsize(&inode->v, iattr->ia_size);
3027 ret = bch2_fpunch(c, inode_inum(inode),
3028 round_up(iattr->ia_size, block_bytes(c)) >> 9,
3029 U64_MAX, &i_sectors_delta);
3030 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3032 bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
3033 !bch2_journal_error(&c->journal), c,
3034 "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
3035 inode->v.i_ino, (u64) inode->v.i_blocks,
3036 inode->ei_inode.bi_sectors);
3040 mutex_lock(&inode->ei_update_lock);
3041 ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
3042 mutex_unlock(&inode->ei_update_lock);
3044 ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
3046 bch2_pagecache_block_put(inode);
3047 return bch2_err_class(ret);
3052 static int inode_update_times_fn(struct bch_inode_info *inode,
3053 struct bch_inode_unpacked *bi, void *p)
3055 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3057 bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
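/*
 * FALLOC_FL_PUNCH_HOLE: zero any partial folios at either end of the range,
 * drop the pagecache over it, then punch out the block aligned middle,
 * adjusting i_sectors and timestamps.
 */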
3061 static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
3063 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3064 u64 end = offset + len;
3065 u64 block_start = round_up(offset, block_bytes(c));
3066 u64 block_end = round_down(end, block_bytes(c));
3067 bool truncated_last_page;
3070 ret = bch2_truncate_folios(inode, offset, end);
3071 if (unlikely(ret < 0))
3074 truncated_last_page = ret;
3076 truncate_pagecache_range(&inode->v, offset, end - 1);
3078 if (block_start < block_end) {
3079 s64 i_sectors_delta = 0;
3081 ret = bch2_fpunch(c, inode_inum(inode),
3082 block_start >> 9, block_end >> 9,
3084 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3087 mutex_lock(&inode->ei_update_lock);
3088 if (end >= inode->v.i_size && !truncated_last_page) {
3089 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
3090 ATTR_MTIME|ATTR_CTIME);
3092 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3093 ATTR_MTIME|ATTR_CTIME);
3095 mutex_unlock(&inode->ei_update_lock);
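/*
 * FALLOC_FL_COLLAPSE_RANGE/FALLOC_FL_INSERT_RANGE: shift every extent at or
 * past the affected range by len (backwards for collapse, forwards for
 * insert), one btree transaction per extent, and update i_size to match.
 */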
3100 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
3101 loff_t offset, loff_t len,
3104 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3105 struct address_space *mapping = inode->v.i_mapping;
3106 struct bkey_buf copy;
3107 struct btree_trans trans;
3108 struct btree_iter src, dst, del;
3109 loff_t shift, new_size;
3113 if ((offset | len) & (block_bytes(c) - 1))
3117 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
3120 if (offset >= inode->v.i_size)
3123 src_start = U64_MAX;
3126 if (offset + len >= inode->v.i_size)
3129 src_start = offset + len;
3133 new_size = inode->v.i_size + shift;
3135 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
3140 i_size_write(&inode->v, new_size);
3141 mutex_lock(&inode->ei_update_lock);
3142 ret = bch2_write_inode_size(c, inode, new_size,
3143 ATTR_MTIME|ATTR_CTIME);
3144 mutex_unlock(&inode->ei_update_lock);
3146 s64 i_sectors_delta = 0;
3148 ret = bch2_fpunch(c, inode_inum(inode),
3149 offset >> 9, (offset + len) >> 9,
3151 i_sectors_acct(c, inode, NULL, i_sectors_delta);
3157 bch2_bkey_buf_init(&copy);
3158 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
3159 bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
3160 POS(inode->v.i_ino, src_start >> 9),
3162 bch2_trans_copy_iter(&dst, &src);
3163 bch2_trans_copy_iter(&del, &src);
3166 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
3167 struct disk_reservation disk_res =
3168 bch2_disk_reservation_init(c, 0);
3169 struct bkey_i delete;
3171 struct bpos next_pos;
3172 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
3173 struct bpos atomic_end;
3174 unsigned trigger_flags = 0;
3177 bch2_trans_begin(&trans);
3179 ret = bch2_subvolume_get_snapshot(&trans,
3180 inode->ei_subvol, &snapshot);
3184 bch2_btree_iter_set_snapshot(&src, snapshot);
3185 bch2_btree_iter_set_snapshot(&dst, snapshot);
3186 bch2_btree_iter_set_snapshot(&del, snapshot);
3188 bch2_trans_begin(&trans);
3191 ? bch2_btree_iter_peek_prev(&src)
3192 : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
3193 if ((ret = bkey_err(k)))
3196 if (!k.k || k.k->p.inode != inode->v.i_ino)
3200 bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
3203 bch2_bkey_buf_reassemble(&copy, c, k);
3206 bkey_lt(bkey_start_pos(k.k), move_pos))
3207 bch2_cut_front(move_pos, copy.k);
3209 copy.k->k.p.offset += shift >> 9;
3210 bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
3212 ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
3216 if (!bkey_eq(atomic_end, copy.k->k.p)) {
3218 move_pos = atomic_end;
3219 move_pos.offset -= shift >> 9;
3222 bch2_cut_back(atomic_end, copy.k);
3226 bkey_init(&delete.k);
3227 delete.k.p = copy.k->k.p;
3228 delete.k.size = copy.k->k.size;
3229 delete.k.p.offset -= shift >> 9;
3230 bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
3232 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
3234 if (copy.k->k.size != k.k->size) {
3235 /* We might end up splitting compressed extents: */
3237 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
3239 ret = bch2_disk_reservation_get(c, &disk_res,
3240 copy.k->k.size, nr_ptrs,
3241 BCH_DISK_RESERVATION_NOFAIL);
3245 ret = bch2_btree_iter_traverse(&del) ?:
3246 bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
3247 bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
3248 bch2_trans_commit(&trans, &disk_res, NULL,
3249 BTREE_INSERT_NOFAIL);
3250 bch2_disk_reservation_put(c, &disk_res);
3253 bch2_btree_iter_set_pos(&src, next_pos);
3255 bch2_trans_iter_exit(&trans, &del);
3256 bch2_trans_iter_exit(&trans, &dst);
3257 bch2_trans_iter_exit(&trans, &src);
3258 bch2_trans_exit(&trans);
3259 bch2_bkey_buf_exit(&copy, c);
3264 mutex_lock(&inode->ei_update_lock);
3266 i_size_write(&inode->v, new_size);
3267 ret = bch2_write_inode_size(c, inode, new_size,
3268 ATTR_MTIME|ATTR_CTIME);
3270 /* We need an inode update to update bi_journal_seq for fsync: */
3271 ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
3272 ATTR_MTIME|ATTR_CTIME);
3274 mutex_unlock(&inode->ei_update_lock);
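/*
 * Core fallocate loop: walk extent slots over the target range, skip blocks
 * that already have data or a sufficient reservation, and allocate or
 * reserve the rest with bch2_extent_fallocate(), updating quota and
 * i_sectors as we go.
 */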
3278 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
3279 u64 start_sector, u64 end_sector)
3281 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3282 struct btree_trans trans;
3283 struct btree_iter iter;
3284 struct bpos end_pos = POS(inode->v.i_ino, end_sector);
3285 struct bch_io_opts opts;
3288 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
3289 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
3291 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3292 POS(inode->v.i_ino, start_sector),
3293 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
3295 while (!ret && bkey_lt(iter.pos, end_pos)) {
3296 s64 i_sectors_delta = 0;
3297 struct quota_res quota_res = { 0 };
3302 bch2_trans_begin(&trans);
3304 ret = bch2_subvolume_get_snapshot(&trans,
3305 inode->ei_subvol, &snapshot);
3309 bch2_btree_iter_set_snapshot(&iter, snapshot);
3311 k = bch2_btree_iter_peek_slot(&iter);
3312 if ((ret = bkey_err(k)))
3315 /* already reserved */
3316 if (bkey_extent_is_reservation(k) &&
3317 bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
3318 bch2_btree_iter_advance(&iter);
3322 if (bkey_extent_is_data(k.k) &&
3323 !(mode & FALLOC_FL_ZERO_RANGE)) {
3324 bch2_btree_iter_advance(&iter);
3329 * XXX: for nocow mode, we should promote shared extents to
3333 sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
3335 if (!bkey_extent_is_allocation(k.k)) {
3336 ret = bch2_quota_reservation_add(c, inode,
3343 ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
3344 sectors, opts, &i_sectors_delta,
3345 writepoint_hashed((unsigned long) current));
3349 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3351 bch2_quota_reservation_put(c, inode, &quota_res);
3352 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3356 bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
3357 mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
3359 if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
3360 struct quota_res quota_res = { 0 };
3361 s64 i_sectors_delta = 0;
3363 bch2_fpunch_at(&trans, &iter, inode_inum(inode),
3364 end_sector, &i_sectors_delta);
3365 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
3366 bch2_quota_reservation_put(c, inode, &quota_res);
3369 bch2_trans_iter_exit(&trans, &iter);
3370 bch2_trans_exit(&trans);
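/*
 * Plain fallocate and FALLOC_FL_ZERO_RANGE: for zero range, first zero any
 * partial folios and drop the pagecache over the range; then allocate or
 * reserve the block aligned region, and extend i_size unless KEEP_SIZE was
 * given.
 */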
3374 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
3375 loff_t offset, loff_t len)
3377 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3378 u64 end = offset + len;
3379 u64 block_start = round_down(offset, block_bytes(c));
3380 u64 block_end = round_up(end, block_bytes(c));
3381 bool truncated_last_page = false;
3384 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
3385 ret = inode_newsize_ok(&inode->v, end);
3390 if (mode & FALLOC_FL_ZERO_RANGE) {
3391 ret = bch2_truncate_folios(inode, offset, end);
3392 if (unlikely(ret < 0))
3395 truncated_last_page = ret;
3397 truncate_pagecache_range(&inode->v, offset, end - 1);
3399 block_start = round_up(offset, block_bytes(c));
3400 block_end = round_down(end, block_bytes(c));
3403 ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
3406 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
3407 * so that the VFS cache i_size is consistent with the btree i_size:
3410 !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
3413 if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
3414 end = inode->v.i_size;
3416 if (end >= inode->v.i_size &&
3417 (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
3418 !(mode & FALLOC_FL_KEEP_SIZE))) {
3419 spin_lock(&inode->v.i_lock);
3420 i_size_write(&inode->v, end);
3421 spin_unlock(&inode->v.i_lock);
3423 mutex_lock(&inode->ei_update_lock);
3424 ret2 = bch2_write_inode_size(c, inode, end, 0);
3425 mutex_unlock(&inode->ei_update_lock);
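/*
 * fallocate() entry point: take the fallocate write ref plus the inode and
 * pagecache block locks, then dispatch on mode to the helpers above.
 */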
3431 long bch2_fallocate_dispatch(struct file *file, int mode,
3432 loff_t offset, loff_t len)
3434 struct bch_inode_info *inode = file_bch_inode(file);
3435 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3438 if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
3441 inode_lock(&inode->v);
3442 inode_dio_wait(&inode->v);
3443 bch2_pagecache_block_get(inode);
3445 ret = file_modified(file);
3449 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
3450 ret = bchfs_fallocate(inode, mode, offset, len);
3451 else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
3452 ret = bchfs_fpunch(inode, offset, len);
3453 else if (mode == FALLOC_FL_INSERT_RANGE)
3454 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
3455 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3456 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
3460 bch2_pagecache_block_put(inode);
3461 inode_unlock(&inode->v);
3462 bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
3464 return bch2_err_class(ret);
3468 * Take a quota reservation for unallocated blocks in a given file range
3469 * Does not check pagecache
3471 static int quota_reserve_range(struct bch_inode_info *inode,
3472 struct quota_res *res,
3475 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3476 struct btree_trans trans;
3477 struct btree_iter iter;
3480 u64 sectors = end - start;
3484 bch2_trans_init(&trans, c, 0, 0);
3486 bch2_trans_begin(&trans);
3488 ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
3492 bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
3493 SPOS(inode->v.i_ino, pos, snapshot), 0);
3495 while (!(ret = btree_trans_too_many_iters(&trans)) &&
3496 (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
3497 !(ret = bkey_err(k))) {
3498 if (bkey_extent_is_allocation(k.k)) {
3499 u64 s = min(end, k.k->p.offset) -
3500 max(start, bkey_start_offset(k.k));
3501 BUG_ON(s > sectors);
3504 bch2_btree_iter_advance(&iter);
3506 pos = iter.pos.offset;
3507 bch2_trans_iter_exit(&trans, &iter);
3509 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3512 bch2_trans_exit(&trans);
3517 return bch2_quota_reservation_add(c, inode, res, sectors, true);
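/*
 * remap_file_range (reflink): with both inodes locked, write out and
 * invalidate dirty pagecache over the destination, reserve quota for
 * destination sectors that aren't yet allocated, then remap the extents with
 * bch2_remap_range().
 */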
3520 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
3521 struct file *file_dst, loff_t pos_dst,
3522 loff_t len, unsigned remap_flags)
3524 struct bch_inode_info *src = file_bch_inode(file_src);
3525 struct bch_inode_info *dst = file_bch_inode(file_dst);
3526 struct bch_fs *c = src->v.i_sb->s_fs_info;
3527 struct quota_res quota_res = { 0 };
3528 s64 i_sectors_delta = 0;
3532 if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
3535 if (remap_flags & REMAP_FILE_DEDUP)
3538 if ((pos_src & (block_bytes(c) - 1)) ||
3539 (pos_dst & (block_bytes(c) - 1)))
3543 abs(pos_src - pos_dst) < len)
3546 bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3548 inode_dio_wait(&src->v);
3549 inode_dio_wait(&dst->v);
3551 ret = generic_remap_file_range_prep(file_src, pos_src,
3554 if (ret < 0 || len == 0)
3557 aligned_len = round_up((u64) len, block_bytes(c));
3559 ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
3560 pos_dst, pos_dst + len - 1);
3564 ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
3565 (pos_dst + aligned_len) >> 9);
3569 file_update_time(file_dst);
3571 mark_pagecache_unallocated(src, pos_src >> 9,
3572 (pos_src + aligned_len) >> 9);
3574 ret = bch2_remap_range(c,
3575 inode_inum(dst), pos_dst >> 9,
3576 inode_inum(src), pos_src >> 9,
3578 pos_dst + len, &i_sectors_delta);
3583 * due to alignment, we might have remapped slightly more than requested
3585 ret = min((u64) ret << 9, (u64) len);
3587 i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
3589 spin_lock(&dst->v.i_lock);
3590 if (pos_dst + ret > dst->v.i_size)
3591 i_size_write(&dst->v, pos_dst + ret);
3592 spin_unlock(&dst->v.i_lock);
3594 if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
3595 IS_SYNC(file_inode(file_dst)))
3596 ret = bch2_flush_inode(c, dst);
3598 bch2_quota_reservation_put(c, dst, &quota_res);
3599 bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
3601 return bch2_err_class(ret);
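/*
 * Return the byte offset within the folio of the first sector at or after
 * @offset that holds data (dirty or better) in the pagecache, or a negative
 * value if there is none.
 */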
3606 static int folio_data_offset(struct folio *folio, unsigned offset)
3608 struct bch_folio *s = bch2_folio(folio);
3609 unsigned i, sectors = folio_sectors(folio);
3612 for (i = offset >> 9; i < sectors; i++)
3613 if (s->s[i].state >= SECTOR_dirty)
3619 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
3620 loff_t start_offset,
3623 struct folio_batch fbatch;
3624 pgoff_t start_index = start_offset >> PAGE_SHIFT;
3625 pgoff_t end_index = end_offset >> PAGE_SHIFT;
3626 pgoff_t index = start_index;
3631 folio_batch_init(&fbatch);
3633 while (filemap_get_folios(vinode->i_mapping,
3634 &index, end_index, &fbatch)) {
3635 for (i = 0; i < folio_batch_count(&fbatch); i++) {
3636 struct folio *folio = fbatch.folios[i];
3639 offset = folio_data_offset(folio,
3640 max(folio_pos(folio), start_offset) -
3643 ret = clamp(folio_pos(folio) + offset,
3644 start_offset, end_offset);
3645 folio_unlock(folio);
3646 folio_batch_release(&fbatch);
3649 folio_unlock(folio);
3651 folio_batch_release(&fbatch);
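/*
 * SEEK_DATA: find the first extent at or after @offset in the btree, then
 * also check the pagecache, since dirty data may not have been written to
 * the btree yet.
 */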
3658 static loff_t bch2_seek_data(struct file *file, u64 offset)
3660 struct bch_inode_info *inode = file_bch_inode(file);
3661 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3662 struct btree_trans trans;
3663 struct btree_iter iter;
3665 subvol_inum inum = inode_inum(inode);
3666 u64 isize, next_data = MAX_LFS_FILESIZE;
3670 isize = i_size_read(&inode->v);
3671 if (offset >= isize)
3674 bch2_trans_init(&trans, c, 0, 0);
3676 bch2_trans_begin(&trans);
3678 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3682 for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
3683 SPOS(inode->v.i_ino, offset >> 9, snapshot),
3684 POS(inode->v.i_ino, U64_MAX),
3686 if (bkey_extent_is_data(k.k)) {
3687 next_data = max(offset, bkey_start_offset(k.k) << 9);
3689 } else if (k.k->p.offset >> 9 > isize)
3692 bch2_trans_iter_exit(&trans, &iter);
3694 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3697 bch2_trans_exit(&trans);
3701 if (next_data > offset)
3702 next_data = bch2_seek_pagecache_data(&inode->v,
3705 if (next_data >= isize)
3708 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
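/*
 * If there's a hole (a sector without pagecache data) at or after *offset
 * within this folio, point *offset at it and return true; otherwise advance
 * *offset past the folio and return false.
 */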
3711 static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
3713 struct folio *folio;
3714 struct bch_folio *s;
3715 unsigned i, sectors, f_offset;
3718 folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
3722 s = bch2_folio(folio);
3726 sectors = folio_sectors(folio);
3727 f_offset = *offset - folio_pos(folio);
3729 for (i = f_offset >> 9; i < sectors; i++)
3730 if (s->s[i].state < SECTOR_dirty) {
3731 *offset = max(*offset, folio_pos(folio) + (i << 9));
3735 *offset = folio_end_pos(folio);
3738 folio_unlock(folio);
3742 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3743 loff_t start_offset,
3746 struct address_space *mapping = vinode->i_mapping;
3747 loff_t offset = start_offset;
3749 while (offset < end_offset &&
3750 !folio_hole_offset(mapping, &offset))
3753 return min(offset, end_offset);
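/*
 * SEEK_HOLE: walk extent slots from @offset; a gap in the btree only counts
 * as a hole if the pagecache doesn't have dirty data over it.
 */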
3756 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3758 struct bch_inode_info *inode = file_bch_inode(file);
3759 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3760 struct btree_trans trans;
3761 struct btree_iter iter;
3763 subvol_inum inum = inode_inum(inode);
3764 u64 isize, next_hole = MAX_LFS_FILESIZE;
3768 isize = i_size_read(&inode->v);
3769 if (offset >= isize)
3772 bch2_trans_init(&trans, c, 0, 0);
3774 bch2_trans_begin(&trans);
3776 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
3780 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
3781 SPOS(inode->v.i_ino, offset >> 9, snapshot),
3782 BTREE_ITER_SLOTS, k, ret) {
3783 if (k.k->p.inode != inode->v.i_ino) {
3784 next_hole = bch2_seek_pagecache_hole(&inode->v,
3785 offset, MAX_LFS_FILESIZE);
3787 } else if (!bkey_extent_is_data(k.k)) {
3788 next_hole = bch2_seek_pagecache_hole(&inode->v,
3789 max(offset, bkey_start_offset(k.k) << 9),
3790 k.k->p.offset << 9);
3792 if (next_hole < k.k->p.offset << 9)
3795 offset = max(offset, bkey_start_offset(k.k) << 9);
3798 bch2_trans_iter_exit(&trans, &iter);
3800 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
3803 bch2_trans_exit(&trans);
3807 if (next_hole > isize)
3810 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
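/*
 * llseek: SEEK_SET/CUR/END go through generic_file_llseek(); SEEK_DATA and
 * SEEK_HOLE use the helpers above.
 */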
3813 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3821 ret = generic_file_llseek(file, offset, whence);
3824 ret = bch2_seek_data(file, offset);
3827 ret = bch2_seek_hole(file, offset);
3834 return bch2_err_class(ret);
3837 void bch2_fs_fsio_exit(struct bch_fs *c)
3839 bioset_exit(&c->nocow_flush_bioset);
3840 bioset_exit(&c->dio_write_bioset);
3841 bioset_exit(&c->dio_read_bioset);
3842 bioset_exit(&c->writepage_bioset);
3845 int bch2_fs_fsio_init(struct bch_fs *c)
3849 pr_verbose_init(c->opts, "");
3851 if (bioset_init(&c->writepage_bioset,
3852 4, offsetof(struct bch_writepage_io, op.wbio.bio),
3854 return -BCH_ERR_ENOMEM_writepage_bioset_init;
3856 if (bioset_init(&c->dio_read_bioset,
3857 4, offsetof(struct dio_read, rbio.bio),
3859 return -BCH_ERR_ENOMEM_dio_read_bioset_init;
3861 if (bioset_init(&c->dio_write_bioset,
3862 4, offsetof(struct dio_write, op.wbio.bio),
3864 return -BCH_ERR_ENOMEM_dio_write_bioset_init;
3866 if (bioset_init(&c->nocow_flush_bioset,
3867 1, offsetof(struct nocow_flush, bio), 0))
3868 return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
3870 pr_verbose_init(c->opts, "ret %i", ret);
3874 #endif /* NO_BCACHEFS_FS */