1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_foreground.h"
7 #include "btree_update.h"
12 #include "extent_update.h"
24 #include <linux/aio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/falloc.h>
27 #include <linux/migrate.h>
28 #include <linux/mmu_context.h>
29 #include <linux/pagevec.h>
30 #include <linux/rmap.h>
31 #include <linux/sched/signal.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/uio.h>
34 #include <linux/writeback.h>
36 #include <trace/events/writeback.h>
38 static void bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned);
41 struct folio *fv_folio;
46 static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
49 struct folio *folio = page_folio(bv.bv_page);
50 size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
52 size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
54 return (struct folio_vec) {
61 static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
62 struct bvec_iter iter)
64 return biovec_to_foliovec(bio_iter_iovec(bio, iter));
67 #define __bio_for_each_folio(bvl, bio, iter, start) \
68 for (iter = (start); \
70 ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
71 bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
74 * bio_for_each_folio - iterate over folios within a bio
76 * Like other non-_all versions, this iterates over what bio->bi_iter currently
77 * points to. This version is for drivers, where the bio may have previously
78 * been split or cloned.
80 #define bio_for_each_folio(bvl, bio, iter) \
81 __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
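/*
 * Illustrative usage sketch (not called anywhere in this file as-is):
 * walking what the bio currently points at, one folio at a time, e.g. to
 * total up the bytes covered:
 *
 *	struct folio_vec fv;
 *	struct bvec_iter iter;
 *	size_t bytes = 0;
 *
 *	bio_for_each_folio(fv, bio, iter)
 *		bytes += fv.fv_len;
 *
 * The iterator advances by fv.fv_len each step, so a bvec spanning a folio
 * boundary is delivered as per-folio pieces.
 */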
84 * Use u64 for the end pos and sector helpers because if the folio covers the
85 * max supported range of the mapping, the start offset of the next folio
86 * overflows loff_t. This breaks much of the range based processing in the
87 * buffered write path.
89 static inline u64 folio_end_pos(struct folio *folio)
91 return folio_pos(folio) + folio_size(folio);
94 static inline size_t folio_sectors(struct folio *folio)
96 return PAGE_SECTORS << folio_order(folio);
99 static inline loff_t folio_sector(struct folio *folio)
101 return folio_pos(folio) >> 9;
104 static inline u64 folio_end_sector(struct folio *folio)
106 return folio_end_pos(folio) >> 9;
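/*
 * Concretely: with a 64-bit loff_t, a folio that ends exactly at the top of
 * the mapping's supported range can have folio_pos() + folio_size() one past
 * the largest loff_t - the sum wraps negative as loff_t but stays exact as
 * u64, and folio_end_sector() remains correct after the >> 9.
 */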
109 typedef DARRAY(struct folio *) folios;
111 static int filemap_get_contig_folios_d(struct address_space *mapping,
112 loff_t start, u64 end,
113 int fgp_flags, gfp_t gfp,
121 if ((u64) pos >= (u64) start + (1ULL << 20))
122 fgp_flags &= ~FGP_CREAT;
124 ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
128 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
129 if (IS_ERR_OR_NULL(f))
132 BUG_ON(folios->nr && folio_pos(f) != pos);
134 pos = folio_end_pos(f);
135 darray_push(folios, f);
138 if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
141 return folios->nr ? 0 : ret;
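/*
 * Typical call, as in __bch2_buffered_write() below (a sketch; the trailing
 * argument is the darray the pinned folios are returned in):
 *
 *	folios fs;
 *
 *	darray_init(&fs);
 *	ret = filemap_get_contig_folios_d(mapping, pos, end,
 *				FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
 *				mapping_gfp_mask(mapping), &fs);
 *
 * A zero return means at least one folio was obtained; the caller is
 * responsible for unlocking and putting whatever ended up in the darray.
 */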
150 static void nocow_flush_endio(struct bio *_bio)
153 struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
155 closure_put(bio->cl);
156 percpu_ref_put(&bio->ca->io_ref);
160 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
161 struct bch_inode_info *inode,
164 struct nocow_flush *bio;
166 struct bch_devs_mask devs;
169 dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
170 if (dev == BCH_SB_MEMBERS_MAX)
173 devs = inode->ei_devs_need_flush;
174 memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
176 for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
178 ca = rcu_dereference(c->devs[dev]);
179 if (ca && !percpu_ref_tryget(&ca->io_ref))
186 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
189 &c->nocow_flush_bioset),
190 struct nocow_flush, bio);
193 bio->bio.bi_end_io = nocow_flush_endio;
194 closure_bio_submit(&bio->bio, cl);
198 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
199 struct bch_inode_info *inode)
203 closure_init_stack(&cl);
204 bch2_inode_flush_nocow_writes_async(c, inode, &cl);
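/*
 * The synchronous pattern above, in sketch form: park on a stack closure
 * until every flush bio completes:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_inode_flush_nocow_writes_async(c, inode, &cl);
 *	closure_sync(&cl);
 *
 * Each flush bio submitted via closure_bio_submit() holds a ref on the
 * closure; nocow_flush_endio() drops it, so closure_sync() returns once all
 * the device flushes have finished.
 */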
210 static inline bool bio_full(struct bio *bio, unsigned len)
212 if (bio->bi_vcnt >= bio->bi_max_vecs)
214 if (bio->bi_iter.bi_size > UINT_MAX - len)
219 static inline struct address_space *faults_disabled_mapping(void)
221 return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
224 static inline void set_fdm_dropped_locks(void)
226 current->faults_disabled_mapping =
227 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
230 static inline bool fdm_dropped_locks(void)
232 return ((unsigned long) current->faults_disabled_mapping) & 1;
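/*
 * Tagged-pointer sketch: the low bit of current->faults_disabled_mapping
 * carries the "locks were dropped" flag, so the dio write path below does
 * roughly:
 *
 *	current->faults_disabled_mapping = mapping;
 *	ret = bio_iov_iter_get_pages(bio, iter);	// may fault
 *	dropped = fdm_dropped_locks();			// set by the fault path
 *	current->faults_disabled_mapping = NULL;
 *
 * with bch2_page_fault() calling set_fdm_dropped_locks() when it has to drop
 * and retake the pagecache locks to avoid deadlocking against the dio write.
 */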
239 struct bch_writepage_io {
240 struct bch_inode_info *inode;
243 struct bch_write_op op;
248 struct address_space *mapping;
249 struct bch_inode_info *inode;
250 struct mm_struct *mm;
256 struct quota_res quota_res;
259 struct iov_iter iter;
260 struct iovec inline_vecs[2];
263 struct bch_write_op op;
271 struct bch_read_bio rbio;
274 /* pagecache_block must be held */
275 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
276 loff_t start, loff_t end)
281 * XXX: the way this is currently implemented, we can spin if a process
282 * is continually redirtying a specific page
285 if (!mapping->nrpages)
288 ret = filemap_write_and_wait_range(mapping, start, end);
292 if (!mapping->nrpages)
295 ret = invalidate_inode_pages2_range(mapping,
298 } while (ret == -EBUSY);
305 #ifdef CONFIG_BCACHEFS_QUOTA
307 static void __bch2_quota_reservation_put(struct bch_fs *c,
308 struct bch_inode_info *inode,
309 struct quota_res *res)
311 BUG_ON(res->sectors > inode->ei_quota_reserved);
313 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
314 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
315 inode->ei_quota_reserved -= res->sectors;
319 static void bch2_quota_reservation_put(struct bch_fs *c,
320 struct bch_inode_info *inode,
321 struct quota_res *res)
324 mutex_lock(&inode->ei_quota_lock);
325 __bch2_quota_reservation_put(c, inode, res);
326 mutex_unlock(&inode->ei_quota_lock);
330 static int bch2_quota_reservation_add(struct bch_fs *c,
331 struct bch_inode_info *inode,
332 struct quota_res *res,
338 if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
341 mutex_lock(&inode->ei_quota_lock);
342 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
343 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
345 inode->ei_quota_reserved += sectors;
346 res->sectors += sectors;
348 mutex_unlock(&inode->ei_quota_lock);
355 static void __bch2_quota_reservation_put(struct bch_fs *c,
356 struct bch_inode_info *inode,
357 struct quota_res *res) {}
359 static void bch2_quota_reservation_put(struct bch_fs *c,
360 struct bch_inode_info *inode,
361 struct quota_res *res) {}
363 static int bch2_quota_reservation_add(struct bch_fs *c,
364 struct bch_inode_info *inode,
365 struct quota_res *res,
374 /* i_size updates: */
376 struct inode_new_size {
382 static int inode_set_size(struct bch_inode_info *inode,
383 struct bch_inode_unpacked *bi,
386 struct inode_new_size *s = p;
388 bi->bi_size = s->new_size;
389 if (s->fields & ATTR_ATIME)
390 bi->bi_atime = s->now;
391 if (s->fields & ATTR_MTIME)
392 bi->bi_mtime = s->now;
393 if (s->fields & ATTR_CTIME)
394 bi->bi_ctime = s->now;
399 int __must_check bch2_write_inode_size(struct bch_fs *c,
400 struct bch_inode_info *inode,
401 loff_t new_size, unsigned fields)
403 struct inode_new_size s = {
404 .new_size = new_size,
405 .now = bch2_current_time(c),
409 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
412 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
413 struct quota_res *quota_res, s64 sectors)
415 bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
416 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
417 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
418 inode->ei_inode.bi_sectors);
419 inode->v.i_blocks += sectors;
421 #ifdef CONFIG_BCACHEFS_QUOTA
423 !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
425 BUG_ON(sectors > quota_res->sectors);
426 BUG_ON(sectors > inode->ei_quota_reserved);
428 quota_res->sectors -= sectors;
429 inode->ei_quota_reserved -= sectors;
431 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
436 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
437 struct quota_res *quota_res, s64 sectors)
440 mutex_lock(&inode->ei_quota_lock);
441 __i_sectors_acct(c, inode, quota_res, sectors);
442 mutex_unlock(&inode->ei_quota_lock);
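/*
 * Callers pass a quota_res when the sectors being added were reserved up
 * front (the buffered write path), or NULL when the change is accounted
 * directly - e.g. mark_pagecache_reserved() below does:
 *
 *	i_sectors_acct(c, inode, NULL, i_sectors_delta);
 */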
448 /* stored in folio->private: */
450 #define BCH_FOLIO_SECTOR_STATE() \
457 enum bch_folio_sector_state {
458 #define x(n) SECTOR_##n,
459 BCH_FOLIO_SECTOR_STATE()
463 static const char * const bch2_folio_sector_states[] = {
465 BCH_FOLIO_SECTOR_STATE()
470 static inline enum bch_folio_sector_state
471 folio_sector_dirty(enum bch_folio_sector_state state)
474 case SECTOR_unallocated:
476 case SECTOR_reserved:
477 return SECTOR_dirty_reserved;
483 static inline enum bch_folio_sector_state
484 folio_sector_undirty(enum bch_folio_sector_state state)
488 return SECTOR_unallocated;
489 case SECTOR_dirty_reserved:
490 return SECTOR_reserved;
496 static inline enum bch_folio_sector_state
497 folio_sector_reserve(enum bch_folio_sector_state state)
500 case SECTOR_unallocated:
501 return SECTOR_reserved;
503 return SECTOR_dirty_reserved;
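/*
 * Taken together, the helpers above implement this per-sector state machine
 * (a sketch; the full state list lives in BCH_FOLIO_SECTOR_STATE()):
 *
 *	unallocated --dirty-->   dirty          --undirty--> unallocated
 *	reserved    --dirty-->   dirty_reserved --undirty--> reserved
 *	unallocated --reserve--> reserved
 *	dirty       --reserve--> dirty_reserved
 *
 * with any state not named in a given switch presumably passing through
 * unchanged.
 */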
509 struct bch_folio_sector {
510 /* Uncompressed, fully allocated replicas (or on disk reservation): */
511 unsigned nr_replicas:4;
513 /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
514 unsigned replicas_reserved:4;
517 enum bch_folio_sector_state state:8;
522 atomic_t write_count;
524 * Is the sector state up to date with the btree?
525 * (Not the data itself)
528 struct bch_folio_sector s[];
531 static inline void folio_sector_set(struct folio *folio,
533 unsigned i, unsigned n)
538 /* file offset (relative to the start of the folio) to bch_folio_sector index */
539 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
541 u64 f_offset = pos - folio_pos(folio);
542 BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
543 return f_offset >> SECTOR_SHIFT;
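/*
 * Worked example: for a 16K folio at file position 16384, a write at pos
 * 17408 gives f_offset = 1024, i.e. bch_folio sector index 2 (1024 >> 9).
 */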
546 static inline struct bch_folio *__bch2_folio(struct folio *folio)
548 return folio_has_private(folio)
549 ? (struct bch_folio *) folio_get_private(folio)
553 static inline struct bch_folio *bch2_folio(struct folio *folio)
555 EBUG_ON(!folio_test_locked(folio));
557 return __bch2_folio(folio);
560 /* for newly allocated folios, not yet visible to other threads: */
561 static void __bch2_folio_release(struct folio *folio)
563 kfree(folio_detach_private(folio));
566 static void bch2_folio_release(struct folio *folio)
568 EBUG_ON(!folio_test_locked(folio));
569 __bch2_folio_release(folio);
572 /* for newly allocated folios: */
573 static struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
577 s = kzalloc(sizeof(*s) +
578 sizeof(struct bch_folio_sector) *
579 folio_sectors(folio), gfp);
583 spin_lock_init(&s->lock);
584 folio_attach_private(folio, s);
588 static struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
590 return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
593 static unsigned bkey_to_sector_state(struct bkey_s_c k)
595 if (bkey_extent_is_reservation(k))
596 return SECTOR_reserved;
597 if (bkey_extent_is_allocation(k.k))
598 return SECTOR_allocated;
599 return SECTOR_unallocated;
602 static void __bch2_folio_set(struct folio *folio,
603 unsigned pg_offset, unsigned pg_len,
604 unsigned nr_ptrs, unsigned state)
606 struct bch_folio *s = bch2_folio(folio);
607 unsigned i, sectors = folio_sectors(folio);
609 BUG_ON(pg_offset >= sectors);
610 BUG_ON(pg_offset + pg_len > sectors);
614 for (i = pg_offset; i < pg_offset + pg_len; i++) {
615 s->s[i].nr_replicas = nr_ptrs;
616 folio_sector_set(folio, s, i, state);
622 spin_unlock(&s->lock);
626 * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
629 static int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
630 struct folio **folios, unsigned nr_folios)
632 struct btree_trans trans;
633 struct btree_iter iter;
636 u64 offset = folio_sector(folios[0]);
639 bool need_set = false;
642 for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
643 s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
647 need_set |= !s->uptodate;
654 bch2_trans_init(&trans, c, 0, 0);
656 bch2_trans_begin(&trans);
658 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
662 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
663 SPOS(inum.inum, offset, snapshot),
664 BTREE_ITER_SLOTS, k, ret) {
665 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
666 unsigned state = bkey_to_sector_state(k);
668 while (folio_idx < nr_folios) {
669 struct folio *folio = folios[folio_idx];
670 u64 folio_start = folio_sector(folio);
671 u64 folio_end = folio_end_sector(folio);
672 unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
673 unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
675 BUG_ON(k.k->p.offset < folio_start);
676 BUG_ON(bkey_start_offset(k.k) > folio_end);
678 if (!bch2_folio(folio)->uptodate)
679 __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
681 if (k.k->p.offset < folio_end)
686 if (folio_idx == nr_folios)
690 offset = iter.pos.offset;
691 bch2_trans_iter_exit(&trans, &iter);
693 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
695 bch2_trans_exit(&trans);
700 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
702 struct bvec_iter iter;
704 unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
705 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
706 unsigned state = bkey_to_sector_state(k);
708 bio_for_each_folio(fv, bio, iter)
709 __bch2_folio_set(fv.fv_folio,
715 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
718 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
719 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
720 struct folio_batch fbatch;
726 folio_batch_init(&fbatch);
728 while (filemap_get_folios(inode->v.i_mapping,
729 &index, end_index, &fbatch)) {
730 for (i = 0; i < folio_batch_count(&fbatch); i++) {
731 struct folio *folio = fbatch.folios[i];
732 u64 folio_start = folio_sector(folio);
733 u64 folio_end = folio_end_sector(folio);
734 unsigned folio_offset = max(start, folio_start) - folio_start;
735 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
738 BUG_ON(end <= folio_start);
741 s = bch2_folio(folio);
745 for (j = folio_offset; j < folio_offset + folio_len; j++)
746 s->s[j].nr_replicas = 0;
747 spin_unlock(&s->lock);
752 folio_batch_release(&fbatch);
757 static void mark_pagecache_reserved(struct bch_inode_info *inode,
760 struct bch_fs *c = inode->v.i_sb->s_fs_info;
761 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
762 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
763 struct folio_batch fbatch;
764 s64 i_sectors_delta = 0;
770 folio_batch_init(&fbatch);
772 while (filemap_get_folios(inode->v.i_mapping,
773 &index, end_index, &fbatch)) {
774 for (i = 0; i < folio_batch_count(&fbatch); i++) {
775 struct folio *folio = fbatch.folios[i];
776 u64 folio_start = folio_sector(folio);
777 u64 folio_end = folio_end_sector(folio);
778 unsigned folio_offset = max(start, folio_start) - folio_start;
779 unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
782 BUG_ON(end <= folio_start);
785 s = bch2_folio(folio);
789 for (j = folio_offset; j < folio_offset + folio_len; j++) {
790 i_sectors_delta -= s->s[j].state == SECTOR_dirty;
791 folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
793 spin_unlock(&s->lock);
798 folio_batch_release(&fbatch);
802 i_sectors_acct(c, inode, NULL, i_sectors_delta);
805 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
807 /* XXX: this should not be open coded */
808 return inode->ei_inode.bi_data_replicas
809 ? inode->ei_inode.bi_data_replicas - 1
810 : c->opts.data_replicas;
813 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
814 unsigned nr_replicas)
816 return max(0, (int) nr_replicas -
818 s->replicas_reserved);
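/*
 * E.g. with nr_replicas = 2 and a sector whose existing fully-allocated
 * replicas plus reservation add up to 1, this returns 1: one more sector's
 * worth of disk reservation is needed. Once the sum reaches the target it
 * returns 0.
 */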
821 static int bch2_get_folio_disk_reservation(struct bch_fs *c,
822 struct bch_inode_info *inode,
823 struct folio *folio, bool check_enospc)
825 struct bch_folio *s = bch2_folio_create(folio, 0);
826 unsigned nr_replicas = inode_nr_replicas(c, inode);
827 struct disk_reservation disk_res = { 0 };
828 unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
834 for (i = 0; i < sectors; i++)
835 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
837 if (!disk_res_sectors)
840 ret = bch2_disk_reservation_get(c, &disk_res,
843 ? BCH_DISK_RESERVATION_NOFAIL
848 for (i = 0; i < sectors; i++)
849 s->s[i].replicas_reserved +=
850 sectors_to_reserve(&s->s[i], nr_replicas);
855 struct bch2_folio_reservation {
856 struct disk_reservation disk;
857 struct quota_res quota;
860 static void bch2_folio_reservation_init(struct bch_fs *c,
861 struct bch_inode_info *inode,
862 struct bch2_folio_reservation *res)
864 memset(res, 0, sizeof(*res));
866 res->disk.nr_replicas = inode_nr_replicas(c, inode);
869 static void bch2_folio_reservation_put(struct bch_fs *c,
870 struct bch_inode_info *inode,
871 struct bch2_folio_reservation *res)
873 bch2_disk_reservation_put(c, &res->disk);
874 bch2_quota_reservation_put(c, inode, &res->quota);
877 static int bch2_folio_reservation_get(struct bch_fs *c,
878 struct bch_inode_info *inode,
880 struct bch2_folio_reservation *res,
881 unsigned offset, unsigned len)
883 struct bch_folio *s = bch2_folio_create(folio, 0);
884 unsigned i, disk_sectors = 0, quota_sectors = 0;
890 BUG_ON(!s->uptodate);
892 for (i = round_down(offset, block_bytes(c)) >> 9;
893 i < round_up(offset + len, block_bytes(c)) >> 9;
895 disk_sectors += sectors_to_reserve(&s->s[i],
896 res->disk.nr_replicas);
897 quota_sectors += s->s[i].state == SECTOR_unallocated;
901 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
907 ret = bch2_quota_reservation_add(c, inode, &res->quota,
908 quota_sectors, true);
910 struct disk_reservation tmp = {
911 .sectors = disk_sectors
914 bch2_disk_reservation_put(c, &tmp);
915 res->disk.sectors -= disk_sectors;
923 static void bch2_clear_folio_bits(struct folio *folio)
925 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
926 struct bch_fs *c = inode->v.i_sb->s_fs_info;
927 struct bch_folio *s = bch2_folio(folio);
928 struct disk_reservation disk_res = { 0 };
929 int i, sectors = folio_sectors(folio), dirty_sectors = 0;
934 EBUG_ON(!folio_test_locked(folio));
935 EBUG_ON(folio_test_writeback(folio));
937 for (i = 0; i < sectors; i++) {
938 disk_res.sectors += s->s[i].replicas_reserved;
939 s->s[i].replicas_reserved = 0;
941 dirty_sectors -= s->s[i].state == SECTOR_dirty;
942 folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
945 bch2_disk_reservation_put(c, &disk_res);
947 i_sectors_acct(c, inode, NULL, dirty_sectors);
949 bch2_folio_release(folio);
952 static void bch2_set_folio_dirty(struct bch_fs *c,
953 struct bch_inode_info *inode,
955 struct bch2_folio_reservation *res,
956 unsigned offset, unsigned len)
958 struct bch_folio *s = bch2_folio(folio);
959 unsigned i, dirty_sectors = 0;
961 WARN_ON((u64) folio_pos(folio) + offset + len >
962 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
964 BUG_ON(!s->uptodate);
968 for (i = round_down(offset, block_bytes(c)) >> 9;
969 i < round_up(offset + len, block_bytes(c)) >> 9;
971 unsigned sectors = sectors_to_reserve(&s->s[i],
972 res->disk.nr_replicas);
975 * This can happen if we race with the error path in
976 * bch2_writepage_io_done():
978 sectors = min_t(unsigned, sectors, res->disk.sectors);
980 s->s[i].replicas_reserved += sectors;
981 res->disk.sectors -= sectors;
983 dirty_sectors += s->s[i].state == SECTOR_unallocated;
985 folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
988 spin_unlock(&s->lock);
990 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
992 if (!folio_test_dirty(folio))
993 filemap_dirty_folio(inode->v.i_mapping, folio);
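/*
 * The pieces above compose like this (cf. bch2_page_mkwrite() below, which
 * is exactly this sequence for a single folio):
 *
 *	bch2_folio_reservation_init(c, inode, &res);
 *
 *	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
 *	      bch2_folio_reservation_get(c, inode, folio, &res, 0, len);
 *	if (!ret)
 *		bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
 *
 *	bch2_folio_reservation_put(c, inode, &res);
 *
 * i.e. bring the folio's sector state up to date with the btree, reserve
 * disk and quota space for the range being dirtied, mark it dirty
 * (transferring the reservation into the bch_folio sectors), then drop
 * whatever reservation wasn't consumed.
 */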
996 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
998 struct file *file = vmf->vma->vm_file;
999 struct address_space *mapping = file->f_mapping;
1000 struct address_space *fdm = faults_disabled_mapping();
1001 struct bch_inode_info *inode = file_bch_inode(file);
1005 return VM_FAULT_SIGBUS;
1007 /* Lock ordering: */
1008 if (fdm > mapping) {
1009 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
1011 if (bch2_pagecache_add_tryget(inode))
1014 bch2_pagecache_block_put(fdm_host);
1016 bch2_pagecache_add_get(inode);
1017 bch2_pagecache_add_put(inode);
1019 bch2_pagecache_block_get(fdm_host);
1021 /* Signal that lock has been dropped: */
1022 set_fdm_dropped_locks();
1023 return VM_FAULT_SIGBUS;
1026 bch2_pagecache_add_get(inode);
1028 ret = filemap_fault(vmf);
1029 bch2_pagecache_add_put(inode);
1034 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
1036 struct folio *folio = page_folio(vmf->page);
1037 struct file *file = vmf->vma->vm_file;
1038 struct bch_inode_info *inode = file_bch_inode(file);
1039 struct address_space *mapping = file->f_mapping;
1040 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1041 struct bch2_folio_reservation res;
1046 bch2_folio_reservation_init(c, inode, &res);
1048 sb_start_pagefault(inode->v.i_sb);
1049 file_update_time(file);
1052 * Not strictly necessary, but helps avoid dio writes livelocking in
1053 * write_invalidate_inode_pages_range() - can drop this if/when we get
1054 * a write_invalidate_inode_pages_range() that works without dropping
1055 * the page lock before invalidating the page
1057 bch2_pagecache_add_get(inode);
1060 isize = i_size_read(&inode->v);
1062 if (folio->mapping != mapping || folio_pos(folio) >= isize) {
1063 folio_unlock(folio);
1064 ret = VM_FAULT_NOPAGE;
1068 len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
1070 if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
1071 bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
1072 folio_unlock(folio);
1073 ret = VM_FAULT_SIGBUS;
1077 bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
1078 bch2_folio_reservation_put(c, inode, &res);
1080 folio_wait_stable(folio);
1081 ret = VM_FAULT_LOCKED;
1083 bch2_pagecache_add_put(inode);
1084 sb_end_pagefault(inode->v.i_sb);
1089 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1091 if (offset || length < folio_size(folio))
1094 bch2_clear_folio_bits(folio);
1097 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
1099 if (folio_test_dirty(folio) || folio_test_writeback(folio))
1102 bch2_clear_folio_bits(folio);
1108 static void bch2_readpages_end_io(struct bio *bio)
1110 struct folio_iter fi;
1112 bio_for_each_folio_all(fi, bio) {
1113 if (!bio->bi_status) {
1114 folio_mark_uptodate(fi.folio);
1116 folio_clear_uptodate(fi.folio);
1117 folio_set_error(fi.folio);
1119 folio_unlock(fi.folio);
1125 struct readpages_iter {
1126 struct address_space *mapping;
1131 static int readpages_iter_init(struct readpages_iter *iter,
1132 struct readahead_control *ractl)
1137 memset(iter, 0, sizeof(*iter));
1139 iter->mapping = ractl->mapping;
1141 ret = filemap_get_contig_folios_d(iter->mapping,
1142 ractl->_index << PAGE_SHIFT,
1143 (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
1144 0, mapping_gfp_mask(iter->mapping),
1149 darray_for_each(iter->folios, fi) {
1150 ractl->_nr_pages -= 1U << folio_order(*fi);
1151 __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
1159 static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
1161 if (iter->idx >= iter->folios.nr)
1163 return iter->folios.data[iter->idx];
1166 static inline void readpage_iter_advance(struct readpages_iter *iter)
1171 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1173 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1174 struct bch_extent_crc_unpacked crc;
1175 const union bch_extent_entry *i;
1177 bkey_for_each_crc(k.k, ptrs, crc, i)
1178 if (crc.csum_type || crc.compression_type)
1183 static int readpage_bio_extend(struct btree_trans *trans,
1184 struct readpages_iter *iter,
1186 unsigned sectors_this_extent,
1189 /* Don't hold btree locks while allocating memory: */
1190 bch2_trans_unlock(trans);
1192 while (bio_sectors(bio) < sectors_this_extent &&
1193 bio->bi_vcnt < bio->bi_max_vecs) {
1194 struct folio *folio = readpage_iter_peek(iter);
1198 readpage_iter_advance(iter);
1200 pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1205 folio = xa_load(&iter->mapping->i_pages, folio_offset);
1206 if (folio && !xa_is_value(folio))
1209 folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
1213 if (!__bch2_folio_create(folio, GFP_KERNEL)) {
1218 ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
1220 __bch2_folio_release(folio);
1228 BUG_ON(folio_sector(folio) != bio_end_sector(bio));
1230 BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
1233 return bch2_trans_relock(trans);
1236 static void bchfs_read(struct btree_trans *trans,
1237 struct bch_read_bio *rbio,
1239 struct readpages_iter *readpages_iter)
1241 struct bch_fs *c = trans->c;
1242 struct btree_iter iter;
1244 int flags = BCH_READ_RETRY_IF_STALE|
1245 BCH_READ_MAY_PROMOTE;
1250 rbio->start_time = local_clock();
1251 rbio->subvol = inum.subvol;
1253 bch2_bkey_buf_init(&sk);
1255 bch2_trans_begin(trans);
1256 iter = (struct btree_iter) { NULL };
1258 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1262 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1263 SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1267 unsigned bytes, sectors, offset_into_extent;
1268 enum btree_id data_btree = BTREE_ID_extents;
1271 * read_extent -> io_time_reset may cause a transaction restart
1272 * without returning an error, we need to check for that here:
1274 ret = bch2_trans_relock(trans);
1278 bch2_btree_iter_set_pos(&iter,
1279 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1281 k = bch2_btree_iter_peek_slot(&iter);
1286 offset_into_extent = iter.pos.offset -
1287 bkey_start_offset(k.k);
1288 sectors = k.k->size - offset_into_extent;
1290 bch2_bkey_buf_reassemble(&sk, c, k);
1292 ret = bch2_read_indirect_extent(trans, &data_btree,
1293 &offset_into_extent, &sk);
1297 k = bkey_i_to_s_c(sk.k);
1299 sectors = min(sectors, k.k->size - offset_into_extent);
1301 if (readpages_iter) {
1302 ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
1303 extent_partial_reads_expensive(k));
1308 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1309 swap(rbio->bio.bi_iter.bi_size, bytes);
1311 if (rbio->bio.bi_iter.bi_size == bytes)
1312 flags |= BCH_READ_LAST_FRAGMENT;
1314 bch2_bio_page_state_set(&rbio->bio, k);
1316 bch2_read_extent(trans, rbio, iter.pos,
1317 data_btree, k, offset_into_extent, flags);
1319 if (flags & BCH_READ_LAST_FRAGMENT)
1322 swap(rbio->bio.bi_iter.bi_size, bytes);
1323 bio_advance(&rbio->bio, bytes);
1325 ret = btree_trans_too_many_iters(trans);
1330 bch2_trans_iter_exit(trans, &iter);
1332 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1336 bch_err_inum_offset_ratelimited(c,
1338 iter.pos.offset << 9,
1339 "read error %i from btree lookup", ret);
1340 rbio->bio.bi_status = BLK_STS_IOERR;
1341 bio_endio(&rbio->bio);
1344 bch2_bkey_buf_exit(&sk, c);
1347 void bch2_readahead(struct readahead_control *ractl)
1349 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1350 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1351 struct bch_io_opts opts;
1352 struct btree_trans trans;
1353 struct folio *folio;
1354 struct readpages_iter readpages_iter;
1357 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1359 ret = readpages_iter_init(&readpages_iter, ractl);
1362 bch2_trans_init(&trans, c, 0, 0);
1364 bch2_pagecache_add_get(inode);
1366 while ((folio = readpage_iter_peek(&readpages_iter))) {
1367 unsigned n = min_t(unsigned,
1368 readpages_iter.folios.nr -
1371 struct bch_read_bio *rbio =
1372 rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1373 GFP_KERNEL, &c->bio_read),
1376 readpage_iter_advance(&readpages_iter);
1378 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1379 rbio->bio.bi_end_io = bch2_readpages_end_io;
1380 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1382 bchfs_read(&trans, rbio, inode_inum(inode),
1384 bch2_trans_unlock(&trans);
1387 bch2_pagecache_add_put(inode);
1389 bch2_trans_exit(&trans);
1390 darray_exit(&readpages_iter.folios);
1393 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
1394 subvol_inum inum, struct folio *folio)
1396 struct btree_trans trans;
1398 bch2_folio_create(folio, __GFP_NOFAIL);
1400 rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
1401 rbio->bio.bi_iter.bi_sector = folio_sector(folio);
1402 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
1404 bch2_trans_init(&trans, c, 0, 0);
1405 bchfs_read(&trans, rbio, inum, NULL);
1406 bch2_trans_exit(&trans);
1409 static void bch2_read_single_folio_end_io(struct bio *bio)
1411 complete(bio->bi_private);
1414 static int bch2_read_single_folio(struct folio *folio,
1415 struct address_space *mapping)
1417 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1418 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1419 struct bch_read_bio *rbio;
1420 struct bch_io_opts opts;
1422 DECLARE_COMPLETION_ONSTACK(done);
1424 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
1426 rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
1428 rbio->bio.bi_private = &done;
1429 rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
1431 __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
1432 wait_for_completion(&done);
1434 ret = blk_status_to_errno(rbio->bio.bi_status);
1435 bio_put(&rbio->bio);
1440 folio_mark_uptodate(folio);
1444 int bch2_read_folio(struct file *file, struct folio *folio)
1448 ret = bch2_read_single_folio(folio, folio->mapping);
1449 folio_unlock(folio);
1450 return bch2_err_class(ret);
1455 struct bch_writepage_state {
1456 struct bch_writepage_io *io;
1457 struct bch_io_opts opts;
1458 struct bch_folio_sector *tmp;
1459 unsigned tmp_sectors;
1462 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1463 struct bch_inode_info *inode)
1465 struct bch_writepage_state ret = { 0 };
1467 bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
1471 static void bch2_writepage_io_done(struct bch_write_op *op)
1473 struct bch_writepage_io *io =
1474 container_of(op, struct bch_writepage_io, op);
1475 struct bch_fs *c = io->op.c;
1476 struct bio *bio = &io->op.wbio.bio;
1477 struct folio_iter fi;
1481 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1483 bio_for_each_folio_all(fi, bio) {
1484 struct bch_folio *s;
1486 folio_set_error(fi.folio);
1487 mapping_set_error(fi.folio->mapping, -EIO);
1489 s = __bch2_folio(fi.folio);
1490 spin_lock(&s->lock);
1491 for (i = 0; i < folio_sectors(fi.folio); i++)
1492 s->s[i].nr_replicas = 0;
1493 spin_unlock(&s->lock);
1497 if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1498 bio_for_each_folio_all(fi, bio) {
1499 struct bch_folio *s;
1501 s = __bch2_folio(fi.folio);
1502 spin_lock(&s->lock);
1503 for (i = 0; i < folio_sectors(fi.folio); i++)
1504 s->s[i].nr_replicas = 0;
1505 spin_unlock(&s->lock);
1510 * racing with fallocate can cause us to add fewer sectors than
1511 * expected - but we shouldn't add more sectors than expected:
1513 WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1516 * (error (due to going RO) halfway through a page can screw that up
1519 BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
1523 * PageWriteback is effectively our ref on the inode - fixup i_blocks
1524 * before calling end_page_writeback:
1526 i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1528 bio_for_each_folio_all(fi, bio) {
1529 struct bch_folio *s = __bch2_folio(fi.folio);
1531 if (atomic_dec_and_test(&s->write_count))
1532 folio_end_writeback(fi.folio);
1535 bio_put(&io->op.wbio.bio);
1538 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1540 struct bch_writepage_io *io = w->io;
1543 closure_call(&io->op.cl, bch2_write, NULL, NULL);
1547 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1548 * possible, else allocating a new one:
1550 static void bch2_writepage_io_alloc(struct bch_fs *c,
1551 struct writeback_control *wbc,
1552 struct bch_writepage_state *w,
1553 struct bch_inode_info *inode,
1555 unsigned nr_replicas)
1557 struct bch_write_op *op;
1559 w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1562 &c->writepage_bioset),
1563 struct bch_writepage_io, op.wbio.bio);
1565 w->io->inode = inode;
1567 bch2_write_op_init(op, c, w->opts);
1568 op->target = w->opts.foreground_target;
1569 op->nr_replicas = nr_replicas;
1570 op->res.nr_replicas = nr_replicas;
1571 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1572 op->subvol = inode->ei_subvol;
1573 op->pos = POS(inode->v.i_ino, sector);
1574 op->end_io = bch2_writepage_io_done;
1575 op->devs_need_flush = &inode->ei_devs_need_flush;
1576 op->wbio.bio.bi_iter.bi_sector = sector;
1577 op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
1580 static int __bch2_writepage(struct folio *folio,
1581 struct writeback_control *wbc,
1584 struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
1585 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1586 struct bch_writepage_state *w = data;
1587 struct bch_folio *s;
1588 unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
1589 loff_t i_size = i_size_read(&inode->v);
1592 EBUG_ON(!folio_test_uptodate(folio));
1594 /* Is the folio fully inside i_size? */
1595 if (folio_end_pos(folio) <= i_size)
1598 /* Is the folio fully outside i_size? (truncate in progress) */
1599 if (folio_pos(folio) >= i_size) {
1600 folio_unlock(folio);
1605 * The folio straddles i_size. It must be zeroed out on each and every
1606 * writepage invocation because it may be mmapped. "A file is mapped
1607 * in multiples of the folio size. For a file that is not a multiple of
1608 * the folio size, the remaining memory is zeroed when mapped, and
1609 * writes to that region are not written out to the file."
1611 folio_zero_segment(folio,
1612 i_size - folio_pos(folio),
1615 f_sectors = folio_sectors(folio);
1616 s = bch2_folio(folio);
1618 if (f_sectors > w->tmp_sectors) {
1620 w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
1621 f_sectors, GFP_NOFS|__GFP_NOFAIL);
1622 w->tmp_sectors = f_sectors;
1626 * Things get really hairy with errors during writeback:
1628 ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
1631 /* Before unlocking the folio, get a copy of the reservations: */
1632 spin_lock(&s->lock);
1633 memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
1635 for (i = 0; i < f_sectors; i++) {
1636 if (s->s[i].state < SECTOR_dirty)
1639 nr_replicas_this_write =
1640 min_t(unsigned, nr_replicas_this_write,
1641 s->s[i].nr_replicas +
1642 s->s[i].replicas_reserved);
1645 for (i = 0; i < f_sectors; i++) {
1646 if (s->s[i].state < SECTOR_dirty)
1649 s->s[i].nr_replicas = w->opts.compression
1650 ? 0 : nr_replicas_this_write;
1652 s->s[i].replicas_reserved = 0;
1653 folio_sector_set(folio, s, i, SECTOR_allocated);
1655 spin_unlock(&s->lock);
1657 BUG_ON(atomic_read(&s->write_count));
1658 atomic_set(&s->write_count, 1);
1660 BUG_ON(folio_test_writeback(folio));
1661 folio_start_writeback(folio);
1663 folio_unlock(folio);
1667 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1670 while (offset < f_sectors &&
1671 w->tmp[offset].state < SECTOR_dirty)
1674 if (offset == f_sectors)
1677 while (offset + sectors < f_sectors &&
1678 w->tmp[offset + sectors].state >= SECTOR_dirty) {
1679 reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
1680 dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
1685 sector = folio_sector(folio) + offset;
1688 (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1689 bio_full(&w->io->op.wbio.bio, sectors << 9) ||
1690 w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1691 (BIO_MAX_VECS * PAGE_SIZE) ||
1692 bio_end_sector(&w->io->op.wbio.bio) != sector))
1693 bch2_writepage_do_io(w);
1696 bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1697 nr_replicas_this_write);
1699 atomic_inc(&s->write_count);
1701 BUG_ON(inode != w->io->inode);
1702 BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
1703 sectors << 9, offset << 9));
1705 /* Check for writing past i_size: */
1706 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1707 round_up(i_size, block_bytes(c)) &&
1708 !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1709 "writing past i_size: %llu > %llu (unrounded %llu)\n",
1710 bio_end_sector(&w->io->op.wbio.bio) << 9,
1711 round_up(i_size, block_bytes(c)),
1714 w->io->op.res.sectors += reserved_sectors;
1715 w->io->op.i_sectors_delta -= dirty_sectors;
1716 w->io->op.new_i_size = i_size;
1721 if (atomic_dec_and_test(&s->write_count))
1722 folio_end_writeback(folio);
1727 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1729 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1730 struct bch_writepage_state w =
1731 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1732 struct blk_plug plug;
1735 blk_start_plug(&plug);
1736 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1738 bch2_writepage_do_io(&w);
1739 blk_finish_plug(&plug);
1741 return bch2_err_class(ret);
1744 /* buffered writes: */
1746 int bch2_write_begin(struct file *file, struct address_space *mapping,
1747 loff_t pos, unsigned len,
1748 struct page **pagep, void **fsdata)
1750 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1751 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1752 struct bch2_folio_reservation *res;
1753 struct folio *folio;
1757 res = kmalloc(sizeof(*res), GFP_KERNEL);
1761 bch2_folio_reservation_init(c, inode, res);
1764 bch2_pagecache_add_get(inode);
1766 folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
1767 FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
1768 mapping_gfp_mask(mapping));
1769 if (IS_ERR_OR_NULL(folio))
1772 if (folio_test_uptodate(folio))
1775 offset = pos - folio_pos(folio);
1776 len = min_t(size_t, len, folio_end_pos(folio) - pos);
1778 /* If we're writing the entire folio, we don't need to read it in first: */
1779 if (!offset && len == folio_size(folio))
1782 if (!offset && pos + len >= inode->v.i_size) {
1783 folio_zero_segment(folio, len, folio_size(folio));
1784 flush_dcache_folio(folio);
1788 if (folio_pos(folio) >= inode->v.i_size) {
1789 folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
1790 flush_dcache_folio(folio);
1794 ret = bch2_read_single_folio(folio, mapping);
1798 ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
1802 ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
1804 if (!folio_test_uptodate(folio)) {
1806 * If the folio hasn't been read in, we won't know whether we
1807 * actually need a reservation - and we don't need to read it
1808 * here, we just need to check whether the folio is fully
1809 * backed by uncompressed data:
1817 *pagep = &folio->page;
1820 folio_unlock(folio);
1824 bch2_pagecache_add_put(inode);
1827 return bch2_err_class(ret);
1830 int bch2_write_end(struct file *file, struct address_space *mapping,
1831 loff_t pos, unsigned len, unsigned copied,
1832 struct page *page, void *fsdata)
1834 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1835 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1836 struct bch2_folio_reservation *res = fsdata;
1837 struct folio *folio = page_folio(page);
1838 unsigned offset = pos - folio_pos(folio);
1840 lockdep_assert_held(&inode->v.i_rwsem);
1841 BUG_ON(offset + copied > folio_size(folio));
1843 if (unlikely(copied < len && !folio_test_uptodate(folio))) {
1845 * The folio needs to be read in, but that would destroy
1846 * our partial write - simplest thing is to just force
1847 * userspace to redo the write:
1849 folio_zero_range(folio, 0, folio_size(folio));
1850 flush_dcache_folio(folio);
1854 spin_lock(&inode->v.i_lock);
1855 if (pos + copied > inode->v.i_size)
1856 i_size_write(&inode->v, pos + copied);
1857 spin_unlock(&inode->v.i_lock);
1860 if (!folio_test_uptodate(folio))
1861 folio_mark_uptodate(folio);
1863 bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
1865 inode->ei_last_dirtied = (unsigned long) current;
1868 folio_unlock(folio);
1870 bch2_pagecache_add_put(inode);
1872 bch2_folio_reservation_put(c, inode, res);
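/*
 * Sketch of the multi-folio buffered write below, in order:
 *
 *  1) pin and lock a contiguous run of folios covering [pos, pos + len)
 *     (filemap_get_contig_folios_d());
 *  2) read in the first/last folios if the write only partially covers them
 *     and they aren't uptodate;
 *  3) bch2_folio_set() to bring sector state up to date with the btree;
 *  4) per folio, get disk/quota reservations, truncating the run at the
 *     first failure (-ENOSPC handling is still incomplete, see below);
 *  5) copy data in from the iov_iter, again truncating on a short copy;
 *  6) update i_size and mark the copied ranges dirty.
 */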
1878 static noinline void folios_trunc(folios *folios, struct folio **fi)
1880 while (folios->data + folios->nr > fi) {
1881 struct folio *f = darray_pop(folios);
1888 static int __bch2_buffered_write(struct bch_inode_info *inode,
1889 struct address_space *mapping,
1890 struct iov_iter *iter,
1891 loff_t pos, unsigned len)
1893 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1894 struct bch2_folio_reservation res;
1896 struct folio **fi, *f;
1897 unsigned copied = 0, f_offset;
1898 u64 end = pos + len, f_pos;
1899 loff_t last_folio_pos = inode->v.i_size;
1904 bch2_folio_reservation_init(c, inode, &res);
1905 darray_init(&folios);
1907 ret = filemap_get_contig_folios_d(mapping, pos, end,
1908 FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
1909 mapping_gfp_mask(mapping),
1916 f = darray_first(folios);
1917 if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
1918 ret = bch2_read_single_folio(f, mapping);
1923 f = darray_last(folios);
1924 end = min(end, folio_end_pos(f));
1925 last_folio_pos = folio_pos(f);
1926 if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
1927 if (end >= inode->v.i_size) {
1928 folio_zero_range(f, 0, folio_size(f));
1930 ret = bch2_read_single_folio(f, mapping);
1936 ret = bch2_folio_set(c, inode_inum(inode), folios.data, folios.nr);
1941 f_offset = pos - folio_pos(darray_first(folios));
1942 darray_for_each(folios, fi) {
1943 struct folio *f = *fi;
1944 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1947 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
1948 * supposed to write as much as we have disk space for.
1950 * On failure here we should still write out a partial page if
1951 * we aren't completely out of disk space - we don't do that
1954 ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
1955 if (unlikely(ret)) {
1956 folios_trunc(&folios, fi);
1960 end = min(end, folio_end_pos(darray_last(folios)));
1964 f_pos = folio_end_pos(f);
1968 if (mapping_writably_mapped(mapping))
1969 darray_for_each(folios, fi)
1970 flush_dcache_folio(*fi);
1973 f_offset = pos - folio_pos(darray_first(folios));
1974 darray_for_each(folios, fi) {
1975 struct folio *f = *fi;
1976 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
1977 unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
1980 folios_trunc(&folios, fi);
1984 if (!folio_test_uptodate(f) &&
1985 f_copied != folio_size(f) &&
1986 pos + copied + f_copied < inode->v.i_size) {
1987 folio_zero_range(f, 0, folio_size(f));
1988 folios_trunc(&folios, fi);
1992 flush_dcache_folio(f);
1995 if (f_copied != f_len) {
1996 folios_trunc(&folios, fi + 1);
2000 f_pos = folio_end_pos(f);
2009 spin_lock(&inode->v.i_lock);
2010 if (end > inode->v.i_size)
2011 i_size_write(&inode->v, end);
2012 spin_unlock(&inode->v.i_lock);
2015 f_offset = pos - folio_pos(darray_first(folios));
2016 darray_for_each(folios, fi) {
2017 struct folio *f = *fi;
2018 u64 f_len = min(end, folio_end_pos(f)) - f_pos;
2020 if (!folio_test_uptodate(f))
2021 folio_mark_uptodate(f);
2023 bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
2025 f_pos = folio_end_pos(f);
2029 inode->ei_last_dirtied = (unsigned long) current;
2031 darray_for_each(folios, fi) {
2037 * If the last folio added to the mapping starts beyond current EOF, we
2038 * performed a short write but left around at least one post-EOF folio.
2039 * Clean up the mapping before we return.
2041 if (last_folio_pos >= inode->v.i_size)
2042 truncate_pagecache(&inode->v, inode->v.i_size);
2044 darray_exit(&folios);
2045 bch2_folio_reservation_put(c, inode, &res);
2047 return copied ?: ret;
2050 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
2052 struct file *file = iocb->ki_filp;
2053 struct address_space *mapping = file->f_mapping;
2054 struct bch_inode_info *inode = file_bch_inode(file);
2055 loff_t pos = iocb->ki_pos;
2056 ssize_t written = 0;
2059 bch2_pagecache_add_get(inode);
2062 unsigned offset = pos & (PAGE_SIZE - 1);
2063 unsigned bytes = iov_iter_count(iter);
2066 * Bring in the user page that we will copy from _first_.
2067 * Otherwise there's a nasty deadlock on copying from the
2068 * same page as we're writing to, without it being marked
2071 * Not only is this an optimisation, but it is also required
2072 * to check that the address is actually valid, when atomic
2073 * usercopies are used, below.
2075 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2076 bytes = min_t(unsigned long, iov_iter_count(iter),
2077 PAGE_SIZE - offset);
2079 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
2085 if (unlikely(fatal_signal_pending(current))) {
2090 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
2091 if (unlikely(ret < 0))
2096 if (unlikely(ret == 0)) {
2098 * If we were unable to copy any data at all, we must
2099 * fall back to a single segment length write.
2101 * If we didn't fall back here, we could livelock
2102 * because not all segments in the iov can be copied at
2103 * once without a pagefault.
2105 bytes = min_t(unsigned long, PAGE_SIZE - offset,
2106 iov_iter_single_seg_count(iter));
2113 balance_dirty_pages_ratelimited(mapping);
2114 } while (iov_iter_count(iter));
2116 bch2_pagecache_add_put(inode);
2118 return written ? written : ret;
2121 /* O_DIRECT reads */
2123 static void bio_check_or_release(struct bio *bio, bool check_dirty)
2126 bio_check_pages_dirty(bio);
2128 bio_release_pages(bio, false);
2133 static void bch2_dio_read_complete(struct closure *cl)
2135 struct dio_read *dio = container_of(cl, struct dio_read, cl);
2137 dio->req->ki_complete(dio->req, dio->ret);
2138 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2141 static void bch2_direct_IO_read_endio(struct bio *bio)
2143 struct dio_read *dio = bio->bi_private;
2146 dio->ret = blk_status_to_errno(bio->bi_status);
2148 closure_put(&dio->cl);
2151 static void bch2_direct_IO_read_split_endio(struct bio *bio)
2153 struct dio_read *dio = bio->bi_private;
2154 bool should_dirty = dio->should_dirty;
2156 bch2_direct_IO_read_endio(bio);
2157 bio_check_or_release(bio, should_dirty);
2160 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
2162 struct file *file = req->ki_filp;
2163 struct bch_inode_info *inode = file_bch_inode(file);
2164 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2165 struct bch_io_opts opts;
2166 struct dio_read *dio;
2168 loff_t offset = req->ki_pos;
2169 bool sync = is_sync_kiocb(req);
2173 bch2_inode_opts_get(&opts, c, &inode->ei_inode);
2175 if ((offset|iter->count) & (block_bytes(c) - 1))
2178 ret = min_t(loff_t, iter->count,
2179 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
2184 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
2185 iter->count -= shorten;
2187 bio = bio_alloc_bioset(NULL,
2188 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2191 &c->dio_read_bioset);
2193 bio->bi_end_io = bch2_direct_IO_read_endio;
2195 dio = container_of(bio, struct dio_read, rbio.bio);
2196 closure_init(&dio->cl, NULL);
2199 * this is a _really_ horrible hack just to avoid an atomic sub at the
2203 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
2204 atomic_set(&dio->cl.remaining,
2205 CLOSURE_REMAINING_INITIALIZER -
2207 CLOSURE_DESTRUCTOR);
2209 atomic_set(&dio->cl.remaining,
2210 CLOSURE_REMAINING_INITIALIZER + 1);
2216 * This is one of the sketchier things I've encountered: we have to skip
2217 * the dirtying of requests that are internal to the kernel (i.e. from
2218 * loopback), because we'll deadlock on page_lock.
2220 dio->should_dirty = iter_is_iovec(iter);
2223 while (iter->count) {
2224 bio = bio_alloc_bioset(NULL,
2225 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2229 bio->bi_end_io = bch2_direct_IO_read_split_endio;
2231 bio->bi_opf = REQ_OP_READ|REQ_SYNC;
2232 bio->bi_iter.bi_sector = offset >> 9;
2233 bio->bi_private = dio;
2235 ret = bio_iov_iter_get_pages(bio, iter);
2237 /* XXX: fault inject this path */
2238 bio->bi_status = BLK_STS_RESOURCE;
2243 offset += bio->bi_iter.bi_size;
2245 if (dio->should_dirty)
2246 bio_set_pages_dirty(bio);
2249 closure_get(&dio->cl);
2251 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2254 iter->count += shorten;
2257 closure_sync(&dio->cl);
2258 closure_debug_destroy(&dio->cl);
2260 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2263 return -EIOCBQUEUED;
2267 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2269 struct file *file = iocb->ki_filp;
2270 struct bch_inode_info *inode = file_bch_inode(file);
2271 struct address_space *mapping = file->f_mapping;
2272 size_t count = iov_iter_count(iter);
2276 return 0; /* skip atime */
2278 if (iocb->ki_flags & IOCB_DIRECT) {
2279 struct blk_plug plug;
2281 if (unlikely(mapping->nrpages)) {
2282 ret = filemap_write_and_wait_range(mapping,
2284 iocb->ki_pos + count - 1);
2289 file_accessed(file);
2291 blk_start_plug(&plug);
2292 ret = bch2_direct_IO_read(iocb, iter);
2293 blk_finish_plug(&plug);
2296 iocb->ki_pos += ret;
2298 bch2_pagecache_add_get(inode);
2299 ret = generic_file_read_iter(iocb, iter);
2300 bch2_pagecache_add_put(inode);
2303 return bch2_err_class(ret);
2306 /* O_DIRECT writes */
2308 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2309 u64 offset, u64 size,
2310 unsigned nr_replicas, bool compressed)
2312 struct btree_trans trans;
2313 struct btree_iter iter;
2315 u64 end = offset + size;
2320 bch2_trans_init(&trans, c, 0, 0);
2322 bch2_trans_begin(&trans);
2324 err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2328 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2329 SPOS(inum.inum, offset, snapshot),
2330 BTREE_ITER_SLOTS, k, err) {
2331 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
2334 if (k.k->p.snapshot != snapshot ||
2335 nr_replicas > bch2_bkey_replicas(c, k) ||
2336 (!compressed && bch2_bkey_sectors_compressed(k))) {
2342 offset = iter.pos.offset;
2343 bch2_trans_iter_exit(&trans, &iter);
2345 if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2347 bch2_trans_exit(&trans);
2349 return err ? false : ret;
2352 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2354 struct bch_fs *c = dio->op.c;
2355 struct bch_inode_info *inode = dio->inode;
2356 struct bio *bio = &dio->op.wbio.bio;
2358 return bch2_check_range_allocated(c, inode_inum(inode),
2359 dio->op.pos.offset, bio_sectors(bio),
2360 dio->op.opts.data_replicas,
2361 dio->op.opts.compression != 0);
2364 static void bch2_dio_write_loop_async(struct bch_write_op *);
2365 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2368 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
2369 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
2370 * caller's stack, we're not guaranteed that it will live for the duration of
2373 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2375 struct iovec *iov = dio->inline_vecs;
2378 * a ubuf iov_iter is a single user buffer with no iovec array - nothing to stash:
2380 if (iter_is_ubuf(&dio->iter))
2384 * We don't currently handle non-iovec iov_iters here - return an error,
2385 * and we'll fall back to doing the IO synchronously:
2387 if (!iter_is_iovec(&dio->iter))
2390 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2391 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2396 dio->free_iov = true;
2399 memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
2400 dio->iter.__iov = iov;
2404 static void bch2_dio_write_flush_done(struct closure *cl)
2406 struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2407 struct bch_fs *c = dio->op.c;
2409 closure_debug_destroy(cl);
2411 dio->op.error = bch2_journal_error(&c->journal);
2413 bch2_dio_write_done(dio);
2416 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2418 struct bch_fs *c = dio->op.c;
2419 struct bch_inode_unpacked inode;
2424 closure_init(&dio->op.cl, NULL);
2426 if (!dio->op.error) {
2427 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2429 dio->op.error = ret;
2431 bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2432 bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2437 closure_sync(&dio->op.cl);
2438 closure_debug_destroy(&dio->op.cl);
2440 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2444 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2446 struct kiocb *req = dio->req;
2447 struct bch_inode_info *inode = dio->inode;
2448 bool sync = dio->sync;
2451 if (unlikely(dio->flush)) {
2452 bch2_dio_write_flush(dio);
2454 return -EIOCBQUEUED;
2457 bch2_pagecache_block_put(inode);
2460 kfree(dio->iter.__iov);
2462 ret = dio->op.error ?: ((long) dio->written << 9);
2463 bio_put(&dio->op.wbio.bio);
2465 /* inode->i_dio_count is our ref on inode and thus bch_fs */
2466 inode_dio_end(&inode->v);
2469 ret = bch2_err_class(ret);
2472 req->ki_complete(req, ret);
2478 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2480 struct bch_fs *c = dio->op.c;
2481 struct kiocb *req = dio->req;
2482 struct bch_inode_info *inode = dio->inode;
2483 struct bio *bio = &dio->op.wbio.bio;
2485 req->ki_pos += (u64) dio->op.written << 9;
2486 dio->written += dio->op.written;
2488 if (dio->extending) {
2489 spin_lock(&inode->v.i_lock);
2490 if (req->ki_pos > inode->v.i_size)
2491 i_size_write(&inode->v, req->ki_pos);
2492 spin_unlock(&inode->v.i_lock);
2495 if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2496 mutex_lock(&inode->ei_quota_lock);
2497 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2498 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2499 mutex_unlock(&inode->ei_quota_lock);
2502 bio_release_pages(bio, false);
2504 if (unlikely(dio->op.error))
2505 set_bit(EI_INODE_ERROR, &inode->ei_flags);
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = dio->mapping;
	struct bch_inode_info *inode = dio->inode;
	struct bch_io_opts opts;
	struct bio *bio = &dio->op.wbio.bio;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	while (1) {
		iter_count = dio->iter.count;

		EBUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, opts);
		dio->op.end_io		= sync
			? NULL
			: bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
		dio->op.devs_need_flush	= &inode->ei_devs_need_flush;

		if (sync)
			dio->op.flags |= BCH_WRITE_SYNC;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
						 bio_sectors(bio), true);
		if (unlikely(ret))
			goto err;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_dio_write_check_allocated(dio))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (unlikely(dio->iter.count) &&
		    !dio->sync &&
		    !dio->loop &&
		    bch2_dio_write_copy_iov(dio))
			dio->sync = sync = true;

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (!sync)
			return -EIOCBQUEUED;

		bch2_dio_write_end(dio);

		if (likely(!dio->iter.count) || dio->op.error)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
	}
out:
	return bch2_dio_write_done(dio);
err:
	dio->op.error = ret;

	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
}
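
/*
 * Resume the write loop from write op completion context; completion may run
 * from a kernel thread, so we have to attach to the submitter's mm (saved in
 * dio->mm) before the loop touches userspace memory again.
 */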
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);

	if (mm)
		kthread_use_mm(mm);
	bch2_dio_write_loop(dio);
	if (mm)
		kthread_unuse_mm(mm);
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	bch2_dio_write_end(dio);

	if (likely(!dio->iter.count) || dio->op.error)
		bch2_dio_write_done(dio);
	else
		bch2_dio_write_continue(dio);
}
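
/*
 * Note the locking strategy below: extending writes imply dio->sync, so for
 * them the inode lock is held until the write completes; for non-extending
 * writes the inode lock is dropped before any I/O is issued, with page cache
 * consistency handled by bch2_pagecache_block_get() and the page cache
 * invalidation in the write path.
 */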
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(inode);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	dio->req		= req;
	dio->mapping		= mapping;
	dio->inode		= inode;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->extending		= extending;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->flush		= iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;
	dio->op.c		= c;

	if (unlikely(mapping->nrpages)) {
		ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(inode);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}
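
/*
 * Top of the write path: O_DIRECT is dispatched separately; buffered writes
 * go through bch2_buffered_write(), with generic_write_sync() handling
 * O_SYNC/O_DSYNC afterwards.
 */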
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}
/* fsync: */

/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c,
			    struct bch_inode_info *inode)
{
	struct bch_inode_unpacked u;
	int ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
	if (ret)
		return ret;

	return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
		bch2_inode_flush_nocow_writes(c, inode);
}
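
/*
 * fsync proper is three steps - flush dirty pages, write out dirty inode
 * metadata, then flush the journal up to the inode's bi_journal_seq - and
 * all three are always attempted, with the first error winning.
 */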
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2, ret3;

	ret = file_write_and_wait_range(file, start, end);
	ret2 = sync_inode_metadata(&inode->v, 1);
	ret3 = bch2_flush_inode(c, inode);

	return bch2_err_class(ret ?: ret2 ?: ret3);
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c, u32 subvol,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
	if (ret)
		goto err;

	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
		if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
			ret = 1;
			break;
		}
	start = iter.pos;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	return ret;
}
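
/*
 * Zero out the part of a folio that a truncate or hole punch touches: if the
 * range covers the whole folio there's nothing to do here, otherwise the
 * affected sectors are marked unallocated and zeroed and the folio redirtied.
 * Returns > 0 if the folio straddles i_size and will be written back, i.e.
 * writeback (not the caller) will perform the i_size update.
 */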
static int __bch2_truncate_folio(struct bch_inode_info *inode,
				 pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_folio *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct folio *folio;
	s64 i_sectors_delta = 0;
	u64 end_pos;
	int ret = 0;

	folio = filemap_lock_folio(mapping, index);
	if (IS_ERR_OR_NULL(folio)) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * folio
		 */
		ret = range_has_data(c, inode->ei_subvol,
				POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
				POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
		if (ret <= 0)
			return ret;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
		if (unlikely(IS_ERR_OR_NULL(folio))) {
			ret = -ENOMEM;
			goto out;
		}
	}

	BUG_ON(start	>= folio_end_pos(folio));
	BUG_ON(end	<= folio_pos(folio));

	start_offset	= max(start, folio_pos(folio)) - folio_pos(folio);
	end_offset	= min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);

	/* Folio boundary? Nothing to do */
	if (start_offset == 0 &&
	    end_offset == folio_size(folio)) {
		ret = 0;
		goto unlock;
	}

	s = bch2_folio_create(folio, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!folio_test_uptodate(folio)) {
		ret = bch2_read_single_folio(folio, mapping);
		if (ret)
			goto unlock;
	}

	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto unlock;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;

		i_sectors_delta -= s->s[i].state == SECTOR_dirty;
		folio_sector_set(folio, s, i, SECTOR_unallocated);
	}

	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	/*
	 * Caller needs to know whether this folio will be written out by
	 * writeback - doing an i_size update if necessary - or whether it will
	 * be responsible for the i_size update.
	 *
	 * Note that we shouldn't ever see a folio beyond EOF, but check and
	 * warn if so. This has been observed when folios weren't cleaned up
	 * after a short write, and there's still a chance reclaim will fix
	 * things up.
	 */
	WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
	end_pos = folio_end_pos(folio);
	if (inode->v.i_size > folio_pos(folio))
		end_pos = min_t(u64, inode->v.i_size, end_pos);
	ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;

	folio_zero_segment(folio, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the folio has
	 * actual data in it (vs. just 0s, or only partially written) this is
	 * wrong. ick.
	 */
	BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	folio_mkclean(folio);
	filemap_dirty_folio(mapping, folio);
unlock:
	folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}
static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
				     from, ANYSINT_MAX(loff_t));
}

static int bch2_truncate_folios(struct bch_inode_info *inode,
				loff_t start, loff_t end)
{
	int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
					start, end);

	if (ret >= 0 &&
	    start >> PAGE_SHIFT != end >> PAGE_SHIFT)
		ret = __bch2_truncate_folio(inode,
					(end - 1) >> PAGE_SHIFT,
					start, end);
	return ret;
}
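
/*
 * Extending truncate: the new i_size is written to the btree right away, so
 * any dirty data above the old on disk i_size has to be flushed first -
 * presumably so a crash can never leave i_size pointing past data that never
 * made it to disk.
 */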
static int bch2_extend(struct mnt_idmap *idmap,
		       struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);

	return bch2_setattr_nonsize(idmap, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
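
/*
 * Truncate is a two step process: bch2_truncate_start_fn records the new size
 * and sets BCH_INODE_I_SIZE_DIRTY before any extents are deleted, and
 * bch2_truncate_finish_fn clears the flag once they have been - the idea
 * being that if we crash in between, recovery sees the flag and can finish
 * deleting extents past i_size instead of exposing stale data.
 */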
int bch2_truncate(struct mnt_idmap *idmap,
		  struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	/*
	 * If the truncate call will change the size of the file, the
	 * cmtimes should be updated. If the size will not change, we
	 * do not need to update the cmtimes.
	 */
	if (iattr->ia_size != inode->v.i_size) {
		if (!(iattr->ia_valid & ATTR_MTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_mtime);
		if (!(iattr->ia_valid & ATTR_CTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_ctime);
		iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
	}

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		  inode->v.i_size < inode_u.bi_size,
		  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
		  (u64) inode->v.i_size, inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(idmap, inode, &inode_u, iattr);
		goto err;
	}

	iattr->ia_valid &= ~ATTR_SIZE;

	ret = bch2_truncate_folio(inode, iattr->ia_size);
	if (unlikely(ret < 0))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode_inum(inode),
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
				!bch2_journal_error(&c->journal), c,
				"inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks,
				inode->ei_inode.bi_sectors);
	if (unlikely(ret))
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
	mutex_unlock(&inode->ei_update_lock);

	ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
	bch2_pagecache_block_put(inode);
	return bch2_err_class(ret);
}
/* fallocate: */

static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}
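
/*
 * Hole punch: only the interior, block-aligned part of the range is punched
 * out of the extents btree; the unaligned head and tail are merely zeroed in
 * the page cache. E.g. with 4k blocks, punching offset=1k len=10k punches
 * blocks [4k, 8k) on disk and zeroes [1k, 4k) and [8k, 11k) in cache.
 */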
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end		= offset + len;
	u64 block_start	= round_up(offset, block_bytes(c));
	u64 block_end	= round_down(end, block_bytes(c));
	bool truncated_last_page;
	int ret = 0;

	ret = bch2_truncate_folios(inode, offset, end);
	if (unlikely(ret < 0))
		goto err;

	truncated_last_page = ret;

	truncate_pagecache_range(&inode->v, offset, end - 1);

	if (block_start < block_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  block_start >> 9, block_end >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}

	mutex_lock(&inode->ei_update_lock);
	if (end >= inode->v.i_size && !truncated_last_page) {
		ret = bch2_write_inode_size(c, inode, inode->v.i_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
err:
	return ret;
}
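
/*
 * Implements both FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE by
 * rewriting every affected extent at its shifted position: collapse shifts
 * extents past offset+len left by len and walks forwards; insert shifts
 * extents past offset right by len and walks backwards from the end, so an
 * extent is never moved on top of one that hasn't been moved yet.
 */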
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				   loff_t offset, loff_t len,
				   bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter src, dst, del;
	loff_t shift, new_size;
	u64 src_start;
	int ret = 0;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	if (insert) {
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			return -EFBIG;

		if (offset >= inode->v.i_size)
			return -EINVAL;

		src_start	= U64_MAX;
		shift		= len;
	} else {
		if (offset + len >= inode->v.i_size)
			return -EINVAL;

		src_start	= offset + len;
		shift		= -len;
	}

	new_size = inode->v.i_size + shift;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  offset >> 9, (offset + len) >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			return ret;
	}

	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
	bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	bch2_trans_copy_iter(&dst, &src);
	bch2_trans_copy_iter(&del, &src);

	while (ret == 0 ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;
		u32 snapshot;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&src, snapshot);
		bch2_btree_iter_set_snapshot(&dst, snapshot);
		bch2_btree_iter_set_snapshot(&del, snapshot);

		bch2_trans_begin(&trans);

		k = insert
			? bch2_btree_iter_peek_prev(&src)
			: bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
		if ((ret = bkey_err(k)))
			continue;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		if (insert &&
		    bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
			break;
reassemble:
		bch2_bkey_buf_reassemble(&copy, c, k);

		if (insert &&
		    bkey_lt(bkey_start_pos(k.k), move_pos))
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
		if (ret)
			continue;

		if (!bkey_eq(atomic_end, copy.k->k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, copy.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;
		bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		if (copy.k->k.size != k.k->size) {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k->k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		ret =   bch2_btree_iter_traverse(&del) ?:
			bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res, NULL,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);

		if (!ret)
			bch2_btree_iter_set_pos(&src, next_pos);
	}
	bch2_trans_iter_exit(&trans, &del);
	bch2_trans_iter_exit(&trans, &dst);
	bch2_trans_iter_exit(&trans, &src);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&copy, c);

	if (ret)
		return ret;

	mutex_lock(&inode->ei_update_lock);
	if (!insert) {
		i_size_write(&inode->v, new_size);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		/* We need an inode update to update bi_journal_seq for fsync: */
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}
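
/*
 * Core of fallocate/zero-range: walk the range one btree slot at a time,
 * skip extents that are already allocated or sufficiently reserved, and turn
 * each remaining hole into a reservation via bch2_extent_fallocate(). For
 * plain fallocate, holes are first clamped against dirty page cache -
 * presumably because dirty pages over the hole carry their own reservations
 * and will be allocated at writeback anyway.
 */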
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
			     u64 start_sector, u64 end_sector)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
	struct bch_io_opts opts;
	int ret = 0;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			POS(inode->v.i_ino, start_sector),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (!ret && bkey_lt(iter.pos, end_pos)) {
		s64 i_sectors_delta = 0;
		struct quota_res quota_res = { 0 };
		struct bkey_s_c k;
		unsigned sectors;
		bool is_allocation;
		u64 hole_start, hole_end;
		u32 snapshot;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		hole_start	= iter.pos.offset;
		hole_end	= bpos_min(k.k->p, end_pos).offset;
		is_allocation	= bkey_extent_is_allocation(k.k);

		/* already reserved */
		if (bkey_extent_is_reservation(k) &&
		    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (!(mode & FALLOC_FL_ZERO_RANGE)) {
			ret = drop_locks_do(&trans,
				(bch2_clamp_data_hole(&inode->v,
						      &hole_start,
						      &hole_end,
						      opts.data_replicas), 0));
			bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));

			if (ret)
				goto bkey_err;

			if (hole_start == hole_end)
				continue;
		}

		sectors = hole_end - hole_start;

		if (!is_allocation) {
			ret = bch2_quota_reservation_add(c, inode,
					&quota_res, sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
					    sectors, opts, &i_sectors_delta,
					    writepoint_hashed((unsigned long) current));
		if (ret)
			goto bkey_err;

		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);

		drop_locks_do(&trans,
			(mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
	}

	if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
		struct quota_res quota_res = { 0 };
		s64 i_sectors_delta = 0;

		bch2_fpunch_at(&trans, &iter, inode_inum(inode),
			       end_sector, &i_sectors_delta);
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
		bch2_quota_reservation_put(c, inode, &quota_res);
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			    loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end		= offset + len;
	u64 block_start	= round_down(offset, block_bytes(c));
	u64 block_end	= round_up(end, block_bytes(c));
	bool truncated_last_page = false;
	int ret, ret2 = 0;

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			return ret;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = bch2_truncate_folios(inode, offset, end);
		if (unlikely(ret < 0))
			return ret;

		truncated_last_page = ret;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start	= round_up(offset, block_bytes(c));
		block_end	= round_down(end, block_bytes(c));
	}

	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

	/*
	 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
	 * so that the VFS cache i_size is consistent with the btree i_size:
	 */
	if (ret &&
	    !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
		return ret;

	if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
		end = inode->v.i_size;

	if (end >= inode->v.i_size &&
	    (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
	     !(mode & FALLOC_FL_KEEP_SIZE))) {
		spin_lock(&inode->v.i_lock);
		i_size_write(&inode->v, end);
		spin_unlock(&inode->v.i_lock);

		mutex_lock(&inode->ei_update_lock);
		ret2 = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}

	return ret ?: ret2;
}
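
/*
 * fallocate(2) entry point; mode selects the implementation:
 *
 *   0, FALLOC_FL_KEEP_SIZE and/or FALLOC_FL_ZERO_RANGE -> bchfs_fallocate()
 *   FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE  -> bchfs_fpunch()
 *   FALLOC_FL_INSERT_RANGE                    -> bchfs_fcollapse_finsert(insert=true)
 *   FALLOC_FL_COLLAPSE_RANGE                  -> bchfs_fcollapse_finsert(insert=false)
 *
 * Anything else is -EOPNOTSUPP.
 */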
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	long ret;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
		return -EROFS;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = file_modified(file);
	if (ret)
		goto err;

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
	else
		ret = -EOPNOTSUPP;
err:
	bch2_pagecache_block_put(inode);
	inode_unlock(&inode->v);
	bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);

	return bch2_err_class(ret);
}
/*
 * Take a quota reservation for unallocated blocks in a given file range.
 * Does not check pagecache.
 */
static int quota_reserve_range(struct bch_inode_info *inode,
			       struct quota_res *res,
			       u64 start, u64 end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot;
	u64 sectors = end - start;
	u64 pos = start;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inode->v.i_ino, pos, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(&trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
	       !(ret = bkey_err(k))) {
		if (bkey_extent_is_allocation(k.k)) {
			u64 s = min(end, k.k->p.offset) -
				max(start, bkey_start_offset(k.k));
			BUG_ON(s > sectors);
			sectors -= s;
		}
		bch2_btree_iter_advance(&iter);
	}
	pos = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);

	if (ret)
		return ret;

	return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
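
/*
 * reflink: both inodes are locked (in a consistent order, via
 * bch2_lock_inodes()) with page cache operations blocked on each; the
 * destination's page cache is written back and invalidated over the target
 * range, while the source's is only marked unallocated - presumably because
 * its extents are about to become shared, and future writes to them will
 * have to allocate anew.
 */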
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;
	u64 aligned_len;
	loff_t ret = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;

	bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);
	if (ret)
		goto err;

	ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
				  (pos_dst + aligned_len) >> 9);
	if (ret)
		goto err;

	file_update_time(file_dst);

	mark_pagecache_unallocated(src, pos_src >> 9,
				   (pos_src + aligned_len) >> 9);

	ret = bch2_remap_range(c,
			       inode_inum(dst), pos_dst >> 9,
			       inode_inum(src), pos_src >> 9,
			       aligned_len >> 9,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);

	i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);

	if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
	    IS_SYNC(file_inode(file_dst)))
		ret = bch2_flush_inode(c, dst);
err:
	bch2_quota_reservation_put(c, dst, &quota_res);
	bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	return bch2_err_class(ret);
}
/* fseek: */

static int folio_data_offset(struct folio *folio, loff_t pos,
			     unsigned min_replicas)
{
	struct bch_folio *s = bch2_folio(folio);
	unsigned i, sectors = folio_sectors(folio);

	if (s)
		for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
			if (s->s[i].state >= SECTOR_dirty &&
			    s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
				return i << SECTOR_SHIFT;

	return -1;
}
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset,
				       unsigned min_replicas)
{
	struct folio_batch fbatch;
	pgoff_t start_index	= start_offset >> PAGE_SHIFT;
	pgoff_t end_index	= end_offset >> PAGE_SHIFT;
	pgoff_t index		= start_index;
	unsigned i;
	loff_t ret;
	int offset;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(vinode->i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			offset = folio_data_offset(folio,
					max(folio_pos(folio), start_offset),
					min_replicas);
			if (offset >= 0) {
				ret = clamp(folio_pos(folio) + offset,
					    start_offset, end_offset);
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				return ret;
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return end_offset;
}
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_data = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
			   POS(inode->v.i_ino, U64_MAX),
			   0, k, ret) {
		if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
						     offset, next_data, 0);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
static bool folio_hole_offset(struct address_space *mapping, loff_t *offset,
			      unsigned min_replicas)
{
	struct folio *folio;
	struct bch_folio *s;
	unsigned i, sectors;
	bool ret = true;

	folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
	if (IS_ERR_OR_NULL(folio))
		return true;

	s = bch2_folio(folio);
	if (!s)
		goto unlock;

	sectors = folio_sectors(folio);
	for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
		if (s->s[i].state < SECTOR_dirty ||
		    s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
			*offset = max(*offset,
				      folio_pos(folio) + (i << SECTOR_SHIFT));
			goto unlock;
		}

	*offset = folio_end_pos(folio);
	ret = false;
unlock:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset,
				       unsigned min_replicas)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset;

	while (offset < end_offset &&
	       !folio_hole_offset(mapping, &offset, min_replicas))
		;

	return min(offset, end_offset);
}
static void bch2_clamp_data_hole(struct inode *inode,
				 u64 *hole_start,
				 u64 *hole_end,
				 unsigned min_replicas)
{
	*hole_start = bch2_seek_pagecache_hole(inode,
		*hole_start << 9, *hole_end << 9, min_replicas) >> 9;

	if (*hole_start == *hole_end)
		return;

	*hole_end = bch2_seek_pagecache_data(inode,
		*hole_start << 9, *hole_end << 9, min_replicas) >> 9;
}
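
/*
 * SEEK_HOLE has to consult both the extents btree and the page cache: a hole
 * in the btree doesn't count if dirty data in the cache covers it, so every
 * candidate hole found in the btree is narrowed through
 * bch2_seek_pagecache_hole() before being reported.
 */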
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
			   BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE, 0);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9, 0);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		ret = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_DATA:
		ret = bch2_seek_data(file, offset);
		break;
	case SEEK_HOLE:
		ret = bch2_seek_hole(file, offset);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return bch2_err_class(ret);
}
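
/*
 * The biosets below are sized via offsetof() so that each bio allocation
 * carries its containing writepage/dio state as front padding; since biosets
 * are mempool backed, the main I/O paths can then make forward progress
 * without separate allocations that could fail under memory pressure.
 */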
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->nocow_flush_bioset);
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}
int bch2_fs_fsio_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	if (bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_read_bioset_init;

	if (bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_write_bioset_init;

	if (bioset_init(&c->nocow_flush_bioset,
			1, offsetof(struct nocow_flush, bio), 0))
		return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */