1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_foreground.h"
7 #include "btree_update.h"
12 #include "extent_update.h"
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
44 static void nocow_flush_endio(struct bio *_bio)
47 struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
50 percpu_ref_put(&bio->ca->io_ref);
54 static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
55 struct bch_inode_info *inode,
58 struct nocow_flush *bio;
60 struct bch_devs_mask devs;
63 dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
64 if (dev == BCH_SB_MEMBERS_MAX)
67 devs = inode->ei_devs_need_flush;
68 memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
70 for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
72 ca = rcu_dereference(c->devs[dev]);
73 if (ca && !percpu_ref_tryget(&ca->io_ref))
80 bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
83 &c->nocow_flush_bioset),
84 struct nocow_flush, bio);
87 bio->bio.bi_end_io = nocow_flush_endio;
88 closure_bio_submit(&bio->bio, cl);
92 static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
93 struct bch_inode_info *inode)
97 closure_init_stack(&cl);
98 bch2_inode_flush_nocow_writes_async(c, inode, &cl);
104 static inline bool bio_full(struct bio *bio, unsigned len)
106 if (bio->bi_vcnt >= bio->bi_max_vecs)
108 if (bio->bi_iter.bi_size > UINT_MAX - len)
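/*
 * bi_size is a u32: the check above guards the eventual bi_size + len
 * against u32 overflow, written as a subtraction so the comparison
 * itself can't wrap.
 */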
113 static inline struct address_space *faults_disabled_mapping(void)
115 return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
118 static inline void set_fdm_dropped_locks(void)
120 current->faults_disabled_mapping =
121 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
124 static inline bool fdm_dropped_locks(void)
126 return ((unsigned long) current->faults_disabled_mapping) & 1;
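/*
 * A minimal sketch of the tagging scheme above - assuming the mapping
 * pointer is at least 2-byte aligned, bit 0 is free to carry the
 * "locks were dropped" flag:
 *
 *	void *tagged   = (void *) ((unsigned long) ptr | 1UL);
 *	void *untagged = (void *) ((unsigned long) tagged & ~1UL);
 *	bool dropped   = (unsigned long) tagged & 1;
 */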
133 struct bch_writepage_io {
134 struct bch_inode_info *inode;
137 struct bch_write_op op;
142 struct address_space *mapping;
143 struct bch_inode_info *inode;
144 struct mm_struct *mm;
150 struct quota_res quota_res;
153 struct iov_iter iter;
154 struct iovec inline_vecs[2];
157 struct bch_write_op op;
165 struct bch_read_bio rbio;
168 /* pagecache_block must be held */
169 static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
170 loff_t start, loff_t end)
175 * XXX: the way this is currently implemented, we can spin if a process
176 * is continually redirtying a specific page
179 if (!mapping->nrpages)
182 ret = filemap_write_and_wait_range(mapping, start, end);
186 if (!mapping->nrpages)
189 ret = invalidate_inode_pages2_range(mapping,
192 } while (ret == -EBUSY);
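/*
 * invalidate_inode_pages2_range() returns -EBUSY when a page couldn't
 * be invalidated - e.g. it was redirtied after the flush above - hence
 * the retry loop (and the potential spin noted in the XXX comment).
 */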
199 #ifdef CONFIG_BCACHEFS_QUOTA
201 static void __bch2_quota_reservation_put(struct bch_fs *c,
202 struct bch_inode_info *inode,
203 struct quota_res *res)
205 BUG_ON(res->sectors > inode->ei_quota_reserved);
207 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
208 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
209 inode->ei_quota_reserved -= res->sectors;
213 static void bch2_quota_reservation_put(struct bch_fs *c,
214 struct bch_inode_info *inode,
215 struct quota_res *res)
218 mutex_lock(&inode->ei_quota_lock);
219 __bch2_quota_reservation_put(c, inode, res);
220 mutex_unlock(&inode->ei_quota_lock);
224 static int bch2_quota_reservation_add(struct bch_fs *c,
225 struct bch_inode_info *inode,
226 struct quota_res *res,
232 mutex_lock(&inode->ei_quota_lock);
233 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
234 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
236 inode->ei_quota_reserved += sectors;
237 res->sectors += sectors;
239 mutex_unlock(&inode->ei_quota_lock);
246 static void __bch2_quota_reservation_put(struct bch_fs *c,
247 struct bch_inode_info *inode,
248 struct quota_res *res) {}
250 static void bch2_quota_reservation_put(struct bch_fs *c,
251 struct bch_inode_info *inode,
252 struct quota_res *res) {}
254 static int bch2_quota_reservation_add(struct bch_fs *c,
255 struct bch_inode_info *inode,
256 struct quota_res *res,
265 /* i_size updates: */
267 struct inode_new_size {
273 static int inode_set_size(struct bch_inode_info *inode,
274 struct bch_inode_unpacked *bi,
277 struct inode_new_size *s = p;
279 bi->bi_size = s->new_size;
280 if (s->fields & ATTR_ATIME)
281 bi->bi_atime = s->now;
282 if (s->fields & ATTR_MTIME)
283 bi->bi_mtime = s->now;
284 if (s->fields & ATTR_CTIME)
285 bi->bi_ctime = s->now;
290 int __must_check bch2_write_inode_size(struct bch_fs *c,
291 struct bch_inode_info *inode,
292 loff_t new_size, unsigned fields)
294 struct inode_new_size s = {
295 .new_size = new_size,
296 .now = bch2_current_time(c),
300 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
303 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
304 struct quota_res *quota_res, s64 sectors)
306 bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
307 "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
308 inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
309 inode->ei_inode.bi_sectors);
310 inode->v.i_blocks += sectors;
312 #ifdef CONFIG_BCACHEFS_QUOTA
313 if (quota_res && sectors > 0) {
314 BUG_ON(sectors > quota_res->sectors);
315 BUG_ON(sectors > inode->ei_quota_reserved);
317 quota_res->sectors -= sectors;
318 inode->ei_quota_reserved -= sectors;
320 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
325 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
326 struct quota_res *quota_res, s64 sectors)
329 mutex_lock(&inode->ei_quota_lock);
330 __i_sectors_acct(c, inode, quota_res, sectors);
331 mutex_unlock(&inode->ei_quota_lock);
337 /* stored in page->private: */
339 struct bch_page_sector {
340 /* Uncompressed, fully allocated replicas (or on disk reservation): */
341 unsigned nr_replicas:4;
343 /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
344 unsigned replicas_reserved:4;
351 SECTOR_DIRTY_RESERVED,
356 struct bch_page_state {
358 atomic_t write_count;
360 struct bch_page_sector s[PAGE_SECTORS];
363 static inline struct bch_page_state *__bch2_page_state(struct page *page)
365 return page_has_private(page)
366 ? (struct bch_page_state *) page_private(page)
370 static inline struct bch_page_state *bch2_page_state(struct page *page)
372 EBUG_ON(!PageLocked(page));
374 return __bch2_page_state(page);
377 /* for newly allocated pages: */
378 static void __bch2_page_state_release(struct page *page)
380 kfree(detach_page_private(page));
383 static void bch2_page_state_release(struct page *page)
385 EBUG_ON(!PageLocked(page));
386 __bch2_page_state_release(page);
389 /* for newly allocated pages: */
390 static struct bch_page_state *__bch2_page_state_create(struct page *page,
393 struct bch_page_state *s;
395 s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
399 spin_lock_init(&s->lock);
400 attach_page_private(page, s);
404 static struct bch_page_state *bch2_page_state_create(struct page *page,
407 return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
410 static unsigned bkey_to_sector_state(struct bkey_s_c k)
412 if (bkey_extent_is_reservation(k))
413 return SECTOR_RESERVED;
414 if (bkey_extent_is_allocation(k.k))
415 return SECTOR_ALLOCATED;
416 return SECTOR_UNALLOCATED;
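/*
 * Note: the sector states are ordered; the writepage path tests
 * "state < SECTOR_DIRTY" to skip sectors that are only
 * SECTOR_UNALLOCATED or SECTOR_RESERVED.
 */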
419 static void __bch2_page_state_set(struct page *page,
420 unsigned pg_offset, unsigned pg_len,
421 unsigned nr_ptrs, unsigned state)
423 struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
426 BUG_ON(pg_offset >= PAGE_SECTORS);
427 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
431 for (i = pg_offset; i < pg_offset + pg_len; i++) {
432 s->s[i].nr_replicas = nr_ptrs;
433 s->s[i].state = state;
436 if (i == PAGE_SECTORS)
439 spin_unlock(&s->lock);
442 static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
443 struct page **pages, unsigned nr_pages)
445 struct btree_trans trans;
446 struct btree_iter iter;
448 u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
453 bch2_trans_init(&trans, c, 0, 0);
455 bch2_trans_begin(&trans);
457 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
461 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
462 SPOS(inum.inum, offset, snapshot),
463 BTREE_ITER_SLOTS, k, ret) {
464 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
465 unsigned state = bkey_to_sector_state(k);
467 while (pg_idx < nr_pages) {
468 struct page *page = pages[pg_idx];
469 u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
470 u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
471 unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
472 unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
474 BUG_ON(k.k->p.offset < pg_start);
475 BUG_ON(bkey_start_offset(k.k) > pg_end);
477 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
478 __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
480 if (k.k->p.offset < pg_end)
485 if (pg_idx == nr_pages)
489 offset = iter.pos.offset;
490 bch2_trans_iter_exit(&trans, &iter);
492 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
494 bch2_trans_exit(&trans);
499 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
501 struct bvec_iter iter;
503 unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
504 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
505 unsigned state = bkey_to_sector_state(k);
507 bio_for_each_segment(bv, bio, iter)
508 __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
509 bv.bv_len >> 9, nr_ptrs, state);
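/* bv_offset/bv_len above are in bytes; >> 9 converts to 512-byte sectors */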
512 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
515 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
516 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
517 struct folio_batch fbatch;
523 folio_batch_init(&fbatch);
525 while (filemap_get_folios(inode->v.i_mapping,
526 &index, end_index, &fbatch)) {
527 for (i = 0; i < folio_batch_count(&fbatch); i++) {
528 struct folio *folio = fbatch.folios[i];
529 u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
530 u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
531 unsigned pg_offset = max(start, pg_start) - pg_start;
532 unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
533 struct bch_page_state *s;
535 BUG_ON(end <= pg_start);
536 BUG_ON(pg_offset >= PAGE_SECTORS);
537 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
540 s = bch2_page_state(&folio->page);
544 for (j = pg_offset; j < pg_offset + pg_len; j++)
545 s->s[j].nr_replicas = 0;
546 spin_unlock(&s->lock);
551 folio_batch_release(&fbatch);
556 static void mark_pagecache_reserved(struct bch_inode_info *inode,
559 struct bch_fs *c = inode->v.i_sb->s_fs_info;
560 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
561 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
562 struct folio_batch fbatch;
563 s64 i_sectors_delta = 0;
569 folio_batch_init(&fbatch);
571 while (filemap_get_folios(inode->v.i_mapping,
572 &index, end_index, &fbatch)) {
573 for (i = 0; i < folio_batch_count(&fbatch); i++) {
574 struct folio *folio = fbatch.folios[i];
575 u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
576 u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
577 unsigned pg_offset = max(start, pg_start) - pg_start;
578 unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
579 struct bch_page_state *s;
581 BUG_ON(end <= pg_start);
582 BUG_ON(pg_offset >= PAGE_SECTORS);
583 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
586 s = bch2_page_state(&folio->page);
590 for (j = pg_offset; j < pg_offset + pg_len; j++)
591 switch (s->s[j].state) {
592 case SECTOR_UNALLOCATED:
593 s->s[j].state = SECTOR_RESERVED;
596 s->s[j].state = SECTOR_DIRTY_RESERVED;
602 spin_unlock(&s->lock);
607 folio_batch_release(&fbatch);
611 i_sectors_acct(c, inode, NULL, i_sectors_delta);
614 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
616 /* XXX: this should not be open coded */
617 return inode->ei_inode.bi_data_replicas
618 ? inode->ei_inode.bi_data_replicas - 1
619 : c->opts.data_replicas;
622 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
623 unsigned nr_replicas)
625 return max(0, (int) nr_replicas -
627 s->replicas_reserved);
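/*
 * Worked example: with nr_replicas = 2, a sector with one fully
 * allocated replica (s->nr_replicas == 1) and no existing reservation
 * needs max(0, 2 - 1 - 0) = 1 more sector of disk reservation.
 */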
630 static int bch2_get_page_disk_reservation(struct bch_fs *c,
631 struct bch_inode_info *inode,
632 struct page *page, bool check_enospc)
634 struct bch_page_state *s = bch2_page_state_create(page, 0);
635 unsigned nr_replicas = inode_nr_replicas(c, inode);
636 struct disk_reservation disk_res = { 0 };
637 unsigned i, disk_res_sectors = 0;
643 for (i = 0; i < ARRAY_SIZE(s->s); i++)
644 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
646 if (!disk_res_sectors)
649 ret = bch2_disk_reservation_get(c, &disk_res,
652 ? BCH_DISK_RESERVATION_NOFAIL
657 for (i = 0; i < ARRAY_SIZE(s->s); i++)
658 s->s[i].replicas_reserved +=
659 sectors_to_reserve(&s->s[i], nr_replicas);
664 struct bch2_page_reservation {
665 struct disk_reservation disk;
666 struct quota_res quota;
669 static void bch2_page_reservation_init(struct bch_fs *c,
670 struct bch_inode_info *inode,
671 struct bch2_page_reservation *res)
673 memset(res, 0, sizeof(*res));
675 res->disk.nr_replicas = inode_nr_replicas(c, inode);
678 static void bch2_page_reservation_put(struct bch_fs *c,
679 struct bch_inode_info *inode,
680 struct bch2_page_reservation *res)
682 bch2_disk_reservation_put(c, &res->disk);
683 bch2_quota_reservation_put(c, inode, &res->quota);
686 static int bch2_page_reservation_get(struct bch_fs *c,
687 struct bch_inode_info *inode, struct page *page,
688 struct bch2_page_reservation *res,
689 unsigned offset, unsigned len)
691 struct bch_page_state *s = bch2_page_state_create(page, 0);
692 unsigned i, disk_sectors = 0, quota_sectors = 0;
698 BUG_ON(!s->uptodate);
700 for (i = round_down(offset, block_bytes(c)) >> 9;
701 i < round_up(offset + len, block_bytes(c)) >> 9;
703 disk_sectors += sectors_to_reserve(&s->s[i],
704 res->disk.nr_replicas);
705 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
709 ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
715 ret = bch2_quota_reservation_add(c, inode, &res->quota,
716 quota_sectors, true);
718 struct disk_reservation tmp = {
719 .sectors = disk_sectors
722 bch2_disk_reservation_put(c, &tmp);
723 res->disk.sectors -= disk_sectors;
731 static void bch2_clear_page_bits(struct page *page)
733 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
734 struct bch_fs *c = inode->v.i_sb->s_fs_info;
735 struct bch_page_state *s = bch2_page_state(page);
736 struct disk_reservation disk_res = { 0 };
737 int i, dirty_sectors = 0;
742 EBUG_ON(!PageLocked(page));
743 EBUG_ON(PageWriteback(page));
745 for (i = 0; i < ARRAY_SIZE(s->s); i++) {
746 disk_res.sectors += s->s[i].replicas_reserved;
747 s->s[i].replicas_reserved = 0;
749 switch (s->s[i].state) {
751 s->s[i].state = SECTOR_UNALLOCATED;
754 case SECTOR_DIRTY_RESERVED:
755 s->s[i].state = SECTOR_RESERVED;
762 bch2_disk_reservation_put(c, &disk_res);
764 i_sectors_acct(c, inode, NULL, dirty_sectors);
766 bch2_page_state_release(page);
769 static void bch2_set_page_dirty(struct bch_fs *c,
770 struct bch_inode_info *inode, struct page *page,
771 struct bch2_page_reservation *res,
772 unsigned offset, unsigned len)
774 struct bch_page_state *s = bch2_page_state(page);
775 unsigned i, dirty_sectors = 0;
777 WARN_ON((u64) page_offset(page) + offset + len >
778 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
782 for (i = round_down(offset, block_bytes(c)) >> 9;
783 i < round_up(offset + len, block_bytes(c)) >> 9;
785 unsigned sectors = sectors_to_reserve(&s->s[i],
786 res->disk.nr_replicas);
789 * This can happen if we race with the error path in
790 * bch2_writepage_io_done():
792 sectors = min_t(unsigned, sectors, res->disk.sectors);
794 s->s[i].replicas_reserved += sectors;
795 res->disk.sectors -= sectors;
797 switch (s->s[i].state) {
798 case SECTOR_UNALLOCATED:
799 s->s[i].state = SECTOR_DIRTY;
802 case SECTOR_RESERVED:
803 s->s[i].state = SECTOR_DIRTY_RESERVED;
810 spin_unlock(&s->lock);
812 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
814 if (!PageDirty(page))
815 __set_page_dirty_nobuffers(page);
818 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
820 struct file *file = vmf->vma->vm_file;
821 struct address_space *mapping = file->f_mapping;
822 struct address_space *fdm = faults_disabled_mapping();
823 struct bch_inode_info *inode = file_bch_inode(file);
827 return VM_FAULT_SIGBUS;
831 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
833 if (bch2_pagecache_add_tryget(inode))
836 bch2_pagecache_block_put(fdm_host);
838 bch2_pagecache_add_get(inode);
839 bch2_pagecache_add_put(inode);
841 bch2_pagecache_block_get(fdm_host);
843 /* Signal that lock has been dropped: */
844 set_fdm_dropped_locks();
845 return VM_FAULT_SIGBUS;
848 bch2_pagecache_add_get(inode);
850 ret = filemap_fault(vmf);
851 bch2_pagecache_add_put(inode);
856 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
858 struct page *page = vmf->page;
859 struct file *file = vmf->vma->vm_file;
860 struct bch_inode_info *inode = file_bch_inode(file);
861 struct address_space *mapping = file->f_mapping;
862 struct bch_fs *c = inode->v.i_sb->s_fs_info;
863 struct bch2_page_reservation res;
868 bch2_page_reservation_init(c, inode, &res);
870 sb_start_pagefault(inode->v.i_sb);
871 file_update_time(file);
874 * Not strictly necessary, but helps avoid dio writes livelocking in
875 * write_invalidate_inode_pages_range() - can drop this if/when we get
876 * a write_invalidate_inode_pages_range() that works without dropping
877 * page lock before invalidating page
879 bch2_pagecache_add_get(inode);
882 isize = i_size_read(&inode->v);
884 if (page->mapping != mapping || page_offset(page) >= isize) {
886 ret = VM_FAULT_NOPAGE;
890 len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
892 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
893 if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
895 ret = VM_FAULT_SIGBUS;
900 if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
902 ret = VM_FAULT_SIGBUS;
906 bch2_set_page_dirty(c, inode, page, &res, 0, len);
907 bch2_page_reservation_put(c, inode, &res);
909 wait_for_stable_page(page);
910 ret = VM_FAULT_LOCKED;
912 bch2_pagecache_add_put(inode);
913 sb_end_pagefault(inode->v.i_sb);
918 void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
920 if (offset || length < folio_size(folio))
923 bch2_clear_page_bits(&folio->page);
926 bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
928 if (folio_test_dirty(folio) || folio_test_writeback(folio))
931 bch2_clear_page_bits(&folio->page);
937 static void bch2_readpages_end_io(struct bio *bio)
939 struct bvec_iter_all iter;
942 bio_for_each_segment_all(bv, bio, iter) {
943 struct page *page = bv->bv_page;
945 if (!bio->bi_status) {
946 SetPageUptodate(page);
948 ClearPageUptodate(page);
957 struct readpages_iter {
958 struct address_space *mapping;
965 static int readpages_iter_init(struct readpages_iter *iter,
966 struct readahead_control *ractl)
968 unsigned i, nr_pages = readahead_count(ractl);
970 memset(iter, 0, sizeof(*iter));
972 iter->mapping = ractl->mapping;
973 iter->offset = readahead_index(ractl);
974 iter->nr_pages = nr_pages;
976 iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
980 nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
981 for (i = 0; i < nr_pages; i++) {
982 __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
983 put_page(iter->pages[i]);
989 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
991 if (iter->idx >= iter->nr_pages)
994 EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
996 return iter->pages[iter->idx];
999 static bool extent_partial_reads_expensive(struct bkey_s_c k)
1001 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1002 struct bch_extent_crc_unpacked crc;
1003 const union bch_extent_entry *i;
1005 bkey_for_each_crc(k.k, ptrs, crc, i)
1006 if (crc.csum_type || crc.compression_type)
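/*
 * Checksummed or compressed extents must be read and verified (or
 * decompressed) in full, so it's cheaper to widen this read to the
 * whole extent than to fetch a fragment now and the rest later.
 */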
1011 static void readpage_bio_extend(struct readpages_iter *iter,
1013 unsigned sectors_this_extent,
1016 while (bio_sectors(bio) < sectors_this_extent &&
1017 bio->bi_vcnt < bio->bi_max_vecs) {
1018 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
1019 struct page *page = readpage_iter_next(iter);
1023 if (iter->offset + iter->idx != page_offset)
1031 page = xa_load(&iter->mapping->i_pages, page_offset);
1032 if (page && !xa_is_value(page))
1035 page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
1039 if (!__bch2_page_state_create(page, 0)) {
1044 ret = add_to_page_cache_lru(page, iter->mapping,
1045 page_offset, GFP_NOFS);
1047 __bch2_page_state_release(page);
1055 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
1059 static void bchfs_read(struct btree_trans *trans,
1060 struct bch_read_bio *rbio,
1062 struct readpages_iter *readpages_iter)
1064 struct bch_fs *c = trans->c;
1065 struct btree_iter iter;
1067 int flags = BCH_READ_RETRY_IF_STALE|
1068 BCH_READ_MAY_PROMOTE;
1073 rbio->start_time = local_clock();
1074 rbio->subvol = inum.subvol;
1076 bch2_bkey_buf_init(&sk);
1078 bch2_trans_begin(trans);
1079 iter = (struct btree_iter) { NULL };
1081 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1085 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1086 SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1090 unsigned bytes, sectors, offset_into_extent;
1091 enum btree_id data_btree = BTREE_ID_extents;
1094 * read_extent -> io_time_reset may cause a transaction restart
1095 * without returning an error, we need to check for that here:
1097 ret = bch2_trans_relock(trans);
1101 bch2_btree_iter_set_pos(&iter,
1102 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1104 k = bch2_btree_iter_peek_slot(&iter);
1109 offset_into_extent = iter.pos.offset -
1110 bkey_start_offset(k.k);
1111 sectors = k.k->size - offset_into_extent;
1113 bch2_bkey_buf_reassemble(&sk, c, k);
1115 ret = bch2_read_indirect_extent(trans, &data_btree,
1116 &offset_into_extent, &sk);
1120 k = bkey_i_to_s_c(sk.k);
1122 sectors = min(sectors, k.k->size - offset_into_extent);
1125 readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1126 extent_partial_reads_expensive(k));
1128 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1129 swap(rbio->bio.bi_iter.bi_size, bytes);
1131 if (rbio->bio.bi_iter.bi_size == bytes)
1132 flags |= BCH_READ_LAST_FRAGMENT;
1134 bch2_bio_page_state_set(&rbio->bio, k);
1136 bch2_read_extent(trans, rbio, iter.pos,
1137 data_btree, k, offset_into_extent, flags);
1139 if (flags & BCH_READ_LAST_FRAGMENT)
1142 swap(rbio->bio.bi_iter.bi_size, bytes);
1143 bio_advance(&rbio->bio, bytes);
1145 ret = btree_trans_too_many_iters(trans);
1150 bch2_trans_iter_exit(trans, &iter);
1152 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1156 bch_err_inum_offset_ratelimited(c,
1158 iter.pos.offset << 9,
1159 "read error %i from btree lookup", ret);
1160 rbio->bio.bi_status = BLK_STS_IOERR;
1161 bio_endio(&rbio->bio);
1164 bch2_bkey_buf_exit(&sk, c);
1167 void bch2_readahead(struct readahead_control *ractl)
1169 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1170 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1171 struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1172 struct btree_trans trans;
1174 struct readpages_iter readpages_iter;
1177 ret = readpages_iter_init(&readpages_iter, ractl);
1180 bch2_trans_init(&trans, c, 0, 0);
1182 bch2_pagecache_add_get(inode);
1184 while ((page = readpage_iter_next(&readpages_iter))) {
1185 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1186 unsigned n = min_t(unsigned,
1187 readpages_iter.nr_pages -
1190 struct bch_read_bio *rbio =
1191 rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
1192 GFP_NOFS, &c->bio_read),
1195 readpages_iter.idx++;
1197 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
1198 rbio->bio.bi_end_io = bch2_readpages_end_io;
1199 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1201 bchfs_read(&trans, rbio, inode_inum(inode),
1205 bch2_pagecache_add_put(inode);
1207 bch2_trans_exit(&trans);
1208 kfree(readpages_iter.pages);
1211 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1212 subvol_inum inum, struct page *page)
1214 struct btree_trans trans;
1216 bch2_page_state_create(page, __GFP_NOFAIL);
1218 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1219 rbio->bio.bi_iter.bi_sector =
1220 (sector_t) page->index << PAGE_SECTORS_SHIFT;
1221 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1223 bch2_trans_init(&trans, c, 0, 0);
1224 bchfs_read(&trans, rbio, inum, NULL);
1225 bch2_trans_exit(&trans);
1228 static void bch2_read_single_page_end_io(struct bio *bio)
1230 complete(bio->bi_private);
1233 static int bch2_read_single_page(struct page *page,
1234 struct address_space *mapping)
1236 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1237 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1238 struct bch_read_bio *rbio;
1240 DECLARE_COMPLETION_ONSTACK(done);
1242 rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
1243 io_opts(c, &inode->ei_inode));
1244 rbio->bio.bi_private = &done;
1245 rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1247 __bchfs_readpage(c, rbio, inode_inum(inode), page);
1248 wait_for_completion(&done);
1250 ret = blk_status_to_errno(rbio->bio.bi_status);
1251 bio_put(&rbio->bio);
1256 SetPageUptodate(page);
1260 int bch2_read_folio(struct file *file, struct folio *folio)
1262 struct page *page = &folio->page;
1265 ret = bch2_read_single_page(page, page->mapping);
1266 folio_unlock(folio);
1267 return bch2_err_class(ret);
1272 struct bch_writepage_state {
1273 struct bch_writepage_io *io;
1274 struct bch_io_opts opts;
1277 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1278 struct bch_inode_info *inode)
1280 return (struct bch_writepage_state) {
1281 .opts = io_opts(c, &inode->ei_inode)
1285 static void bch2_writepage_io_done(struct bch_write_op *op)
1287 struct bch_writepage_io *io =
1288 container_of(op, struct bch_writepage_io, op);
1289 struct bch_fs *c = io->op.c;
1290 struct bio *bio = &io->op.wbio.bio;
1291 struct bvec_iter_all iter;
1292 struct bio_vec *bvec;
1296 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1298 bio_for_each_segment_all(bvec, bio, iter) {
1299 struct bch_page_state *s;
1301 SetPageError(bvec->bv_page);
1302 mapping_set_error(bvec->bv_page->mapping, -EIO);
1304 s = __bch2_page_state(bvec->bv_page);
1305 spin_lock(&s->lock);
1306 for (i = 0; i < PAGE_SECTORS; i++)
1307 s->s[i].nr_replicas = 0;
1308 spin_unlock(&s->lock);
1312 if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1313 bio_for_each_segment_all(bvec, bio, iter) {
1314 struct bch_page_state *s;
1316 s = __bch2_page_state(bvec->bv_page);
1317 spin_lock(&s->lock);
1318 for (i = 0; i < PAGE_SECTORS; i++)
1319 s->s[i].nr_replicas = 0;
1320 spin_unlock(&s->lock);
1325 * racing with fallocate can cause us to add fewer sectors than
1326 * expected - but we shouldn't add more sectors than expected:
1328 WARN_ON_ONCE(io->op.i_sectors_delta > 0);
1331 * (error (due to going RO) halfway through a page can screw that up
1334 BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1338 * PageWriteback is effectively our ref on the inode - fixup i_blocks
1339 * before calling end_page_writeback:
1341 i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1343 bio_for_each_segment_all(bvec, bio, iter) {
1344 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1346 if (atomic_dec_and_test(&s->write_count))
1347 end_page_writeback(bvec->bv_page);
1350 bio_put(&io->op.wbio.bio);
1353 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1355 struct bch_writepage_io *io = w->io;
1358 closure_call(&io->op.cl, bch2_write, NULL, NULL);
1362 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1363 * possible, else allocating a new one:
1365 static void bch2_writepage_io_alloc(struct bch_fs *c,
1366 struct writeback_control *wbc,
1367 struct bch_writepage_state *w,
1368 struct bch_inode_info *inode,
1370 unsigned nr_replicas)
1372 struct bch_write_op *op;
1374 w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
1377 &c->writepage_bioset),
1378 struct bch_writepage_io, op.wbio.bio);
1380 w->io->inode = inode;
1382 bch2_write_op_init(op, c, w->opts);
1383 op->target = w->opts.foreground_target;
1384 op->nr_replicas = nr_replicas;
1385 op->res.nr_replicas = nr_replicas;
1386 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1387 op->subvol = inode->ei_subvol;
1388 op->pos = POS(inode->v.i_ino, sector);
1389 op->end_io = bch2_writepage_io_done;
1390 op->devs_need_flush = &inode->ei_devs_need_flush;
1391 op->wbio.bio.bi_iter.bi_sector = sector;
1392 op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
1395 static int __bch2_writepage(struct page *page,
1396 struct writeback_control *wbc,
1399 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1400 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1401 struct bch_writepage_state *w = data;
1402 struct bch_page_state *s, orig;
1403 unsigned i, offset, nr_replicas_this_write = U32_MAX;
1404 loff_t i_size = i_size_read(&inode->v);
1405 pgoff_t end_index = i_size >> PAGE_SHIFT;
1408 EBUG_ON(!PageUptodate(page));
1410 /* Is the page fully inside i_size? */
1411 if (page->index < end_index)
1414 /* Is the page fully outside i_size? (truncate in progress) */
1415 offset = i_size & (PAGE_SIZE - 1);
1416 if (page->index > end_index || !offset) {
1422 * The page straddles i_size. It must be zeroed out on each and every
1423 * writepage invocation because it may be mmapped. "A file is mapped
1424 * in multiples of the page size. For a file that is not a multiple of
1425 * the page size, the remaining memory is zeroed when mapped, and
1426 * writes to that region are not written out to the file."
1428 zero_user_segment(page, offset, PAGE_SIZE);
1430 s = bch2_page_state_create(page, __GFP_NOFAIL);
1433 * Things get really hairy with errors during writeback:
1435 ret = bch2_get_page_disk_reservation(c, inode, page, false);
1438 /* Before unlocking the page, get a copy of the reservations: */
1439 spin_lock(&s->lock);
1441 spin_unlock(&s->lock);
1443 for (i = 0; i < PAGE_SECTORS; i++) {
1444 if (s->s[i].state < SECTOR_DIRTY)
1447 nr_replicas_this_write =
1448 min_t(unsigned, nr_replicas_this_write,
1449 s->s[i].nr_replicas +
1450 s->s[i].replicas_reserved);
1453 for (i = 0; i < PAGE_SECTORS; i++) {
1454 if (s->s[i].state < SECTOR_DIRTY)
1457 s->s[i].nr_replicas = w->opts.compression
1458 ? 0 : nr_replicas_this_write;
1460 s->s[i].replicas_reserved = 0;
1461 s->s[i].state = SECTOR_ALLOCATED;
1464 BUG_ON(atomic_read(&s->write_count));
1465 atomic_set(&s->write_count, 1);
1467 BUG_ON(PageWriteback(page));
1468 set_page_writeback(page);
1474 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1477 while (offset < PAGE_SECTORS &&
1478 orig.s[offset].state < SECTOR_DIRTY)
1481 if (offset == PAGE_SECTORS)
1484 while (offset + sectors < PAGE_SECTORS &&
1485 orig.s[offset + sectors].state >= SECTOR_DIRTY) {
1486 reserved_sectors += orig.s[offset + sectors].replicas_reserved;
1487 dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
1492 sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
1495 (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1496 bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1497 w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1498 (BIO_MAX_VECS * PAGE_SIZE) ||
1499 bio_end_sector(&w->io->op.wbio.bio) != sector))
1500 bch2_writepage_do_io(w);
1503 bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1504 nr_replicas_this_write);
1506 atomic_inc(&s->write_count);
1508 BUG_ON(inode != w->io->inode);
1509 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1510 sectors << 9, offset << 9));
1512 /* Check for writing past i_size: */
1513 WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1514 round_up(i_size, block_bytes(c)) &&
1515 !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
1516 "writing past i_size: %llu > %llu (unrounded %llu)\n",
1517 bio_end_sector(&w->io->op.wbio.bio) << 9,
1518 round_up(i_size, block_bytes(c)),
1521 w->io->op.res.sectors += reserved_sectors;
1522 w->io->op.i_sectors_delta -= dirty_sectors;
1523 w->io->op.new_i_size = i_size;
1528 if (atomic_dec_and_test(&s->write_count))
1529 end_page_writeback(page);
1534 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1536 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1537 struct bch_writepage_state w =
1538 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1539 struct blk_plug plug;
1542 blk_start_plug(&plug);
1543 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1545 bch2_writepage_do_io(&w);
1546 blk_finish_plug(&plug);
1547 return bch2_err_class(ret);
1550 /* buffered writes: */
1552 int bch2_write_begin(struct file *file, struct address_space *mapping,
1553 loff_t pos, unsigned len,
1554 struct page **pagep, void **fsdata)
1556 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1557 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1558 struct bch2_page_reservation *res;
1559 pgoff_t index = pos >> PAGE_SHIFT;
1560 unsigned offset = pos & (PAGE_SIZE - 1);
1564 res = kmalloc(sizeof(*res), GFP_KERNEL);
1568 bch2_page_reservation_init(c, inode, res);
1571 bch2_pagecache_add_get(inode);
1573 page = grab_cache_page_write_begin(mapping, index);
1577 if (PageUptodate(page))
1580 /* If we're writing the entire page, we don't need to read it in first: */
1581 if (len == PAGE_SIZE)
1584 if (!offset && pos + len >= inode->v.i_size) {
1585 zero_user_segment(page, len, PAGE_SIZE);
1586 flush_dcache_page(page);
1590 if (index > inode->v.i_size >> PAGE_SHIFT) {
1591 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1592 flush_dcache_page(page);
1596 ret = bch2_read_single_page(page, mapping);
1600 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1601 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1606 ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
1608 if (!PageUptodate(page)) {
1610 * If the page hasn't been read in, we won't know whether we
1611 * actually need a reservation - but we don't need to read it
1612 * here, we just need to check whether the page is fully
1613 * backed by uncompressed data:
1628 bch2_pagecache_add_put(inode);
1631 return bch2_err_class(ret);
1634 int bch2_write_end(struct file *file, struct address_space *mapping,
1635 loff_t pos, unsigned len, unsigned copied,
1636 struct page *page, void *fsdata)
1638 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1639 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1640 struct bch2_page_reservation *res = fsdata;
1641 unsigned offset = pos & (PAGE_SIZE - 1);
1643 lockdep_assert_held(&inode->v.i_rwsem);
1645 if (unlikely(copied < len && !PageUptodate(page))) {
1647 * The page needs to be read in, but that would destroy
1648 * our partial write - simplest thing is to just force
1649 * userspace to redo the write:
1651 zero_user(page, 0, PAGE_SIZE);
1652 flush_dcache_page(page);
1656 spin_lock(&inode->v.i_lock);
1657 if (pos + copied > inode->v.i_size)
1658 i_size_write(&inode->v, pos + copied);
1659 spin_unlock(&inode->v.i_lock);
1662 if (!PageUptodate(page))
1663 SetPageUptodate(page);
1665 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1667 inode->ei_last_dirtied = (unsigned long) current;
1672 bch2_pagecache_add_put(inode);
1674 bch2_page_reservation_put(c, inode, res);
1680 #define WRITE_BATCH_PAGES 32
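/* with 4KiB pages, buffered writes are processed in batches of up to 128KiB */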
1682 static int __bch2_buffered_write(struct bch_inode_info *inode,
1683 struct address_space *mapping,
1684 struct iov_iter *iter,
1685 loff_t pos, unsigned len)
1687 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1688 struct page *pages[WRITE_BATCH_PAGES];
1689 struct bch2_page_reservation res;
1690 unsigned long index = pos >> PAGE_SHIFT;
1691 unsigned offset = pos & (PAGE_SIZE - 1);
1692 unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1693 unsigned i, reserved = 0, set_dirty = 0;
1694 unsigned copied = 0, nr_pages_copied = 0;
1698 BUG_ON(nr_pages > ARRAY_SIZE(pages));
1700 bch2_page_reservation_init(c, inode, &res);
1702 for (i = 0; i < nr_pages; i++) {
1703 pages[i] = grab_cache_page_write_begin(mapping, index + i);
1710 len = min_t(unsigned, len,
1711 nr_pages * PAGE_SIZE - offset);
1716 if (offset && !PageUptodate(pages[0])) {
1717 ret = bch2_read_single_page(pages[0], mapping);
1722 if ((pos + len) & (PAGE_SIZE - 1) &&
1723 !PageUptodate(pages[nr_pages - 1])) {
1724 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1725 zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1727 ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1733 while (reserved < len) {
1734 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1735 struct page *page = pages[i];
1736 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1737 unsigned pg_len = min_t(unsigned, len - reserved,
1738 PAGE_SIZE - pg_offset);
1740 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1741 ret = bch2_page_state_set(c, inode_inum(inode),
1742 pages + i, nr_pages - i);
1748 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
1749 * supposed to write as much as we have disk space for.
1751 * On failure here we should still write out a partial page if
1752 * we aren't completely out of disk space - we don't do that
1755 ret = bch2_page_reservation_get(c, inode, page, &res,
1757 if (unlikely(ret)) {
1766 if (mapping_writably_mapped(mapping))
1767 for (i = 0; i < nr_pages; i++)
1768 flush_dcache_page(pages[i]);
1770 while (copied < reserved) {
1771 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1772 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1773 unsigned pg_len = min_t(unsigned, reserved - copied,
1774 PAGE_SIZE - pg_offset);
1775 unsigned pg_copied = copy_page_from_iter_atomic(page,
1776 pg_offset, pg_len, iter);
1781 if (!PageUptodate(page) &&
1782 pg_copied != PAGE_SIZE &&
1783 pos + copied + pg_copied < inode->v.i_size) {
1784 zero_user(page, 0, PAGE_SIZE);
1788 flush_dcache_page(page);
1789 copied += pg_copied;
1791 if (pg_copied != pg_len)
1798 spin_lock(&inode->v.i_lock);
1799 if (pos + copied > inode->v.i_size)
1800 i_size_write(&inode->v, pos + copied);
1801 spin_unlock(&inode->v.i_lock);
1803 while (set_dirty < copied) {
1804 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1805 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1806 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1807 PAGE_SIZE - pg_offset);
1809 if (!PageUptodate(page))
1810 SetPageUptodate(page);
1812 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1816 set_dirty += pg_len;
1819 nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1820 inode->ei_last_dirtied = (unsigned long) current;
1822 for (i = nr_pages_copied; i < nr_pages; i++) {
1823 unlock_page(pages[i]);
1827 bch2_page_reservation_put(c, inode, &res);
1829 return copied ?: ret;
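/*
 * Short-write convention: if anything was copied, report the byte
 * count and swallow the error - the caller loops and will see the
 * error on the next iteration if it persists.
 */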
1832 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1834 struct file *file = iocb->ki_filp;
1835 struct address_space *mapping = file->f_mapping;
1836 struct bch_inode_info *inode = file_bch_inode(file);
1837 loff_t pos = iocb->ki_pos;
1838 ssize_t written = 0;
1841 bch2_pagecache_add_get(inode);
1844 unsigned offset = pos & (PAGE_SIZE - 1);
1845 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1846 PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1849 * Bring in the user page that we will copy from _first_.
1850 * Otherwise there's a nasty deadlock on copying from the
1851 * same page as we're writing to, without it being marked
1854 * Not only is this an optimisation, but it is also required
1855 * to check that the address is actually valid, when atomic
1856 * usercopies are used, below.
1858 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1859 bytes = min_t(unsigned long, iov_iter_count(iter),
1860 PAGE_SIZE - offset);
1862 if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
1868 if (unlikely(fatal_signal_pending(current))) {
1873 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1874 if (unlikely(ret < 0))
1879 if (unlikely(ret == 0)) {
1881 * If we were unable to copy any data at all, we must
1882 * fall back to a single segment length write.
1884 * If we didn't fall back here, we could livelock
1885 * because not all segments in the iov can be copied at
1886 * once without a pagefault.
1888 bytes = min_t(unsigned long, PAGE_SIZE - offset,
1889 iov_iter_single_seg_count(iter));
1896 balance_dirty_pages_ratelimited(mapping);
1897 } while (iov_iter_count(iter));
1899 bch2_pagecache_add_put(inode);
1901 return written ? written : ret;
1904 /* O_DIRECT reads */
1906 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1909 bio_check_pages_dirty(bio);
1911 bio_release_pages(bio, false);
1916 static void bch2_dio_read_complete(struct closure *cl)
1918 struct dio_read *dio = container_of(cl, struct dio_read, cl);
1920 dio->req->ki_complete(dio->req, dio->ret);
1921 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1924 static void bch2_direct_IO_read_endio(struct bio *bio)
1926 struct dio_read *dio = bio->bi_private;
1929 dio->ret = blk_status_to_errno(bio->bi_status);
1931 closure_put(&dio->cl);
1934 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1936 struct dio_read *dio = bio->bi_private;
1937 bool should_dirty = dio->should_dirty;
1939 bch2_direct_IO_read_endio(bio);
1940 bio_check_or_release(bio, should_dirty);
1943 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1945 struct file *file = req->ki_filp;
1946 struct bch_inode_info *inode = file_bch_inode(file);
1947 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1948 struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1949 struct dio_read *dio;
1951 loff_t offset = req->ki_pos;
1952 bool sync = is_sync_kiocb(req);
1956 if ((offset|iter->count) & (block_bytes(c) - 1))
1959 ret = min_t(loff_t, iter->count,
1960 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1965 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1966 iter->count -= shorten;
1968 bio = bio_alloc_bioset(NULL,
1969 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
1972 &c->dio_read_bioset);
1974 bio->bi_end_io = bch2_direct_IO_read_endio;
1976 dio = container_of(bio, struct dio_read, rbio.bio);
1977 closure_init(&dio->cl, NULL);
1980 * this is a _really_ horrible hack just to avoid an atomic sub at the
1984 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1985 atomic_set(&dio->cl.remaining,
1986 CLOSURE_REMAINING_INITIALIZER -
1988 CLOSURE_DESTRUCTOR);
1990 atomic_set(&dio->cl.remaining,
1991 CLOSURE_REMAINING_INITIALIZER + 1);
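/*
 * i.e. the remaining count is biased up front so the final
 * closure_put() from the last completing bio either runs the
 * completion directly (async case) or just wakes closure_sync()
 * (sync case), without a separate atomic sub here.
 */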
1997 * This is one of the sketchier things I've encountered: we have to skip
1998 * the dirtying of requests that come from inside the kernel (i.e. from
1999 * loopback), because we'll deadlock on page_lock.
2001 dio->should_dirty = iter_is_iovec(iter);
2004 while (iter->count) {
2005 bio = bio_alloc_bioset(NULL,
2006 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2010 bio->bi_end_io = bch2_direct_IO_read_split_endio;
2012 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
2013 bio->bi_iter.bi_sector = offset >> 9;
2014 bio->bi_private = dio;
2016 ret = bio_iov_iter_get_pages(bio, iter);
2018 /* XXX: fault inject this path */
2019 bio->bi_status = BLK_STS_RESOURCE;
2024 offset += bio->bi_iter.bi_size;
2026 if (dio->should_dirty)
2027 bio_set_pages_dirty(bio);
2030 closure_get(&dio->cl);
2032 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
2035 iter->count += shorten;
2038 closure_sync(&dio->cl);
2039 closure_debug_destroy(&dio->cl);
2041 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
2044 return -EIOCBQUEUED;
2048 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2050 struct file *file = iocb->ki_filp;
2051 struct bch_inode_info *inode = file_bch_inode(file);
2052 struct address_space *mapping = file->f_mapping;
2053 size_t count = iov_iter_count(iter);
2057 return 0; /* skip atime */
2059 if (iocb->ki_flags & IOCB_DIRECT) {
2060 struct blk_plug plug;
2062 if (unlikely(mapping->nrpages)) {
2063 ret = filemap_write_and_wait_range(mapping,
2065 iocb->ki_pos + count - 1);
2070 file_accessed(file);
2072 blk_start_plug(&plug);
2073 ret = bch2_direct_IO_read(iocb, iter);
2074 blk_finish_plug(&plug);
2077 iocb->ki_pos += ret;
2079 bch2_pagecache_add_get(inode);
2080 ret = generic_file_read_iter(iocb, iter);
2081 bch2_pagecache_add_put(inode);
2084 return bch2_err_class(ret);
2087 /* O_DIRECT writes */
2089 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2090 u64 offset, u64 size,
2091 unsigned nr_replicas, bool compressed)
2093 struct btree_trans trans;
2094 struct btree_iter iter;
2096 u64 end = offset + size;
2101 bch2_trans_init(&trans, c, 0, 0);
2103 bch2_trans_begin(&trans);
2105 err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2109 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2110 SPOS(inum.inum, offset, snapshot),
2111 BTREE_ITER_SLOTS, k, err) {
2112 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2115 if (k.k->p.snapshot != snapshot ||
2116 nr_replicas > bch2_bkey_replicas(c, k) ||
2117 (!compressed && bch2_bkey_sectors_compressed(k))) {
2123 offset = iter.pos.offset;
2124 bch2_trans_iter_exit(&trans, &iter);
2126 if (bch2_err_matches(err, BCH_ERR_transaction_restart))
2128 bch2_trans_exit(&trans);
2130 return err ? false : ret;
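/*
 * Used when a dio write couldn't get a disk reservation: the write may
 * still proceed as a pure overwrite, provided the whole range is
 * already allocated with at least as many replicas and isn't
 * compressed (rewriting compressed data could need more space).
 */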
2133 static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
2135 struct bch_fs *c = dio->op.c;
2136 struct bch_inode_info *inode = dio->inode;
2137 struct bio *bio = &dio->op.wbio.bio;
2139 return bch2_check_range_allocated(c, inode_inum(inode),
2140 dio->op.pos.offset, bio_sectors(bio),
2141 dio->op.opts.data_replicas,
2142 dio->op.opts.compression != 0);
2145 static void bch2_dio_write_loop_async(struct bch_write_op *);
2146 static __always_inline long bch2_dio_write_done(struct dio_write *dio);
2148 static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
2150 struct iovec *iov = dio->inline_vecs;
2152 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2153 iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
2158 dio->free_iov = true;
2161 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2162 dio->iter.iov = iov;
2166 static void bch2_dio_write_flush_done(struct closure *cl)
2168 struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
2169 struct bch_fs *c = dio->op.c;
2171 closure_debug_destroy(cl);
2173 dio->op.error = bch2_journal_error(&c->journal);
2175 bch2_dio_write_done(dio);
2178 static noinline void bch2_dio_write_flush(struct dio_write *dio)
2180 struct bch_fs *c = dio->op.c;
2181 struct bch_inode_unpacked inode;
2186 closure_init(&dio->op.cl, NULL);
2188 if (!dio->op.error) {
2189 ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
2191 dio->op.error = ret;
2193 bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
2194 bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
2199 closure_sync(&dio->op.cl);
2200 closure_debug_destroy(&dio->op.cl);
2202 continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
2206 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
2208 struct kiocb *req = dio->req;
2209 struct bch_inode_info *inode = dio->inode;
2210 bool sync = dio->sync;
2213 if (unlikely(dio->flush)) {
2214 bch2_dio_write_flush(dio);
2216 return -EIOCBQUEUED;
2219 bch2_pagecache_block_put(inode);
2222 kfree(dio->iter.iov);
2224 ret = dio->op.error ?: ((long) dio->written << 9);
2225 bio_put(&dio->op.wbio.bio);
2227 /* inode->i_dio_count is our ref on inode and thus bch_fs */
2228 inode_dio_end(&inode->v);
2231 ret = bch2_err_class(ret);
2234 req->ki_complete(req, ret);
2240 static __always_inline void bch2_dio_write_end(struct dio_write *dio)
2242 struct bch_fs *c = dio->op.c;
2243 struct kiocb *req = dio->req;
2244 struct bch_inode_info *inode = dio->inode;
2245 struct bio *bio = &dio->op.wbio.bio;
2246 struct bvec_iter_all iter;
2249 req->ki_pos += (u64) dio->op.written << 9;
2250 dio->written += dio->op.written;
2252 if (dio->extending) {
2253 spin_lock(&inode->v.i_lock);
2254 if (req->ki_pos > inode->v.i_size)
2255 i_size_write(&inode->v, req->ki_pos);
2256 spin_unlock(&inode->v.i_lock);
2259 if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
2260 mutex_lock(&inode->ei_quota_lock);
2261 __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
2262 __bch2_quota_reservation_put(c, inode, &dio->quota_res);
2263 mutex_unlock(&inode->ei_quota_lock);
2266 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2267 bio_for_each_segment_all(bv, bio, iter)
2268 put_page(bv->bv_page);
2270 if (unlikely(dio->op.error))
2271 set_bit(EI_INODE_ERROR, &inode->ei_flags);
2274 static long bch2_dio_write_loop(struct dio_write *dio)
2276 struct bch_fs *c = dio->op.c;
2277 struct kiocb *req = dio->req;
2278 struct address_space *mapping = dio->mapping;
2279 struct bch_inode_info *inode = dio->inode;
2280 struct bio *bio = &dio->op.wbio.bio;
2281 unsigned unaligned, iter_count;
2282 bool sync = dio->sync, dropped_locks;
2286 iter_count = dio->iter.count;
2288 EBUG_ON(current->faults_disabled_mapping);
2289 current->faults_disabled_mapping = mapping;
2291 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2293 dropped_locks = fdm_dropped_locks();
2295 current->faults_disabled_mapping = NULL;
2298 * If the fault handler returned an error but also signalled
2299 * that it dropped & retook ei_pagecache_lock, we just need to
2300 * re-shoot down the page cache and retry:
2302 if (dropped_locks && ret)
2305 if (unlikely(ret < 0))
2308 if (unlikely(dropped_locks)) {
2309 ret = write_invalidate_inode_pages_range(mapping,
2311 req->ki_pos + iter_count - 1);
2315 if (!bio->bi_iter.bi_size)
2319 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2320 bio->bi_iter.bi_size -= unaligned;
2321 iov_iter_revert(&dio->iter, unaligned);
2323 if (!bio->bi_iter.bi_size) {
2325 * bio_iov_iter_get_pages was only able to get <
2326 * blocksize worth of pages:
2332 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2333 dio->op.end_io = sync
2335 : bch2_dio_write_loop_async;
2336 dio->op.target = dio->op.opts.foreground_target;
2337 dio->op.write_point = writepoint_hashed((unsigned long) current);
2338 dio->op.nr_replicas = dio->op.opts.data_replicas;
2339 dio->op.subvol = inode->ei_subvol;
2340 dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2341 dio->op.devs_need_flush = &inode->ei_devs_need_flush;
2344 dio->op.flags |= BCH_WRITE_SYNC;
2345 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2347 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2348 bio_sectors(bio), true);
2352 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2353 dio->op.opts.data_replicas, 0);
2354 if (unlikely(ret) &&
2355 !bch2_dio_write_check_allocated(dio))
2358 task_io_account_write(bio->bi_iter.bi_size);
2360 if (unlikely(dio->iter.count) &&
2363 bch2_dio_write_copy_iov(dio))
2364 dio->sync = sync = true;
2367 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2370 return -EIOCBQUEUED;
2372 bch2_dio_write_end(dio);
2374 if (likely(!dio->iter.count) || dio->op.error)
2377 bio_reset(bio, NULL, REQ_OP_WRITE);
2380 return bch2_dio_write_done(dio);
2382 dio->op.error = ret;
2384 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
2385 struct bvec_iter_all iter;
2388 bio_for_each_segment_all(bv, bio, iter)
2389 put_page(bv->bv_page);
2392 bch2_quota_reservation_put(c, inode, &dio->quota_res);
2396 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2398 struct dio_write *dio = container_of(op, struct dio_write, op);
2399 struct mm_struct *mm = dio->mm;
2401 bch2_dio_write_end(dio);
2403 if (likely(!dio->iter.count) || dio->op.error) {
2404 bch2_dio_write_done(dio);
2408 bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
2412 bch2_dio_write_loop(dio);
2414 kthread_unuse_mm(mm);
2418 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2420 struct file *file = req->ki_filp;
2421 struct address_space *mapping = file->f_mapping;
2422 struct bch_inode_info *inode = file_bch_inode(file);
2423 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2424 struct dio_write *dio;
2426 bool locked = true, extending;
2430 prefetch((void *) &c->opts + 64);
2431 prefetch(&inode->ei_inode);
2432 prefetch((void *) &inode->ei_inode + 64);
2434 inode_lock(&inode->v);
2436 ret = generic_write_checks(req, iter);
2437 if (unlikely(ret <= 0))
2440 ret = file_remove_privs(file);
2444 ret = file_update_time(file);
2448 if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2451 inode_dio_begin(&inode->v);
2452 bch2_pagecache_block_get(inode);
2454 extending = req->ki_pos + iter->count > inode->v.i_size;
2456 inode_unlock(&inode->v);
2460 bio = bio_alloc_bioset(NULL,
2461 bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
2464 &c->dio_write_bioset);
2465 dio = container_of(bio, struct dio_write, op.wbio.bio);
2467 dio->mapping = mapping;
2469 dio->mm = current->mm;
2471 dio->extending = extending;
2472 dio->sync = is_sync_kiocb(req) || extending;
2473 dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
2474 dio->free_iov = false;
2475 dio->quota_res.sectors = 0;
2480 if (unlikely(mapping->nrpages)) {
2481 ret = write_invalidate_inode_pages_range(mapping,
2483 req->ki_pos + iter->count - 1);
2488 ret = bch2_dio_write_loop(dio);
2491 inode_unlock(&inode->v);
2494 bch2_pagecache_block_put(inode);
2496 inode_dio_end(&inode->v);
2500 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2502 struct file *file = iocb->ki_filp;
2503 struct bch_inode_info *inode = file_bch_inode(file);
2506 if (iocb->ki_flags & IOCB_DIRECT) {
2507 ret = bch2_direct_write(iocb, from);
2511 /* We can write back this queue in page reclaim */
2512 current->backing_dev_info = inode_to_bdi(&inode->v);
2513 inode_lock(&inode->v);
2515 ret = generic_write_checks(iocb, from);
2519 ret = file_remove_privs(file);
2523 ret = file_update_time(file);
2527 ret = bch2_buffered_write(iocb, from);
2528 if (likely(ret > 0))
2529 iocb->ki_pos += ret;
2531 inode_unlock(&inode->v);
2532 current->backing_dev_info = NULL;
2535 ret = generic_write_sync(iocb, ret);
2537 return bch2_err_class(ret);
2543 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2544 * insert trigger: look up the btree inode instead
2546 static int bch2_flush_inode(struct bch_fs *c,
2547 struct bch_inode_info *inode)
2549 struct bch_inode_unpacked u;
2552 if (c->opts.journal_flush_disabled)
2555 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
2559 return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
2560 bch2_inode_flush_nocow_writes(c, inode);
2563 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2565 struct bch_inode_info *inode = file_bch_inode(file);
2566 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2567 int ret, ret2, ret3;
2569 ret = file_write_and_wait_range(file, start, end);
2570 ret2 = sync_inode_metadata(&inode->v, 1);
2571 ret3 = bch2_flush_inode(c, inode);
2573 return bch2_err_class(ret ?: ret2 ?: ret3);
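/*
 * fsync is three steps - flush dirty pages, sync VFS inode metadata,
 * flush the journal (and any nocow writes) - all three are attempted,
 * and the first error wins.
 */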
2578 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2582 struct btree_trans trans;
2583 struct btree_iter iter;
2587 bch2_trans_init(&trans, c, 0, 0);
2589 bch2_trans_begin(&trans);
2591 ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2595 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2596 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2599 if (bkey_extent_is_data(k.k)) {
2605 bch2_trans_iter_exit(&trans, &iter);
2607 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2610 bch2_trans_exit(&trans);
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	s64 i_sectors_delta = 0;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c, inode->ei_subvol,
				POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		if (s->s[i].state == SECTOR_DIRTY)
			i_sectors_delta--;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	/*
	 * Caller needs to know whether this page will be written out by
	 * writeback - doing an i_size update if necessary - or whether it will
	 * be responsible for the i_size update:
	 */
	ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
			  PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong.
	 * Ick.
	 */
	BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	page_mkclean(page);
	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}
static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
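
/*
 * For example (assuming 4096-byte pages): truncating to 5000 bytes gives
 * index 1, start_offset 904 and end_offset 4096, so __bch2_truncate_page()
 * zeroes bytes 904..4095 of page 1; whole pages past it are dropped by the
 * caller.
 */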
static int bch2_truncate_pages(struct bch_inode_info *inode,
			       loff_t start, loff_t end)
{
	int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
				       start, end);

	if (ret >= 0 &&
	    start >> PAGE_SHIFT != end >> PAGE_SHIFT)
		ret = __bch2_truncate_page(inode,
					   end >> PAGE_SHIFT,
					   start, end);
	return ret;
}
static int bch2_extend(struct user_namespace *mnt_userns,
		       struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);

	return bch2_setattr_nonsize(mnt_userns, inode, iattr);
}
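
/*
 * The writeback above means an extending truncate is effectively a sync
 * append: everything between the old on-disk i_size and the new EOF is on
 * disk before the larger i_size is, so a crash can't leave i_size pointing
 * past unwritten data.
 */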
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
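
/*
 * BCH_INODE_I_SIZE_DIRTY bracketing: bch2_truncate_start_fn() persists the
 * new size with the dirty flag set before any extents are dropped, and
 * bch2_truncate_finish_fn() clears it once the fpunch completes - so a
 * truncate interrupted by a crash is detectable from the flag.
 */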
int bch2_truncate(struct user_namespace *mnt_userns,
		  struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	/*
	 * If the truncate call will change the size of the file, the
	 * cmtimes should be updated. If the size will not change, we
	 * do not need to update the cmtimes.
	 */
	if (iattr->ia_size != inode->v.i_size) {
		if (!(iattr->ia_valid & ATTR_MTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_mtime);
		if (!(iattr->ia_valid & ATTR_CTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_ctime);
		iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
	}

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		  inode->v.i_size < inode_u.bi_size,
		  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
		  (u64) inode->v.i_size, inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
		goto err;
	}

	iattr->ia_valid &= ~ATTR_SIZE;

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret < 0))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (ret)
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode_inum(inode),
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
				!bch2_journal_error(&c->journal), c,
				"inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks,
				inode->ei_inode.bi_sectors);
	if (unlikely(ret))
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
	mutex_unlock(&inode->ei_update_lock);

	ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
	bch2_pagecache_block_put(inode);
	return bch2_err_class(ret);
}
static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end		= offset + len;
	u64 block_start	= round_up(offset, block_bytes(c));
	u64 block_end	= round_down(end, block_bytes(c));
	bool truncated_last_page;
	int ret = 0;

	ret = bch2_truncate_pages(inode, offset, end);
	if (unlikely(ret < 0))
		goto err;

	truncated_last_page = ret;

	truncate_pagecache_range(&inode->v, offset, end - 1);

	if (block_start < block_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  block_start >> 9, block_end >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}

	mutex_lock(&inode->ei_update_lock);
	if (end >= inode->v.i_size && !truncated_last_page) {
		ret = bch2_write_inode_size(c, inode, inode->v.i_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
err:
	return ret;
}
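
/*
 * Worked example (assuming 4096-byte blocks): punching offset 1000, len 9000
 * gives end 10000, block_start 4096, block_end 8192 - extents for bytes
 * [4096, 8192) are dropped via bch2_fpunch(), while the partial head
 * [1000, 4096) and tail [8192, 10000) are just zeroed in the page cache.
 */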
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter src, dst, del;
	loff_t shift, new_size;
	u64 src_start;
	int ret = 0;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	if (insert) {
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			return -EFBIG;

		if (offset >= inode->v.i_size)
			return -EINVAL;

		src_start	= U64_MAX;
		shift		= len;
	} else {
		if (offset + len >= inode->v.i_size)
			return -EINVAL;

		src_start	= offset + len;
		shift		= -len;
	}

	new_size = inode->v.i_size + shift;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  offset >> 9, (offset + len) >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			return ret;
	}

	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
	bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	bch2_trans_copy_iter(&dst, &src);
	bch2_trans_copy_iter(&del, &src);

	while (ret == 0 ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;
		u32 snapshot;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&src, snapshot);
		bch2_btree_iter_set_snapshot(&dst, snapshot);
		bch2_btree_iter_set_snapshot(&del, snapshot);

		bch2_trans_begin(&trans);

		k = insert
			? bch2_btree_iter_peek_prev(&src)
			: bch2_btree_iter_peek(&src);
		if ((ret = bkey_err(k)))
			continue;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
reassemble:
		bch2_bkey_buf_reassemble(&copy, c, k);

		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
		if (ret)
			continue;

		if (bkey_cmp(atomic_end, copy.k->k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, copy.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;
		bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		if (copy.k->k.size != k.k->size) {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k->k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		ret =   bch2_btree_iter_traverse(&del) ?:
			bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res, NULL,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);

		if (!ret)
			bch2_btree_iter_set_pos(&src, next_pos);
	}
	bch2_trans_iter_exit(&trans, &del);
	bch2_trans_iter_exit(&trans, &dst);
	bch2_trans_iter_exit(&trans, &src);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&copy, c);

	if (ret)
		return ret;

	mutex_lock(&inode->ei_update_lock);
	if (!insert) {
		i_size_write(&inode->v, new_size);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		/* We need an inode update to update bi_journal_seq for fsync: */
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}
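
/*
 * Collapse/insert never copies data: extents are walked (backwards for
 * insert, forwards for collapse) and rewritten at their shifted position,
 * one btree commit per extent, so the operation is restartable and never
 * holds more than one extent's worth of disk reservation at a time.
 */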
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
			     u64 start_sector, u64 end_sector)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			POS(inode->v.i_ino, start_sector),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
		s64 i_sectors_delta = 0;
		struct quota_res quota_res = { 0 };
		struct bkey_s_c k;
		unsigned sectors;
		u32 snapshot;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		/* already reserved */
		if (bkey_extent_is_reservation(k) &&
		    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		/*
		 * XXX: for nocow mode, we should promote shared extents to
		 * unshared here
		 */

		sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;

		if (!bkey_extent_is_allocation(k.k)) {
			ret = bch2_quota_reservation_add(c, inode,
					&quota_res,
					sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
					    sectors, opts, &i_sectors_delta,
					    writepoint_hashed((unsigned long) current));
		if (ret)
			goto bkey_err;

		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
	}

	bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
	mark_pagecache_reserved(inode, start_sector, iter.pos.offset);

	if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
		struct quota_res quota_res = { 0 };
		s64 i_sectors_delta = 0;

		bch2_fpunch_at(&trans, &iter, inode_inum(inode),
			       end_sector, &i_sectors_delta);
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
		bch2_quota_reservation_put(c, inode, &quota_res);
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			    loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end		= offset + len;
	u64 block_start	= round_down(offset, block_bytes(c));
	u64 block_end	= round_up(end, block_bytes(c));
	bool truncated_last_page = false;
	int ret, ret2 = 0;

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			return ret;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = bch2_truncate_pages(inode, offset, end);
		if (unlikely(ret < 0))
			return ret;

		truncated_last_page = ret;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start	= round_up(offset, block_bytes(c));
		block_end	= round_down(end, block_bytes(c));
	}

	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

	/*
	 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
	 * so that the VFS cache i_size is consistent with the btree i_size:
	 */
	if (ret &&
	    !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
		return ret;

	if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
		end = inode->v.i_size;

	if (end >= inode->v.i_size &&
	    (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
	     !(mode & FALLOC_FL_KEEP_SIZE))) {
		spin_lock(&inode->v.i_lock);
		i_size_write(&inode->v, end);
		spin_unlock(&inode->v.i_lock);

		mutex_lock(&inode->ei_update_lock);
		ret2 = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}

	return ret ?: ret2;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	long ret;

	if (!percpu_ref_tryget_live(&c->writes))
		return -EROFS;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = file_modified(file);
	if (ret)
		goto err;

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
	else
		ret = -EOPNOTSUPP;
err:
	bch2_pagecache_block_put(inode);
	inode_unlock(&inode->v);
	percpu_ref_put(&c->writes);

	return bch2_err_class(ret);
}
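
/*
 * Mode handling above: plain fallocate and FALLOC_FL_ZERO_RANGE (optionally
 * with KEEP_SIZE) go to bchfs_fallocate(); PUNCH_HOLE requires KEEP_SIZE;
 * INSERT_RANGE and COLLAPSE_RANGE must be the only flag set. Anything else
 * is -EOPNOTSUPP.
 */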
static int quota_reserve_range(struct bch_inode_info *inode,
			       struct quota_res *res,
			       u64 start, u64 end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot;
	u64 sectors = end - start;
	u64 pos = start;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inode->v.i_ino, pos, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(&trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
	       !(ret = bkey_err(k))) {
		if (bkey_extent_is_allocation(k.k)) {
			u64 s = min(end, k.k->p.offset) -
				max(start, bkey_start_offset(k.k));
			BUG_ON(s > sectors);
			sectors -= s;
		}
		bch2_btree_iter_advance(&iter);
	}
	pos = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);

	if (ret)
		return ret;

	return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
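
/*
 * e.g. reserving sectors [0, 16) when [4, 8) is already allocated only
 * charges 12 sectors against the quota - the walk above subtracts every
 * allocated extent overlapping the range before calling
 * bch2_quota_reservation_add().
 */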
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;
	u64 aligned_len;
	loff_t ret = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;

	bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);
	if (ret)
		goto err;

	ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
				  (pos_dst + aligned_len) >> 9);
	if (ret)
		goto err;

	file_update_time(file_dst);

	mark_pagecache_unallocated(src, pos_src >> 9,
				   (pos_src + aligned_len) >> 9);

	ret = bch2_remap_range(c,
			       inode_inum(dst), pos_dst >> 9,
			       inode_inum(src), pos_src >> 9,
			       aligned_len >> 9,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);

	i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);

	if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
	    IS_SYNC(file_inode(file_dst)))
		ret = bch2_flush_inode(c, dst);
err:
	bch2_quota_reservation_put(c, dst, &quota_res);
	bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	return bch2_err_class(ret);
}
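
/*
 * This is the ->remap_file_range implementation backing reflink copies
 * (FICLONE/FICLONERANGE); both offsets must be block aligned, and because
 * the tail is remapped in whole blocks the result is clamped back down to
 * the length the caller asked for.
 */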
static int page_data_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (s)
		for (i = offset >> 9; i < PAGE_SECTORS; i++)
			if (s->s[i].state >= SECTOR_DIRTY)
				return i << 9;

	return -1;
}
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct folio_batch fbatch;
	pgoff_t start_index	= start_offset >> PAGE_SHIFT;
	pgoff_t end_index	= end_offset >> PAGE_SHIFT;
	pgoff_t index		= start_index;
	unsigned i;
	loff_t ret;
	int offset;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(vinode->i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			offset = page_data_offset(&folio->page,
					folio->index == start_index
					? start_offset & (PAGE_SIZE - 1)
					: 0);
			if (offset >= 0) {
				ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
					    offset,
					    start_offset, end_offset);
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				return ret;
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return end_offset;
}
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_data = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
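
/*
 * SEEK_DATA has to consider both sources of truth: the extents btree gives
 * the first on-disk data at or after the offset, and
 * bch2_seek_pagecache_data() then checks whether dirty pages below that
 * point contain data that hasn't been written back yet.
 */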
static int __page_hole_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (!s)
		return 0;

	for (i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s->s[i].state < SECTOR_DIRTY)
			return i << 9;

	return -1;
}

static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page;
	int pg_offset;
	loff_t ret = -1;

	page = find_lock_page(mapping, index);
	if (!page)
		return offset;

	pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
	if (pg_offset >= 0)
		ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;

	unlock_page(page);
	return ret;
}
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset, hole;

	while (offset < end_offset) {
		hole = page_hole_offset(mapping, offset);
		if (hole >= 0 && hole <= end_offset)
			return max(start_offset, hole);

		offset += PAGE_SIZE;
		offset &= PAGE_MASK;
	}

	return end_offset;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
			   BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
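
/*
 * The converse applies for SEEK_HOLE: a gap in the extents btree only counts
 * as a hole if the page cache doesn't have dirty data covering it, hence the
 * bch2_seek_pagecache_hole() calls over each candidate range.
 */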
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		ret = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_DATA:
		ret = bch2_seek_data(file, offset);
		break;
	case SEEK_HOLE:
		ret = bch2_seek_hole(file, offset);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return bch2_err_class(ret);
}
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->nocow_flush_bioset);
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}
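
/*
 * Each I/O type gets its own bioset so bio allocation can always make
 * forward progress under memory pressure: writepage, dio read and dio write
 * each keep a small mempool of 4 bios, nocow flushes just 1.
 */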
int bch2_fs_fsio_init(struct bch_fs *c)
{
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->nocow_flush_bioset,
			1, offsetof(struct nocow_flush, bio), 0))
		ret = -ENOMEM;

	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

#endif /* NO_BCACHEFS_FS */