// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}
static inline struct address_space *faults_disabled_mapping(void)
{
	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}

static inline void set_fdm_dropped_locks(void)
{
	current->faults_disabled_mapping =
		(void *) (((unsigned long) current->faults_disabled_mapping)|1);
}

static inline bool fdm_dropped_locks(void)
{
	return ((unsigned long) current->faults_disabled_mapping) & 1;
}
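
/*
 * Note: the flag and the pointer share one word.  struct address_space
 * pointers are word aligned, so bit 0 is never set in a valid pointer
 * and can carry the "dropped locks" flag.  Illustration:
 *
 *	current->faults_disabled_mapping = mapping;	// bit 0 clear
 *	set_fdm_dropped_locks();			// bit 0 set
 *	faults_disabled_mapping() == mapping		// ~1UL recovers the ptr
 */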
struct quota_res {
	u64				sectors;
};

struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct completion		done;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	bool				should_dirty;
	struct bch_read_bio		rbio;
};
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
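
/*
 * Usage sketch (an inference from the callers below, not upstream
 * documentation): the dio write path calls this to shoot down the page
 * cache over the range it's about to write;
 * invalidate_inode_pages2_range() returns -EBUSY while pages are being
 * redirtied underneath us, hence the writeback-then-invalidate loop.
 */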
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	if (!sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
				"inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
				inode->ei_inode.bi_sectors);
	inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	mutex_unlock(&inode->ei_quota_lock);
}
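
/*
 * Note: @sectors may be negative (i.e. fpunch/truncate); quota
 * reservations are only consumed by positive deltas, which is why the
 * quota branch above requires sectors > 0 and otherwise accounts
 * directly with KEY_TYPE_QUOTA_WARN.
 */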
/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas (or on disk reservation): */
	unsigned		nr_replicas:4;

	/* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
	unsigned		replicas_reserved:4;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_DIRTY_RESERVED,
		SECTOR_ALLOCATED,
	}			state:8;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	bool			uptodate;
	struct bch_page_sector	s[PAGE_SECTORS];
};
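
/*
 * Note: the code below compares sector states with < and >=
 * (e.g. state >= SECTOR_DIRTY in the writepage path), so the enum
 * ordering UNALLOCATED < RESERVED < DIRTY < DIRTY_RESERVED < ALLOCATED
 * is load bearing.
 */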
static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	kfree(detach_page_private(page));
}

static void bch2_page_state_release(struct page *page)
{
	EBUG_ON(!PageLocked(page));
	__bch2_page_state_release(page);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	attach_page_private(page, s);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
static unsigned bkey_to_sector_state(const struct bkey *k)
{
	if (k->type == KEY_TYPE_reservation)
		return SECTOR_RESERVED;
	if (bkey_extent_is_allocation(k))
		return SECTOR_ALLOCATED;
	return SECTOR_UNALLOCATED;
}
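
/*
 * Example: a KEY_TYPE_reservation extent (fallocated, nothing written)
 * maps to SECTOR_RESERVED; a written extent to SECTOR_ALLOCATED; a hole
 * to SECTOR_UNALLOCATED.
 */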
static void __bch2_page_state_set(struct page *page,
				  unsigned pg_offset, unsigned pg_len,
				  unsigned nr_ptrs, unsigned state)
{
	struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
	unsigned i;

	BUG_ON(pg_offset >= PAGE_SECTORS);
	BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

	spin_lock(&s->lock);

	for (i = pg_offset; i < pg_offset + pg_len; i++) {
		s->s[i].nr_replicas	= nr_ptrs;
		s->s[i].state		= state;
	}

	if (i == PAGE_SECTORS)
		s->uptodate = true;

	spin_unlock(&s->lock);
}
static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
			       struct page **pages, unsigned nr_pages)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
	unsigned pg_idx = 0;
	u32 snapshot;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_SLOTS, k, ret) {
		unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
		unsigned state = bkey_to_sector_state(k.k);

		while (pg_idx < nr_pages) {
			struct page *page = pages[pg_idx];
			u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
			u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
			unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
			unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;

			BUG_ON(k.k->p.offset < pg_start);
			BUG_ON(bkey_start_offset(k.k) > pg_end);

			if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
				__bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);

			if (k.k->p.offset < pg_end)
				break;
			pg_idx++;
		}

		if (pg_idx == nr_pages)
			break;
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_exit(&trans);

	return ret;
}
static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = bkey_to_sector_state(k.k);

	bio_for_each_segment(bv, bio, iter)
		__bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
				      bv.bv_len >> 9, nr_ptrs, state);
}
static void mark_pagecache_unallocated(struct bch_inode_info *inode,
				       u64 start, u64 end)
{
	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;
	unsigned i, j;

	if (end <= start)
		return;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
			u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
			unsigned pg_offset = max(start, pg_start) - pg_start;
			unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
			struct bch_page_state *s;

			BUG_ON(end <= pg_start);
			BUG_ON(pg_offset >= PAGE_SECTORS);
			BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

			folio_lock(folio);
			s = bch2_page_state(&folio->page);

			if (s) {
				spin_lock(&s->lock);
				for (j = pg_offset; j < pg_offset + pg_len; j++)
					s->s[j].nr_replicas = 0;
				spin_unlock(&s->lock);
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
static void mark_pagecache_reserved(struct bch_inode_info *inode,
				    u64 start, u64 end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
	struct folio_batch fbatch;
	s64 i_sectors_delta = 0;
	unsigned i, j;

	if (end <= start)
		return;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(inode->v.i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
			u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
			unsigned pg_offset = max(start, pg_start) - pg_start;
			unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
			struct bch_page_state *s;

			BUG_ON(end <= pg_start);
			BUG_ON(pg_offset >= PAGE_SECTORS);
			BUG_ON(pg_offset + pg_len > PAGE_SECTORS);

			folio_lock(folio);
			s = bch2_page_state(&folio->page);

			if (s) {
				spin_lock(&s->lock);
				for (j = pg_offset; j < pg_offset + pg_len; j++)
					switch (s->s[j].state) {
					case SECTOR_UNALLOCATED:
						s->s[j].state = SECTOR_RESERVED;
						break;
					case SECTOR_DIRTY:
						s->s[j].state = SECTOR_DIRTY_RESERVED;
						i_sectors_delta--;
						break;
					default:
						break;
					}
				spin_unlock(&s->lock);
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
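
/*
 * Worked example: with nr_replicas == 2, a sector with one allocated
 * replica and nothing reserved needs max(0, 2 - 1 - 0) == 1 more sector
 * of reservation; a fully replicated sector needs 0.
 */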
static int bch2_get_page_disk_reservation(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}
struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}
static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	BUG_ON(!s->uptodate);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}
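
/*
 * Note: if the quota reservation fails after the disk reservation
 * succeeded, the disk reservation is unwound via the temporary above,
 * so the caller always ends up with both reservations or neither.
 */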
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		switch (s->s[i].state) {
		case SECTOR_DIRTY:
			s->s[i].state = SECTOR_UNALLOCATED;
			--dirty_sectors;
			break;
		case SECTOR_DIRTY_RESERVED:
			s->s[i].state = SECTOR_RESERVED;
			break;
		default:
			break;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	i_sectors_acct(c, inode, NULL, dirty_sectors);

	bch2_page_state_release(page);
}
static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		switch (s->s[i].state) {
		case SECTOR_UNALLOCATED:
			s->s[i].state = SECTOR_DIRTY;
			dirty_sectors++;
			break;
		case SECTOR_RESERVED:
			s->s[i].state = SECTOR_DIRTY_RESERVED;
			break;
		default:
			break;
		}
	}

	spin_unlock(&s->lock);

	i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		__set_page_dirty_nobuffers(page);
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);
	vm_fault_t ret;

	if (fdm == mapping)
		return VM_FAULT_SIGBUS;

	/* Lock ordering: */
	if (fdm > mapping) {
		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
			goto got_lock;

		bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);

		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);

		bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;
	}

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);
got_lock:
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return ret;
}
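
/*
 * Note: fdm == mapping means a dio write is faulting in its own
 * mapping, which would deadlock on ei_pagecache_lock - hence SIGBUS.
 * For two different inodes, comparing pointers (fdm > mapping) gives a
 * consistent lock ordering; the drop/retake path signals the dio code
 * via set_fdm_dropped_locks() so it can re-invalidate and retry.
 */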
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	vm_fault_t ret;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
		if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	bch2_page_reservation_put(c, inode, &res);

	wait_for_stable_page(page);
	ret = VM_FAULT_LOCKED;
out:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}
void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	if (offset || length < folio_size(folio))
		return;

	bch2_clear_page_bits(&folio->page);
}

bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;

	bch2_clear_page_bits(&folio->page);
	return true;
}
/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	unsigned i, nr_pages = readahead_count(ractl);

	memset(iter, 0, sizeof(*iter));

	iter->mapping	= ractl->mapping;
	iter->offset	= readahead_index(ractl);
	iter->nr_pages	= nr_pages;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		__bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
		put_page(iter->pages[i]);
	}

	return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	if (iter->idx >= iter->nr_pages)
		return NULL;

	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}
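
/*
 * Note: for checksummed or compressed extents a partial read costs as
 * much as reading the whole extent, so when @get_more is set we keep
 * allocating pages past the readahead window until the bio covers
 * sectors_this_extent.
 */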
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	u32 snapshot;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		if (readpages_iter)
			readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
					    extent_partial_reads_expensive(k));

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_ratelimited(c, inum.inum,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_NOFS, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, rbio, inode_inum(inode),
			   &readpages_iter);
	}

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum, struct page *page)
{
	struct btree_trans trans;

	bch2_page_state_create(page, __GFP_NOFAIL);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTORS_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	bchfs_read(&trans, rbio, inum, NULL);
	bch2_trans_exit(&trans);
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode_inum(inode), page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	int ret;

	ret = bch2_read_single_page(page, page->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}
/* writepages: */

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	 */
	//BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
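
/*
 * Note: the write op runs async under io->cl; when it and all its
 * children complete, bch2_writepage_io_done() runs and finally
 * bch2_writepage_io_free() drops the bio.
 */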
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_NOFS,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;
	op			= &w->io->op;

	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}
static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	orig = *s;
	spin_unlock(&s->lock);

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY) {
			reserved_sectors += orig.s[offset + sectors].replicas_reserved;
			dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_VECS * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			     round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return bch2_err_class(ret);
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
		ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
		if (ret)
			goto err;
	}

	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}
#define WRITE_BATCH_PAGES	32

static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i);
		if (!pages[i]) {
			nr_pages = i;
			if (!i) {
				ret = -ENOMEM;
				goto out;
			}
			len = min_t(unsigned, len,
				    nr_pages * PAGE_SIZE - offset);
			break;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		unsigned i = (offset + reserved) >> PAGE_SHIFT;
		struct page *page = pages[i];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);

		if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
			ret = bch2_page_state_set(c, inode_inum(inode),
						  pages + i, nr_pages - i);
			if (ret)
				goto out;
		}

		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);
		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = copy_page_from_iter_atomic(page,
						pg_offset, pg_len, iter);

		if (!pg_copied)
			break;

		if (!PageUptodate(page) &&
		    pg_copied != PAGE_SIZE &&
		    pos + copied + pg_copied < inode->v.i_size) {
			zero_user(page, 0, PAGE_SIZE);
			break;
		}

		flush_dcache_page(page);
		copied += pg_copied;

		if (pg_copied != pg_len)
			break;
	}

	if (!copied)
		goto out;

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
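
/*
 * Note on ordering: reservations (disk + quota) are taken for the whole
 * range before any user data is copied, and pages are only marked dirty
 * after a successful copy - so a short atomic usercopy can at worst
 * leave an unused reservation, never a dirty page without backing.
 */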
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
			      PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_READ,
			       GFP_KERNEL,
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to skip
	 * the dirtying of requests that are internal from the kernel (i.e. from
	 * loopback), because we'll deadlock on page_lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				       REQ_OP_READ,
				       GFP_KERNEL,
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
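
/*
 * Note on the closure trick above (our reading of the code, not
 * upstream documentation): in the async case the closure is set up with
 * CLOSURE_DESTRUCTOR in place of the usual running ref, so the final
 * closure_put() from the last split bio invokes
 * bch2_dio_read_complete() directly; the sync case instead holds an
 * extra ref for closure_sync() to wait on.
 */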
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		ret = filemap_write_and_wait_range(mapping,
					iocb->ki_pos,
					iocb->ki_pos + count - 1);
		if (ret < 0)
			goto out;

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	}
out:
	return bch2_err_class(ret);
}
/* O_DIRECT writes */

static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
				       u64 offset, u64 size,
				       unsigned nr_replicas, bool compressed)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 end = offset + size;
	u32 snapshot;
	bool ret = true;
	int err;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (err)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
			break;

		if (k.k->p.snapshot != snapshot ||
		    nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_exit(&trans);

	return err ? false : ret;
}
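
/*
 * Note: used by the dio write path when a disk reservation can't be
 * taken - if the range is already fully allocated with enough replicas
 * (and uncompressed, since overwriting compressed extents can change
 * the on-disk footprint), the write can proceed without one.
 */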
static void bch2_dio_write_loop_async(struct bch_write_op *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	if (dio->loop)
		goto loop;

	while (1) {
		iter_count = dio->iter.count;

		if (kthread && dio->mm)
			kthread_use_mm(dio->mm);
		BUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;
		if (kthread && dio->mm)
			kthread_unuse_mm(dio->mm);

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
		dio->op.end_io		= bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);

		if ((req->ki_flags & IOCB_DSYNC) &&
		    !c->opts.journal_flush_disabled)
			dio->op.flags |= BCH_WRITE_FLUSH;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_check_range_allocated(c, inode_inum(inode),
				dio->op.pos.offset, bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->sync = sync = true;
					goto do_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
do_io:
		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (sync)
			wait_for_completion(&dio->done);
		else
			return -EIOCBQUEUED;
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		req->ki_pos += (u64) dio->op.written << 9;
		dio->written += dio->op.written;

		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);

		if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);

		if (dio->op.error) {
			set_bit(EI_INODE_ERROR, &inode->ei_flags);
			break;
		}

		if (!dio->iter.count)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
		reinit_completion(&dio->done);
	}

	ret = dio->op.error ?: ((long) dio->written << 9);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
		bio_for_each_segment_all(bv, bio, iter)
			put_page(bv->bv_page);
	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (ret < 0)
		ret = bch2_err_class(ret);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	if (dio->sync)
		complete(&dio->done);
	else
		bch2_dio_write_loop(dio);
}
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	init_completion(&dio->done);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err_put_bio;

	ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
	if (unlikely(ret))
		goto err_put_bio;

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}
/* fsync: */

/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
{
	struct bch_inode_unpacked inode;
	int ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	ret = bch2_inode_find_by_inum(c, inum, &inode);
	if (ret)
		return ret;

	return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
}

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2, ret3;

	ret = file_write_and_wait_range(file, start, end);
	ret2 = sync_inode_metadata(&inode->v, 1);
	ret3 = bch2_flush_inode(c, inode_inum(inode));

	return bch2_err_class(ret ?: ret2 ?: ret3);
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c, u32 subvol,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}
	start = iter.pos;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	return ret;
}
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	s64 i_sectors_delta = 0;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c, inode->ei_subvol,
				POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		if (s->s[i].state == SECTOR_DIRTY)
			i_sectors_delta--;
		s->s[i].state = SECTOR_UNALLOCATED;
	}

	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	/*
	 * Caller needs to know whether this page will be written out by
	 * writeback - doing an i_size update if necessary - or whether it will
	 * be responsible for the i_size update:
	 */
	ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
			  PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this wrong. ick.
	 */
	BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	page_mkclean(page);
	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}

static int bch2_truncate_pages(struct bch_inode_info *inode,
			       loff_t start, loff_t end)
{
	int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
				       start, end);

	if (ret >= 0 &&
	    start >> PAGE_SHIFT != end >> PAGE_SHIFT)
		ret = __bch2_truncate_page(inode,
					   end >> PAGE_SHIFT,
					   start, end);
	return ret;
}
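
/*
 * Example: punching bytes 1..4095 of a 4096 byte page with 512 byte
 * blocks zeroes the partial page in cache and resets the covered sector
 * states; a > 0 return tells the caller that writeback of this page -
 * not the caller - is responsible for the i_size update.
 */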
static int bch2_extend(struct user_namespace *mnt_userns,
		       struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);

	return bch2_setattr_nonsize(mnt_userns, inode, iattr);
}

static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
2587 int bch2_truncate(struct user_namespace *mnt_userns,
2588 struct bch_inode_info *inode, struct iattr *iattr)
2590 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2591 struct address_space *mapping = inode->v.i_mapping;
2592 struct bch_inode_unpacked inode_u;
2593 u64 new_i_size = iattr->ia_size;
2594 s64 i_sectors_delta = 0;
2598 * If the truncate call with change the size of the file, the
2599 * cmtimes should be updated. If the size will not change, we
2600 * do not need to update the cmtimes.
2602 if (iattr->ia_size != inode->v.i_size) {
2603 if (!(iattr->ia_valid & ATTR_MTIME))
2604 ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2605 if (!(iattr->ia_valid & ATTR_CTIME))
2606 ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2607 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
		goto err;
	}

	iattr->ia_valid &= ~ATTR_SIZE;

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret < 0))
		goto err;
	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;
	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode_inum(inode),
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
				!bch2_journal_error(&c->journal), c,
				"inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks,
				inode->ei_inode.bi_sectors);
	if (unlikely(ret))
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
	mutex_unlock(&inode->ei_update_lock);

	ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	return bch2_err_class(ret);
}
static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}
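
/*
 * fpunch: zero out the partial pages at either end of the range via the
 * pagecache, then drop the whole blocks in between from the extents btree:
 */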
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_up(offset, block_bytes(c));
	u64 block_end = round_down(end, block_bytes(c));
	bool truncated_last_page;
	int ret = 0;

	ret = bch2_truncate_pages(inode, offset, end);
	if (unlikely(ret < 0))
		goto err;

	truncated_last_page = ret;

	truncate_pagecache_range(&inode->v, offset, end - 1);

	if (block_start < block_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  block_start >> 9, block_end >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}
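
	/*
	 * If the punch reached EOF and the EOF page wasn't left dirty (in
	 * which case writeback would update the on-disk i_size for us), we
	 * have to write the new i_size ourselves; otherwise only the
	 * timestamps need updating:
	 */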
	mutex_lock(&inode->ei_update_lock);
	if (end >= inode->v.i_size && !truncated_last_page) {
		ret = bch2_write_inode_size(c, inode, inode->v.i_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
err:
	return ret;
}
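
/*
 * Collapse (insert == false): delete [offset, offset + len) and shift
 * everything above it down by len, shrinking the file.
 *
 * Insert (insert == true): shift everything from offset upwards by len,
 * opening a hole at [offset, offset + len) and growing the file.
 *
 * Either way we walk the extents btree, rewriting one extent at its shifted
 * position per transaction commit:
 */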
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter src, dst, del;
	loff_t shift, new_size;
	u64 src_start;
	int ret = 0;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	if (insert) {
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			return -EFBIG;

		if (offset >= inode->v.i_size)
			return -EINVAL;

		src_start = U64_MAX;
		shift	  = len;
	} else {
		if (offset + len >= inode->v.i_size)
			return -EINVAL;

		src_start = offset + len;
		shift	  = -len;
	}

	new_size = inode->v.i_size + shift;
	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  offset >> 9, (offset + len) >> 9,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			return ret;
	}
	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
	bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	bch2_trans_copy_iter(&dst, &src);
	bch2_trans_copy_iter(&del, &src);
	while (ret == 0 ||
	       bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;
		u32 snapshot;
		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&src, snapshot);
		bch2_btree_iter_set_snapshot(&dst, snapshot);
		bch2_btree_iter_set_snapshot(&del, snapshot);

		bch2_trans_begin(&trans);

		k = insert
			? bch2_btree_iter_peek_prev(&src)
			: bch2_btree_iter_peek(&src);
		if ((ret = bkey_err(k)))
			continue;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
reassemble:
		bch2_bkey_buf_reassemble(&copy, c, k);

		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
		if (ret)
			continue;
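
		/*
		 * If the shifted extent can't be updated in a single atomic
		 * btree operation, only move the part up to atomic_end: when
		 * inserting we restart reassembly with an adjusted move_pos,
		 * when collapsing we just trim the copy:
		 */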
		if (bkey_cmp(atomic_end, copy.k->k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, copy.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;
		bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
		if (copy.k->k.size != k.k->size) {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k->k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		ret =   bch2_btree_iter_traverse(&del) ?:
			bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res, NULL,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);
		if (!ret)
			bch2_btree_iter_set_pos(&src, next_pos);
	}
	bch2_trans_iter_exit(&trans, &del);
	bch2_trans_iter_exit(&trans, &dst);
	bch2_trans_iter_exit(&trans, &src);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&copy, c);

	if (ret)
		return ret;
	mutex_lock(&inode->ei_update_lock);
	if (!insert) {
		i_size_write(&inode->v, new_size);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		/* We need an inode update to update bi_journal_seq for fsync: */
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}
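
/*
 * Core of fallocate: walk the extents btree a slot at a time, replacing holes
 * (and, for ZERO_RANGE, existing data) in [start_sector, end_sector) with
 * KEY_TYPE_reservation keys so that later writes to the range can't fail with
 * -ENOSPC:
 */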
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
			     u64 start_sector, u64 end_sector)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
	unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			POS(inode->v.i_ino, start_sector),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
		s64 i_sectors_delta = 0;
		struct disk_reservation disk_res = { 0 };
		struct quota_res quota_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;
		unsigned sectors;
		u32 snapshot;

		bch2_trans_begin(&trans);

		ret = bch2_subvolume_get_snapshot(&trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;
		/* already reserved */
		if (k.k->type == KEY_TYPE_reservation &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_advance(&iter);
			continue;
		}
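
		/*
		 * Build a reservation key for this slot, trimmed to the
		 * portion that overlaps the range being allocated:
		 */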
		bkey_reservation_init(&reservation.k_i);
		reservation.k.type = KEY_TYPE_reservation;
		reservation.k.p = k.k->p;
		reservation.k.size = k.k->size;

		bch2_cut_front(iter.pos, &reservation.k_i);
		bch2_cut_back(end_pos, &reservation.k_i);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
		if (!bkey_extent_is_allocation(k.k)) {
			ret = bch2_quota_reservation_add(c, inode,
					&quota_res,
					sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		if (reservation.v.nr_replicas < replicas ||
		    bch2_bkey_sectors_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res, sectors,
							replicas, 0);
			if (unlikely(ret))
				goto bkey_err;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
					 &reservation.k_i,
					 &disk_res, NULL,
					 0, &i_sectors_delta, true);
		if (ret)
			goto bkey_err;
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		bch2_disk_reservation_put(c, &disk_res);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
	}

	bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
	mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
	if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
		struct quota_res quota_res = { 0 };
		s64 i_sectors_delta = 0;

		bch2_fpunch_at(&trans, &iter, inode_inum(inode),
			       end_sector, &i_sectors_delta);
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
		bch2_quota_reservation_put(c, inode, &quota_res);
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			    loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_down(offset, block_bytes(c));
	u64 block_end = round_up(end, block_bytes(c));
	bool truncated_last_page = false;
	int ret, ret2 = 0;

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			return ret;
	}
	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = bch2_truncate_pages(inode, offset, end);
		if (unlikely(ret < 0))
			return ret;

		truncated_last_page = ret;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start = round_up(offset, block_bytes(c));
		block_end = round_down(end, block_bytes(c));
	}
	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

	/*
	 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
	 * so that the VFS cache i_size is consistent with the btree i_size:
	 */
	if (ret &&
	    !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
		goto err;

	if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
		end = inode->v.i_size;

	if (end >= inode->v.i_size &&
	    (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
	     !(mode & FALLOC_FL_KEEP_SIZE))) {
		spin_lock(&inode->v.i_lock);
		i_size_write(&inode->v, end);
		spin_unlock(&inode->v.i_lock);

		mutex_lock(&inode->ei_update_lock);
		ret2 = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	return ret ?: ret2;
}
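
/*
 * Dispatch the fallocate() modes we implement. As an illustration
 * (hypothetical userspace call, not part of this file):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, 0, 4096);
 *
 * is routed to bchfs_fpunch(), while a plain fallocate(fd, 0, offset, len)
 * takes the bchfs_fallocate() path:
 */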
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	long ret;

	if (!percpu_ref_tryget_live(&c->writes))
		return -EROFS;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = file_modified(file);
	if (ret)
		goto err;

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
	else
		ret = -EOPNOTSUPP;
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);
	percpu_ref_put(&c->writes);

	return bch2_err_class(ret);
}
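
/*
 * Reserve quota for [start, end), but only for sectors not already backed by
 * allocated extents: e.g. if 6 sectors of a 16-sector range are already
 * allocated, only 10 sectors of quota are reserved:
 */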
static int quota_reserve_range(struct bch_inode_info *inode,
			       struct quota_res *res,
			       u64 start, u64 end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot;
	u64 sectors = end - start;
	u64 pos = start;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(inode->v.i_ino, pos, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(&trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
	       !(ret = bkey_err(k))) {
		if (bkey_extent_is_allocation(k.k)) {
			u64 s = min(end, k.k->p.offset) -
				max(start, bkey_start_offset(k.k));
			BUG_ON(s > sectors);
			sectors -= s;
		}
		bch2_btree_iter_advance(&iter);
	}
	pos = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);

	if (ret)
		return ret;

	return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;
	u64 aligned_len;
	loff_t ret = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;
	bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);
	if (ret)
		goto err;

	ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
				  (pos_dst + aligned_len) >> 9);
	if (ret)
		goto err;

	file_update_time(file_dst);

	mark_pagecache_unallocated(src, pos_src >> 9,
				   (pos_src + aligned_len) >> 9);

	ret = bch2_remap_range(c,
			       inode_inum(dst), pos_dst >> 9,
			       inode_inum(src), pos_src >> 9,
			       aligned_len >> 9,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);
	i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);

	if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
	    IS_SYNC(file_inode(file_dst)))
		ret = bch2_flush_inode(c, inode_inum(dst));
err:
	bch2_quota_reservation_put(c, dst, &quota_res);
	bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	return bch2_err_class(ret);
}
/* fseek: */

static int page_data_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (s)
		for (i = offset >> 9; i < PAGE_SECTORS; i++)
			if (s->s[i].state >= SECTOR_DIRTY)
				return i << 9;

	return -1;
}
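
/*
 * Scan the pagecache for data in [start_offset, end_offset) that the extents
 * btree may not know about yet (dirty pages not yet written back); returns
 * end_offset if none is found:
 */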
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct folio_batch fbatch;
	pgoff_t start_index = start_offset >> PAGE_SHIFT;
	pgoff_t end_index = end_offset >> PAGE_SHIFT;
	pgoff_t index = start_index;
	unsigned i;
	loff_t ret;
	int offset;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(vinode->i_mapping,
				  &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			offset = page_data_offset(&folio->page,
					folio->index == start_index
					? start_offset & (PAGE_SIZE - 1)
					: 0);
			if (offset >= 0) {
				ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
					    offset,
					    start_offset, end_offset);
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				return ret;
			}

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return end_offset;
}
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_data = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
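
/*
 * Helpers for finding a hole in the pagecache: __page_hole_offset() returns
 * the byte offset of the first hole within the page at or after @offset, or
 * -1 if the rest of the page is data:
 */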
static int __page_hole_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (!s)
		return 0;

	for (i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s->s[i].state < SECTOR_DIRTY)
			return i << 9;

	return -1;
}

static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page;
	int pg_offset;
	loff_t ret = -1;

	page = find_lock_page(mapping, index);
	if (!page)
		return offset;

	pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
	if (pg_offset >= 0)
		ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;

	unlock_page(page);

	return ret;
}
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset, hole;

	while (offset < end_offset) {
		hole = page_hole_offset(mapping, offset);
		if (hole >= 0 && hole <= end_offset)
			return max(start_offset, hole);

		offset += PAGE_SIZE;
		offset &= PAGE_MASK;
	}

	return end_offset;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
			SPOS(inode->v.i_ino, offset >> 9, snapshot),
			BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		ret = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_DATA:
		ret = bch2_seek_data(file, offset);
		break;
	case SEEK_HOLE:
		ret = bch2_seek_hole(file, offset);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return bch2_err_class(ret);
}
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		ret = -ENOMEM;

	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

#endif /* NO_BCACHEFS_FS */