// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "extent_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
static inline struct address_space *faults_disabled_mapping(void)
{
	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}

static inline void set_fdm_dropped_locks(void)
{
	current->faults_disabled_mapping =
		(void *) (((unsigned long) current->faults_disabled_mapping)|1);
}

static inline bool fdm_dropped_locks(void)
{
	return ((unsigned long) current->faults_disabled_mapping) & 1;
}
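/*
 * Illustrative note (sketch, not part of the original file):
 * faults_disabled_mapping stores an address_space pointer in the task struct
 * with the low bit used as an out-of-band flag, relying on pointer alignment.
 * The pattern, for any suitably aligned pointer:
 *
 *	void *tagged = (void *) ((unsigned long) ptr | 1);	 // set flag
 *	bool flag    = (unsigned long) tagged & 1;		 // test flag
 *	void *clean  = (void *) ((unsigned long) tagged & ~1UL); // strip flag
 */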
struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct completion		done;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;
	u64				written;

	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	struct bch_read_bio		rbio;
};
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
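/*
 * Illustrative usage sketch (not part of the original file): quota
 * reservations follow a get/put pattern - reserve before dirtying pages,
 * then either consume the reservation via i_sectors_acct() or release it:
 *
 *	struct quota_res res = { 0 };
 *
 *	if (!bch2_quota_reservation_add(c, inode, &res, sectors, true)) {
 *		// ... work that may dirty up to @sectors ...
 *		bch2_quota_reservation_put(c, inode, &res);
 *	}
 */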
/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	if (!sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas: */
	unsigned		nr_replicas:3;

	/* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
	unsigned		replicas_reserved:3;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	}			state:2;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	struct bch_page_sector	s[PAGE_SECTORS];
};

static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	kfree(detach_page_private(page));
}

static void bch2_page_state_release(struct page *page)
{
	EBUG_ON(!PageLocked(page));
	__bch2_page_state_release(page);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	attach_page_private(page, s);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
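/*
 * Illustrative note (not part of the original file): every page cache page
 * carries a bch_page_state in page->private, one bch_page_sector per
 * 512-byte sector. It tracks how many replicas are allocated on disk, how
 * much disk reservation the page owns, and a small unallocated -> reserved
 * -> dirty -> allocated state machine that drives i_blocks accounting and
 * the writeback path below.
 */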
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
static int bch2_get_page_disk_reservation(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}
struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}

static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}
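/*
 * Illustrative usage sketch (not part of the original file): the buffered
 * write and mkwrite paths pair these helpers - get a disk + quota
 * reservation for the byte range about to be dirtied, mark the range dirty
 * (which transfers the reservation into the page state), then drop whatever
 * is left over:
 *
 *	struct bch2_page_reservation res;
 *
 *	bch2_page_reservation_init(c, inode, &res);
 *	if (!bch2_page_reservation_get(c, inode, page, &res, offset, len, true)) {
 *		bch2_set_page_dirty(c, inode, page, &res, offset, len);
 *		bch2_page_reservation_put(c, inode, &res);
 *	}
 */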
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		if (s->s[i].state == SECTOR_DIRTY) {
			dirty_sectors++;
			s->s[i].state = SECTOR_UNALLOCATED;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	if (dirty_sectors)
		i_sectors_acct(c, inode, NULL, -dirty_sectors);

	bch2_page_state_release(page);
}
static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;

		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	spin_unlock(&s->lock);

	if (dirty_sectors)
		i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		__set_page_dirty_nobuffers(page);
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);
	int ret;

	if (fdm == mapping)
		return VM_FAULT_SIGBUS;

	/* Lock ordering: */
	if (fdm > mapping) {
		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
			goto got_lock;

		bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);

		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);

		bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;
	}

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);
got_lock:
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return ret;
}
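/*
 * Illustrative note (not part of the original file): the dance above avoids
 * a deadlock between O_DIRECT writes and page faults. A dio write holds one
 * inode's pagecache_block lock while faulting in its userspace buffer; if
 * that buffer is itself mmapped from a bcachefs file, the fault handler
 * needs pagecache_add on the second inode. When the lock ordering is wrong
 * (fdm > mapping), we drop the dio side's lock, cycle the fault side's
 * lock, retake the dio lock, and use the low pointer bit to tell the dio
 * path it must shoot down the page cache and retry.
 */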
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	int ret = VM_FAULT_LOCKED;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	bch2_page_reservation_put(c, inode, &res);

	wait_for_stable_page(page);
out:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}
void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}
#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(!PageLocked(newpage));

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	unsigned i, nr_pages = readahead_count(ractl);

	memset(iter, 0, sizeof(*iter));

	iter->mapping	= ractl->mapping;
	iter->offset	= readahead_index(ractl);
	iter->nr_pages	= nr_pages;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		__bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
		put_page(iter->pages[i]);
	}

	return 0;
}
static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	if (iter->idx >= iter->nr_pages)
		return NULL;

	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}
static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = k.k->type == KEY_TYPE_reservation
		? SECTOR_RESERVED
		: SECTOR_ALLOCATED;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = bch2_page_state(bv.bv_page);
		unsigned i;

		for (i = bv.bv_offset >> 9;
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
			s->s[i].state = state;
		}
	}
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}
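/*
 * Illustrative note (not part of the original file): when an extent is
 * checksummed or compressed, reading part of it costs about as much as
 * reading all of it, so the read path opportunistically widens the bio to
 * cover the whole extent - first consuming pages the readahead window
 * already gave us, then (if get_more) allocating pages beyond it.
 */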
static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_on_stack sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();

	bkey_on_stack_init(&sk);
retry:
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;

		bch2_btree_iter_set_pos(iter,
				POS(inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bkey_on_stack_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(trans);

		if (readpages_iter)
			readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
					    extent_partial_reads_expensive(k));

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);

		bch2_read_extent(trans, rbio, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}

	if (ret == -EINTR)
		goto retry;

	if (ret) {
		bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
		bio_endio(&rbio->bio);
	}

	bkey_on_stack_exit(&sk, c);
}
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_PAGES);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
			   &readpages_iter);
	}

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_page_state_create(page, __GFP_NOFAIL);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTOR_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bchfs_read(&trans, iter, rbio, inum, NULL);

	bch2_trans_exit(&trans);
}
int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
/* writepages: */

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}
static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	 * BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
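/*
 * Illustrative note (not part of the original file): s->write_count acts as
 * a per-page refcount on writeback. __bch2_writepage() sets it to 1, takes
 * an extra ref for each bio the page is added to, and drops the initial ref
 * when done; whoever brings it to zero - here or in __bch2_writepage() -
 * calls end_page_writeback(). This is what lets a single page be split
 * across multiple write bios.
 */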
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one
 * if possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      BIO_MAX_PAGES,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;

	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op_journal_seq_set(op, &inode->ei_journal_seq);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}
static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	ret = bch2_get_page_disk_reservation(c, inode, page, true);
	if (ret) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		return 0;
	}

	/* Before unlocking the page, get copy of reservations: */
	orig = *s;

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;

		for (i = offset; i < offset + sectors; i++) {
			reserved_sectors += orig.s[i].replicas_reserved;
			dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
		}

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_PAGES * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
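/*
 * Illustrative note (not part of the original file): the loop above walks
 * the page's per-sector state looking for runs of contiguous dirty sectors,
 * so one page can contribute several discontiguous ranges, possibly to
 * different write bios; a pending bio is flushed early whenever the replica
 * count, bio size, or end sector would no longer line up with the next run.
 */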
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}
int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	kfree(res);
	*fsdata = NULL;
	return ret;
}
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}
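/*
 * Illustrative note (not part of the original file): ->write_begin
 * allocates the bch2_page_reservation and hands it to ->write_end through
 * *fsdata, so the reservation obtained before the user copy is the one
 * consumed (or released) after it. The generic VFS write path guarantees
 * the two calls are paired.
 */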
#define WRITE_BATCH_PAGES	32

static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(!len);
	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
		if (!pages[i]) {
			nr_pages = i;
			if (!i) {
				ret = -ENOMEM;
				goto out;
			}
			len = min_t(unsigned, len,
				    nr_pages * PAGE_SIZE - offset);
			break;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);
retry_reservation:
		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);

		if (ret && !PageUptodate(page)) {
			ret = bch2_read_single_page(page, mapping);
			if (!ret)
				goto retry_reservation;
		}

		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
						iter, pg_offset, pg_len);

		if (!pg_copied)
			break;

		if (!PageUptodate(page) &&
		    pg_copied != PAGE_SIZE &&
		    pos + copied + pg_copied < inode->v.i_size) {
			zero_user(page, 0, PAGE_SIZE);
			break;
		}

		flush_dcache_page(page);
		iov_iter_advance(iter, pg_copied);
		copied += pg_copied;

		if (pg_copied != pg_len)
			break;
	}

	if (!copied)
		goto out;

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
				       PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
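/*
 * Illustrative note (not part of the original file): the closure refcount
 * is pre-biased so completion needs no extra atomic op. In the async case
 * the closure is set up so the final closure_put() from the last split
 * bio's endio runs bch2_dio_read_complete() as a destructor; in the sync
 * case one extra ref is held so closure_sync() can wait for every split
 * bio to finish before the caller tears the closure down.
 */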
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		ret = filemap_write_and_wait_range(mapping,
					iocb->ki_pos,
					iocb->ki_pos + count - 1);
		if (ret < 0)
			return ret;

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	}

	return ret;
}
/* O_DIRECT writes */

static void bch2_dio_write_loop_async(struct bch_write_op *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	if (dio->loop)
		goto loop;

	while (1) {
		iter_count = dio->iter.count;

		if (kthread)
			kthread_use_mm(dio->mm);
		BUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;
		if (kthread)
			kthread_unuse_mm(dio->mm);

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
		dio->op.end_io		= bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);

		if ((req->ki_flags & IOCB_DSYNC) &&
		    !c->opts.journal_flush_disabled)
			dio->op.flags |= BCH_WRITE_FLUSH;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_check_range_allocated(c, dio->op.pos,
				bio_sectors(bio), dio->op.opts.data_replicas))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->sync = sync = true;
					goto do_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
do_io:
		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (sync)
			wait_for_completion(&dio->done);
		else
			return -EIOCBQUEUED;
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		req->ki_pos += (u64) dio->op.written << 9;
		dio->written += dio->op.written;

		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);

		bio_for_each_segment_all(bv, bio, iter)
			put_page(bv->bv_page);
		if (!dio->iter.count || dio->op.error)
			break;

		bio_reset(bio);
		reinit_completion(&dio->done);
	}

	ret = dio->op.error ?: ((long) dio->written << 9);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret, 0);
		ret = -EIOCBQUEUED;
	}

	return ret;
}

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	if (dio->sync)
		complete(&dio->done);
	else
		bch2_dio_write_loop(dio);
}
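/*
 * Illustrative note (not part of the original file): for async O_DIRECT
 * writes the kiocb can outlive the submitting task's iov array, so the loop
 * copies the iovecs first - into inline_vecs when they fit, else into a
 * kmalloc'd array - before returning -EIOCBQUEUED; subsequent passes then
 * run from the write op's completion context via
 * bch2_dio_write_loop_async().
 */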
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1))) {
		ret = -EINVAL;
		goto err;
	}

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	init_completion(&dio->done);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err_put_bio;

	ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
	if (unlikely(ret))
		goto err_put_bio;

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT)
		return bch2_direct_write(iocb, from);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
/* fsync: */

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
		goto out;

	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		return ret;
out:
	if (!c->opts.journal_flush_disabled)
		ret = bch2_journal_flush_seq(&c->journal,
					     inode->ei_journal_seq);
	ret2 = file_check_and_advance_wb_err(file);

	return ret ?: ret2;
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}

	return bch2_trans_exit(&trans) ?: ret;
}
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c,
				POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	page_mkclean(page);
	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}
static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
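/*
 * Illustrative note (not part of the original file): truncating to a
 * non-page-aligned offset only needs to touch the single page straddling
 * the new EOF - zero its tail, mark those sectors unallocated, and redirty
 * the page so writeback persists the zeroes.
 */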
static int bch2_extend(struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);
	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	/*
	 * fetch current on disk i_size: inode is locked, i_size can only
	 * increase underneath us:
	 */
	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
	ret = PTR_ERR_OR_ZERO(iter);
	bch2_trans_exit(&trans);

	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	BUG_ON(inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(inode, &inode_u, iattr);
		goto err;
	}

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode->v.i_ino,
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret))
		goto err;

	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	return ret;
}
/* fallocate: */

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
	u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);

	return ret;
}
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_on_stack copy;
	struct btree_trans trans;
	struct btree_iter *src, *dst;
	loff_t shift, new_size;
	u64 src_start;
	int ret;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	bkey_on_stack_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	if (insert) {
		ret = -EFBIG;
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			goto err;

		ret = -EINVAL;
		if (offset >= inode->v.i_size)
			goto err;

		src_start	= U64_MAX;
		shift		= len;
	} else {
		ret = -EINVAL;
		if (offset + len >= inode->v.i_size)
			goto err;

		src_start	= offset + len;
		shift		= -len;
	}

	new_size = inode->v.i_size + shift;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		goto err;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  offset >> 9, (offset + len) >> 9,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			goto err;
	}

	src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	dst = bch2_trans_copy_iter(&trans, src);

	while (1) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;

		k = insert
			? bch2_btree_iter_peek_prev(src)
			: bch2_btree_iter_peek(src);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));

		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
reassemble:
		bkey_on_stack_reassemble(&copy, c, k);

		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
		if (ret)
			goto bkey_err;

		if (bkey_cmp(atomic_end, copy.k->k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, copy.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		if (copy.k->k.size == k.k->size) {
			/*
			 * If we're moving the entire extent, we can skip
			 * running triggers:
			 */
			trigger_flags |= BTREE_TRIGGER_NORUN;
		} else {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k->k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));

		ret =   bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res,
					  &inode->ei_journal_seq,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);
bkey_err:
		if (!ret)
			bch2_btree_iter_set_pos(src, next_pos);

		if (ret == -EINTR)
			ret = 0;
		if (ret)
			goto err;

		bch2_trans_cond_resched(&trans);
	}
	bch2_trans_unlock(&trans);

	if (!insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	bch2_trans_exit(&trans);
	bkey_on_stack_exit(&copy, c);
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);
	return ret;
}
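/*
 * Illustrative note (not part of the original file): FALLOC_FL_COLLAPSE_RANGE
 * and FALLOC_FL_INSERT_RANGE share this helper because both are "shift every
 * extent after @offset by @len" operations, differing only in direction.
 * Collapse punches the range first and walks forward; insert grows i_size
 * first and walks backward (peek_prev) so extents are always moved into
 * space that is already vacant, with each delete+copy pair committed
 * atomically in one btree transaction.
 */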
2587 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2588 loff_t offset, loff_t len)
2590 struct address_space *mapping = inode->v.i_mapping;
2591 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2592 struct btree_trans trans;
2593 struct btree_iter *iter;
2594 struct bpos end_pos;
2595 loff_t end = offset + len;
2596 loff_t block_start = round_down(offset, block_bytes(c));
2597 loff_t block_end = round_up(end, block_bytes(c));
2599 unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2602 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			goto err;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch2_truncate_page(inode,
					   offset >> PAGE_SHIFT,
					   offset, end);
		if (!ret &&
		    offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
			ret = __bch2_truncate_page(inode,
						   end >> PAGE_SHIFT,
						   offset, end);
		if (unlikely(ret))
			goto err;

		truncate_pagecache_range(&inode->v, offset, end - 1);
	}

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
			POS(inode->v.i_ino, block_start >> 9),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	end_pos = POS(inode->v.i_ino, block_end >> 9);
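
	/*
	 * With BTREE_ITER_SLOTS the iterator also returns keys for unallocated
	 * ranges, so holes can be inspected and overwritten with reservations
	 * just like existing extents:
	 */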
	while (bkey_cmp(iter->pos, end_pos) < 0) {
		s64 i_sectors_delta = 0;
		struct disk_reservation disk_res = { 0 };
		struct quota_res quota_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;

		bch2_trans_begin(&trans);

		k = bch2_btree_iter_peek_slot(iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		/* already reserved */
		if (k.k->type == KEY_TYPE_reservation &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_next_slot(iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_next_slot(iter);
			continue;
		}

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type	= KEY_TYPE_reservation;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch2_cut_front(iter->pos,	&reservation.k_i);
		bch2_cut_back(end_pos,		&reservation.k_i);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);

		if (!bkey_extent_is_allocation(k.k)) {
			ret = bch2_quota_reservation_add(c, inode,
					&quota_res, sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		if (reservation.v.nr_replicas < replicas ||
		    bch2_bkey_sectors_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res, sectors,
							replicas, 0);
			if (unlikely(ret))
				goto bkey_err;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		ret = bch2_extent_update(&trans, iter, &reservation.k_i,
				&disk_res, &inode->ei_journal_seq,
				0, &i_sectors_delta);
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		bch2_disk_reservation_put(c, &disk_res);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			goto err;
	}

	/*
	 * Do we need to extend the file?
	 *
	 * If we zeroed up to the end of the file, we dropped whatever writes
	 * were going to write out the current i_size, so we have to extend
	 * manually even if FL_KEEP_SIZE was set:
	 */
	if (end >= inode->v.i_size &&
	    (!(mode & FALLOC_FL_KEEP_SIZE) ||
	     (mode & FALLOC_FL_ZERO_RANGE))) {
		struct btree_iter *inode_iter;
		struct bch_inode_unpacked inode_u;

		do {
			bch2_trans_begin(&trans);
			inode_iter = bch2_inode_peek(&trans, &inode_u,
						     inode->v.i_ino, 0);
			ret = PTR_ERR_OR_ZERO(inode_iter);
		} while (ret == -EINTR);

		bch2_trans_unlock(&trans);

		if (ret)
			goto err;

		/*
		 * Sync existing appends before extending i_size,
		 * as in bch2_extend():
		 */
		ret = filemap_write_and_wait_range(mapping,
					inode_u.bi_size, S64_MAX);
		if (ret)
			goto err;

		if (mode & FALLOC_FL_KEEP_SIZE)
			end = inode->v.i_size;
		else
			i_size_write(&inode->v, end);

		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	bch2_trans_exit(&trans);
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);
	return ret;
}
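
/*
 * Entry point for the fallocate file operation; the mode bits select between
 * preallocation, hole punching and range insert/collapse, so e.g. a userspace
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset, len) lands
 * in bchfs_fpunch(). Every variant holds a ref on c->writes so the filesystem
 * can't go read-only while the operation is in flight.
 */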
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	long ret;

	if (!percpu_ref_tryget(&c->writes))
		return -EROFS;

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
	else
		ret = -EOPNOTSUPP;

	percpu_ref_put(&c->writes);

	return ret;
}
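
/*
 * After a reflink copy the source's extents are shared, so the per-sector
 * nr_replicas cached in the source's page state no longer means those pages
 * can be written in place; clear it so future writes to these pages take
 * fresh disk reservations.
 */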
static void mark_range_unallocated(struct bch_inode_info *inode,
				   loff_t start, loff_t end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
	struct pagevec pvec;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i, j;

		nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
						&index, end_index);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct bch_page_state *s;

			lock_page(page);
			s = bch2_page_state(page);
			if (s) {
				spin_lock(&s->lock);
				for (j = 0; j < PAGE_SECTORS; j++)
					s->s[j].nr_replicas = 0;
				spin_unlock(&s->lock);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	} while (index <= end_index);
}
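
/*
 * remap_file_range implements reflink: rather than copying any data, the
 * destination file is pointed at the same indirect extents as the source
 * via bch2_remap_range().
 */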
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	s64 i_sectors_delta = 0;
	u64 aligned_len;
	loff_t ret = 0;

	if (!c->opts.reflink)
		return -EOPNOTSUPP;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;

	bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

	file_update_time(file_dst);
	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);
	if (ret)
		goto err;

	mark_range_unallocated(src, pos_src, pos_src + aligned_len);

	ret = bch2_remap_range(c,
			       POS(dst->v.i_ino, pos_dst >> 9),
			       POS(src->v.i_ino, pos_src >> 9),
			       aligned_len >> 9,
			       &dst->ei_journal_seq,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);

	/* XXX get a quota reservation */
	i_sectors_acct(c, dst, NULL, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);
err:
	bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
	return ret;
}
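
/* fseek: */

/*
 * lseek(fd, offset, SEEK_DATA/SEEK_HOLE) has to consider both the extents
 * btree and the page cache: dirty data that hasn't been written back yet
 * exists only in the page cache, tracked as per-sector state in struct
 * bch_page_state.
 */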
static int page_data_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (s)
		for (i = offset >> 9; i < PAGE_SECTORS; i++)
			if (s->s[i].state >= SECTOR_DIRTY)
				return i << 9;

	return -1;
}
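
/*
 * Scan the page cache in [start_offset, end_offset) for the first sector at
 * or above SECTOR_DIRTY, i.e. data that a pure btree walk would miss:
 */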
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	struct page *page;
	pgoff_t start_index	= start_offset >> PAGE_SHIFT;
	pgoff_t end_index	= end_offset >> PAGE_SHIFT;
	pgoff_t index		= start_index;
	loff_t ret;
	int offset;

	while (index <= end_index) {
		if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
			lock_page(page);

			offset = page_data_offset(page,
					page->index == start_index
					? start_offset & (PAGE_SIZE - 1)
					: 0);
			if (offset >= 0) {
				ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
					    offset,
					    start_offset, end_offset);
				unlock_page(page);
				put_page(page);
				return ret;
			}

			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}

static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
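
/*
 * Seeking for holes is the mirror image: a hole in the extents btree only
 * counts if the page cache doesn't hold dirty data for the same range, so
 * every candidate hole is cross-checked against per-page sector state.
 */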
static int __page_hole_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (!s)
		return 0;

	for (i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s->s[i].state < SECTOR_DIRTY)
			return i << 9;

	return -1;
}

static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page;
	int pg_offset;
	loff_t ret = -1;

	page = find_lock_entry(mapping, index);
	if (!page || xa_is_value(page))
		return offset;

	pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
	if (pg_offset >= 0)
		ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;

	unlock_page(page);
	put_page(page);

	return ret;
}

static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset, hole;

	while (offset < end_offset) {
		hole = page_hole_offset(mapping, offset);
		if (hole >= 0 && hole <= end_offset)
			return max(start_offset, hole);

		offset += PAGE_SIZE;
		offset &= PAGE_MASK;
	}

	return end_offset;
}

static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9),
			   BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch2_seek_data(file, offset);
	case SEEK_HOLE:
		return bch2_seek_hole(file, offset);
	}

	return -EINVAL;
}
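
/*
 * The fs-io biosets: each is sized with offsetof() so that the struct bio is
 * embedded at the correct position inside its containing bch_writepage_io,
 * dio_read or dio_write, letting a single bioset allocation hand back the
 * whole container.
 */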
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		ret = -ENOMEM;

	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

#endif /* NO_BCACHEFS_FS */