// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "btree_update.h"
#include "extent_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
static inline struct address_space *faults_disabled_mapping(void)
	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);

static inline void set_fdm_dropped_locks(void)
	current->faults_disabled_mapping =
		(void *) (((unsigned long) current->faults_disabled_mapping)|1);

static inline bool fdm_dropped_locks(void)
	return ((unsigned long) current->faults_disabled_mapping) & 1;
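/*
 * current->faults_disabled_mapping does double duty: the aligned pointer
 * bits record which mapping has page faults disabled (to break the dio
 * write vs. page fault lock inversion), and bit 0 - always free, since
 * mappings are at least word aligned - records whether the fault handler
 * had to drop and retake ei_pagecache_lock. See bch2_page_fault() below.
 */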
struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct completion		done;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	bool				should_dirty;
	struct bch_read_bio		rbio;
};

/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
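/*
 * -EBUSY from invalidate_inode_pages2_range() means it found a page that
 * was redirtied or locked after the flush above, so we flush and retry.
 * A dio caller uses it roughly like:
 *
 *	ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
 *				req->ki_pos + iter->count - 1);
 *
 * (as bch2_direct_write() below does before issuing the write).
 */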
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif

/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}

static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
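/*
 * e.g. writeback allocating 8 sectors for a page calls this with +8 and
 * the page's quota reservation, which transfers the reserved sectors to
 * i_blocks without recharging quota; fpunch calls it with a negative
 * count and a NULL quota_res to give the space back.
 */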
/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas: */
	unsigned		nr_replicas:3;

	/* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
	unsigned		replicas_reserved:3;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	}			state:2;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	struct bch_page_sector	s[PAGE_SECTORS];
};

static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	kfree(detach_page_private(page));
}

static void bch2_page_state_release(struct page *page)
{
	EBUG_ON(!PageLocked(page));
	__bch2_page_state_release(page);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	attach_page_private(page, s);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}

static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
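/*
 * e.g. with nr_replicas = 2: a sector that already has 1 fully allocated
 * replica and 0 reserved needs 1 more reserved sector; one with 2
 * allocated replicas needs 0 - the max() clamps the result when the
 * sector already has more than we're asking for.
 */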
static int bch2_get_page_disk_reservation(struct bch_fs *c,
					  struct bch_inode_info *inode,
					  struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}

struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}

static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						   res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}
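/*
 * Note the error path in bch2_page_reservation_get(): only the disk
 * sectors added by that call are released (via the temporary
 * disk_reservation), not the whole reservation accumulated in res->disk
 * by earlier calls against other parts of the page.
 */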
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		if (s->s[i].state == SECTOR_DIRTY) {
			dirty_sectors++;
			s->s[i].state = SECTOR_UNALLOCATED;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	if (dirty_sectors)
		i_sectors_acct(c, inode, NULL, -dirty_sectors);

	bch2_page_state_release(page);
}

static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						      res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;

		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	spin_unlock(&s->lock);

	if (dirty_sectors)
		i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		__set_page_dirty_nobuffers(page);
}

vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);
	int ret;

	if (fdm == mapping)
		return VM_FAULT_SIGBUS;

	/* Lock ordering: */
	if (fdm > mapping) {
		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
			goto got_lock;

		bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);

		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);

		bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;
	}

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);
got_lock:
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return ret;
}

vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	int ret = VM_FAULT_LOCKED;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	bch2_page_reservation_put(c, inode, &res);

	wait_for_stable_page(page);
out:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}

void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(!PageLocked(newpage));

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	unsigned i, nr_pages = readahead_count(ractl);

	memset(iter, 0, sizeof(*iter));

	iter->mapping	= ractl->mapping;
	iter->offset	= readahead_index(ractl);
	iter->nr_pages	= nr_pages;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		__bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
		put_page(iter->pages[i]);
	}

	return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	if (iter->idx >= iter->nr_pages)
		return NULL;

	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}
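/*
 * Note that readpage_iter_next() only peeks: callers advance iter->idx
 * themselves once the page has actually been added to a bio (see
 * bch2_readahead() below).
 */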
static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = k.k->type == KEY_TYPE_reservation
		? SECTOR_RESERVED
		: SECTOR_ALLOCATED;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = bch2_page_state(bv.bv_page);
		unsigned i;

		for (i = bv.bv_offset >> 9;
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
			s->s[i].state = state;
		}
	}
}
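/*
 * Readahead wants to know whether partial reads of an extent are cheap:
 * if any crc entry is checksummed or compressed we have to read (and
 * verify/decompress) the whole extent anyway, so it pays to widen the
 * bio to cover all of it up front.
 */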
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}
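/*
 * The read path works extent by extent: look up the extent covering the
 * bio's current sector, clamp the bio to that extent, issue the
 * fragment, then advance and repeat until BCH_READ_LAST_FRAGMENT is set
 * on the final piece.
 */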
static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->start_time = local_clock();

	bch2_bkey_buf_init(&sk);
retry:
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		bch2_btree_iter_set_pos(iter,
				POS(inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(trans);

		if (readpages_iter)
			readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
					    extent_partial_reads_expensive(k));

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter->pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}

	if (ret == -EINTR)
		goto retry;

	if (ret) {
		bch_err_inum_ratelimited(c, inum,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
				   BTREE_ITER_SLOTS);

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_PAGES);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
			   &readpages_iter);
	}

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);
}

static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_page_state_create(page, __GFP_NOFAIL);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTOR_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
				   BTREE_ITER_SLOTS);

	bchfs_read(&trans, iter, rbio, inum, NULL);

	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
}
int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}

static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
/* writepages: */

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      BIO_MAX_PAGES,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;

	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op_journal_seq_set(op, &inode->ei_journal_seq);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}
static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	ret = bch2_get_page_disk_reservation(c, inode, page, true);
	if (ret) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		return 0;
	}

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	orig = *s;
	spin_unlock(&s->lock);

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;

		for (i = offset; i < offset + sectors; i++) {
			reserved_sectors += orig.s[i].replicas_reserved;
			dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
		}

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_PAGES * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
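/*
 * Note the write_count protocol above: it starts at 1 so writeback can't
 * complete while the page is still being carved into bios; each bio
 * holds its own count, and the final put ends writeback right away if no
 * sectors were dirty and no I/O was issued.
 */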
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	kfree(res);
	*fsdata = NULL;
	return ret;
}

int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

#define WRITE_BATCH_PAGES	32
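/*
 * Buffered writes are staged through batches of up to WRITE_BATCH_PAGES
 * pages at a time - 128k with 4k pages - so one reservation pass and one
 * dirtying pass can cover many pages.
 */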
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(!len);
	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
		if (!pages[i]) {
			nr_pages = i;
			if (!i) {
				ret = -ENOMEM;
				goto out;
			}
			len = min_t(unsigned, len,
				    nr_pages * PAGE_SIZE - offset);
			break;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);
retry_reservation:
		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);

		if (ret && !PageUptodate(page)) {
			ret = bch2_read_single_page(page, mapping);
			if (!ret)
				goto retry_reservation;
		}

		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
						iter, pg_offset, pg_len);

		if (!pg_copied)
			break;

		if (!PageUptodate(page) &&
		    pg_copied != PAGE_SIZE &&
		    pos + copied + pg_copied < inode->v.i_size) {
			zero_user(page, 0, PAGE_SIZE);
			break;
		}

		flush_dcache_page(page);
		iov_iter_advance(iter, pg_copied);
		copied += pg_copied;

		if (pg_copied != pg_len)
			break;
	}

	if (!copied)
		goto out;

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
				       PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
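/*
 * Paired with bio_set_pages_dirty() at submission: if we dirtied the
 * pages up front we must let bio_check_pages_dirty() re-dirty and
 * release them at completion; otherwise we just drop our page refs.
 */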
static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to skip
	 * the dirtying of requests that are internal from the kernel (i.e. from
	 * loopback), because we'll deadlock on page_lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);

		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
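/*
 * Returning -EIOCBQUEUED tells the VFS the request was submitted
 * asynchronously; the result is delivered later through ->ki_complete()
 * (here via bch2_dio_read_complete()).
 */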
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		ret = filemap_write_and_wait_range(mapping,
						   iocb->ki_pos,
						   iocb->ki_pos + count - 1);
		if (ret < 0)
			return ret;

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	}

	return ret;
}
/* O_DIRECT writes */

static void bch2_dio_write_loop_async(struct bch_write_op *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	if (dio->loop)
		goto loop;

	while (1) {
		iter_count = dio->iter.count;

		if (kthread)
			kthread_use_mm(dio->mm);
		BUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;
		if (kthread)
			kthread_unuse_mm(dio->mm);

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
		dio->op.end_io		= bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);

		if ((req->ki_flags & IOCB_DSYNC) &&
		    !c->opts.journal_flush_disabled)
			dio->op.flags |= BCH_WRITE_FLUSH;
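		/*
		 * If the disk reservation fails, the write may still be able
		 * to proceed: bch2_check_range_allocated() checks whether
		 * we're purely overwriting existing, uncompressed,
		 * sufficiently-replicated data, in which case no new space
		 * is needed.
		 */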
		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_check_range_allocated(c, dio->op.pos,
						bio_sectors(bio),
						dio->op.opts.data_replicas,
						dio->op.opts.compression != 0))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->sync = sync = true;
					goto do_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
do_io:
		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (sync)
			wait_for_completion(&dio->done);
		else
			return -EIOCBQUEUED;
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		req->ki_pos += (u64) dio->op.written << 9;
		dio->written += dio->op.written;

		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);

		if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);

		if (dio->op.error) {
			set_bit(EI_INODE_ERROR, &inode->ei_flags);
			ret = dio->op.error;
			goto err;
		}

		if (!dio->iter.count)
			break;

		bio_reset(bio);
		reinit_completion(&dio->done);
	}

	ret = dio->op.error ?: ((long) dio->written << 9);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret, 0);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	if (dio->sync)
		complete(&dio->done);
	else
		bch2_dio_write_loop(dio);
}
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	init_completion(&dio->done);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err_put_bio;

	ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
	if (unlikely(ret))
		goto err_put_bio;

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT)
		return bch2_direct_write(iocb, from);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
/* fsync: */

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
		goto out;

	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		return ret;
out:
	if (!c->opts.journal_flush_disabled)
		ret = bch2_journal_flush_seq(&c->journal,
					     inode->ei_journal_seq);
	ret2 = file_check_and_advance_wb_err(file);

	return ret ?: ret2;
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_put(&trans, iter);

	return bch2_trans_exit(&trans) ?: ret;
}
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c,
				POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	page_mkclean(page);
	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
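/*
 * i.e. zero out the partial block(s) of the page straddling the new EOF;
 * whole pages past i_size are dropped wholesale by
 * truncate_setsize()/bch2_fpunch() rather than zeroed here.
 */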
static int bch2_extend(struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);
	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	/*
	 * fetch current on disk i_size: inode is locked, i_size can only
	 * increase underneath us:
	 */
	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
	ret = PTR_ERR_OR_ZERO(iter);
	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);

	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(inode, &inode_u, iattr);
		goto err;
	}

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode->v.i_ino,
			  round_up(iattr->ia_size, block_bytes(c)) >> 9,
			  U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret))
		goto err;

	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);

	return ret;
}
/* fallocate: */

static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
	u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
	int ret = 0;
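	/*
	 * Only whole blocks can be discarded from the extents btree, hence
	 * the rounding above; the unaligned head and tail of the range are
	 * zeroed in the page cache by __bch2_truncate_page() below instead.
	 */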
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME) ?: ret;
	mutex_unlock(&inode->ei_update_lock);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);

	return ret;
}
2455 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2456 loff_t offset, loff_t len,
2459 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2460 struct address_space *mapping = inode->v.i_mapping;
2461 struct bkey_buf copy;
2462 struct btree_trans trans;
2463 struct btree_iter *src, *dst, *del;
2464 loff_t shift, new_size;
2468 if ((offset | len) & (block_bytes(c) - 1))
2472 * We need i_mutex to keep the page cache consistent with the extents
2473 * btree, and the btree consistent with i_size - we don't need outside
2474 * locking for the extents btree itself, because we're using linked
2477 inode_lock(&inode->v);
2478 inode_dio_wait(&inode->v);
2479 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2483 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2487 if (offset >= inode->v.i_size)
2490 src_start = U64_MAX;
2494 if (offset + len >= inode->v.i_size)
2497 src_start = offset + len;
2501 new_size = inode->v.i_size + shift;
2503 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2508 i_size_write(&inode->v, new_size);
2509 mutex_lock(&inode->ei_update_lock);
2510 ret = bch2_write_inode_size(c, inode, new_size,
2511 ATTR_MTIME|ATTR_CTIME);
2512 mutex_unlock(&inode->ei_update_lock);
2514 s64 i_sectors_delta = 0;
2516 ret = bch2_fpunch(c, inode->v.i_ino,
2517 offset >> 9, (offset + len) >> 9,
2518 &inode->ei_journal_seq,
2520 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2526 bch2_bkey_buf_init(©);
2527 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
2528 src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
2529 POS(inode->v.i_ino, src_start >> 9),
2531 dst = bch2_trans_copy_iter(&trans, src);
2532 del = bch2_trans_copy_iter(&trans, src);
2534 while (ret == 0 || ret == -EINTR) {
2535 struct disk_reservation disk_res =
2536 bch2_disk_reservation_init(c, 0);
2537 struct bkey_i delete;
2539 struct bpos next_pos;
2540 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2541 struct bpos atomic_end;
2542 unsigned trigger_flags = 0;
2545 ? bch2_btree_iter_peek_prev(src)
2546 : bch2_btree_iter_peek(src);
2547 if ((ret = bkey_err(k)))
2550 if (!k.k || k.k->p.inode != inode->v.i_ino)
2554 bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2557 bch2_bkey_buf_reassemble(©, c, k);
2560 bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2561 bch2_cut_front(move_pos, copy.k);
2563 copy.k->k.p.offset += shift >> 9;
2564 bch2_btree_iter_set_pos(dst, bkey_start_pos(©.k->k));
2566 ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
2570 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2572 move_pos = atomic_end;
2573 move_pos.offset -= shift >> 9;
2576 bch2_cut_back(atomic_end, copy.k);
2580 bkey_init(&delete.k);
2581 delete.k.p = copy.k->k.p;
2582 delete.k.size = copy.k->k.size;
2583 delete.k.p.offset -= shift >> 9;
2584 bch2_btree_iter_set_pos(del, bkey_start_pos(&delete.k));
2586 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2588 if (copy.k->k.size == k.k->size) {
2590 * If we're moving the entire extent, we can skip
2593 trigger_flags |= BTREE_TRIGGER_NORUN;
2595 /* We might end up splitting compressed extents: */
2597 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2599 ret = bch2_disk_reservation_get(c, &disk_res,
2600 copy.k->k.size, nr_ptrs,
2601 BCH_DISK_RESERVATION_NOFAIL);
2605 ret = bch2_trans_update(&trans, del, &delete, trigger_flags) ?:
2606 bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
2607 bch2_trans_commit(&trans, &disk_res,
2608 &inode->ei_journal_seq,
2609 BTREE_INSERT_NOFAIL);
2610 bch2_disk_reservation_put(c, &disk_res);
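/*
 * Note (annotation): the delete at the old position and the insert at
 * the shifted position go through a single bch2_trans_commit(), so a
 * crash mid-operation leaves every extent either fully moved or
 * untouched, never lost or duplicated.
 */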
2612 if (!ret)
2613 bch2_btree_iter_set_pos(src, next_pos);
2614 }
2615 bch2_trans_iter_put(&trans, del);
2616 bch2_trans_iter_put(&trans, dst);
2617 bch2_trans_iter_put(&trans, src);
2618 bch2_trans_exit(&trans);
2619 bch2_bkey_buf_exit(&copy, c);
2621 if (ret)
2622 goto err;
2624 if (!insert) {
2625 i_size_write(&inode->v, new_size);
2626 mutex_lock(&inode->ei_update_lock);
2627 ret = bch2_write_inode_size(c, inode, new_size,
2628 ATTR_MTIME|ATTR_CTIME);
2629 mutex_unlock(&inode->ei_update_lock);
2630 }
2631 err:
2632 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2633 inode_unlock(&inode->v);
2634 return ret;
2635 }
2637 static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
2638 u64 start_sector, u64 end_sector)
2640 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2641 struct btree_trans trans;
2642 struct btree_iter *iter;
2643 struct bpos end_pos = POS(inode->v.i_ino, end_sector);
2644 unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2645 int ret = 0;
2647 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2649 iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
2650 POS(inode->v.i_ino, start_sector),
2651 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
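/*
 * Note (annotation): BTREE_ITER_SLOTS makes peek_slot() return a key
 * for every position, holes included, so each iteration below covers
 * exactly one extent-or-hole and, where needed, replaces it with a
 * KEY_TYPE_reservation key - allocated-but-unwritten space that reads
 * back as zeros.
 */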
2653 while (!ret && bkey_cmp(iter->pos, end_pos) < 0) {
2654 s64 i_sectors_delta = 0;
2655 struct disk_reservation disk_res = { 0 };
2656 struct quota_res quota_res = { 0 };
2657 struct bkey_i_reservation reservation;
2658 struct bkey_s_c k;
2659 unsigned sectors;
2661 bch2_trans_begin(&trans);
2663 k = bch2_btree_iter_peek_slot(iter);
2664 if ((ret = bkey_err(k)))
2665 goto bkey_err;
2667 /* already reserved */
2668 if (k.k->type == KEY_TYPE_reservation &&
2669 bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2670 bch2_btree_iter_next_slot(iter);
2671 continue;
2672 }
2674 if (bkey_extent_is_data(k.k) &&
2675 !(mode & FALLOC_FL_ZERO_RANGE)) {
2676 bch2_btree_iter_next_slot(iter);
2677 continue;
2678 }
2680 bkey_reservation_init(&reservation.k_i);
2681 reservation.k.type = KEY_TYPE_reservation;
2682 reservation.k.p = k.k->p;
2683 reservation.k.size = k.k->size;
2685 bch2_cut_front(iter->pos, &reservation.k_i);
2686 bch2_cut_back(end_pos, &reservation.k_i);
2688 sectors = reservation.k.size;
2689 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
2691 if (!bkey_extent_is_allocation(k.k)) {
2692 ret = bch2_quota_reservation_add(c, inode,
2693 &quota_res,
2694 sectors, true);
2695 if (unlikely(ret))
2696 goto bkey_err;
2697 }
2699 if (reservation.v.nr_replicas < replicas ||
2700 bch2_bkey_sectors_compressed(k)) {
2701 ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2702 replicas, 0);
2703 if (unlikely(ret))
2704 goto bkey_err;
2706 reservation.v.nr_replicas = disk_res.nr_replicas;
2707 }
2709 ret = bch2_extent_update(&trans, iter, &reservation.k_i,
2710 &disk_res, &inode->ei_journal_seq,
2711 0, &i_sectors_delta);
2712 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
2713 bkey_err:
2714 bch2_quota_reservation_put(c, inode, &quota_res);
2715 bch2_disk_reservation_put(c, &disk_res);
2716 if (ret == -EINTR)
2717 ret = 0;
2718 }
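/*
 * Note (annotation): -EINTR here is a btree transaction restart, not a
 * signal - it's cleared so the loop retries the same slot instead of
 * failing the whole fallocate.
 */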
2719 bch2_trans_iter_put(&trans, iter);
2720 bch2_trans_exit(&trans);
2721 return ret;
2722 }
2724 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2725 loff_t offset, loff_t len)
2727 struct address_space *mapping = inode->v.i_mapping;
2728 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2729 loff_t end = offset + len;
2730 loff_t block_start = round_down(offset, block_bytes(c));
2731 loff_t block_end = round_up(end, block_bytes(c));
2732 int ret;
2734 inode_lock(&inode->v);
2735 inode_dio_wait(&inode->v);
2736 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2738 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2739 ret = inode_newsize_ok(&inode->v, end);
2740 if (ret)
2741 goto err;
2742 }
2744 if (mode & FALLOC_FL_ZERO_RANGE) {
2745 ret = __bch2_truncate_page(inode,
2746 offset >> PAGE_SHIFT,
2747 offset, end);
2749 if (!ret &&
2750 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2751 ret = __bch2_truncate_page(inode,
2752 end >> PAGE_SHIFT,
2753 offset, end);
2755 if (unlikely(ret))
2756 goto err;
2758 truncate_pagecache_range(&inode->v, offset, end - 1);
2759 }
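/*
 * Note (annotation): the extents btree only tracks whole blocks, so the
 * partial head/tail pages of the range are zeroed through the page
 * cache by __bch2_truncate_page(); truncate_pagecache_range() then
 * drops the covered pages so reads are served from the btree again.
 */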
2761 ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
2762 if (ret)
2763 goto err;
2765 /*
2766 * Do we need to extend the file?
2767 *
2768 * If we zeroed up to the end of the file, we dropped whatever writes
2769 * were going to write out the current i_size, so we have to extend
2770 * manually even if FL_KEEP_SIZE was set:
2771 */
2772 if (end >= inode->v.i_size &&
2773 (!(mode & FALLOC_FL_KEEP_SIZE) ||
2774 (mode & FALLOC_FL_ZERO_RANGE))) {
2776 /*
2777 * Sync existing appends before extending i_size,
2778 * as in bch2_extend():
2779 */
2780 ret = filemap_write_and_wait_range(mapping,
2781 inode->ei_inode.bi_size, S64_MAX);
2782 if (ret)
2783 goto err;
2785 if (mode & FALLOC_FL_KEEP_SIZE)
2786 end = inode->v.i_size;
2787 else
2788 i_size_write(&inode->v, end);
2790 mutex_lock(&inode->ei_update_lock);
2791 ret = bch2_write_inode_size(c, inode, end, 0);
2792 mutex_unlock(&inode->ei_update_lock);
2793 }
2794 err:
2795 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2796 inode_unlock(&inode->v);
2798 return ret;
2799 }
2800 long bch2_fallocate_dispatch(struct file *file, int mode,
2801 loff_t offset, loff_t len)
2803 struct bch_inode_info *inode = file_bch_inode(file);
2804 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2805 long ret;
2807 if (!percpu_ref_tryget(&c->writes))
2808 return -EROFS;
2810 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2811 ret = bchfs_fallocate(inode, mode, offset, len);
2812 else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2813 ret = bchfs_fpunch(inode, offset, len);
2814 else if (mode == FALLOC_FL_INSERT_RANGE)
2815 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
2816 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
2817 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
2818 else
2819 ret = -EOPNOTSUPP;
2821 percpu_ref_put(&c->writes);
2823 return ret;
2824 }
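/*
 * Userspace view of the mode combinations dispatched above
 * (illustrative, not from the original source):
 *
 *	fallocate(fd, 0, off, len);				preallocate
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);		zero range
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, off, len);
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);
 *
 * Anything else falls through to -EOPNOTSUPP.
 */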
2826 static void mark_range_unallocated(struct bch_inode_info *inode,
2827 loff_t start, loff_t end)
2829 pgoff_t index = start >> PAGE_SHIFT;
2830 pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
2831 struct pagevec pvec;
2833 pagevec_init(&pvec);
2835 do {
2836 unsigned nr_pages, i, j;
2838 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
2839 &index, end_index);
2840 if (nr_pages == 0)
2841 break;
2843 for (i = 0; i < nr_pages; i++) {
2844 struct page *page = pvec.pages[i];
2845 struct bch_page_state *s;
2847 lock_page(page);
2848 s = bch2_page_state(page);
2850 if (s) {
2851 spin_lock(&s->lock);
2852 for (j = 0; j < PAGE_SECTORS; j++)
2853 s->s[j].nr_replicas = 0;
2854 spin_unlock(&s->lock);
2855 }
2857 unlock_page(page);
2858 }
2859 pagevec_release(&pvec);
2860 } while (index <= end_index);
2861 }
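/*
 * Note on the loop above (annotation): after a remap the source extents
 * are shared (reflinked), so cached pages no longer have disk space
 * reserved for overwrites; clearing nr_replicas forces the next write
 * to those sectors to take a fresh disk reservation.
 */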
2863 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
2864 struct file *file_dst, loff_t pos_dst,
2865 loff_t len, unsigned remap_flags)
2867 struct bch_inode_info *src = file_bch_inode(file_src);
2868 struct bch_inode_info *dst = file_bch_inode(file_dst);
2869 struct bch_fs *c = src->v.i_sb->s_fs_info;
2870 s64 i_sectors_delta = 0;
2871 u64 aligned_len;
2872 loff_t ret = 0;
2874 if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
2875 return -EINVAL;
2877 if (remap_flags & REMAP_FILE_DEDUP)
2878 return -EOPNOTSUPP;
2880 if ((pos_src & (block_bytes(c) - 1)) ||
2881 (pos_dst & (block_bytes(c) - 1)))
2882 return -EINVAL;
2884 if (src == dst &&
2885 abs(pos_src - pos_dst) < len)
2886 return -EINVAL;
2888 bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2890 file_update_time(file_dst);
2892 inode_dio_wait(&src->v);
2893 inode_dio_wait(&dst->v);
2895 ret = generic_remap_file_range_prep(file_src, pos_src,
2896 file_dst, pos_dst,
2897 &len, remap_flags);
2898 if (ret < 0 || len == 0)
2899 goto err;
2901 aligned_len = round_up((u64) len, block_bytes(c));
2903 ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
2904 pos_dst, pos_dst + len - 1);
2905 if (ret)
2906 goto err;
2908 mark_range_unallocated(src, pos_src, pos_src + aligned_len);
2910 ret = bch2_remap_range(c,
2911 POS(dst->v.i_ino, pos_dst >> 9),
2912 POS(src->v.i_ino, pos_src >> 9),
2913 aligned_len >> 9,
2914 &dst->ei_journal_seq,
2915 pos_dst + len, &i_sectors_delta);
2916 if (ret < 0)
2917 goto err;
2919 /*
2920 * due to alignment, we might have remapped slightly more than requested
2921 */
2922 ret = min((u64) ret << 9, (u64) len);
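/*
 * Worked example of the clamp above (illustrative): with 4k blocks, a
 * 1000-byte request gives aligned_len = 4096, bch2_remap_range() remaps
 * 8 sectors, (u64) ret << 9 = 4096, and min() trims the value reported
 * to the caller back to the 1000 bytes actually requested.
 */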
2924 /* XXX get a quota reservation */
2925 i_sectors_acct(c, dst, NULL, i_sectors_delta);
2927 spin_lock(&dst->v.i_lock);
2928 if (pos_dst + ret > dst->v.i_size)
2929 i_size_write(&dst->v, pos_dst + ret);
2930 spin_unlock(&dst->v.i_lock);
2931 err:
2932 bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2934 return ret;
2935 }
2939 static int page_data_offset(struct page *page, unsigned offset)
2941 struct bch_page_state *s = bch2_page_state(page);
2942 unsigned i;
2944 if (s)
2945 for (i = offset >> 9; i < PAGE_SECTORS; i++)
2946 if (s->s[i].state >= SECTOR_DIRTY)
2947 return i << 9;
2949 return -1;
2950 }
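/*
 * Note (annotation): the check above relies on the sector state
 * ordering UNALLOCATED < RESERVED < DIRTY < ALLOCATED, so
 * ">= SECTOR_DIRTY" means "data a SEEK_DATA caller must see", whether
 * still dirty in the page cache or already written out.
 */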
2952 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
2953 loff_t start_offset,
2954 loff_t end_offset)
2956 struct address_space *mapping = vinode->i_mapping;
2957 struct page *page;
2958 pgoff_t start_index = start_offset >> PAGE_SHIFT;
2959 pgoff_t end_index = end_offset >> PAGE_SHIFT;
2960 pgoff_t index = start_index;
2961 loff_t ret;
2962 int offset;
2964 while (index <= end_index) {
2965 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
2966 lock_page(page);
2968 offset = page_data_offset(page,
2969 page->index == start_index
2970 ? start_offset & (PAGE_SIZE - 1)
2971 : 0);
2972 if (offset >= 0) {
2973 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
2974 offset,
2975 start_offset, end_offset);
2976 unlock_page(page);
2977 put_page(page);
2978 return ret;
2979 }
2981 unlock_page(page);
2982 put_page(page);
2983 } else {
2984 break;
2985 }
2986 }
2988 return end_offset;
2989 }
2991 static loff_t bch2_seek_data(struct file *file, u64 offset)
2993 struct bch_inode_info *inode = file_bch_inode(file);
2994 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2995 struct btree_trans trans;
2996 struct btree_iter *iter;
2997 struct bkey_s_c k;
2998 u64 isize, next_data = MAX_LFS_FILESIZE;
2999 int ret;
3001 isize = i_size_read(&inode->v);
3002 if (offset >= isize)
3003 return -ENXIO;
3005 bch2_trans_init(&trans, c, 0, 0);
3007 for_each_btree_key(&trans, iter, BTREE_ID_extents,
3008 POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
3009 if (k.k->p.inode != inode->v.i_ino) {
3010 break;
3011 } else if (bkey_extent_is_data(k.k)) {
3012 next_data = max(offset, bkey_start_offset(k.k) << 9);
3013 break;
3014 } else if (k.k->p.offset >> 9 > isize)
3015 break;
3016 }
3017 bch2_trans_iter_put(&trans, iter);
3019 ret = bch2_trans_exit(&trans) ?: ret;
3020 if (ret)
3021 return ret;
3023 if (next_data > offset)
3024 next_data = bch2_seek_pagecache_data(&inode->v,
3025 offset, next_data);
3027 if (next_data >= isize)
3028 return -ENXIO;
3030 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
3031 }
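/*
 * Note on the function above (annotation): SEEK_DATA takes two passes
 * because dirty pages may not be in the extents btree yet - the btree
 * scan finds the next on-disk extent, then bch2_seek_pagecache_data()
 * may only pull the result earlier by finding dirty data in
 * [offset, next_data).
 */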
3033 static int __page_hole_offset(struct page *page, unsigned offset)
3035 struct bch_page_state *s = bch2_page_state(page);
3036 unsigned i;
3038 if (!s)
3039 return 0;
3041 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3042 if (s->s[i].state < SECTOR_DIRTY)
3043 return i << 9;
3045 return -1;
3046 }
3048 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3050 pgoff_t index = offset >> PAGE_SHIFT;
3051 struct page *page;
3052 int pg_offset;
3053 loff_t ret = -1;
3055 page = find_lock_page(mapping, index);
3056 if (!page)
3057 return offset;
3059 pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3060 if (pg_offset >= 0)
3061 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3063 unlock_page(page);
3065 return ret;
3066 }
3068 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3069 loff_t start_offset,
3070 loff_t end_offset)
3072 struct address_space *mapping = vinode->i_mapping;
3073 loff_t offset = start_offset, hole;
3075 while (offset < end_offset) {
3076 hole = page_hole_offset(mapping, offset);
3077 if (hole >= 0 && hole <= end_offset)
3078 return max(start_offset, hole);
3080 offset += PAGE_SIZE;
3081 offset &= PAGE_MASK;
3082 }
3084 return end_offset;
3085 }
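/*
 * Note (annotation): the hole scan is the mirror image of the data scan
 * but can't batch with find_get_pages_range() - an absent page already
 * means "hole", so every page position is probed with find_lock_page()
 * and only present pages get the per-sector check.
 */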
3087 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3089 struct bch_inode_info *inode = file_bch_inode(file);
3090 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3091 struct btree_trans trans;
3092 struct btree_iter *iter;
3093 struct bkey_s_c k;
3094 u64 isize, next_hole = MAX_LFS_FILESIZE;
3095 int ret;
3097 isize = i_size_read(&inode->v);
3098 if (offset >= isize)
3099 return -ENXIO;
3101 bch2_trans_init(&trans, c, 0, 0);
3103 for_each_btree_key(&trans, iter, BTREE_ID_extents,
3104 POS(inode->v.i_ino, offset >> 9),
3105 BTREE_ITER_SLOTS, k, ret) {
3106 if (k.k->p.inode != inode->v.i_ino) {
3107 next_hole = bch2_seek_pagecache_hole(&inode->v,
3108 offset, MAX_LFS_FILESIZE);
3109 break;
3110 } else if (!bkey_extent_is_data(k.k)) {
3111 next_hole = bch2_seek_pagecache_hole(&inode->v,
3112 max(offset, bkey_start_offset(k.k) << 9),
3113 k.k->p.offset << 9);
3115 if (next_hole < k.k->p.offset << 9)
3116 break;
3117 } else {
3118 offset = max(offset, bkey_start_offset(k.k) << 9);
3119 }
3120 }
3121 bch2_trans_iter_put(&trans, iter);
3123 ret = bch2_trans_exit(&trans) ?: ret;
3124 if (ret)
3125 return ret;
3127 if (next_hole > isize)
3128 next_hole = isize;
3130 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
3131 }
3133 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3134 {
3135 switch (whence) {
3136 case SEEK_SET:
3137 case SEEK_CUR:
3138 case SEEK_END:
3139 return generic_file_llseek(file, offset, whence);
3140 case SEEK_DATA:
3141 return bch2_seek_data(file, offset);
3142 case SEEK_HOLE:
3143 return bch2_seek_hole(file, offset);
3144 }
3146 return -EINVAL;
3147 }
3149 void bch2_fs_fsio_exit(struct bch_fs *c)
3151 bioset_exit(&c->dio_write_bioset);
3152 bioset_exit(&c->dio_read_bioset);
3153 bioset_exit(&c->writepage_bioset);
3154 }
3156 int bch2_fs_fsio_init(struct bch_fs *c)
3158 int ret = 0;
3160 pr_verbose_init(c->opts, "");
3162 if (bioset_init(&c->writepage_bioset,
3163 4, offsetof(struct bch_writepage_io, op.wbio.bio),
3164 BIOSET_NEED_BVECS) ||
3165 bioset_init(&c->dio_read_bioset,
3166 4, offsetof(struct dio_read, rbio.bio),
3167 BIOSET_NEED_BVECS) ||
3168 bioset_init(&c->dio_write_bioset,
3169 4, offsetof(struct dio_write, op.wbio.bio),
3170 BIOSET_NEED_BVECS))
3171 ret = -ENOMEM;
3173 pr_verbose_init(c->opts, "ret %i", ret);
3175 return ret;
3176 }
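/*
 * Note (annotation): the offsetof() arguments above are each bioset's
 * front pad - the bio lives at a fixed offset inside its containing
 * operation, so completion paths can recover the container without a
 * separate allocation, e.g.:
 *
 *	struct bch_writepage_io *io =
 *		container_of(bio, struct bch_writepage_io, op.wbio.bio);
 */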
3177 #endif /* NO_BCACHEFS_FS */