// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
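
/*
 * bcachefs pagecache and IO paths: readpage(s), writepage(s), buffered
 * and O_DIRECT reads and writes, truncate, fallocate and reflink.
 */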
struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct closure			cl;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	struct bch_read_bio		rbio;
};
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
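
/*
 * Quota reservations are in-memory counts against the inode's quota,
 * taken up front with KEY_TYPE_QUOTA_PREALLOC and either released or
 * converted to real usage as the IO that needed them completes.
 */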
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	if (!sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
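
/*
 * Per-sector pagecache state: for each sector of a page we track how many
 * replicas are allocated on disk, how much space is reserved, and whether
 * the sector is counted in i_blocks - this is what lets buffered writes
 * reserve space up front so that writeback can't fail with -ENOSPC.
 */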
/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas: */
	unsigned		nr_replicas:3;

	/* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
	unsigned		replicas_reserved:3;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	}			state:2;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	struct bch_page_sector	s[PAGE_SECTORS];
};
static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	struct bch_page_state *s = __bch2_page_state(page);

	if (!s)
		return;

	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(s);
}

static void bch2_page_state_release(struct page *page)
{
	struct bch_page_state *s = bch2_page_state(page);

	if (!s)
		return;

	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(s);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long) s);
	SetPagePrivate(page);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
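
/*
 * Example (illustrative): with nr_replicas = 2, a sector that already has
 * one allocated replica (s->nr_replicas = 1) and no reservation needs one
 * more sector of reservation; a fully allocated or fully reserved sector
 * needs none.
 */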
static int bch2_get_page_disk_reservation(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}
struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}

static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}
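
/*
 * Reservation lifecycle for a buffered write (sketch): bch2_write_begin()
 * takes disk and quota reservations for the range being written via
 * bch2_page_reservation_get(); after the user copy, bch2_set_page_dirty()
 * transfers the disk reservation into the per-sector state and accounts
 * the newly dirtied sectors; whatever is left over is released by
 * bch2_page_reservation_put().
 */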
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		if (s->s[i].state == SECTOR_DIRTY) {
			dirty_sectors++;
			s->s[i].state = SECTOR_UNALLOCATED;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	if (dirty_sectors)
		i_sectors_acct(c, inode, NULL, -dirty_sectors);

	bch2_page_state_release(page);
}

static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		BUG_ON(sectors > res->disk.sectors);
		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;

		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	spin_unlock(&s->lock);

	if (dirty_sectors)
		i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		__set_page_dirty_nobuffers(page);
}
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	int ret = VM_FAULT_LOCKED;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	wait_for_stable_page(page);
out:
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->v.i_sb);

	bch2_page_reservation_put(c, inode, &res);

	return ret;
}
void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(!PageLocked(newpage));

	ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

static inline void page_state_init_for_read(struct page *page)
{
	SetPagePrivate(page);
}
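
/*
 * readpages_iter tracks the pages we've been asked to read ahead into,
 * adding them to the pagecache in batches as the read bio is built up.
 */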
struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		nr_added;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	memset(iter, 0, sizeof(*iter));

	iter->mapping	= mapping;
	iter->offset	= list_last_entry(pages, struct page, lru)->index;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	while (!list_empty(pages)) {
		struct page *page = list_last_entry(pages, struct page, lru);

		__bch2_page_state_create(page, __GFP_NOFAIL);

		iter->pages[iter->nr_pages++] = page;
		list_del(&page->lru);
	}

	return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	struct page *page;
	unsigned i;
	int ret;

	BUG_ON(iter->idx > iter->nr_added);
	BUG_ON(iter->nr_added > iter->nr_pages);

	if (iter->idx < iter->nr_added)
		goto out;

	while (1) {
		if (iter->idx == iter->nr_pages)
			return NULL;

		ret = add_to_page_cache_lru_vec(iter->mapping,
				iter->pages	+ iter->nr_added,
				iter->nr_pages	- iter->nr_added,
				iter->offset	+ iter->nr_added,
				GFP_NOFS);
		if (ret > 0)
			break;

		page = iter->pages[iter->nr_added];
		iter->idx++;
		iter->nr_added++;

		__bch2_page_state_release(page);
		put_page(page);
	}

	iter->nr_added += ret;

	for (i = iter->idx; i < iter->nr_added; i++)
		put_page(iter->pages[i]);
out:
	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}
static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_allocated(k);
	unsigned state = k.k->type == KEY_TYPE_reservation
		? SECTOR_RESERVED
		: SECTOR_ALLOCATED;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = bch2_page_state(bv.bv_page);
		unsigned i;

		for (i = bv.bv_offset >> 9;
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
			s->s[i].state = state;
		}
	}
}
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}
static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();

	while (1) {
		BKEY_PADDED(k) tmp;
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;

		bch2_btree_iter_set_pos(iter,
				POS(inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			break;

		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		ret = bch2_read_indirect_extent(trans,
					&offset_into_extent, &tmp.k);
		if (ret)
			break;

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(trans);

		if (readpages_iter) {
			bool want_full_extent = false;

			if (bkey_extent_is_data(k.k)) {
				struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
				const union bch_extent_entry *i;
				struct extent_ptr_decoded p;

				bkey_for_each_ptr_decode(k.k, ptrs, p, i)
					want_full_extent |= ((p.crc.csum_type != 0) |
							     (p.crc.compression_type != 0));
			}

			readpage_bio_extend(readpages_iter, &rbio->bio,
					    sectors, want_full_extent);
		}

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);

		bch2_read_extent(c, rbio, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			return;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}

	bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
	bio_endio(&rbio->bio);
}
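
/*
 * On success bchfs_read() returns from within the loop once
 * BCH_READ_LAST_FRAGMENT is set; the error path at the bottom only runs
 * when the btree lookup itself fails.
 */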
int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_PAGES);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
			   &readpages_iter);
	}

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);

	return 0;
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_page_state_create(page, __GFP_NOFAIL);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTOR_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bchfs_read(&trans, iter, rbio, inum, NULL);

	bch2_trans_exit(&trans);
}

int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
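
/*
 * writepage(s): a bch_writepage_state batches contiguous dirty pages into
 * a single bch_writepage_io - and thus a single bch2_write call - flushing
 * whenever the next page can't be appended to the current bio.
 */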
struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	 * BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      BIO_MAX_PAGES,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;

	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op_journal_seq_set(op, &inode->ei_journal_seq);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
}
static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	ret = bch2_get_page_disk_reservation(c, inode, page, true);
	if (ret) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		return 0;
	}

	/* Before unlocking the page, get copy of reservations: */
	orig = *s;

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;

		for (i = offset; i < offset + sectors; i++) {
			reserved_sectors += orig.s[i].replicas_reserved;
			dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
		}

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		if (wbc->sync_mode == WB_SYNC_ALL)
			w->io->op.wbio.bio.bi_opf |= REQ_SYNC;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	pagecache_add_put(&mapping->add_lock);
	kfree(res);
	*fsdata = NULL;
	return ret;
}
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	pagecache_add_put(&mapping->add_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}
#define WRITE_BATCH_PAGES	32
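
/*
 * Buffered writes are done in batches of up to WRITE_BATCH_PAGES: we grab
 * and reserve space for a batch of pages, copy from the iov_iter with all
 * the pages locked, then dirty and release them together.
 */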
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(!len);
	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
		if (!pages[i]) {
			nr_pages = i;
			ret = -ENOMEM;
			goto out;
		}
	}

	len = min_t(unsigned, len,
		    nr_pages * PAGE_SIZE - offset);

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);
retry_reservation:
		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);

		if (ret && !PageUptodate(page)) {
			ret = bch2_read_single_page(page, mapping);
			if (!ret)
				goto retry_reservation;
		}

		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
						iter, pg_offset, pg_len);

		if (!pg_copied)
			break;

		flush_dcache_page(page);
		iov_iter_advance(iter, pg_copied);
		copied += pg_copied;
	}

	if (!copied)
		goto out;

	if (copied < len &&
	    ((offset + copied) & (PAGE_SIZE - 1))) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];

		if (!PageUptodate(page)) {
			zero_user(page, 0, PAGE_SIZE);
			copied -= (offset + copied) & (PAGE_SIZE - 1);
		}
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	pagecache_add_get(&mapping->add_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
			      PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	pagecache_add_put(&mapping->add_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
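
/*
 * Note the closure trick above: for async kiocbs dio->cl is initialized so
 * that the final closure_put() runs bch2_dio_read_complete() and tears the
 * dio down, avoiding a separate atomic op on the completion path.
 */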
/* O_DIRECT writes */

static void bch2_dio_write_loop_async(struct closure *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned;
	u64 new_i_size;
	loff_t offset;
	bool sync;
	long ret;

	if (dio->loop)
		goto loop;

	inode_dio_begin(&inode->v);
	__pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	offset = req->ki_pos + (dio->op.written << 9);
	ret = write_invalidate_inode_pages_range(mapping, offset,
					offset + iov_iter_count(&dio->iter) - 1);
	if (unlikely(ret))
		goto err;

	while (1) {
		offset = req->ki_pos + (dio->op.written << 9);

		BUG_ON(current->pagecache_lock);
		current->pagecache_lock = &mapping->add_lock;
		if (kthread)
			use_mm(dio->mm);

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		if (kthread)
			unuse_mm(dio->mm);
		current->pagecache_lock = NULL;

		if (unlikely(ret < 0))
			goto err;

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);
			ret = -EFAULT;
			goto err;
		}

		/* gup might have faulted pages back in: */
		ret = write_invalidate_inode_pages_range(mapping, offset,
					offset + bio->bi_iter.bi_size - 1);
		if (unlikely(ret))
			goto err;

		dio->op.pos = POS(inode->v.i_ino, offset >> 9);

		task_io_account_write(bio->bi_iter.bi_size);

		closure_call(&dio->op.cl, bch2_write, NULL, &dio->cl);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->op.error = -ENOMEM;
					goto err_wait_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
err_wait_io:
		dio->loop = true;

		if (!dio->sync) {
			continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
			return -EIOCBQUEUED;
		}

		closure_sync(&dio->cl);
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		dio->op.i_sectors_delta = 0;

		new_i_size = req->ki_pos + ((u64) dio->op.written << 9);

		spin_lock(&inode->v.i_lock);
		if (new_i_size > inode->v.i_size)
			i_size_write(&inode->v, new_i_size);
		spin_unlock(&inode->v.i_lock);

		bio_for_each_segment_all(bv, bio, iter)
			put_page(bv->bv_page);
		if (!dio->iter.count || dio->op.error)
			break;

		bio_reset(bio);
	}

	ret = dio->op.error ?: ((long) dio->op.written << 9);
err:
	__pagecache_block_put(&mapping->add_lock);
	bch2_disk_reservation_put(c, &dio->op.res);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	sync = dio->sync;

	closure_debug_destroy(&dio->cl);
	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret, 0);
		ret = -EIOCBQUEUED;
	}
	return ret;
}
static void bch2_dio_write_loop_async(struct closure *cl)
{
	struct dio_write *dio = container_of(cl, struct dio_write, cl);

	bch2_dio_write_loop(dio);
}

static int bch2_direct_IO_write(struct kiocb *req,
				struct iov_iter *iter,
				bool swap)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_write *dio;
	struct bio *bio;
	ssize_t ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(!iter->count))
		return 0;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	closure_init(&dio->cl, NULL);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) ||
		req->ki_pos + iter->count > inode->v.i_size;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->iter		= *iter;

	bch2_write_op_init(&dio->op, c, opts);
	dio->op.target		= opts.foreground_target;
	op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
	dio->op.write_point	= writepoint_hashed((unsigned long) current);
	dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION;

	if ((req->ki_flags & IOCB_DSYNC) &&
	    !c->opts.journal_flush_disabled)
		dio->op.flags |= BCH_WRITE_FLUSH;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err;

	dio->op.nr_replicas	= dio->op.opts.data_replicas;

	ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9,
					dio->op.opts.data_replicas, 0);
	if (unlikely(ret) &&
	    !bch2_check_range_allocated(c, POS(inode->v.i_ino,
					       req->ki_pos >> 9),
					iter->count >> 9,
					dio->op.opts.data_replicas))
		goto err;

	return bch2_dio_write_loop(dio);
err:
	bch2_disk_reservation_put(c, &dio->op.res);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	closure_debug_destroy(&dio->cl);
	bio_put(bio);
	return ret;
}
ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
	struct blk_plug plug;
	ssize_t ret;

	blk_start_plug(&plug);
	ret = iov_iter_rw(iter) == WRITE
		? bch2_direct_IO_write(req, iter, false)
		: bch2_direct_IO_read(req, iter);
	blk_finish_plug(&plug);

	return ret;
}

static ssize_t
bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	return bch2_direct_IO_write(iocb, iter, true);
}

static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	ret = file_remove_privs(file);
	if (ret)
		goto out;

	ret = file_update_time(file);
	if (ret)
		goto out;

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch2_direct_write(iocb, from)
		: bch2_buffered_write(iocb, from);

	if (likely(ret > 0))
		iocb->ki_pos += ret;
out:
	current->backing_dev_info = NULL;
	return ret;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
	bool direct = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;

	inode_lock(&inode->v);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __bch2_write_iter(iocb, from);
	inode_unlock(&inode->v);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
/* fsync: */

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
		goto out;

	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		return ret;
out:
	if (!c->opts.journal_flush_disabled)
		ret = bch2_journal_flush_seq(&c->journal,
					     inode->ei_journal_seq);
	ret2 = file_check_and_advance_wb_err(file);

	return ret ?: ret2;
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}

	return bch2_trans_exit(&trans) ?: ret;
}
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c,
				POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this wrong. ick.
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
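
/*
 * Truncate: partial pages at the new EOF are zeroed and their sectors
 * deallocated by __bch2_truncate_page(); extends write out the new i_size
 * first (bch2_extend()), while shrinks set BCH_INODE_I_SIZE_DIRTY for the
 * duration of dropping the extents past the new EOF.
 */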
static int bch2_extend(struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);
	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	/*
	 * fetch current on disk i_size: inode is locked, i_size can only
	 * increase underneath us:
	 */
	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
	ret = PTR_ERR_OR_ZERO(iter);
	bch2_trans_exit(&trans);

	if (ret)
		goto err;

	BUG_ON(inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(inode, &inode_u, iattr);
		goto err;
	}

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode->v.i_ino,
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret))
		goto err;

	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
err:
	pagecache_block_put(&mapping->add_lock);
	return ret;
}
/* fallocate: */

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
	u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}
err:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;
}
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct btree_trans trans;
	struct btree_iter *src, *dst, *del = NULL;
	loff_t shift, new_size;
	u64 src_start;
	int ret;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	if (insert) {
		ret = -EFBIG;
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			goto err;

		ret = -EINVAL;
		if (offset >= inode->v.i_size)
			goto err;

		src_start	= U64_MAX;
		shift		= len;
	} else {
		ret = -EINVAL;
		if (offset + len >= inode->v.i_size)
			goto err;

		src_start	= offset + len;
		shift		= -len;
	}

	new_size = inode->v.i_size + shift;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		goto err;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  offset >> 9, (offset + len) >> 9,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			goto err;
	}

	src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	BUG_ON(IS_ERR_OR_NULL(src));

	dst = bch2_trans_copy_iter(&trans, src);
	BUG_ON(IS_ERR_OR_NULL(dst));

	while (1) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		BKEY_PADDED(k) copy;
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned commit_flags = BTREE_INSERT_NOFAIL|
			BTREE_INSERT_ATOMIC|
			BTREE_INSERT_USE_RESERVE;

		k = insert
			? bch2_btree_iter_peek_prev(src)
			: bch2_btree_iter_peek(src);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));

		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
reassemble:
		bkey_reassemble(&copy.k, k);

		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0) {
			bch2_cut_front(move_pos, &copy.k);
			bch2_btree_iter_set_pos(src, bkey_start_pos(&copy.k.k));
		}

		copy.k.k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k.k));

		ret = bch2_extent_atomic_end(dst, &copy.k, &atomic_end);
		if (ret)
			goto bkey_err;

		if (bkey_cmp(atomic_end, copy.k.k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, &copy.k.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = src->pos;
		bch2_key_resize(&delete.k, copy.k.k.size);

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		/*
		 * If the new and old keys overlap (because we're moving an
		 * extent that's bigger than the amount we're collapsing by),
		 * we need to trim the delete key here so they don't overlap
		 * because overlaps on insertions aren't handled before
		 * triggers are run, so the overwrite will get double counted
		 * by the triggers machinery:
		 */
		if (insert &&
		    bkey_cmp(bkey_start_pos(&copy.k.k), delete.k.p) < 0) {
			bch2_cut_back(bkey_start_pos(&copy.k.k), &delete.k);
		} else if (!insert &&
			   bkey_cmp(copy.k.k.p,
				    bkey_start_pos(&delete.k)) > 0) {
			bch2_cut_front(copy.k.k.p, &delete);

			del = bch2_trans_copy_iter(&trans, src);
			BUG_ON(IS_ERR_OR_NULL(del));

			bch2_btree_iter_set_pos(del,
				bkey_start_pos(&delete.k));
		}

		bch2_trans_update(&trans, dst, &copy.k);
		bch2_trans_update(&trans, del ?: src, &delete);

		if (copy.k.k.size == k.k->size) {
			/*
			 * If we're moving the entire extent, we can skip
			 * running triggers:
			 */
			commit_flags |= BTREE_INSERT_NOMARK;
		} else {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k.k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		ret = bch2_trans_commit(&trans, &disk_res,
					&inode->ei_journal_seq,
					commit_flags);
		bch2_disk_reservation_put(c, &disk_res);
bkey_err:
		if (del)
			bch2_trans_iter_put(&trans, del);
		del = NULL;

		if (!ret)
			bch2_btree_iter_set_pos(src, next_pos);

		if (ret == -EINTR)
			ret = 0;
		if (ret)
			goto err;

		bch2_trans_cond_resched(&trans);
	}
	bch2_trans_unlock(&trans);

	if (!insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	bch2_trans_exit(&trans);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
	return ret;
}
2558 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2559 loff_t offset, loff_t len)
2561 struct address_space *mapping = inode->v.i_mapping;
2562 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2563 struct btree_trans trans;
2564 struct btree_iter *iter;
2565 struct bpos end_pos;
2566 loff_t end = offset + len;
2567 loff_t block_start = round_down(offset, block_bytes(c));
2568 loff_t block_end = round_up(end, block_bytes(c));
2570 unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2573 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2575 inode_lock(&inode->v);
2576 inode_dio_wait(&inode->v);
2577 pagecache_block_get(&mapping->add_lock);
2579 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2580 ret = inode_newsize_ok(&inode->v, end);
2585 if (mode & FALLOC_FL_ZERO_RANGE) {
2586 ret = __bch2_truncate_page(inode,
2587 offset >> PAGE_SHIFT,
2591 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2592 ret = __bch2_truncate_page(inode,
2599 truncate_pagecache_range(&inode->v, offset, end - 1);
2602 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2603 POS(inode->v.i_ino, block_start >> 9),
2604 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2605 end_pos = POS(inode->v.i_ino, block_end >> 9);
	while (bkey_cmp(iter->pos, end_pos) < 0) {
		s64 i_sectors_delta = 0;
		struct disk_reservation disk_res = { 0 };
		struct quota_res quota_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;
		unsigned sectors;

		k = bch2_btree_iter_peek_slot(iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		/* already reserved */
		if (k.k->type == KEY_TYPE_reservation &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_next_slot(iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_next_slot(iter);
			continue;
		}

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type = KEY_TYPE_reservation;
		reservation.k.p = k.k->p;
		reservation.k.size = k.k->size;

		bch2_cut_front(iter->pos, &reservation.k_i);
		bch2_cut_back(end_pos, &reservation.k);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);

		if (!bkey_extent_is_allocation(k.k)) {
			ret = bch2_quota_reservation_add(c, inode,
							 &quota_res,
							 sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		if (reservation.v.nr_replicas < replicas ||
		    bch2_extent_is_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res, sectors,
							replicas, 0);
			if (unlikely(ret))
				goto bkey_err;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		bch2_trans_begin_updates(&trans);

		ret = bch2_extent_update(&trans, iter, &reservation.k_i,
					 &disk_res, &inode->ei_journal_seq,
					 0, &i_sectors_delta);
		i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		bch2_disk_reservation_put(c, &disk_res);
		if (ret)
			goto err;
	}
	/*
	 * Do we need to extend the file?
	 *
	 * If we zeroed up to the end of the file, we dropped whatever writes
	 * were going to write out the current i_size, so we have to extend
	 * manually even if FALLOC_FL_KEEP_SIZE was set:
	 */
	if (end >= inode->v.i_size &&
	    (!(mode & FALLOC_FL_KEEP_SIZE) ||
	     (mode & FALLOC_FL_ZERO_RANGE))) {
		struct btree_iter *inode_iter;
		struct bch_inode_unpacked inode_u;

		do {
			bch2_trans_begin(&trans);
			inode_iter = bch2_inode_peek(&trans, &inode_u,
						     inode->v.i_ino, 0);
			ret = PTR_ERR_OR_ZERO(inode_iter);
		} while (ret == -EINTR);
		bch2_trans_unlock(&trans);

		if (ret)
			goto err;

		/*
		 * Sync existing appends before extending i_size,
		 * as in bch2_extend():
		 */
		ret = filemap_write_and_wait_range(mapping,
						   inode_u.bi_size, S64_MAX);
		if (ret)
			goto err;

		if (mode & FALLOC_FL_KEEP_SIZE)
			end = inode->v.i_size;
		i_size_write(&inode->v, end);

		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	bch2_trans_exit(&trans);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
	return ret;
}
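/*
 * fallocate(2) entry point - dispatch on the mode bits:
 *
 *   0/FALLOC_FL_KEEP_SIZE/FALLOC_FL_ZERO_RANGE -> bchfs_fallocate()
 *   FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE   -> bchfs_fpunch()
 *   FALLOC_FL_INSERT_RANGE                     -> bchfs_fcollapse_finsert(insert)
 *   FALLOC_FL_COLLAPSE_RANGE                   -> bchfs_fcollapse_finsert(collapse)
 *
 * Unsupported combinations get -EOPNOTSUPP:
 */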
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bchfs_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bchfs_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_INSERT_RANGE)
		return bchfs_fcollapse_finsert(inode, offset, len, true);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bchfs_fcollapse_finsert(inode, offset, len, false);

	return -EOPNOTSUPP;
}
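/*
 * After a range has been reflinked its extents are shared, so writeback can
 * no longer assume disk space is reserved for it at the old replication
 * level: zero out nr_replicas in the pagecache state for the range so the
 * next write takes a fresh disk reservation.
 */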
static void mark_range_unallocated(struct bch_inode_info *inode,
				   loff_t start, loff_t end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
	struct pagevec pvec;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i, j;

		nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
						&index, end_index);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct bch_page_state *s;

			lock_page(page);
			s = bch2_page_state(page);

			if (s) {
				spin_lock(&s->lock);
				for (j = 0; j < PAGE_SECTORS; j++)
					s->s[j].nr_replicas = 0;
				spin_unlock(&s->lock);
			}

			unlock_page(page);
		}
		pagevec_release(&pvec);
	} while (index <= end_index);
}
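/*
 * remap_file_range: reflink. Source and destination offsets must be block
 * aligned; because the length is rounded up to a full block we may remap
 * slightly more than requested, hence the clamping of the return value
 * below:
 */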
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	loff_t aligned_len;
	s64 i_sectors_delta = 0;
	loff_t ret = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;
	bch2_lock_inodes(INODE_LOCK, src, dst);

	file_update_time(file_dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	__pagecache_block_get(&src->v.i_mapping->add_lock);
	__pagecache_block_get(&dst->v.i_mapping->add_lock);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up(len, block_bytes(c));

	ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + aligned_len);
	if (ret)
		goto err;
	mark_range_unallocated(src, pos_src, pos_src + aligned_len);

	ret = bch2_remap_range(c,
			       POS(dst->v.i_ino, pos_dst >> 9),
			       POS(src->v.i_ino, pos_src >> 9),
			       aligned_len >> 9,
			       &dst->ei_journal_seq,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * Due to alignment, we might have remapped slightly more than
	 * requested:
	 */
	ret = min(ret, len);

	/* XXX get a quota reservation */
	i_sectors_acct(c, dst, NULL, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + len > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + len);
	spin_unlock(&dst->v.i_lock);
err:
	__pagecache_block_put(&dst->v.i_mapping->add_lock);
	__pagecache_block_put(&src->v.i_mapping->add_lock);

	bch2_unlock_inodes(INODE_LOCK, src, dst);

	return ret;
}
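/*
 * fseek: SEEK_DATA and SEEK_HOLE have to consider both the extent btree and
 * the pagecache, since dirty pages may contain data that hasn't been written
 * back yet:
 */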
static int page_data_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (s)
		for (i = offset >> 9; i < PAGE_SECTORS; i++)
			if (s->s[i].state >= SECTOR_DIRTY)
				return i << 9;

	return -1;
}
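/*
 * Scan the pagecache for the first page in [start_offset, end_offset) with a
 * sector that's at least SECTOR_DIRTY, returning the byte offset of that
 * data - or end_offset if none was found:
 */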
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	struct page *page;
	pgoff_t start_index = start_offset >> PAGE_SHIFT;
	pgoff_t end_index = end_offset >> PAGE_SHIFT;
	pgoff_t index = start_index;
	loff_t ret;
	int offset;

	while (index <= end_index) {
		if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
			lock_page(page);

			offset = page_data_offset(page,
					page->index == start_index
					? start_offset & (PAGE_SIZE - 1)
					: 0);
			if (offset >= 0) {
				ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
					    offset,
					    start_offset, end_offset);
				unlock_page(page);
				put_page(page);
				return ret;
			}

			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}
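/*
 * SEEK_DATA: find the first data extent at or after @offset in the btree,
 * then check whether the pagecache has dirty data even earlier in the range:
 */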
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset << 9 > isize)
			break;
	}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
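/*
 * Pagecache hole detection: a missing page, a page with no bch_page_state,
 * or a sector below SECTOR_DIRTY all count as holes:
 */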
static int __page_hole_offset(struct page *page, unsigned offset)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i;

	if (!s)
		return 0;

	for (i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s->s[i].state < SECTOR_DIRTY)
			return i << 9;

	return -1;
}
static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page;
	int pg_offset;
	loff_t ret = -1;

	page = find_lock_entry(mapping, index);
	if (!page || xa_is_value(page))
		return offset;

	pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
	if (pg_offset >= 0)
		ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;

	unlock_page(page);

	return ret;
}
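/*
 * Walk the pagecache a page at a time looking for the first hole at or after
 * start_offset; returns end_offset if the range appears to be entirely data:
 */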
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	loff_t offset = start_offset, hole;

	while (offset < end_offset) {
		hole = page_hole_offset(mapping, offset);
		if (hole >= 0 && hole <= end_offset)
			return max(start_offset, hole);

		offset += PAGE_SIZE;
		offset &= PAGE_MASK;
	}

	return end_offset;
}
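/*
 * SEEK_HOLE: a hole in the extent btree only counts if the pagecache doesn't
 * have dirty data over it - so every btree hole is cross checked against the
 * pagecache:
 */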
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9),
			   BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
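/*
 * llseek: SEEK_SET/SEEK_CUR/SEEK_END go through the generic code; SEEK_DATA
 * and SEEK_HOLE use the helpers above:
 */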
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch2_seek_data(file, offset);
	case SEEK_HOLE:
		return bch2_seek_hole(file, offset);
	}

	return -EINVAL;
}
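/*
 * fs init/exit: the biosets allocated here back the writepage, direct read
 * and direct write paths; bch2_fs_fsio_exit() must free everything
 * bch2_fs_fsio_init() allocated:
 */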
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}
int bch2_fs_fsio_init(struct bch_fs *c)
{
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		ret = -ENOMEM;

	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

#endif /* NO_BCACHEFS_FS */