// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>

struct quota_res {
	u64				sectors;
};

struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct completion		done;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	struct bch_read_bio		rbio;
};

/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
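
/*
 * Illustrative sketch (hypothetical helper, not part of this file's call
 * graph - it mirrors what bch2_direct_write() does further down): before an
 * O_DIRECT write overwrites a range, any cached pages covering it are
 * written back and dropped so the page cache can't go stale:
 */
static inline int example_flush_and_drop_pagecache(struct address_space *mapping,
						   loff_t pos, size_t count)
{
	/* Flush dirty pages, then invalidate, retrying while pages are busy: */
	return write_invalidate_inode_pages_range(mapping, pos,
						  pos + count - 1);
}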

#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
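
/*
 * Illustrative sketch of the reserve-then-consume pattern implemented by the
 * helpers above (the function below is hypothetical and not called anywhere
 * in this file): quota is checked and reserved up front with
 * KEY_TYPE_QUOTA_PREALLOC; whatever isn't later consumed by i_sectors_acct()
 * must be released with bch2_quota_reservation_put().
 */
static inline int example_reserve_quota_for_write(struct bch_fs *c,
						  struct bch_inode_info *inode,
						  unsigned sectors)
{
	struct quota_res res = { 0 };
	int ret;

	/* Fails with -EDQUOT here, before any data is written: */
	ret = bch2_quota_reservation_add(c, inode, &res, sectors, true);
	if (ret)
		return ret;

	/*
	 * ... write happens here; consumed sectors would be transferred by
	 * i_sectors_acct(), defined below ...
	 */

	/* Release anything left over (a no-op if res.sectors == 0): */
	bch2_quota_reservation_put(c, inode, &res);
	return 0;
}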

/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}

static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	if (!sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
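
/*
 * Sketch of the two accounting paths above (hypothetical helper): a buffered
 * write that reserved quota passes its quota_res so the reservation is
 * consumed; truncate/fpunch pass NULL with a negative delta, charging quota
 * directly with KEY_TYPE_QUOTA_WARN. i_blocks is in 512 byte sectors.
 */
static inline void example_i_sectors_usage(struct bch_fs *c,
					   struct bch_inode_info *inode,
					   struct quota_res *write_res)
{
	/* Buffered write path - consumes two sectors of write_res: */
	i_sectors_acct(c, inode, write_res, 2);

	/* Truncate path - no reservation; i_blocks and quota decrease: */
	i_sectors_acct(c, inode, NULL, -2);
}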

/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas: */
	unsigned		nr_replicas:3;

	/* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
	unsigned		replicas_reserved:3;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	}			state:2;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	struct bch_page_sector	s[PAGE_SECTORS];
};

static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	struct bch_page_state *s = __bch2_page_state(page);

	if (!s)
		return;

	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(s);
}

static void bch2_page_state_release(struct page *page)
{
	struct bch_page_state *s = bch2_page_state(page);

	if (!s)
		return;

	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(s);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long) s);
	SetPagePrivate(page);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}

static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
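
/*
 * Worked example (hypothetical function, illustration only): with a
 * replication goal of 2, a sector that already has one fully allocated
 * replica on disk and nothing reserved needs exactly one more sector of
 * reservation; once that is reserved, it needs none.
 */
static inline void example_sectors_to_reserve(void)
{
	struct bch_page_sector s = {
		.nr_replicas		= 1,	/* one replica allocated on disk */
		.replicas_reserved	= 0,	/* nothing reserved yet */
	};

	BUG_ON(sectors_to_reserve(&s, 2) != 1);

	s.replicas_reserved = 1;
	BUG_ON(sectors_to_reserve(&s, 2) != 0);
}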

static int bch2_get_page_disk_reservation(struct bch_fs *c,
					  struct bch_inode_info *inode,
					  struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}

struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}

static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}

static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		if (s->s[i].state == SECTOR_DIRTY) {
			dirty_sectors++;
			s->s[i].state = SECTOR_UNALLOCATED;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	if (dirty_sectors)
		i_sectors_acct(c, inode, NULL, -dirty_sectors);

	bch2_page_state_release(page);
}

static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;

		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	spin_unlock(&s->lock);

	if (dirty_sectors)
		i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		__set_page_dirty_nobuffers(page);
}
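
/*
 * Sketch of the full reservation lifecycle for dirtying part of a locked
 * page, as done by bch2_page_mkwrite() below and by the buffered write path
 * (bch2_write_begin()/bch2_write_end()). Hypothetical helper; error handling
 * trimmed to the essentials, and the page must be locked by the caller:
 */
static inline int example_dirty_page_range(struct bch_fs *c,
					   struct bch_inode_info *inode,
					   struct page *page,
					   unsigned offset, unsigned len)
{
	struct bch2_page_reservation res;
	int ret;

	bch2_page_reservation_init(c, inode, &res);

	/* Take disk + quota reservations for any not-yet-allocated sectors: */
	ret = bch2_page_reservation_get(c, inode, page, &res, offset, len, true);
	if (ret)
		return ret;

	/* Transfer the reservation into the page's per-sector state: */
	bch2_set_page_dirty(c, inode, page, &res, offset, len);

	/* Drop whatever bch2_set_page_dirty() didn't consume: */
	bch2_page_reservation_put(c, inode, &res);
	return 0;
}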

vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	int ret;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return ret;
}

vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	int ret = VM_FAULT_LOCKED;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	bch2_page_reservation_put(c, inode, &res);

	wait_for_stable_page(page);
out:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}

void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(!PageLocked(newpage));

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif

/* readpage(s): reading from a file: */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

static inline void page_state_init_for_read(struct page *page)
{
	SetPagePrivate(page);
}

struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		nr_added;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	memset(iter, 0, sizeof(*iter));

	iter->mapping	= mapping;
	iter->offset	= list_last_entry(pages, struct page, lru)->index;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	while (!list_empty(pages)) {
		struct page *page = list_last_entry(pages, struct page, lru);

		__bch2_page_state_create(page, __GFP_NOFAIL);

		iter->pages[iter->nr_pages++] = page;
		list_del(&page->lru);
	}

	return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	struct page *page;
	unsigned i;
	int ret;

	BUG_ON(iter->idx > iter->nr_added);
	BUG_ON(iter->nr_added > iter->nr_pages);

	if (iter->idx < iter->nr_added)
		goto out;

	while (1) {
		if (iter->idx == iter->nr_pages)
			return NULL;

		ret = add_to_page_cache_lru_vec(iter->mapping,
				iter->pages	+ iter->nr_added,
				iter->nr_pages	- iter->nr_added,
				iter->offset	+ iter->nr_added,
				GFP_NOFS);
		if (ret > 0)
			break;

		page = iter->pages[iter->nr_added];
		iter->idx++;
		iter->nr_added++;

		__bch2_page_state_release(page);
		put_page(page);
	}

	iter->nr_added += ret;

	for (i = iter->idx; i < iter->nr_added; i++)
		put_page(iter->pages[i]);
out:
	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}

static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = k.k->type == KEY_TYPE_reservation
		? SECTOR_RESERVED
		: SECTOR_ALLOCATED;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = bch2_page_state(bv.bv_page);
		unsigned i;

		for (i = bv.bv_offset >> 9;
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
			s->s[i].state = state;
		}
	}
}

static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}

static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_on_stack sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();

	bkey_on_stack_init(&sk);
retry:
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;

		bch2_btree_iter_set_pos(iter,
				POS(inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			break;

		bkey_on_stack_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		ret = bch2_read_indirect_extent(trans,
					&offset_into_extent, &sk);
		if (ret)
			break;

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(trans);

		if (readpages_iter)
			readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
					    extent_partial_reads_expensive(k));

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);

		bch2_read_extent(c, rbio, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}

	if (ret == -EINTR)
		goto retry;

	if (ret) {
		bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
		bio_endio(&rbio->bio);
	}

	bkey_on_stack_exit(&sk, c);
}

int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_PAGES);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
			   &readpages_iter);
	}

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);

	return 0;
}

static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_page_state_create(page, __GFP_NOFAIL);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTOR_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bchfs_read(&trans, iter, rbio, inum, NULL);

	bch2_trans_exit(&trans);
}

int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}

static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
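
/*
 * Sketch (hypothetical helper): callers below - bch2_write_begin(),
 * __bch2_buffered_write(), __bch2_truncate_page() - all follow this pattern
 * of synchronously reading a locked page in before modifying part of it:
 */
static inline int example_ensure_page_uptodate(struct page *page,
					       struct address_space *mapping)
{
	return PageUptodate(page) ? 0 : bch2_read_single_page(page, mapping);
}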

/* writepages: */

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	 * BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      BIO_MAX_PAGES,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;

	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op_journal_seq_set(op, &inode->ei_journal_seq);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	ret = bch2_get_page_disk_reservation(c, inode, page, true);
	if (ret) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		return 0;
	}

	/* Before unlocking the page, get copy of reservations: */
	orig = *s;

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;

		for (i = offset; i < offset + sectors; i++) {
			reserved_sectors += orig.s[i].replicas_reserved;
			dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
		}

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_PAGES * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
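
/*
 * Worked example of the dirty-run scan in __bch2_writepage() above
 * (hypothetical standalone version): with 4k pages PAGE_SECTORS == 8, and
 * with per-sector states [A A D D U U D A] (A = SECTOR_ALLOCATED,
 * D = SECTOR_DIRTY, U = SECTOR_UNALLOCATED) it finds two runs to write:
 * sectors 2-3, then sector 6.
 */
static inline unsigned example_next_dirty_run(struct bch_page_state *s,
					      unsigned offset,
					      unsigned *sectors)
{
	/* Skip clean sectors: */
	while (offset < PAGE_SECTORS &&
	       s->s[offset].state < SECTOR_DIRTY)
		offset++;

	/* Measure the contiguous dirty run, if any: */
	*sectors = 0;
	while (offset + *sectors < PAGE_SECTORS &&
	       s->s[offset + *sectors].state >= SECTOR_DIRTY)
		(*sectors)++;

	return offset;
}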

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}

/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	kfree(res);
	*fsdata = NULL;
	return ret;
}

int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

#define WRITE_BATCH_PAGES	32

static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(!len);
	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
		if (!pages[i]) {
			nr_pages = i;
			if (!i) {
				ret = -ENOMEM;
				goto out;
			}
			len = min_t(unsigned, len,
				    nr_pages * PAGE_SIZE - offset);
			break;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);
retry_reservation:
		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);

		if (ret && !PageUptodate(page)) {
			ret = bch2_read_single_page(page, mapping);
			if (!ret)
				goto retry_reservation;
		}

		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
						iter, pg_offset, pg_len);

		if (!pg_copied)
			break;

		flush_dcache_page(page);
		iov_iter_advance(iter, pg_copied);
		copied += pg_copied;
	}

	if (!copied)
		goto out;

	if (copied < len &&
	    ((offset + copied) & (PAGE_SIZE - 1))) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];

		if (!PageUptodate(page)) {
			zero_user(page, 0, PAGE_SIZE);
			copied -= (offset + copied) & (PAGE_SIZE - 1);
		}
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
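
/*
 * Example of the page-window arithmetic above (hypothetical helper): a write
 * of len == 10000 bytes at pos == 8292 has offset == 100 into its first
 * page, so with 4k pages it touches DIV_ROUND_UP(100 + 10000, 4096) == 3
 * pages - all of which must fit in the WRITE_BATCH_PAGES window.
 */
static inline unsigned example_buffered_write_nr_pages(loff_t pos, unsigned len)
{
	unsigned offset = pos & (PAGE_SIZE - 1);

	return DIV_ROUND_UP(offset + len, PAGE_SIZE);
}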

static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
			      PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return written ? written : ret;
}

/* O_DIRECT reads */

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
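
/*
 * Example of the alignment rule enforced at the top of
 * bch2_direct_IO_read() (hypothetical helper): with a 4k block size,
 * block_bytes(c) - 1 == 0xfff, so both the file offset and the total iter
 * count must be multiples of the block size or the read fails with -EINVAL.
 */
static inline bool example_dio_is_aligned(struct bch_fs *c,
					  loff_t offset, size_t count)
{
	return !((offset | count) & (block_bytes(c) - 1));
}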

ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		ret = filemap_write_and_wait_range(mapping,
					iocb->ki_pos,
					iocb->ki_pos + count - 1);
		if (ret < 0)
			return ret;

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	}

	return ret;
}

/* O_DIRECT writes */

static void bch2_dio_write_loop_async(struct bch_write_op *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned;
	bool sync = dio->sync;
	long ret;

	if (dio->loop)
		goto loop;

	while (1) {
		size_t extra = dio->iter.count -
			min(BIO_MAX_PAGES * PAGE_SIZE, dio->iter.count);

		if (kthread)
			use_mm(dio->mm);
		BUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		/*
		 * Don't issue more than 2MB at once, the bcachefs io path in
		 * io.c can't bounce more than that:
		 */
		dio->iter.count -= extra;
		ret = bio_iov_iter_get_pages(bio, &dio->iter);
		dio->iter.count += extra;

		current->faults_disabled_mapping = NULL;
		if (kthread)
			unuse_mm(dio->mm);

		if (unlikely(ret < 0))
			goto err;

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
		dio->op.end_io		= bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);

		if ((req->ki_flags & IOCB_DSYNC) &&
		    !c->opts.journal_flush_disabled)
			dio->op.flags |= BCH_WRITE_FLUSH;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_check_range_allocated(c, dio->op.pos,
				bio_sectors(bio), dio->op.opts.data_replicas))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->sync = sync = true;
					goto do_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
do_io:
		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (sync)
			wait_for_completion(&dio->done);
		else
			return -EIOCBQUEUED;
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		req->ki_pos += (u64) dio->op.written << 9;
		dio->written += dio->op.written;

		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);

		bio_for_each_segment_all(bv, bio, iter)
			put_page(bv->bv_page);
		if (!dio->iter.count || dio->op.error)
			break;

		bio_reset(bio);
		reinit_completion(&dio->done);
	}

	ret = dio->op.error ?: ((long) dio->written << 9);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret, 0);
		ret = -EIOCBQUEUED;
	}
	return ret;
}
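
/*
 * Example of the per-iteration clamp in bch2_dio_write_loop() above
 * (hypothetical helper): the iter is temporarily shrunk by "extra" so that
 * bio_iov_iter_get_pages() never pins more than BIO_MAX_PAGES pages at once
 * (1MB with 4k pages, assuming BIO_MAX_PAGES == 256), then the remainder is
 * restored for the next loop iteration.
 */
static inline size_t example_dio_write_extra(struct iov_iter *iter)
{
	return iter->count -
		min_t(size_t, BIO_MAX_PAGES * PAGE_SIZE, iter->count);
}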

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	if (dio->sync)
		complete(&dio->done);
	else
		bch2_dio_write_loop(dio);
}

ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	init_completion(&dio->done);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err_put_bio;

	ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
	if (unlikely(ret))
		goto err_put_bio;

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT)
		return bch2_direct_write(iocb, from);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);
	current->backing_dev_info = NULL;

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/* fsync: */

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
		goto out;

	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		return ret;
out:
	if (!c->opts.journal_flush_disabled)
		ret = bch2_journal_flush_seq(&c->journal,
					     inode->ei_journal_seq);
	ret2 = file_check_and_advance_wb_err(file);

	return ret ?: ret2;
}

/* truncate: */

static inline int range_has_data(struct bch_fs *c,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}

	return bch2_trans_exit(&trans) ?: ret;
}

static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c,
				POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	__set_page_dirty_nobuffers(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
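
/*
 * Worked example of the partial-page math in __bch2_truncate_page() above
 * (hypothetical helper, values only): truncating to i_size == 5000 with 4k
 * pages gives index == 1, start_offset == 5000 & 4095 == 904, and
 * end == round_up(5000, PAGE_SIZE) == 8192, so end_offset == 4096; bytes
 * 904..4095 of page 1 (file offsets 5000..8191) are zeroed and their sectors
 * marked SECTOR_UNALLOCATED.
 */
static inline unsigned example_truncate_start_offset(loff_t from)
{
	return from & (PAGE_SIZE - 1);	/* == 904 for from == 5000 */
}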

static int bch2_extend(struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);
	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}

int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	/*
	 * fetch current on disk i_size: inode is locked, i_size can only
	 * increase underneath us:
	 */
	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
	ret = PTR_ERR_OR_ZERO(iter);
	bch2_trans_exit(&trans);

	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	BUG_ON(inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(inode, &inode_u, iattr);
		goto err;
	}

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode->v.i_ino,
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret))
		goto err;

	setattr_copy(&inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	return ret;
}

/* fallocate: */

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
	u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);

	return ret;
}
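
/*
 * Example of the rounding at the top of bchfs_fpunch() above (hypothetical
 * helper): punching offset == 1000, len == 10000 with a 4k block size gives
 * discard_start == round_up(1000, 4096) >> 9 == 8 and
 * discard_end == round_down(11000, 4096) >> 9 == 16 - only whole blocks are
 * deleted from the extents btree; the partial blocks at either end are
 * zeroed in the page cache by __bch2_truncate_page() instead.
 */
static inline void example_fpunch_rounding(struct bch_fs *c,
					   loff_t offset, loff_t len,
					   u64 *discard_start, u64 *discard_end)
{
	*discard_start	= round_up(offset, block_bytes(c)) >> 9;
	*discard_end	= round_down(offset + len, block_bytes(c)) >> 9;
}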
2407 static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
2408 loff_t offset, loff_t len,
2411 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2412 struct address_space *mapping = inode->v.i_mapping;
2413 struct bkey_on_stack copy;
2414 struct btree_trans trans;
2415 struct btree_iter *src, *dst;
2416 loff_t shift, new_size;
2420 if ((offset | len) & (block_bytes(c) - 1))
2423 bkey_on_stack_init(©);
2424 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
2427 * We need i_mutex to keep the page cache consistent with the extents
2428 * btree, and the btree consistent with i_size - we don't need outside
2429 * locking for the extents btree itself, because we're using linked
2432 inode_lock(&inode->v);
2433 inode_dio_wait(&inode->v);
2434 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2438 if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
2442 if (offset >= inode->v.i_size)
2445 src_start = U64_MAX;
2449 if (offset + len >= inode->v.i_size)
2452 src_start = offset + len;
2456 new_size = inode->v.i_size + shift;
2458 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2463 i_size_write(&inode->v, new_size);
2464 mutex_lock(&inode->ei_update_lock);
2465 ret = bch2_write_inode_size(c, inode, new_size,
2466 ATTR_MTIME|ATTR_CTIME);
2467 mutex_unlock(&inode->ei_update_lock);
2469 s64 i_sectors_delta = 0;
2471 ret = bch2_fpunch(c, inode->v.i_ino,
2472 offset >> 9, (offset + len) >> 9,
2473 &inode->ei_journal_seq,
2475 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2481 src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2482 POS(inode->v.i_ino, src_start >> 9),
2484 BUG_ON(IS_ERR_OR_NULL(src));
2486 dst = bch2_trans_copy_iter(&trans, src);
2487 BUG_ON(IS_ERR_OR_NULL(dst));
2490 struct disk_reservation disk_res =
2491 bch2_disk_reservation_init(c, 0);
2492 struct bkey_i delete;
2494 struct bpos next_pos;
2495 struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
2496 struct bpos atomic_end;
2497 unsigned trigger_flags = 0;
2500 ? bch2_btree_iter_peek_prev(src)
2501 : bch2_btree_iter_peek(src);
2502 if ((ret = bkey_err(k)))
2505 if (!k.k || k.k->p.inode != inode->v.i_ino)
2508 BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));
2511 bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
2514 bkey_on_stack_reassemble(©, c, k);
2517 bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
2518 bch2_cut_front(move_pos, copy.k);
2520 copy.k->k.p.offset += shift >> 9;
2521 bch2_btree_iter_set_pos(dst, bkey_start_pos(©.k->k));
2523 ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
2527 if (bkey_cmp(atomic_end, copy.k->k.p)) {
2529 move_pos = atomic_end;
2530 move_pos.offset -= shift >> 9;
2533 bch2_cut_back(atomic_end, copy.k);
2537 bkey_init(&delete.k);
2538 delete.k.p = copy.k->k.p;
2539 delete.k.size = copy.k->k.size;
2540 delete.k.p.offset -= shift >> 9;
2542 next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
2544 if (copy.k->k.size == k.k->size) {
2546 * If we're moving the entire extent, we can skip
2549 trigger_flags |= BTREE_TRIGGER_NORUN;
2551 /* We might end up splitting compressed extents: */
2553 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
2555 ret = bch2_disk_reservation_get(c, &disk_res,
2556 copy.k->k.size, nr_ptrs,
2557 BCH_DISK_RESERVATION_NOFAIL);
2561 bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));
2563 ret = bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
2564 bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
2565 bch2_trans_commit(&trans, &disk_res,
2566 &inode->ei_journal_seq,
2567 BTREE_INSERT_NOFAIL);
2568 bch2_disk_reservation_put(c, &disk_res);
2571 bch2_btree_iter_set_pos(src, next_pos);
2578 bch2_trans_cond_resched(&trans);
2580 bch2_trans_unlock(&trans);
2583 i_size_write(&inode->v, new_size);
2584 mutex_lock(&inode->ei_update_lock);
2585 ret = bch2_write_inode_size(c, inode, new_size,
2586 ATTR_MTIME|ATTR_CTIME);
2587 mutex_unlock(&inode->ei_update_lock);
2590 bch2_trans_exit(&trans);
2591 bkey_on_stack_exit(©, c);
2592 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2593 inode_unlock(&inode->v);
2597 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2598 loff_t offset, loff_t len)
2600 struct address_space *mapping = inode->v.i_mapping;
2601 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2602 struct btree_trans trans;
2603 struct btree_iter *iter;
2604 struct bpos end_pos;
2605 loff_t end = offset + len;
2606 loff_t block_start = round_down(offset, block_bytes(c));
2607 loff_t block_end = round_up(end, block_bytes(c));
2609 unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2612 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2614 inode_lock(&inode->v);
2615 inode_dio_wait(&inode->v);
2616 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2618 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2619 ret = inode_newsize_ok(&inode->v, end);
2624 if (mode & FALLOC_FL_ZERO_RANGE) {
2625 ret = __bch2_truncate_page(inode,
2626 offset >> PAGE_SHIFT,
2630 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2631 ret = __bch2_truncate_page(inode,
2638 truncate_pagecache_range(&inode->v, offset, end - 1);
2641 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2642 POS(inode->v.i_ino, block_start >> 9),
2643 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2644 end_pos = POS(inode->v.i_ino, block_end >> 9);
2646 while (bkey_cmp(iter->pos, end_pos) < 0) {
2647 s64 i_sectors_delta = 0;
2648 struct disk_reservation disk_res = { 0 };
2649 struct quota_res quota_res = { 0 };
2650 struct bkey_i_reservation reservation;
2653 bch2_trans_begin(&trans);
2655 k = bch2_btree_iter_peek_slot(iter);
2656 if ((ret = bkey_err(k)))
2659 /* already reserved */
2660 if (k.k->type == KEY_TYPE_reservation &&
2661 bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2662 bch2_btree_iter_next_slot(iter);
2666 if (bkey_extent_is_data(k.k) &&
2667 !(mode & FALLOC_FL_ZERO_RANGE)) {
2668 bch2_btree_iter_next_slot(iter);
2672 bkey_reservation_init(&reservation.k_i);
2673 reservation.k.type = KEY_TYPE_reservation;
2674 reservation.k.p = k.k->p;
2675 reservation.k.size = k.k->size;
2677 bch2_cut_front(iter->pos, &reservation.k_i);
2678 bch2_cut_back(end_pos, &reservation.k_i);
2680 sectors = reservation.k.size;
2681 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
2683 if (!bkey_extent_is_allocation(k.k)) {
2684 ret = bch2_quota_reservation_add(c, inode,
2691 if (reservation.v.nr_replicas < replicas ||
2692 bch2_bkey_sectors_compressed(k)) {
2693 ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2698 reservation.v.nr_replicas = disk_res.nr_replicas;
2701 ret = bch2_extent_update(&trans, iter, &reservation.k_i,
2702 &disk_res, &inode->ei_journal_seq,
2703 0, &i_sectors_delta);
2704 i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
2706 bch2_quota_reservation_put(c, inode, &quota_res);
2707 bch2_disk_reservation_put(c, &disk_res);
2715 * Do we need to extend the file?
2717 * If we zeroed up to the end of the file, we dropped whatever writes
2718 * were going to write out the current i_size, so we have to extend
2719 * manually even if FL_KEEP_SIZE was set:
2721 if (end >= inode->v.i_size &&
2722 (!(mode & FALLOC_FL_KEEP_SIZE) ||
2723 (mode & FALLOC_FL_ZERO_RANGE))) {
2724 struct btree_iter *inode_iter;
2725 struct bch_inode_unpacked inode_u;
2728 bch2_trans_begin(&trans);
2729 inode_iter = bch2_inode_peek(&trans, &inode_u,
2731 ret = PTR_ERR_OR_ZERO(inode_iter);
2732 } while (ret == -EINTR);
2734 bch2_trans_unlock(&trans);
2740 * Sync existing appends before extending i_size,
2741 * as in bch2_extend():
2743 ret = filemap_write_and_wait_range(mapping,
2744 inode_u.bi_size, S64_MAX);
2748 if (mode & FALLOC_FL_KEEP_SIZE)
2749 end = inode->v.i_size;
2751 i_size_write(&inode->v, end);
2753 mutex_lock(&inode->ei_update_lock);
2754 ret = bch2_write_inode_size(c, inode, end, 0);
2755 mutex_unlock(&inode->ei_update_lock);
2758 bch2_trans_exit(&trans);
2759 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2760 inode_unlock(&inode->v);
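/*
 * fallocate() entry point: dispatch on the mode flags. Preallocation
 * and FALLOC_FL_ZERO_RANGE go through bchfs_fallocate(); hole punch,
 * insert range and collapse range each have their own implementation
 * above.
 *
 * Example (userspace, illustrative only):
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192);
 * removes bytes [4096, 12288) and shifts the rest of the file down,
 * shrinking it by 8192 bytes.
 */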
2764 long bch2_fallocate_dispatch(struct file *file, int mode,
2765 loff_t offset, loff_t len)
2767 struct bch_inode_info *inode = file_bch_inode(file);
2768 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2771 if (!percpu_ref_tryget(&c->writes))
2774 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2775 ret = bchfs_fallocate(inode, mode, offset, len);
2776 else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2777 ret = bchfs_fpunch(inode, offset, len);
2778 else if (mode == FALLOC_FL_INSERT_RANGE)
2779 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
2780 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
2781 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
2785 percpu_ref_put(&c->writes);
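/*
 * After a reflink the source's extents are shared, so pages we have
 * cached no longer have disk space reserved for overwrites: clear the
 * per-sector nr_replicas counts so that the next write to these pages
 * takes out a new disk reservation.
 */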
2790 static void mark_range_unallocated(struct bch_inode_info *inode,
2791 loff_t start, loff_t end)
2793 pgoff_t index = start >> PAGE_SHIFT;
2794 pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
2795 struct pagevec pvec;
2797 pagevec_init(&pvec);
2800 unsigned nr_pages, i, j;
2802 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
2807 for (i = 0; i < nr_pages; i++) {
2808 struct page *page = pvec.pages[i];
2809 struct bch_page_state *s;
2812 s = bch2_page_state(page);
2815 spin_lock(&s->lock);
2816 for (j = 0; j < PAGE_SECTORS; j++)
2817 s->s[j].nr_replicas = 0;
2818 spin_unlock(&s->lock);
2823 pagevec_release(&pvec);
2824 } while (index <= end_index);
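/*
 * bch2_remap_file_range: reflink, called for the FICLONE and
 * FICLONERANGE ioctls (REMAP_FILE_DEDUP is rejected below). Source and
 * destination offsets must be block aligned; extents are shared via
 * bch2_remap_range(), not copied.
 */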
2827 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
2828 struct file *file_dst, loff_t pos_dst,
2829 loff_t len, unsigned remap_flags)
2831 struct bch_inode_info *src = file_bch_inode(file_src);
2832 struct bch_inode_info *dst = file_bch_inode(file_dst);
2833 struct bch_fs *c = src->v.i_sb->s_fs_info;
2834 s64 i_sectors_delta = 0;
2838 if (!c->opts.reflink)
2841 if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
2844 if (remap_flags & REMAP_FILE_DEDUP)
2847 if ((pos_src & (block_bytes(c) - 1)) ||
2848 (pos_dst & (block_bytes(c) - 1)))
2852 abs(pos_src - pos_dst) < len)
2855 bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2857 file_update_time(file_dst);
2859 inode_dio_wait(&src->v);
2860 inode_dio_wait(&dst->v);
2862 ret = generic_remap_file_range_prep(file_src, pos_src,
2865 if (ret < 0 || len == 0)
2868 aligned_len = round_up((u64) len, block_bytes(c));
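/*
 * Write out and drop the destination's pagecache over the range, so
 * that subsequent reads come from the newly shared extents:
 */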
2870 ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
2871 pos_dst, pos_dst + len - 1);
2875 mark_range_unallocated(src, pos_src, pos_src + aligned_len);
2877 ret = bch2_remap_range(c,
2878 POS(dst->v.i_ino, pos_dst >> 9),
2879 POS(src->v.i_ino, pos_src >> 9),
2881 &dst->ei_journal_seq,
2882 pos_dst + len, &i_sectors_delta);
2887 * due to alignment, we might have remapped slightly more than requested
2889 ret = min((u64) ret << 9, (u64) len);
2891 /* XXX get a quota reservation */
2892 i_sectors_acct(c, dst, NULL, i_sectors_delta);
2894 spin_lock(&dst->v.i_lock);
2895 if (pos_dst + ret > dst->v.i_size)
2896 i_size_write(&dst->v, pos_dst + ret);
2897 spin_unlock(&dst->v.i_lock);
2899 bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
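/* SEEK_DATA/SEEK_HOLE support: */

/*
 * Return the byte offset within @page of the first sector at or after
 * @offset that holds data (state >= SECTOR_DIRTY), or -1 if there is
 * none:
 */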
2906 static int page_data_offset(struct page *page, unsigned offset)
2908 struct bch_page_state *s = bch2_page_state(page);
2912 for (i = offset >> 9; i < PAGE_SECTORS; i++)
2913 if (s->s[i].state >= SECTOR_DIRTY)
2919 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
2920 loff_t start_offset,
2923 struct address_space *mapping = vinode->i_mapping;
2925 pgoff_t start_index = start_offset >> PAGE_SHIFT;
2926 pgoff_t end_index = end_offset >> PAGE_SHIFT;
2927 pgoff_t index = start_index;
2931 while (index <= end_index) {
2932 if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
2935 offset = page_data_offset(page,
2936 page->index == start_index
2937 ? start_offset & (PAGE_SIZE - 1)
2940 ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
2942 start_offset, end_offset);
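/*
 * SEEK_DATA: find the first data extent at or after @offset in the
 * extent btree, then check the pagecache, which may hold dirty data
 * that hasn't yet been written back to the btree:
 */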
2958 static loff_t bch2_seek_data(struct file *file, u64 offset)
2960 struct bch_inode_info *inode = file_bch_inode(file);
2961 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2962 struct btree_trans trans;
2963 struct btree_iter *iter;
2965 u64 isize, next_data = MAX_LFS_FILESIZE;
2968 isize = i_size_read(&inode->v);
2969 if (offset >= isize)
2972 bch2_trans_init(&trans, c, 0, 0);
2974 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2975 POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
2976 if (k.k->p.inode != inode->v.i_ino) {
2978 } else if (bkey_extent_is_data(k.k)) {
2979 next_data = max(offset, bkey_start_offset(k.k) << 9);
2981 } else if (k.k->p.offset << 9 > isize)
2985 ret = bch2_trans_exit(&trans) ?: ret;
2989 if (next_data > offset)
2990 next_data = bch2_seek_pagecache_data(&inode->v,
2993 if (next_data >= isize)
2996 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
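/*
 * Hole detection is the inverse: a sector is a candidate hole if its
 * pagecache state is below SECTOR_DIRTY, or if the page isn't present
 * in the pagecache at all:
 */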
2999 static int __page_hole_offset(struct page *page, unsigned offset)
3001 struct bch_page_state *s = bch2_page_state(page);
3007 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3008 if (s->s[i].state < SECTOR_DIRTY)
3014 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3016 pgoff_t index = offset >> PAGE_SHIFT;
3021 page = find_lock_entry(mapping, index);
3022 if (!page || xa_is_value(page))
3025 pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3027 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3034 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3035 loff_t start_offset,
3038 struct address_space *mapping = vinode->i_mapping;
3039 loff_t offset = start_offset, hole;
3041 while (offset < end_offset) {
3042 hole = page_hole_offset(mapping, offset);
3043 if (hole >= 0 && hole <= end_offset)
3044 return max(start_offset, hole);
3046 offset += PAGE_SIZE;
3047 offset &= PAGE_MASK;
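/*
 * SEEK_HOLE: iterate with BTREE_ITER_SLOTS so that holes in the extent
 * btree show up as whiteout keys; a btree hole only counts if the
 * pagecache doesn't have dirty data over it:
 */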
3053 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3055 struct bch_inode_info *inode = file_bch_inode(file);
3056 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3057 struct btree_trans trans;
3058 struct btree_iter *iter;
3060 u64 isize, next_hole = MAX_LFS_FILESIZE;
3063 isize = i_size_read(&inode->v);
3064 if (offset >= isize)
3067 bch2_trans_init(&trans, c, 0, 0);
3069 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
3070 POS(inode->v.i_ino, offset >> 9),
3071 BTREE_ITER_SLOTS, k, ret) {
3072 if (k.k->p.inode != inode->v.i_ino) {
3073 next_hole = bch2_seek_pagecache_hole(&inode->v,
3074 offset, MAX_LFS_FILESIZE);
3076 } else if (!bkey_extent_is_data(k.k)) {
3077 next_hole = bch2_seek_pagecache_hole(&inode->v,
3078 max(offset, bkey_start_offset(k.k) << 9),
3079 k.k->p.offset << 9);
3081 if (next_hole < k.k->p.offset << 9)
3084 offset = max(offset, bkey_start_offset(k.k) << 9);
3088 ret = bch2_trans_exit(&trans) ?: ret;
3092 if (next_hole > isize)
3095 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
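/*
 * Example (userspace, illustrative only):
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 * finds the start of the first data extent and the first hole after
 * it; both fail with ENXIO if the offset is at or past EOF.
 */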
3098 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3104 return generic_file_llseek(file, offset, whence);
3106 return bch2_seek_data(file, offset);
3108 return bch2_seek_hole(file, offset);
3114 void bch2_fs_fsio_exit(struct bch_fs *c)
3116 bioset_exit(&c->dio_write_bioset);
3117 bioset_exit(&c->dio_read_bioset);
3118 bioset_exit(&c->writepage_bioset);
3121 int bch2_fs_fsio_init(struct bch_fs *c)
3125 pr_verbose_init(c->opts, "");
3127 if (bioset_init(&c->writepage_bioset,
3128 4, offsetof(struct bch_writepage_io, op.wbio.bio),
3129 BIOSET_NEED_BVECS) ||
3130 bioset_init(&c->dio_read_bioset,
3131 4, offsetof(struct dio_read, rbio.bio),
3132 BIOSET_NEED_BVECS) ||
3133 bioset_init(&c->dio_write_bioset,
3134 4, offsetof(struct dio_write, op.wbio.bio),
3138 pr_verbose_init(c->opts, "ret %i", ret);
3142 #endif /* NO_BCACHEFS_FS */