1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_foreground.h"
7 #include "btree_update.h"
12 #include "extent_update.h"
23 #include <linux/aio.h>
24 #include <linux/backing-dev.h>
25 #include <linux/falloc.h>
26 #include <linux/migrate.h>
27 #include <linux/mmu_context.h>
28 #include <linux/pagevec.h>
29 #include <linux/rmap.h>
30 #include <linux/sched/signal.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/uio.h>
33 #include <linux/writeback.h>
35 #include <trace/events/bcachefs.h>
36 #include <trace/events/writeback.h>
38 static inline struct address_space *faults_disabled_mapping(void)
40 return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
43 static inline void set_fdm_dropped_locks(void)
45 current->faults_disabled_mapping =
46 (void *) (((unsigned long) current->faults_disabled_mapping)|1);
49 static inline bool fdm_dropped_locks(void)
51 return ((unsigned long) current->faults_disabled_mapping) & 1;
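/*
 * faults_disabled_mapping is a tagged pointer: bit 0 records that the fault
 * handler dropped (and retook) our pagecache locks, the remaining bits are
 * the address_space a dio write currently holds pagecache_block on.
 * faults_disabled_mapping() masks off the tag bit to recover the mapping;
 * set_fdm_dropped_locks()/fdm_dropped_locks() set and test it.
 */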
58 struct bch_writepage_io {
60 struct bch_inode_info *inode;
63 struct bch_write_op op;
67 struct completion done;
73 struct quota_res quota_res;
77 struct iovec inline_vecs[2];
80 struct bch_write_op op;
88 struct bch_read_bio rbio;
91 /* pagecache_block must be held */
92 static int write_invalidate_inode_pages_range(struct address_space *mapping,
93 loff_t start, loff_t end)
98 * XXX: the way this is currently implemented, we can spin if a process
99 * is continually redirtying a specific page
102 if (!mapping->nrpages)
105 ret = filemap_write_and_wait_range(mapping, start, end);
109 if (!mapping->nrpages)
112 ret = invalidate_inode_pages2_range(mapping,
115 } while (ret == -EBUSY);
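/*
 * invalidate_inode_pages2_range() returns -EBUSY when a page couldn't be
 * invalidated (e.g. it was redirtied in the meantime), so on -EBUSY we go
 * around again: write the range back out, then retry the invalidation.
 */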
122 #ifdef CONFIG_BCACHEFS_QUOTA
124 static void bch2_quota_reservation_put(struct bch_fs *c,
125 struct bch_inode_info *inode,
126 struct quota_res *res)
131 mutex_lock(&inode->ei_quota_lock);
132 BUG_ON(res->sectors > inode->ei_quota_reserved);
134 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
135 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
136 inode->ei_quota_reserved -= res->sectors;
137 mutex_unlock(&inode->ei_quota_lock);
142 static int bch2_quota_reservation_add(struct bch_fs *c,
143 struct bch_inode_info *inode,
144 struct quota_res *res,
150 mutex_lock(&inode->ei_quota_lock);
151 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
152 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
153 if (likely(!ret)) {
154 inode->ei_quota_reserved += sectors;
155 res->sectors += sectors;
157 mutex_unlock(&inode->ei_quota_lock);
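/*
 * Rough usage sketch for the quota reservation API: reserve up front,
 * convert what's actually written via i_sectors_acct(), release the rest:
 *
 *	struct quota_res res = { 0 };
 *
 *	ret = bch2_quota_reservation_add(c, inode, &res, sectors, true);
 *	...
 *	i_sectors_acct(c, inode, &res, sectors_written);
 *	bch2_quota_reservation_put(c, inode, &res);
 */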
164 static void bch2_quota_reservation_put(struct bch_fs *c,
165 struct bch_inode_info *inode,
166 struct quota_res *res)
170 static int bch2_quota_reservation_add(struct bch_fs *c,
171 struct bch_inode_info *inode,
172 struct quota_res *res,
181 /* i_size updates: */
183 struct inode_new_size {
189 static int inode_set_size(struct bch_inode_info *inode,
190 struct bch_inode_unpacked *bi,
193 struct inode_new_size *s = p;
195 bi->bi_size = s->new_size;
196 if (s->fields & ATTR_ATIME)
197 bi->bi_atime = s->now;
198 if (s->fields & ATTR_MTIME)
199 bi->bi_mtime = s->now;
200 if (s->fields & ATTR_CTIME)
201 bi->bi_ctime = s->now;
206 int __must_check bch2_write_inode_size(struct bch_fs *c,
207 struct bch_inode_info *inode,
208 loff_t new_size, unsigned fields)
210 struct inode_new_size s = {
211 .new_size = new_size,
212 .now = bch2_current_time(c),
216 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
219 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
220 struct quota_res *quota_res, s64 sectors)
225 mutex_lock(&inode->ei_quota_lock);
226 BUG_ON((s64) inode->v.i_blocks + sectors < 0);
227 inode->v.i_blocks += sectors;
229 #ifdef CONFIG_BCACHEFS_QUOTA
230 if (quota_res && sectors > 0) {
231 BUG_ON(sectors > quota_res->sectors);
232 BUG_ON(sectors > inode->ei_quota_reserved);
234 quota_res->sectors -= sectors;
235 inode->ei_quota_reserved -= sectors;
236 } else {
237 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
240 mutex_unlock(&inode->ei_quota_lock);
245 /* stored in page->private: */
247 struct bch_page_sector {
248 /* Uncompressed, fully allocated replicas (or on disk reservation): */
249 unsigned nr_replicas:4;
251 /* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
252 unsigned replicas_reserved:4;
259 SECTOR_DIRTY_RESERVED,
264 struct bch_page_state {
266 atomic_t write_count;
268 struct bch_page_sector s[PAGE_SECTORS];
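/*
 * Per-sector states, in increasing order of "allocatedness": UNALLOCATED ->
 * RESERVED (fallocated) -> DIRTY/DIRTY_RESERVED (dirty in the pagecache,
 * writeback pending) -> ALLOCATED. Comparisons like state >= SECTOR_DIRTY
 * below rely on this enum ordering.
 */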
271 static inline struct bch_page_state *__bch2_page_state(struct page *page)
273 return page_has_private(page)
274 ? (struct bch_page_state *) page_private(page)
278 static inline struct bch_page_state *bch2_page_state(struct page *page)
280 EBUG_ON(!PageLocked(page));
282 return __bch2_page_state(page);
285 /* for newly allocated pages: */
286 static void __bch2_page_state_release(struct page *page)
288 kfree(detach_page_private(page));
291 static void bch2_page_state_release(struct page *page)
293 EBUG_ON(!PageLocked(page));
294 __bch2_page_state_release(page);
297 /* for newly allocated pages: */
298 static struct bch_page_state *__bch2_page_state_create(struct page *page,
301 struct bch_page_state *s;
303 s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
307 spin_lock_init(&s->lock);
308 attach_page_private(page, s);
312 static struct bch_page_state *bch2_page_state_create(struct page *page,
315 return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
318 static unsigned bkey_to_sector_state(const struct bkey *k)
320 if (k->type == KEY_TYPE_reservation)
321 return SECTOR_RESERVED;
322 if (bkey_extent_is_allocation(k))
323 return SECTOR_ALLOCATED;
324 return SECTOR_UNALLOCATED;
327 static void __bch2_page_state_set(struct page *page,
328 unsigned pg_offset, unsigned pg_len,
329 unsigned nr_ptrs, unsigned state)
331 struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
334 BUG_ON(pg_offset >= PAGE_SECTORS);
335 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
339 for (i = pg_offset; i < pg_offset + pg_len; i++) {
340 s->s[i].nr_replicas = nr_ptrs;
341 s->s[i].state = state;
344 if (i == PAGE_SECTORS)
347 spin_unlock(&s->lock);
350 static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
351 struct page **pages, unsigned nr_pages)
353 struct btree_trans trans;
354 struct btree_iter iter;
356 u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
361 bch2_trans_init(&trans, c, 0, 0);
363 bch2_trans_begin(&trans);
365 ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
369 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
370 SPOS(inum.inum, offset, snapshot),
371 BTREE_ITER_SLOTS, k, ret) {
372 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
373 unsigned state = bkey_to_sector_state(k.k);
375 while (pg_idx < nr_pages) {
376 struct page *page = pages[pg_idx];
377 u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
378 u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
379 unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
380 unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
382 BUG_ON(k.k->p.offset < pg_start);
383 BUG_ON(bkey_start_offset(k.k) > pg_end);
385 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
386 __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
388 if (k.k->p.offset < pg_end)
393 if (pg_idx == nr_pages)
397 offset = iter.pos.offset;
398 bch2_trans_iter_exit(&trans, &iter);
402 bch2_trans_exit(&trans);
407 static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
409 struct bvec_iter iter;
411 unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
412 ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
413 unsigned state = bkey_to_sector_state(k.k);
415 bio_for_each_segment(bv, bio, iter)
416 __bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
417 bv.bv_len >> 9, nr_ptrs, state);
420 static void mark_pagecache_unallocated(struct bch_inode_info *inode,
423 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
424 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
433 unsigned nr_pages, i, j;
435 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
437 for (i = 0; i < nr_pages; i++) {
438 struct page *page = pvec.pages[i];
439 u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
440 u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
441 unsigned pg_offset = max(start, pg_start) - pg_start;
442 unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
443 struct bch_page_state *s;
445 BUG_ON(end <= pg_start);
446 BUG_ON(pg_offset >= PAGE_SECTORS);
447 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
450 s = bch2_page_state(page);
454 for (j = pg_offset; j < pg_offset + pg_len; j++)
455 s->s[j].nr_replicas = 0;
456 spin_unlock(&s->lock);
461 pagevec_release(&pvec);
462 } while (index <= end_index);
465 static void mark_pagecache_reserved(struct bch_inode_info *inode,
468 struct bch_fs *c = inode->v.i_sb->s_fs_info;
469 pgoff_t index = start >> PAGE_SECTORS_SHIFT;
470 pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
472 s64 i_sectors_delta = 0;
480 unsigned nr_pages, i, j;
482 nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
484 for (i = 0; i < nr_pages; i++) {
485 struct page *page = pvec.pages[i];
486 u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
487 u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
488 unsigned pg_offset = max(start, pg_start) - pg_start;
489 unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
490 struct bch_page_state *s;
492 BUG_ON(end <= pg_start);
493 BUG_ON(pg_offset >= PAGE_SECTORS);
494 BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
497 s = bch2_page_state(page);
501 for (j = pg_offset; j < pg_offset + pg_len; j++)
502 switch (s->s[j].state) {
503 case SECTOR_UNALLOCATED:
504 s->s[j].state = SECTOR_RESERVED;
505 break;
506 case SECTOR_DIRTY:
507 s->s[j].state = SECTOR_DIRTY_RESERVED;
508 i_sectors_delta--;
513 spin_unlock(&s->lock);
518 pagevec_release(&pvec);
519 } while (index <= end_index);
521 i_sectors_acct(c, inode, NULL, i_sectors_delta);
524 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
526 /* XXX: this should not be open coded */
527 return inode->ei_inode.bi_data_replicas
528 ? inode->ei_inode.bi_data_replicas - 1
529 : c->opts.data_replicas;
532 static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
533 unsigned nr_replicas)
535 return max(0, (int) nr_replicas -
536 s->nr_replicas -
537 s->replicas_reserved);
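/*
 * e.g. writing at nr_replicas = 2: a sector with 1 replica fully allocated
 * and nothing reserved needs 1 more sector of reservation; one with 2
 * allocated, or 1 allocated + 1 reserved, needs 0 - never negative.
 */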
540 static int bch2_get_page_disk_reservation(struct bch_fs *c,
541 struct bch_inode_info *inode,
542 struct page *page, bool check_enospc)
544 struct bch_page_state *s = bch2_page_state_create(page, 0);
545 unsigned nr_replicas = inode_nr_replicas(c, inode);
546 struct disk_reservation disk_res = { 0 };
547 unsigned i, disk_res_sectors = 0;
553 for (i = 0; i < ARRAY_SIZE(s->s); i++)
554 disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
556 if (!disk_res_sectors)
559 ret = bch2_disk_reservation_get(c, &disk_res,
562 ? BCH_DISK_RESERVATION_NOFAIL
567 for (i = 0; i < ARRAY_SIZE(s->s); i++)
568 s->s[i].replicas_reserved +=
569 sectors_to_reserve(&s->s[i], nr_replicas);
574 struct bch2_page_reservation {
575 struct disk_reservation disk;
576 struct quota_res quota;
579 static void bch2_page_reservation_init(struct bch_fs *c,
580 struct bch_inode_info *inode,
581 struct bch2_page_reservation *res)
583 memset(res, 0, sizeof(*res));
585 res->disk.nr_replicas = inode_nr_replicas(c, inode);
588 static void bch2_page_reservation_put(struct bch_fs *c,
589 struct bch_inode_info *inode,
590 struct bch2_page_reservation *res)
592 bch2_disk_reservation_put(c, &res->disk);
593 bch2_quota_reservation_put(c, inode, &res->quota);
596 static int bch2_page_reservation_get(struct bch_fs *c,
597 struct bch_inode_info *inode, struct page *page,
598 struct bch2_page_reservation *res,
599 unsigned offset, unsigned len, bool check_enospc)
601 struct bch_page_state *s = bch2_page_state_create(page, 0);
602 unsigned i, disk_sectors = 0, quota_sectors = 0;
608 BUG_ON(!s->uptodate);
610 for (i = round_down(offset, block_bytes(c)) >> 9;
611 i < round_up(offset + len, block_bytes(c)) >> 9;
613 disk_sectors += sectors_to_reserve(&s->s[i],
614 res->disk.nr_replicas);
615 quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
619 ret = bch2_disk_reservation_add(c, &res->disk,
622 ? BCH_DISK_RESERVATION_NOFAIL
629 ret = bch2_quota_reservation_add(c, inode, &res->quota,
633 struct disk_reservation tmp = {
634 .sectors = disk_sectors
637 bch2_disk_reservation_put(c, &tmp);
638 res->disk.sectors -= disk_sectors;
646 static void bch2_clear_page_bits(struct page *page)
648 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
649 struct bch_fs *c = inode->v.i_sb->s_fs_info;
650 struct bch_page_state *s = bch2_page_state(page);
651 struct disk_reservation disk_res = { 0 };
652 int i, dirty_sectors = 0;
657 EBUG_ON(!PageLocked(page));
658 EBUG_ON(PageWriteback(page));
660 for (i = 0; i < ARRAY_SIZE(s->s); i++) {
661 disk_res.sectors += s->s[i].replicas_reserved;
662 s->s[i].replicas_reserved = 0;
664 switch (s->s[i].state) {
665 case SECTOR_DIRTY:
666 s->s[i].state = SECTOR_UNALLOCATED;
667 --dirty_sectors;
668 break;
669 case SECTOR_DIRTY_RESERVED:
670 s->s[i].state = SECTOR_RESERVED;
677 bch2_disk_reservation_put(c, &disk_res);
679 i_sectors_acct(c, inode, NULL, dirty_sectors);
681 bch2_page_state_release(page);
684 static void bch2_set_page_dirty(struct bch_fs *c,
685 struct bch_inode_info *inode, struct page *page,
686 struct bch2_page_reservation *res,
687 unsigned offset, unsigned len)
689 struct bch_page_state *s = bch2_page_state(page);
690 unsigned i, dirty_sectors = 0;
692 WARN_ON((u64) page_offset(page) + offset + len >
693 round_up((u64) i_size_read(&inode->v), block_bytes(c)));
697 for (i = round_down(offset, block_bytes(c)) >> 9;
698 i < round_up(offset + len, block_bytes(c)) >> 9;
700 unsigned sectors = sectors_to_reserve(&s->s[i],
701 res->disk.nr_replicas);
704 * This can happen if we race with the error path in
705 * bch2_writepage_io_done():
707 sectors = min_t(unsigned, sectors, res->disk.sectors);
709 s->s[i].replicas_reserved += sectors;
710 res->disk.sectors -= sectors;
712 switch (s->s[i].state) {
713 case SECTOR_UNALLOCATED:
714 s->s[i].state = SECTOR_DIRTY;
715 dirty_sectors++;
716 break;
717 case SECTOR_RESERVED:
718 s->s[i].state = SECTOR_DIRTY_RESERVED;
725 spin_unlock(&s->lock);
727 i_sectors_acct(c, inode, &res->quota, dirty_sectors);
729 if (!PageDirty(page))
730 __set_page_dirty_nobuffers(page);
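/*
 * Note that dirtying a range consumes the caller's reservation: disk sectors
 * move from res->disk into the per-sector replicas_reserved counts, and
 * quota for newly dirtied (previously unallocated) sectors is transferred
 * via i_sectors_acct().
 */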
733 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
735 struct file *file = vmf->vma->vm_file;
736 struct address_space *mapping = file->f_mapping;
737 struct address_space *fdm = faults_disabled_mapping();
738 struct bch_inode_info *inode = file_bch_inode(file);
742 return VM_FAULT_SIGBUS;
746 struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
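/*
 * Lock ordering dance, for a fault taken while this task's dio write
 * holds pagecache_block on another mapping: drop that block lock, cycle
 * our add lock so the pending pagecache shootdown can finish, retake the
 * block lock, then use the fdm tag bit to tell the dio write it must
 * re-invalidate the pagecache and retry:
 */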
748 if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
751 bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
753 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
754 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
756 bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
758 /* Signal that lock has been dropped: */
759 set_fdm_dropped_locks();
760 return VM_FAULT_SIGBUS;
763 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
765 ret = filemap_fault(vmf);
766 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
771 vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
773 struct page *page = vmf->page;
774 struct file *file = vmf->vma->vm_file;
775 struct bch_inode_info *inode = file_bch_inode(file);
776 struct address_space *mapping = file->f_mapping;
777 struct bch_fs *c = inode->v.i_sb->s_fs_info;
778 struct bch2_page_reservation res;
783 bch2_page_reservation_init(c, inode, &res);
785 sb_start_pagefault(inode->v.i_sb);
786 file_update_time(file);
789 * Not strictly necessary, but helps avoid dio writes livelocking in
790 * write_invalidate_inode_pages_range() - can drop this if/when we get
791 * a write_invalidate_inode_pages_range() that works without dropping
792 * the page lock before invalidating the page
794 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
797 isize = i_size_read(&inode->v);
799 if (page->mapping != mapping || page_offset(page) >= isize) {
801 ret = VM_FAULT_NOPAGE;
805 len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
807 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
808 if (bch2_page_state_set(c, inode_inum(inode), &page, 1)) {
810 ret = VM_FAULT_SIGBUS;
815 if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
817 ret = VM_FAULT_SIGBUS;
821 bch2_set_page_dirty(c, inode, page, &res, 0, len);
822 bch2_page_reservation_put(c, inode, &res);
824 wait_for_stable_page(page);
825 ret = VM_FAULT_LOCKED;
827 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
828 sb_end_pagefault(inode->v.i_sb);
833 void bch2_invalidatepage(struct page *page, unsigned int offset,
836 if (offset || length < PAGE_SIZE)
839 bch2_clear_page_bits(page);
842 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
847 bch2_clear_page_bits(page);
851 #ifdef CONFIG_MIGRATION
852 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
853 struct page *page, enum migrate_mode mode)
857 EBUG_ON(!PageLocked(page));
858 EBUG_ON(!PageLocked(newpage));
860 ret = migrate_page_move_mapping(mapping, newpage, page, 0);
861 if (ret != MIGRATEPAGE_SUCCESS)
864 if (PagePrivate(page))
865 attach_page_private(newpage, detach_page_private(page));
867 if (mode != MIGRATE_SYNC_NO_COPY)
868 migrate_page_copy(newpage, page);
870 migrate_page_states(newpage, page);
871 return MIGRATEPAGE_SUCCESS;
877 static void bch2_readpages_end_io(struct bio *bio)
879 struct bvec_iter_all iter;
882 bio_for_each_segment_all(bv, bio, iter) {
883 struct page *page = bv->bv_page;
885 if (!bio->bi_status) {
886 SetPageUptodate(page);
888 ClearPageUptodate(page);
897 struct readpages_iter {
898 struct address_space *mapping;
905 static int readpages_iter_init(struct readpages_iter *iter,
906 struct readahead_control *ractl)
908 unsigned i, nr_pages = readahead_count(ractl);
910 memset(iter, 0, sizeof(*iter));
912 iter->mapping = ractl->mapping;
913 iter->offset = readahead_index(ractl);
914 iter->nr_pages = nr_pages;
916 iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
920 nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
921 for (i = 0; i < nr_pages; i++) {
922 __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
923 put_page(iter->pages[i]);
929 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
931 if (iter->idx >= iter->nr_pages)
934 EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
936 return iter->pages[iter->idx];
939 static bool extent_partial_reads_expensive(struct bkey_s_c k)
941 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
942 struct bch_extent_crc_unpacked crc;
943 const union bch_extent_entry *i;
945 bkey_for_each_crc(k.k, ptrs, crc, i)
946 if (crc.csum_type || crc.compression_type)
951 static void readpage_bio_extend(struct readpages_iter *iter,
953 unsigned sectors_this_extent,
956 while (bio_sectors(bio) < sectors_this_extent &&
957 bio->bi_vcnt < bio->bi_max_vecs) {
958 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
959 struct page *page = readpage_iter_next(iter);
963 if (iter->offset + iter->idx != page_offset)
971 page = xa_load(&iter->mapping->i_pages, page_offset);
972 if (page && !xa_is_value(page))
975 page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
979 if (!__bch2_page_state_create(page, 0)) {
984 ret = add_to_page_cache_lru(page, iter->mapping,
985 page_offset, GFP_NOFS);
987 __bch2_page_state_release(page);
995 BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
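/*
 * i.e. opportunistically extend the read to cover the whole extent: first
 * consume pages the readahead iter already claimed, then - if get_more,
 * because the extent is checksummed/compressed and partial reads would be
 * expensive - allocate fresh pages into the pagecache, stopping at the
 * first page already present or on allocation failure.
 */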
999 static void bchfs_read(struct btree_trans *trans,
1000 struct bch_read_bio *rbio,
1002 struct readpages_iter *readpages_iter)
1004 struct bch_fs *c = trans->c;
1005 struct btree_iter iter;
1007 int flags = BCH_READ_RETRY_IF_STALE|
1008 BCH_READ_MAY_PROMOTE;
1013 rbio->start_time = local_clock();
1014 rbio->subvol = inum.subvol;
1016 bch2_bkey_buf_init(&sk);
1018 bch2_trans_begin(trans);
1019 iter = (struct btree_iter) { NULL };
1021 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
1025 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
1026 SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
1030 unsigned bytes, sectors, offset_into_extent;
1031 enum btree_id data_btree = BTREE_ID_extents;
1034 * read_extent -> io_time_reset may cause a transaction restart
1035 * without returning an error; we need to check for that here:
1037 if (!bch2_trans_relock(trans)) {
1042 bch2_btree_iter_set_pos(&iter,
1043 POS(inum.inum, rbio->bio.bi_iter.bi_sector));
1045 k = bch2_btree_iter_peek_slot(&iter);
1050 offset_into_extent = iter.pos.offset -
1051 bkey_start_offset(k.k);
1052 sectors = k.k->size - offset_into_extent;
1054 bch2_bkey_buf_reassemble(&sk, c, k);
1056 ret = bch2_read_indirect_extent(trans, &data_btree,
1057 &offset_into_extent, &sk);
1061 k = bkey_i_to_s_c(sk.k);
1063 sectors = min(sectors, k.k->size - offset_into_extent);
1066 readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
1067 extent_partial_reads_expensive(k));
1069 bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
1070 swap(rbio->bio.bi_iter.bi_size, bytes);
1072 if (rbio->bio.bi_iter.bi_size == bytes)
1073 flags |= BCH_READ_LAST_FRAGMENT;
1075 bch2_bio_page_state_set(&rbio->bio, k);
1077 bch2_read_extent(trans, rbio, iter.pos,
1078 data_btree, k, offset_into_extent, flags);
1080 if (flags & BCH_READ_LAST_FRAGMENT)
1083 swap(rbio->bio.bi_iter.bi_size, bytes);
1084 bio_advance(&rbio->bio, bytes);
1086 ret = btree_trans_too_many_iters(trans);
1091 bch2_trans_iter_exit(trans, &iter);
1097 bch_err_inum_ratelimited(c, inum.inum,
1098 "read error %i from btree lookup", ret);
1099 rbio->bio.bi_status = BLK_STS_IOERR;
1100 bio_endio(&rbio->bio);
1103 bch2_bkey_buf_exit(&sk, c);
1106 void bch2_readahead(struct readahead_control *ractl)
1108 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
1109 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1110 struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1111 struct btree_trans trans;
1113 struct readpages_iter readpages_iter;
1116 ret = readpages_iter_init(&readpages_iter, ractl);
1119 bch2_trans_init(&trans, c, 0, 0);
1121 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1123 while ((page = readpage_iter_next(&readpages_iter))) {
1124 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1125 unsigned n = min_t(unsigned,
1126 readpages_iter.nr_pages -
1129 struct bch_read_bio *rbio =
1130 rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1133 readpages_iter.idx++;
1135 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1136 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
1137 rbio->bio.bi_end_io = bch2_readpages_end_io;
1138 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1140 bchfs_read(&trans, rbio, inode_inum(inode),
1144 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1146 bch2_trans_exit(&trans);
1147 kfree(readpages_iter.pages);
1150 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1151 subvol_inum inum, struct page *page)
1153 struct btree_trans trans;
1155 bch2_page_state_create(page, __GFP_NOFAIL);
1157 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1158 rbio->bio.bi_iter.bi_sector =
1159 (sector_t) page->index << PAGE_SECTORS_SHIFT;
1160 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
1162 bch2_trans_init(&trans, c, 0, 0);
1163 bchfs_read(&trans, rbio, inum, NULL);
1164 bch2_trans_exit(&trans);
1167 int bch2_readpage(struct file *file, struct page *page)
1169 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1170 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1171 struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1172 struct bch_read_bio *rbio;
1174 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1175 rbio->bio.bi_end_io = bch2_readpages_end_io;
1177 __bchfs_readpage(c, rbio, inode_inum(inode), page);
1181 static void bch2_read_single_page_end_io(struct bio *bio)
1183 complete(bio->bi_private);
1186 static int bch2_read_single_page(struct page *page,
1187 struct address_space *mapping)
1189 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1190 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1191 struct bch_read_bio *rbio;
1193 DECLARE_COMPLETION_ONSTACK(done);
1195 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1196 io_opts(c, &inode->ei_inode));
1197 rbio->bio.bi_private = &done;
1198 rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1200 __bchfs_readpage(c, rbio, inode_inum(inode), page);
1201 wait_for_completion(&done);
1203 ret = blk_status_to_errno(rbio->bio.bi_status);
1204 bio_put(&rbio->bio);
1209 SetPageUptodate(page);
1215 struct bch_writepage_state {
1216 struct bch_writepage_io *io;
1217 struct bch_io_opts opts;
1220 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1221 struct bch_inode_info *inode)
1223 return (struct bch_writepage_state) {
1224 .opts = io_opts(c, &inode->ei_inode)
1228 static void bch2_writepage_io_free(struct closure *cl)
1230 struct bch_writepage_io *io = container_of(cl,
1231 struct bch_writepage_io, cl);
1233 bio_put(&io->op.wbio.bio);
1236 static void bch2_writepage_io_done(struct closure *cl)
1238 struct bch_writepage_io *io = container_of(cl,
1239 struct bch_writepage_io, cl);
1240 struct bch_fs *c = io->op.c;
1241 struct bio *bio = &io->op.wbio.bio;
1242 struct bvec_iter_all iter;
1243 struct bio_vec *bvec;
1246 up(&io->op.c->io_in_flight);
1249 set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
1251 bio_for_each_segment_all(bvec, bio, iter) {
1252 struct bch_page_state *s;
1254 SetPageError(bvec->bv_page);
1255 mapping_set_error(bvec->bv_page->mapping, -EIO);
1257 s = __bch2_page_state(bvec->bv_page);
1258 spin_lock(&s->lock);
1259 for (i = 0; i < PAGE_SECTORS; i++)
1260 s->s[i].nr_replicas = 0;
1261 spin_unlock(&s->lock);
1265 if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
1266 bio_for_each_segment_all(bvec, bio, iter) {
1267 struct bch_page_state *s;
1269 s = __bch2_page_state(bvec->bv_page);
1270 spin_lock(&s->lock);
1271 for (i = 0; i < PAGE_SECTORS; i++)
1272 s->s[i].nr_replicas = 0;
1273 spin_unlock(&s->lock);
1278 * racing with fallocate can cause us to add fewer sectors than
1279 * expected - but we shouldn't add more sectors than expected:
1281 WARN_ON(io->op.i_sectors_delta > 0);
1284 * (error (due to going RO) halfway through a page can screw that up
1287 BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
1291 * PageWriteback is effectively our ref on the inode - fixup i_blocks
1292 * before calling end_page_writeback:
1294 i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
1296 bio_for_each_segment_all(bvec, bio, iter) {
1297 struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
1299 if (atomic_dec_and_test(&s->write_count))
1300 end_page_writeback(bvec->bv_page);
1303 closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1306 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1308 struct bch_writepage_io *io = w->io;
1310 down(&io->op.c->io_in_flight);
1313 closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
1314 continue_at(&io->cl, bch2_writepage_io_done, NULL);
1318 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1319 * possible, else allocating a new one:
1321 static void bch2_writepage_io_alloc(struct bch_fs *c,
1322 struct writeback_control *wbc,
1323 struct bch_writepage_state *w,
1324 struct bch_inode_info *inode,
1326 unsigned nr_replicas)
1328 struct bch_write_op *op;
1330 w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
1331 &c->writepage_bioset),
1332 struct bch_writepage_io, op.wbio.bio);
1334 closure_init(&w->io->cl, NULL);
1335 w->io->inode = inode;
1338 bch2_write_op_init(op, c, w->opts);
1339 op->target = w->opts.foreground_target;
1340 op->nr_replicas = nr_replicas;
1341 op->res.nr_replicas = nr_replicas;
1342 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1343 op->subvol = inode->ei_subvol;
1344 op->pos = POS(inode->v.i_ino, sector);
1345 op->wbio.bio.bi_iter.bi_sector = sector;
1346 op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
1349 static int __bch2_writepage(struct page *page,
1350 struct writeback_control *wbc,
1353 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1354 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1355 struct bch_writepage_state *w = data;
1356 struct bch_page_state *s, orig;
1357 unsigned i, offset, nr_replicas_this_write = U32_MAX;
1358 loff_t i_size = i_size_read(&inode->v);
1359 pgoff_t end_index = i_size >> PAGE_SHIFT;
1362 EBUG_ON(!PageUptodate(page));
1364 /* Is the page fully inside i_size? */
1365 if (page->index < end_index)
1368 /* Is the page fully outside i_size? (truncate in progress) */
1369 offset = i_size & (PAGE_SIZE - 1);
1370 if (page->index > end_index || !offset) {
1376 * The page straddles i_size. It must be zeroed out on each and every
1377 * writepage invocation because it may be mmapped. "A file is mapped
1378 * in multiples of the page size. For a file that is not a multiple of
1379 * the page size, the remaining memory is zeroed when mapped, and
1380 * writes to that region are not written out to the file."
1382 zero_user_segment(page, offset, PAGE_SIZE);
1384 s = bch2_page_state_create(page, __GFP_NOFAIL);
1387 * Things get really hairy with errors during writeback:
1389 ret = bch2_get_page_disk_reservation(c, inode, page, false);
1392 /* Before unlocking the page, get copy of reservations: */
1393 spin_lock(&s->lock);
1395 spin_unlock(&s->lock);
1397 for (i = 0; i < PAGE_SECTORS; i++) {
1398 if (s->s[i].state < SECTOR_DIRTY)
1401 nr_replicas_this_write =
1402 min_t(unsigned, nr_replicas_this_write,
1403 s->s[i].nr_replicas +
1404 s->s[i].replicas_reserved);
1407 for (i = 0; i < PAGE_SECTORS; i++) {
1408 if (s->s[i].state < SECTOR_DIRTY)
1411 s->s[i].nr_replicas = w->opts.compression
1412 ? 0 : nr_replicas_this_write;
1414 s->s[i].replicas_reserved = 0;
1415 s->s[i].state = SECTOR_ALLOCATED;
1418 BUG_ON(atomic_read(&s->write_count));
1419 atomic_set(&s->write_count, 1);
1421 BUG_ON(PageWriteback(page));
1422 set_page_writeback(page);
1428 unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
1431 while (offset < PAGE_SECTORS &&
1432 orig.s[offset].state < SECTOR_DIRTY)
1435 if (offset == PAGE_SECTORS)
1438 while (offset + sectors < PAGE_SECTORS &&
1439 orig.s[offset + sectors].state >= SECTOR_DIRTY) {
1440 reserved_sectors += orig.s[offset + sectors].replicas_reserved;
1441 dirty_sectors += orig.s[offset + sectors].state == SECTOR_DIRTY;
1446 sector = ((u64) page->index << PAGE_SECTORS_SHIFT) + offset;
1449 (w->io->op.res.nr_replicas != nr_replicas_this_write ||
1450 bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
1451 w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
1452 (BIO_MAX_VECS * PAGE_SIZE) ||
1453 bio_end_sector(&w->io->op.wbio.bio) != sector))
1454 bch2_writepage_do_io(w);
1457 bch2_writepage_io_alloc(c, wbc, w, inode, sector,
1458 nr_replicas_this_write);
1460 atomic_inc(&s->write_count);
1462 BUG_ON(inode != w->io->inode);
1463 BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
1464 sectors << 9, offset << 9));
1466 /* Check for writing past i_size: */
1467 WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
1468 round_up(i_size, block_bytes(c)));
1470 w->io->op.res.sectors += reserved_sectors;
1471 w->io->op.i_sectors_delta -= dirty_sectors;
1472 w->io->op.new_i_size = i_size;
1477 if (atomic_dec_and_test(&s->write_count))
1478 end_page_writeback(page);
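/*
 * The loop above walks the page in runs of contiguous sectors with
 * state >= SECTOR_DIRTY, skipping clean runs; each dirty run is appended to
 * the current bch_writepage_io, or to a fresh one when the replica count,
 * bio size limit or end sector no longer line up - so one page may feed
 * several write ops, and one write op may span many pages.
 */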
1483 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1485 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1486 struct bch_writepage_state w =
1487 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1488 struct blk_plug plug;
1491 blk_start_plug(&plug);
1492 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1494 bch2_writepage_do_io(&w);
1495 blk_finish_plug(&plug);
1499 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1501 struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1502 struct bch_writepage_state w =
1503 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1506 ret = __bch2_writepage(page, wbc, &w);
1508 bch2_writepage_do_io(&w);
1513 /* buffered writes: */
1515 int bch2_write_begin(struct file *file, struct address_space *mapping,
1516 loff_t pos, unsigned len, unsigned flags,
1517 struct page **pagep, void **fsdata)
1519 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1520 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1521 struct bch2_page_reservation *res;
1522 pgoff_t index = pos >> PAGE_SHIFT;
1523 unsigned offset = pos & (PAGE_SIZE - 1);
1527 res = kmalloc(sizeof(*res), GFP_KERNEL);
1531 bch2_page_reservation_init(c, inode, res);
1534 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1536 page = grab_cache_page_write_begin(mapping, index, flags);
1540 if (PageUptodate(page))
1543 /* If we're writing the entire page, we don't need to read it in first: */
1544 if (len == PAGE_SIZE)
1547 if (!offset && pos + len >= inode->v.i_size) {
1548 zero_user_segment(page, len, PAGE_SIZE);
1549 flush_dcache_page(page);
1553 if (index > inode->v.i_size >> PAGE_SHIFT) {
1554 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1555 flush_dcache_page(page);
1559 ret = bch2_read_single_page(page, mapping);
1563 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1564 ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
1569 ret = bch2_page_reservation_get(c, inode, page, res,
1572 if (!PageUptodate(page)) {
1574 * If the page hasn't been read in, we won't know if we
1575 * actually need a reservation - we don't actually need
1576 * to read here, we just need to check if the page is
1577 * fully backed by uncompressed data:
1592 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1598 int bch2_write_end(struct file *file, struct address_space *mapping,
1599 loff_t pos, unsigned len, unsigned copied,
1600 struct page *page, void *fsdata)
1602 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1603 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1604 struct bch2_page_reservation *res = fsdata;
1605 unsigned offset = pos & (PAGE_SIZE - 1);
1607 lockdep_assert_held(&inode->v.i_rwsem);
1609 if (unlikely(copied < len && !PageUptodate(page))) {
1611 * The page needs to be read in, but that would destroy
1612 * our partial write - simplest thing is to just force
1613 * userspace to redo the write:
1615 zero_user(page, 0, PAGE_SIZE);
1616 flush_dcache_page(page);
1620 spin_lock(&inode->v.i_lock);
1621 if (pos + copied > inode->v.i_size)
1622 i_size_write(&inode->v, pos + copied);
1623 spin_unlock(&inode->v.i_lock);
1626 if (!PageUptodate(page))
1627 SetPageUptodate(page);
1629 bch2_set_page_dirty(c, inode, page, res, offset, copied);
1631 inode->ei_last_dirtied = (unsigned long) current;
1636 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1638 bch2_page_reservation_put(c, inode, res);
1644 #define WRITE_BATCH_PAGES 32
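/* i.e. buffered writes are copied in batches of up to 32 pages (128K with 4K pages) */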
1646 static int __bch2_buffered_write(struct bch_inode_info *inode,
1647 struct address_space *mapping,
1648 struct iov_iter *iter,
1649 loff_t pos, unsigned len)
1651 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1652 struct page *pages[WRITE_BATCH_PAGES];
1653 struct bch2_page_reservation res;
1654 unsigned long index = pos >> PAGE_SHIFT;
1655 unsigned offset = pos & (PAGE_SIZE - 1);
1656 unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1657 unsigned i, reserved = 0, set_dirty = 0;
1658 unsigned copied = 0, nr_pages_copied = 0;
1662 BUG_ON(nr_pages > ARRAY_SIZE(pages));
1664 bch2_page_reservation_init(c, inode, &res);
1666 for (i = 0; i < nr_pages; i++) {
1667 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1674 len = min_t(unsigned, len,
1675 nr_pages * PAGE_SIZE - offset);
1680 if (offset && !PageUptodate(pages[0])) {
1681 ret = bch2_read_single_page(pages[0], mapping);
1686 if ((pos + len) & (PAGE_SIZE - 1) &&
1687 !PageUptodate(pages[nr_pages - 1])) {
1688 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1689 zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1691 ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1697 while (reserved < len) {
1698 unsigned i = (offset + reserved) >> PAGE_SHIFT;
1699 struct page *page = pages[i];
1700 unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
1701 unsigned pg_len = min_t(unsigned, len - reserved,
1702 PAGE_SIZE - pg_offset);
1704 if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
1705 ret = bch2_page_state_set(c, inode_inum(inode),
1706 pages + i, nr_pages - i);
1711 ret = bch2_page_reservation_get(c, inode, page, &res,
1712 pg_offset, pg_len, true);
1719 if (mapping_writably_mapped(mapping))
1720 for (i = 0; i < nr_pages; i++)
1721 flush_dcache_page(pages[i]);
1723 while (copied < len) {
1724 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1725 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1726 unsigned pg_len = min_t(unsigned, len - copied,
1727 PAGE_SIZE - pg_offset);
1728 unsigned pg_copied = copy_page_from_iter_atomic(page,
1729 pg_offset, pg_len, iter);
1734 if (!PageUptodate(page) &&
1735 pg_copied != PAGE_SIZE &&
1736 pos + copied + pg_copied < inode->v.i_size) {
1737 zero_user(page, 0, PAGE_SIZE);
1741 flush_dcache_page(page);
1742 copied += pg_copied;
1744 if (pg_copied != pg_len)
1751 spin_lock(&inode->v.i_lock);
1752 if (pos + copied > inode->v.i_size)
1753 i_size_write(&inode->v, pos + copied);
1754 spin_unlock(&inode->v.i_lock);
1756 while (set_dirty < copied) {
1757 struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
1758 unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
1759 unsigned pg_len = min_t(unsigned, copied - set_dirty,
1760 PAGE_SIZE - pg_offset);
1762 if (!PageUptodate(page))
1763 SetPageUptodate(page);
1765 bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
1769 set_dirty += pg_len;
1772 nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1773 inode->ei_last_dirtied = (unsigned long) current;
1775 for (i = nr_pages_copied; i < nr_pages; i++) {
1776 unlock_page(pages[i]);
1780 bch2_page_reservation_put(c, inode, &res);
1782 return copied ?: ret;
1785 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1787 struct file *file = iocb->ki_filp;
1788 struct address_space *mapping = file->f_mapping;
1789 struct bch_inode_info *inode = file_bch_inode(file);
1790 loff_t pos = iocb->ki_pos;
1791 ssize_t written = 0;
1794 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
1797 unsigned offset = pos & (PAGE_SIZE - 1);
1798 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1799 PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1802 * Bring in the user page that we will copy from _first_.
1803 * Otherwise there's a nasty deadlock on copying from the
1804 * same page as we're writing to, without it being marked
1807 * Not only is this an optimisation, but it is also required
1808 * to check that the address is actually valid, when atomic
1809 * usercopies are used, below.
1811 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1812 bytes = min_t(unsigned long, iov_iter_count(iter),
1813 PAGE_SIZE - offset);
1815 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1821 if (unlikely(fatal_signal_pending(current))) {
1826 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1827 if (unlikely(ret < 0))
1832 if (unlikely(ret == 0)) {
1834 * If we were unable to copy any data at all, we must
1835 * fall back to a single segment length write.
1837 * If we didn't fall back here, we could livelock
1838 * because not all segments in the iov can be copied at
1839 * once without a pagefault.
1841 bytes = min_t(unsigned long, PAGE_SIZE - offset,
1842 iov_iter_single_seg_count(iter));
1849 balance_dirty_pages_ratelimited(mapping);
1850 } while (iov_iter_count(iter));
1852 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
1854 return written ? written : ret;
1857 /* O_DIRECT reads */
1859 static void bio_check_or_release(struct bio *bio, bool check_dirty)
1862 bio_check_pages_dirty(bio);
1864 bio_release_pages(bio, false);
1869 static void bch2_dio_read_complete(struct closure *cl)
1871 struct dio_read *dio = container_of(cl, struct dio_read, cl);
1873 dio->req->ki_complete(dio->req, dio->ret, 0);
1874 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1877 static void bch2_direct_IO_read_endio(struct bio *bio)
1879 struct dio_read *dio = bio->bi_private;
1882 dio->ret = blk_status_to_errno(bio->bi_status);
1884 closure_put(&dio->cl);
1887 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1889 struct dio_read *dio = bio->bi_private;
1890 bool should_dirty = dio->should_dirty;
1892 bch2_direct_IO_read_endio(bio);
1893 bio_check_or_release(bio, should_dirty);
1896 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1898 struct file *file = req->ki_filp;
1899 struct bch_inode_info *inode = file_bch_inode(file);
1900 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1901 struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
1902 struct dio_read *dio;
1904 loff_t offset = req->ki_pos;
1905 bool sync = is_sync_kiocb(req);
1909 if ((offset|iter->count) & (block_bytes(c) - 1))
1912 ret = min_t(loff_t, iter->count,
1913 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1918 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1919 iter->count -= shorten;
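/*
 * e.g. a 16K read with 9K left before EOF and 4K blocks: ret is clamped to
 * 9K, the iter is shortened by 4K so we issue a block-aligned 12K of reads,
 * and the unused 4K is given back to the iter before returning.
 */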
1921 bio = bio_alloc_bioset(GFP_KERNEL,
1922 iov_iter_npages(iter, BIO_MAX_VECS),
1923 &c->dio_read_bioset);
1925 bio->bi_end_io = bch2_direct_IO_read_endio;
1927 dio = container_of(bio, struct dio_read, rbio.bio);
1928 closure_init(&dio->cl, NULL);
1931 * this is a _really_ horrible hack just to avoid an atomic sub at the
1935 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1936 atomic_set(&dio->cl.remaining,
1937 CLOSURE_REMAINING_INITIALIZER -
1939 CLOSURE_DESTRUCTOR);
1941 atomic_set(&dio->cl.remaining,
1942 CLOSURE_REMAINING_INITIALIZER + 1);
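/*
 * i.e. async: bias the closure so the final closure_put() from the last
 * completing bio acts as the destructor and calls bch2_dio_read_complete()
 * directly; sync: hold one extra ref and closure_sync() on it below.
 */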
1948 * This is one of the sketchier things I've encountered: we have to skip
1949 * the dirtying of requests that are internal to the kernel (i.e. from
1950 * loopback), because we'll deadlock on page_lock.
1952 dio->should_dirty = iter_is_iovec(iter);
1955 while (iter->count) {
1956 bio = bio_alloc_bioset(GFP_KERNEL,
1957 iov_iter_npages(iter, BIO_MAX_VECS),
1959 bio->bi_end_io = bch2_direct_IO_read_split_endio;
1961 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1962 bio->bi_iter.bi_sector = offset >> 9;
1963 bio->bi_private = dio;
1965 ret = bio_iov_iter_get_pages(bio, iter);
1967 /* XXX: fault inject this path */
1968 bio->bi_status = BLK_STS_RESOURCE;
1973 offset += bio->bi_iter.bi_size;
1975 if (dio->should_dirty)
1976 bio_set_pages_dirty(bio);
1979 closure_get(&dio->cl);
1981 bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
1984 iter->count += shorten;
1987 closure_sync(&dio->cl);
1988 closure_debug_destroy(&dio->cl);
1990 bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
1993 return -EIOCBQUEUED;
1997 ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1999 struct file *file = iocb->ki_filp;
2000 struct bch_inode_info *inode = file_bch_inode(file);
2001 struct address_space *mapping = file->f_mapping;
2002 size_t count = iov_iter_count(iter);
2006 return 0; /* skip atime */
2008 if (iocb->ki_flags & IOCB_DIRECT) {
2009 struct blk_plug plug;
2011 ret = filemap_write_and_wait_range(mapping,
2013 iocb->ki_pos + count - 1);
2017 file_accessed(file);
2019 blk_start_plug(&plug);
2020 ret = bch2_direct_IO_read(iocb, iter);
2021 blk_finish_plug(&plug);
2024 iocb->ki_pos += ret;
2026 bch2_pagecache_add_get(&inode->ei_pagecache_lock);
2027 ret = generic_file_read_iter(iocb, iter);
2028 bch2_pagecache_add_put(&inode->ei_pagecache_lock);
2034 /* O_DIRECT writes */
2036 static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
2037 u64 offset, u64 size,
2038 unsigned nr_replicas, bool compressed)
2040 struct btree_trans trans;
2041 struct btree_iter iter;
2043 u64 end = offset + size;
2048 bch2_trans_init(&trans, c, 0, 0);
2050 bch2_trans_begin(&trans);
2052 err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
2056 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
2057 SPOS(inum.inum, offset, snapshot),
2058 BTREE_ITER_SLOTS, k, err) {
2059 if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
2062 if (k.k->p.snapshot != snapshot ||
2063 nr_replicas > bch2_bkey_replicas(c, k) ||
2064 (!compressed && bch2_bkey_sectors_compressed(k))) {
2070 offset = iter.pos.offset;
2071 bch2_trans_iter_exit(&trans, &iter);
2075 bch2_trans_exit(&trans);
2077 return err ? false : ret;
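/*
 * Used by the dio write path when it couldn't get a disk reservation: the
 * write may still proceed iff every extent in the range already has
 * >= nr_replicas uncompressed replicas in this snapshot - the new data then
 * only replaces existing allocations, so it can't increase disk usage.
 */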
2080 static void bch2_dio_write_loop_async(struct bch_write_op *);
2082 static long bch2_dio_write_loop(struct dio_write *dio)
2084 bool kthread = (current->flags & PF_KTHREAD) != 0;
2085 struct kiocb *req = dio->req;
2086 struct address_space *mapping = req->ki_filp->f_mapping;
2087 struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
2088 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2089 struct bio *bio = &dio->op.wbio.bio;
2090 struct bvec_iter_all iter;
2092 unsigned unaligned, iter_count;
2093 bool sync = dio->sync, dropped_locks;
2099 down(&c->io_in_flight);
2102 iter_count = dio->iter.count;
2105 kthread_use_mm(dio->mm);
2106 BUG_ON(current->faults_disabled_mapping);
2107 current->faults_disabled_mapping = mapping;
2109 ret = bio_iov_iter_get_pages(bio, &dio->iter);
2111 dropped_locks = fdm_dropped_locks();
2113 current->faults_disabled_mapping = NULL;
2115 kthread_unuse_mm(dio->mm);
2118 * If the fault handler returned an error but also signalled
2119 * that it dropped & retook ei_pagecache_lock, we just need to
2120 * re-shoot down the page cache and retry:
2122 if (dropped_locks && ret)
2125 if (unlikely(ret < 0))
2128 if (unlikely(dropped_locks)) {
2129 ret = write_invalidate_inode_pages_range(mapping,
2131 req->ki_pos + iter_count - 1);
2135 if (!bio->bi_iter.bi_size)
2139 unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
2140 bio->bi_iter.bi_size -= unaligned;
2141 iov_iter_revert(&dio->iter, unaligned);
2143 if (!bio->bi_iter.bi_size) {
2145 * bio_iov_iter_get_pages was only able to get <
2146 * blocksize worth of pages:
2152 bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
2153 dio->op.end_io = bch2_dio_write_loop_async;
2154 dio->op.target = dio->op.opts.foreground_target;
2155 dio->op.write_point = writepoint_hashed((unsigned long) current);
2156 dio->op.nr_replicas = dio->op.opts.data_replicas;
2157 dio->op.subvol = inode->ei_subvol;
2158 dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
2160 if ((req->ki_flags & IOCB_DSYNC) &&
2161 !c->opts.journal_flush_disabled)
2162 dio->op.flags |= BCH_WRITE_FLUSH;
2163 dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
2165 ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
2166 dio->op.opts.data_replicas, 0);
2167 if (unlikely(ret) &&
2168 !bch2_check_range_allocated(c, inode_inum(inode),
2169 dio->op.pos.offset, bio_sectors(bio),
2170 dio->op.opts.data_replicas,
2171 dio->op.opts.compression != 0))
2174 task_io_account_write(bio->bi_iter.bi_size);
2176 if (!dio->sync && !dio->loop && dio->iter.count) {
2177 struct iovec *iov = dio->inline_vecs;
2179 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
2180 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
2182 if (unlikely(!iov)) {
2183 dio->sync = sync = true;
2187 dio->free_iov = true;
2190 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
2191 dio->iter.iov = iov;
2195 closure_call(&dio->op.cl, bch2_write, NULL, NULL);
2198 wait_for_completion(&dio->done);
2200 return -EIOCBQUEUED;
2202 i_sectors_acct(c, inode, &dio->quota_res,
2203 dio->op.i_sectors_delta);
2204 req->ki_pos += (u64) dio->op.written << 9;
2205 dio->written += dio->op.written;
2207 spin_lock(&inode->v.i_lock);
2208 if (req->ki_pos > inode->v.i_size)
2209 i_size_write(&inode->v, req->ki_pos);
2210 spin_unlock(&inode->v.i_lock);
2212 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2213 bio_for_each_segment_all(bv, bio, iter)
2214 put_page(bv->bv_page);
2217 if (dio->op.error) {
2218 set_bit(EI_INODE_ERROR, &inode->ei_flags);
2222 if (!dio->iter.count)
2226 reinit_completion(&dio->done);
2229 ret = dio->op.error ?: ((long) dio->written << 9);
2231 up(&c->io_in_flight);
2232 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2233 bch2_quota_reservation_put(c, inode, &dio->quota_res);
2236 kfree(dio->iter.iov);
2238 if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
2239 bio_for_each_segment_all(bv, bio, iter)
2240 put_page(bv->bv_page);
2243 /* inode->i_dio_count is our ref on inode and thus bch_fs */
2244 inode_dio_end(&inode->v);
2247 req->ki_complete(req, ret, 0);
2253 static void bch2_dio_write_loop_async(struct bch_write_op *op)
2255 struct dio_write *dio = container_of(op, struct dio_write, op);
2258 complete(&dio->done);
2260 bch2_dio_write_loop(dio);
2264 ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
2266 struct file *file = req->ki_filp;
2267 struct address_space *mapping = file->f_mapping;
2268 struct bch_inode_info *inode = file_bch_inode(file);
2269 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2270 struct dio_write *dio;
2272 bool locked = true, extending;
2276 prefetch((void *) &c->opts + 64);
2277 prefetch(&inode->ei_inode);
2278 prefetch((void *) &inode->ei_inode + 64);
2280 inode_lock(&inode->v);
2282 ret = generic_write_checks(req, iter);
2283 if (unlikely(ret <= 0))
2286 ret = file_remove_privs(file);
2290 ret = file_update_time(file);
2294 if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
2297 inode_dio_begin(&inode->v);
2298 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2300 extending = req->ki_pos + iter->count > inode->v.i_size;
2302 inode_unlock(&inode->v);
2306 bio = bio_alloc_bioset(GFP_KERNEL,
2307 iov_iter_is_bvec(iter)
2309 : iov_iter_npages(iter, BIO_MAX_VECS),
2310 &c->dio_write_bioset);
2311 dio = container_of(bio, struct dio_write, op.wbio.bio);
2312 init_completion(&dio->done);
2314 dio->mm = current->mm;
2316 dio->sync = is_sync_kiocb(req) || extending;
2317 dio->free_iov = false;
2318 dio->quota_res.sectors = 0;
2322 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
2323 iter->count >> 9, true);
2327 ret = write_invalidate_inode_pages_range(mapping,
2329 req->ki_pos + iter->count - 1);
2333 ret = bch2_dio_write_loop(dio);
2336 inode_unlock(&inode->v);
2339 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2340 bch2_quota_reservation_put(c, inode, &dio->quota_res);
2342 inode_dio_end(&inode->v);
2346 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2348 struct file *file = iocb->ki_filp;
2349 struct bch_inode_info *inode = file_bch_inode(file);
2352 if (iocb->ki_flags & IOCB_DIRECT)
2353 return bch2_direct_write(iocb, from);
2355 /* We can write back this queue in page reclaim */
2356 current->backing_dev_info = inode_to_bdi(&inode->v);
2357 inode_lock(&inode->v);
2359 ret = generic_write_checks(iocb, from);
2363 ret = file_remove_privs(file);
2367 ret = file_update_time(file);
2371 ret = bch2_buffered_write(iocb, from);
2372 if (likely(ret > 0))
2373 iocb->ki_pos += ret;
2375 inode_unlock(&inode->v);
2376 current->backing_dev_info = NULL;
2379 ret = generic_write_sync(iocb, ret);
2387 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
2388 * insert trigger: look up the btree inode instead
2390 static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
2392 struct bch_inode_unpacked inode;
2395 if (c->opts.journal_flush_disabled)
2398 ret = bch2_inode_find_by_inum(c, inum, &inode);
2402 return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
2405 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2407 struct bch_inode_info *inode = file_bch_inode(file);
2408 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2409 int ret, ret2, ret3;
2411 ret = file_write_and_wait_range(file, start, end);
2412 ret2 = sync_inode_metadata(&inode->v, 1);
2413 ret3 = bch2_flush_inode(c, inode_inum(inode));
2415 return ret ?: ret2 ?: ret3;
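/*
 * i.e. fsync is three steps - flush and wait on dirty pages, write back VFS
 * inode metadata, then flush the journal up to the inode's last btree
 * update - and the first error encountered is what gets returned.
 */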
2420 static inline int range_has_data(struct bch_fs *c, u32 subvol,
2424 struct btree_trans trans;
2425 struct btree_iter iter;
2429 bch2_trans_init(&trans, c, 0, 0);
2431 bch2_trans_begin(&trans);
2433 ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot);
2437 for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
2438 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2441 if (bkey_extent_is_data(k.k)) {
2447 bch2_trans_iter_exit(&trans, &iter);
2452 bch2_trans_exit(&trans);
2456 static int __bch2_truncate_page(struct bch_inode_info *inode,
2457 pgoff_t index, loff_t start, loff_t end)
2459 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2460 struct address_space *mapping = inode->v.i_mapping;
2461 struct bch_page_state *s;
2462 unsigned start_offset = start & (PAGE_SIZE - 1);
2463 unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2466 s64 i_sectors_delta = 0;
2469 /* Page boundary? Nothing to do */
2470 if (!((index == start >> PAGE_SHIFT && start_offset) ||
2471 (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2475 if (index << PAGE_SHIFT >= inode->v.i_size)
2478 page = find_lock_page(mapping, index);
2481 * XXX: we're doing two index lookups when we end up reading the
2484 ret = range_has_data(c, inode->ei_subvol,
2485 POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
2486 POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
2490 page = find_or_create_page(mapping, index, GFP_KERNEL);
2491 if (unlikely(!page)) {
2497 s = bch2_page_state_create(page, 0);
2503 if (!PageUptodate(page)) {
2504 ret = bch2_read_single_page(page, mapping);
2509 if (index != start >> PAGE_SHIFT)
2511 if (index != end >> PAGE_SHIFT)
2512 end_offset = PAGE_SIZE;
2514 for (i = round_up(start_offset, block_bytes(c)) >> 9;
2515 i < round_down(end_offset, block_bytes(c)) >> 9;
2517 s->s[i].nr_replicas = 0;
2518 if (s->s[i].state == SECTOR_DIRTY)
2519 i_sectors_delta--;
2520 s->s[i].state = SECTOR_UNALLOCATED;
2523 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2526 * Caller needs to know whether this page will be written out by
2527 * writeback - doing an i_size update if necessary - or whether it will
2528 * be responsible for the i_size update:
2530 ret = s->s[(min_t(u64, inode->v.i_size - (index << PAGE_SHIFT),
2531 PAGE_SIZE) - 1) >> 9].state >= SECTOR_DIRTY;
2533 zero_user_segment(page, start_offset, end_offset);
2536 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2538 * XXX: because we aren't currently tracking whether the page has actual
2539 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
2541 BUG_ON(bch2_get_page_disk_reservation(c, inode, page, false));
2544 * This removes any writeable userspace mappings; we need to force
2545 * .page_mkwrite to be called again before any mmapped writes, to
2546 * redirty the full page:
2549 __set_page_dirty_nobuffers(page);
2557 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2559 return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2560 from, round_up(from, PAGE_SIZE));
2563 static int bch2_truncate_pages(struct bch_inode_info *inode,
2564 loff_t start, loff_t end)
2566 int ret = __bch2_truncate_page(inode, start >> PAGE_SHIFT,
2570 start >> PAGE_SHIFT != end >> PAGE_SHIFT)
2571 ret = __bch2_truncate_page(inode,
2577 static int bch2_extend(struct user_namespace *mnt_userns,
2578 struct bch_inode_info *inode,
2579 struct bch_inode_unpacked *inode_u,
2580 struct iattr *iattr)
2582 struct address_space *mapping = inode->v.i_mapping;
2588 * this has to be done _before_ extending i_size:
2590 ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
2594 truncate_setsize(&inode->v, iattr->ia_size);
2596 return bch2_setattr_nonsize(mnt_userns, inode, iattr);
2599 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2600 struct bch_inode_unpacked *bi,
2603 bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2607 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2608 struct bch_inode_unpacked *bi, void *p)
2610 u64 *new_i_size = p;
2612 bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2613 bi->bi_size = *new_i_size;
2617 int bch2_truncate(struct user_namespace *mnt_userns,
2618 struct bch_inode_info *inode, struct iattr *iattr)
2620 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2621 struct address_space *mapping = inode->v.i_mapping;
2622 struct bch_inode_unpacked inode_u;
2623 u64 new_i_size = iattr->ia_size;
2624 s64 i_sectors_delta = 0;
2628 * If the truncate call will change the size of the file, the
2629 * cmtimes should be updated. If the size will not change, we
2630 * do not need to update the cmtimes.
2632 if (iattr->ia_size != inode->v.i_size) {
2633 if (!(iattr->ia_valid & ATTR_MTIME))
2634 ktime_get_coarse_real_ts64(&iattr->ia_mtime);
2635 if (!(iattr->ia_valid & ATTR_CTIME))
2636 ktime_get_coarse_real_ts64(&iattr->ia_ctime);
2637 iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
2640 inode_dio_wait(&inode->v);
2641 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2643 ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
2648 * check this before next assertion; on filesystem error our normal
2649 * invariants are a bit broken (truncate has to truncate the page cache
2650 * before the inode).
2652 ret = bch2_journal_error(&c->journal);
2656 WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
2657 inode->v.i_size < inode_u.bi_size);
2659 if (iattr->ia_size > inode->v.i_size) {
2660 ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
2664 iattr->ia_valid &= ~ATTR_SIZE;
2666 ret = bch2_truncate_page(inode, iattr->ia_size);
2667 if (unlikely(ret < 0))
2671 * When extending, we're going to write the new i_size to disk
2672 * immediately so we need to flush anything above the current on disk
2675 * Also, when extending we need to flush the page that i_size currently
2676 * straddles - if it's mapped to userspace, we need to ensure that
2677 * userspace has to redirty it and call .mkwrite -> set_page_dirty
2678 * again to allocate the part of the page that was extended.
2680 if (iattr->ia_size > inode_u.bi_size)
2681 ret = filemap_write_and_wait_range(mapping,
2683 iattr->ia_size - 1);
2684 else if (iattr->ia_size & (PAGE_SIZE - 1))
2685 ret = filemap_write_and_wait_range(mapping,
2686 round_down(iattr->ia_size, PAGE_SIZE),
2687 iattr->ia_size - 1);
2691 mutex_lock(&inode->ei_update_lock);
2692 ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2694 mutex_unlock(&inode->ei_update_lock);
2699 truncate_setsize(&inode->v, iattr->ia_size);
2701 ret = bch2_fpunch(c, inode_inum(inode),
2702 round_up(iattr->ia_size, block_bytes(c)) >> 9,
2703 U64_MAX, &i_sectors_delta);
2704 i_sectors_acct(c, inode, NULL, i_sectors_delta);
2706 WARN_ON(!inode->v.i_size && inode->v.i_blocks &&
2707 !bch2_journal_error(&c->journal));
2712 mutex_lock(&inode->ei_update_lock);
2713 ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
2714 mutex_unlock(&inode->ei_update_lock);
2716 ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
2718 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
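/*
 * Ordering note for the function above: the page cache is truncated
 * (bch2_truncate_page() + truncate_setsize()) before the extents are
 * deleted with bch2_fpunch(), matching the invariant asserted near the
 * top - on a healthy filesystem, page cache truncation happens first.
 */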
static int inode_update_times_fn(struct bch_inode_info *inode,
                 struct bch_inode_unpacked *bi, void *p)
{
    struct bch_fs *c = inode->v.i_sb->s_fs_info;

    bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
    return 0;
}
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    u64 end = offset + len;
    u64 block_start = round_up(offset, block_bytes(c));
    u64 block_end = round_down(end, block_bytes(c));
    bool truncated_last_page;
    int ret = 0;

    ret = bch2_truncate_pages(inode, offset, end);
    if (unlikely(ret < 0))
        goto err;

    truncated_last_page = ret;

    truncate_pagecache_range(&inode->v, offset, end - 1);

    if (block_start < block_end) {
        s64 i_sectors_delta = 0;

        ret = bch2_fpunch(c, inode_inum(inode),
                  block_start >> 9, block_end >> 9,
                  &i_sectors_delta);
        i_sectors_acct(c, inode, NULL, i_sectors_delta);
    }

    mutex_lock(&inode->ei_update_lock);
    if (end >= inode->v.i_size && !truncated_last_page) {
        ret = bch2_write_inode_size(c, inode, inode->v.i_size,
                        ATTR_MTIME|ATTR_CTIME);
    } else {
        ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
                       ATTR_MTIME|ATTR_CTIME);
    }
    mutex_unlock(&inode->ei_update_lock);
err:
    return ret;
}
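/*
 * Example of the rounding above (assuming a 4096-byte block size):
 * punching offset == 100, len == 5000 gives end == 5100,
 * block_start == round_up(100, 4096) == 4096 and
 * block_end == round_down(5100, 4096) == 4096, so block_start < block_end
 * is false and bch2_fpunch() is skipped - the whole range is handled by
 * page-level zeroing in bch2_truncate_pages().
 */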
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                    loff_t offset, loff_t len,
                    bool insert)
{
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    struct address_space *mapping = inode->v.i_mapping;
    struct bkey_buf copy;
    struct btree_trans trans;
    struct btree_iter src, dst, del;
    loff_t shift, new_size;
    u64 src_start;
    int ret = 0;

    if ((offset | len) & (block_bytes(c) - 1))
        return -EINVAL;

    if (insert) {
        if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
            return -EFBIG;
        if (offset >= inode->v.i_size)
            return -EINVAL;

        src_start = U64_MAX;
        shift = len;
    } else {
        if (offset + len >= inode->v.i_size)
            return -EINVAL;

        src_start = offset + len;
        shift = -len;
    }

    new_size = inode->v.i_size + shift;

    ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
    if (ret)
        return ret;

    if (insert) {
        i_size_write(&inode->v, new_size);
        mutex_lock(&inode->ei_update_lock);
        ret = bch2_write_inode_size(c, inode, new_size,
                        ATTR_MTIME|ATTR_CTIME);
        mutex_unlock(&inode->ei_update_lock);
    } else {
        s64 i_sectors_delta = 0;

        ret = bch2_fpunch(c, inode_inum(inode),
                  offset >> 9, (offset + len) >> 9,
                  &i_sectors_delta);
        i_sectors_acct(c, inode, NULL, i_sectors_delta);

        if (ret)
            return ret;
    }

    bch2_bkey_buf_init(&copy);
    bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
    bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
            POS(inode->v.i_ino, src_start >> 9),
            BTREE_ITER_INTENT);
    bch2_trans_copy_iter(&dst, &src);
    bch2_trans_copy_iter(&del, &src);

    while (ret == 0 || ret == -EINTR) {
        struct disk_reservation disk_res =
            bch2_disk_reservation_init(c, 0);
        struct bkey_i delete;
        struct bkey_s_c k;
        struct bpos next_pos;
        struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
        struct bpos atomic_end;
        unsigned trigger_flags = 0;
        u32 snapshot;

        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans,
                    inode->ei_subvol, &snapshot);
        if (ret)
            continue;

        bch2_btree_iter_set_snapshot(&src, snapshot);
        bch2_btree_iter_set_snapshot(&dst, snapshot);
        bch2_btree_iter_set_snapshot(&del, snapshot);

        bch2_trans_begin(&trans);

        k = insert
            ? bch2_btree_iter_peek_prev(&src)
            : bch2_btree_iter_peek(&src);
        if ((ret = bkey_err(k)))
            continue;

        if (!k.k || k.k->p.inode != inode->v.i_ino)
            break;

        if (insert &&
            bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
            break;
reassemble:
        bch2_bkey_buf_reassemble(&copy, c, k);

        if (insert &&
            bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
            bch2_cut_front(move_pos, copy.k);

        copy.k->k.p.offset += shift >> 9;
        bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));

        ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
        if (ret)
            continue;

        if (bkey_cmp(atomic_end, copy.k->k.p)) {
            if (insert) {
                move_pos = atomic_end;
                move_pos.offset -= shift >> 9;
                goto reassemble;
            } else {
                bch2_cut_back(atomic_end, copy.k);
            }
        }

        bkey_init(&delete.k);
        delete.k.p = copy.k->k.p;
        delete.k.size = copy.k->k.size;
        delete.k.p.offset -= shift >> 9;
        bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));

        next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

        if (copy.k->k.size == k.k->size) {
            /*
             * If we're moving the entire extent, we can skip
             * running triggers:
             */
            trigger_flags |= BTREE_TRIGGER_NORUN;
        } else {
            /* We might end up splitting compressed extents: */
            unsigned nr_ptrs =
                bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

            ret = bch2_disk_reservation_get(c, &disk_res,
                    copy.k->k.size, nr_ptrs,
                    BCH_DISK_RESERVATION_NOFAIL);
            BUG_ON(ret);
        }

        ret = bch2_btree_iter_traverse(&del) ?:
            bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
            bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
            bch2_trans_commit(&trans, &disk_res, NULL,
                      BTREE_INSERT_NOFAIL);
        bch2_disk_reservation_put(c, &disk_res);

        if (!ret)
            bch2_btree_iter_set_pos(&src, next_pos);
    }
    bch2_trans_iter_exit(&trans, &del);
    bch2_trans_iter_exit(&trans, &dst);
    bch2_trans_iter_exit(&trans, &src);
    bch2_trans_exit(&trans);
    bch2_bkey_buf_exit(&copy, c);

    if (ret)
        return ret;

    mutex_lock(&inode->ei_update_lock);
    if (!insert) {
        i_size_write(&inode->v, new_size);
        ret = bch2_write_inode_size(c, inode, new_size,
                        ATTR_MTIME|ATTR_CTIME);
    } else {
        /* We need an inode update to update bi_journal_seq for fsync: */
        ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
                       ATTR_MTIME|ATTR_CTIME);
    }
    mutex_unlock(&inode->ei_update_lock);
    return ret;
}
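/*
 * Shape of the extent walk above: for FALLOC_FL_INSERT_RANGE, src_start ==
 * U64_MAX and shift == +len, so bch2_btree_iter_peek_prev() walks extents
 * from the end of the file backwards and each key is moved right without
 * clobbering keys not yet visited; for FALLOC_FL_COLLAPSE_RANGE the range
 * is first punched, then the walk goes forward from offset + len with
 * shift == -len, moving keys left into the hole.
 */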
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                 u64 start_sector, u64 end_sector)
{
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    struct btree_trans trans;
    struct btree_iter iter;
    struct bpos end_pos = POS(inode->v.i_ino, end_sector);
    unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
    int ret = 0;

    bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

    bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
            POS(inode->v.i_ino, start_sector),
            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

    while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
        s64 i_sectors_delta = 0;
        struct disk_reservation disk_res = { 0 };
        struct quota_res quota_res = { 0 };
        struct bkey_i_reservation reservation;
        struct bkey_s_c k;
        unsigned sectors;
        u32 snapshot;

        bch2_trans_begin(&trans);

        ret = bch2_subvolume_get_snapshot(&trans,
                    inode->ei_subvol, &snapshot);
        if (ret)
            goto bkey_err;

        bch2_btree_iter_set_snapshot(&iter, snapshot);

        k = bch2_btree_iter_peek_slot(&iter);
        if ((ret = bkey_err(k)))
            goto bkey_err;

        /* already reserved */
        if (k.k->type == KEY_TYPE_reservation &&
            bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
            bch2_btree_iter_advance(&iter);
            continue;
        }

        if (bkey_extent_is_data(k.k) &&
            !(mode & FALLOC_FL_ZERO_RANGE)) {
            bch2_btree_iter_advance(&iter);
            continue;
        }

        bkey_reservation_init(&reservation.k_i);
        reservation.k.type = KEY_TYPE_reservation;
        reservation.k.p = k.k->p;
        reservation.k.size = k.k->size;

        bch2_cut_front(iter.pos, &reservation.k_i);
        bch2_cut_back(end_pos, &reservation.k_i);

        sectors = reservation.k.size;
        reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);

        if (!bkey_extent_is_allocation(k.k)) {
            ret = bch2_quota_reservation_add(c, inode,
                    &quota_res, sectors, true);
            if (unlikely(ret))
                goto bkey_err;
        }

        if (reservation.v.nr_replicas < replicas ||
            bch2_bkey_sectors_compressed(k)) {
            ret = bch2_disk_reservation_get(c, &disk_res, sectors,
                            replicas, 0);
            if (unlikely(ret))
                goto bkey_err;

            reservation.v.nr_replicas = disk_res.nr_replicas;
        }

        ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
                     &reservation.k_i, &disk_res, NULL,
                     0, &i_sectors_delta, true);
        if (ret)
            goto bkey_err;
        i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
        bch2_quota_reservation_put(c, inode, &quota_res);
        bch2_disk_reservation_put(c, &disk_res);
        if (ret == -EINTR)
            ret = 0;
    }

    bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
    mark_pagecache_reserved(inode, start_sector, iter.pos.offset);

    if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
        struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;

        bch2_fpunch_at(&trans, &iter, inode_inum(inode),
                   end_sector, &i_sectors_delta);
        i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
        bch2_quota_reservation_put(c, inode, &quota_res);
    }

    bch2_trans_iter_exit(&trans, &iter);
    bch2_trans_exit(&trans);
    return ret;
}
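/*
 * The KEY_TYPE_reservation extents created above carry no data pointers;
 * they just pin nr_replicas worth of space in the extents btree so that
 * subsequent writes to the fallocated range can't fail with -ENOSPC,
 * which is the guarantee fallocate() is supposed to provide.
 */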
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                loff_t offset, loff_t len)
{
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    u64 end = offset + len;
    u64 block_start = round_down(offset, block_bytes(c));
    u64 block_end = round_up(end, block_bytes(c));
    bool truncated_last_page = false;
    int ret, ret2 = 0;

    if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
        ret = inode_newsize_ok(&inode->v, end);
        if (ret)
            return ret;
    }

    if (mode & FALLOC_FL_ZERO_RANGE) {
        ret = bch2_truncate_pages(inode, offset, end);
        if (unlikely(ret < 0))
            return ret;

        truncated_last_page = ret;

        truncate_pagecache_range(&inode->v, offset, end - 1);

        block_start = round_up(offset, block_bytes(c));
        block_end = round_down(end, block_bytes(c));
    }

    ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

    /*
     * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
     * so that the VFS cache i_size is consistent with the btree i_size:
     */
    if (ret &&
        !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
        return ret;

    if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
        end = inode->v.i_size;

    if (end >= inode->v.i_size &&
        (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
         !(mode & FALLOC_FL_KEEP_SIZE))) {
        spin_lock(&inode->v.i_lock);
        i_size_write(&inode->v, end);
        spin_unlock(&inode->v.i_lock);

        mutex_lock(&inode->ei_update_lock);
        ret2 = bch2_write_inode_size(c, inode, end, 0);
        mutex_unlock(&inode->ei_update_lock);
    }

    return ret ?: ret2;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
                 loff_t offset, loff_t len)
{
    struct bch_inode_info *inode = file_bch_inode(file);
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    long ret;

    if (!percpu_ref_tryget(&c->writes))
        return -EROFS;

    inode_lock(&inode->v);
    inode_dio_wait(&inode->v);
    bch2_pagecache_block_get(&inode->ei_pagecache_lock);

    if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
        ret = bchfs_fallocate(inode, mode, offset, len);
    else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
        ret = bchfs_fpunch(inode, offset, len);
    else if (mode == FALLOC_FL_INSERT_RANGE)
        ret = bchfs_fcollapse_finsert(inode, offset, len, true);
    else if (mode == FALLOC_FL_COLLAPSE_RANGE)
        ret = bchfs_fcollapse_finsert(inode, offset, len, false);
    else
        ret = -EOPNOTSUPP;

    bch2_pagecache_block_put(&inode->ei_pagecache_lock);
    inode_unlock(&inode->v);
    percpu_ref_put(&c->writes);

    return ret;
}
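/*
 * Userspace view of the dispatch above, e.g.:
 *
 *    fallocate(fd, 0, off, len);                     -> bchfs_fallocate()
 *    fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *                  FALLOC_FL_KEEP_SIZE, off, len);   -> bchfs_fpunch()
 *    fallocate(fd, FALLOC_FL_COLLAPSE_RANGE,
 *              off, len);                            -> bchfs_fcollapse_finsert(.., false)
 *
 * (fallocate(2) requires KEEP_SIZE alongside PUNCH_HOLE; any unsupported
 * mode combination falls through to -EOPNOTSUPP.)
 */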
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
                 struct file *file_dst, loff_t pos_dst,
                 loff_t len, unsigned remap_flags)
{
    struct bch_inode_info *src = file_bch_inode(file_src);
    struct bch_inode_info *dst = file_bch_inode(file_dst);
    struct bch_fs *c = src->v.i_sb->s_fs_info;
    s64 i_sectors_delta = 0;
    u64 aligned_len;
    loff_t ret = 0;

    if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
        return -EINVAL;

    if (remap_flags & REMAP_FILE_DEDUP)
        return -EOPNOTSUPP;

    if ((pos_src & (block_bytes(c) - 1)) ||
        (pos_dst & (block_bytes(c) - 1)))
        return -EINVAL;

    if (src == dst &&
        abs(pos_src - pos_dst) < len)
        return -EINVAL;

    bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);

    file_update_time(file_dst);

    inode_dio_wait(&src->v);
    inode_dio_wait(&dst->v);

    ret = generic_remap_file_range_prep(file_src, pos_src,
                        file_dst, pos_dst,
                        &len, remap_flags);
    if (ret < 0 || len == 0)
        goto err;

    aligned_len = round_up((u64) len, block_bytes(c));

    ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
                pos_dst, pos_dst + len - 1);
    if (ret)
        goto err;

    mark_pagecache_unallocated(src, pos_src >> 9,
                   (pos_src + aligned_len) >> 9);

    ret = bch2_remap_range(c,
                   inode_inum(dst), pos_dst >> 9,
                   inode_inum(src), pos_src >> 9,
                   aligned_len >> 9,
                   pos_dst + len, &i_sectors_delta);
    if (ret < 0)
        goto err;

    /*
     * due to alignment, we might have remapped slightly more than requested
     */
    ret = min((u64) ret << 9, (u64) len);

    /* XXX get a quota reservation */
    i_sectors_acct(c, dst, NULL, i_sectors_delta);

    spin_lock(&dst->v.i_lock);
    if (pos_dst + ret > dst->v.i_size)
        i_size_write(&dst->v, pos_dst + ret);
    spin_unlock(&dst->v.i_lock);

    if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
        IS_SYNC(file_inode(file_dst)))
        ret = bch2_flush_inode(c, inode_inum(dst));
err:
    bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
    return ret;
}
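/*
 * Note on the clamp above: because aligned_len is rounded up to the
 * filesystem block size, bch2_remap_range() may remap slightly past len;
 * the min() keeps the byte count reported to the VFS within what the
 * caller asked for, while i_size is still extended by the clamped amount.
 */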
static int page_data_offset(struct page *page, unsigned offset)
{
    struct bch_page_state *s = bch2_page_state(page);
    unsigned i;

    if (s)
        for (i = offset >> 9; i < PAGE_SECTORS; i++)
            if (s->s[i].state >= SECTOR_DIRTY)
                return i << 9;

    return -1;
}

static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                       loff_t start_offset,
                       loff_t end_offset)
{
    struct address_space *mapping = vinode->i_mapping;
    struct page *page;
    pgoff_t start_index = start_offset >> PAGE_SHIFT;
    pgoff_t end_index = end_offset >> PAGE_SHIFT;
    pgoff_t index = start_index;
    loff_t ret;
    int offset;

    while (index <= end_index) {
        if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
            lock_page(page);

            offset = page_data_offset(page,
                    page->index == start_index
                    ? start_offset & (PAGE_SIZE - 1)
                    : 0);
            if (offset >= 0) {
                ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
                        offset,
                        start_offset, end_offset);
                unlock_page(page);
                put_page(page);
                return ret;
            }

            unlock_page(page);
            put_page(page);
        } else {
            break;
        }
    }

    return end_offset;
}
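/*
 * The extents btree alone can't answer SEEK_DATA: dirty pages that haven't
 * been written back are data with no extent on disk yet. So the btree walk
 * below only bounds the search, and bch2_seek_pagecache_data() above is
 * used to find data (state >= SECTOR_DIRTY) lurking in the page cache.
 */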
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
    struct bch_inode_info *inode = file_bch_inode(file);
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    struct btree_trans trans;
    struct btree_iter iter;
    struct bkey_s_c k;
    subvol_inum inum = inode_inum(inode);
    u64 isize, next_data = MAX_LFS_FILESIZE;
    u32 snapshot;
    int ret;

    isize = i_size_read(&inode->v);
    if (offset >= isize)
        return -ENXIO;

    bch2_trans_init(&trans, c, 0, 0);
retry:
    bch2_trans_begin(&trans);

    ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
    if (ret)
        goto err;

    for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
            SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
        if (k.k->p.inode != inode->v.i_ino) {
            break;
        } else if (bkey_extent_is_data(k.k)) {
            next_data = max(offset, bkey_start_offset(k.k) << 9);
            break;
        } else if (k.k->p.offset >> 9 > isize)
            break;
    }
    bch2_trans_iter_exit(&trans, &iter);
err:
    if (ret == -EINTR)
        goto retry;

    bch2_trans_exit(&trans);
    if (ret)
        return ret;

    if (next_data > offset)
        next_data = bch2_seek_pagecache_data(&inode->v,
                             offset, next_data);

    if (next_data >= isize)
        return -ENXIO;

    return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
static int __page_hole_offset(struct page *page, unsigned offset)
{
    struct bch_page_state *s = bch2_page_state(page);
    unsigned i;

    if (!s)
        return 0;

    for (i = offset >> 9; i < PAGE_SECTORS; i++)
        if (s->s[i].state < SECTOR_DIRTY)
            return i << 9;

    return -1;
}

static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
{
    pgoff_t index = offset >> PAGE_SHIFT;
    struct page *page;
    int pg_offset;
    loff_t ret = -1;

    page = find_lock_page(mapping, index);
    if (!page)
        return offset;

    pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
    if (pg_offset >= 0)
        ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;

    unlock_page(page);
    put_page(page);

    return ret;
}

static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
                       loff_t start_offset,
                       loff_t end_offset)
{
    struct address_space *mapping = vinode->i_mapping;
    loff_t offset = start_offset, hole;

    while (offset < end_offset) {
        hole = page_hole_offset(mapping, offset);
        if (hole >= 0 && hole <= end_offset)
            return max(start_offset, hole);

        offset += PAGE_SIZE;
        offset &= PAGE_MASK;
    }

    return end_offset;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
    struct bch_inode_info *inode = file_bch_inode(file);
    struct bch_fs *c = inode->v.i_sb->s_fs_info;
    struct btree_trans trans;
    struct btree_iter iter;
    struct bkey_s_c k;
    subvol_inum inum = inode_inum(inode);
    u64 isize, next_hole = MAX_LFS_FILESIZE;
    u32 snapshot;
    int ret;

    isize = i_size_read(&inode->v);
    if (offset >= isize)
        return -ENXIO;

    bch2_trans_init(&trans, c, 0, 0);
retry:
    bch2_trans_begin(&trans);

    ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
    if (ret)
        goto err;

    for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
            SPOS(inode->v.i_ino, offset >> 9, snapshot),
            BTREE_ITER_SLOTS, k, ret) {
        if (k.k->p.inode != inode->v.i_ino) {
            next_hole = bch2_seek_pagecache_hole(&inode->v,
                    offset, MAX_LFS_FILESIZE);
            break;
        } else if (!bkey_extent_is_data(k.k)) {
            next_hole = bch2_seek_pagecache_hole(&inode->v,
                    max(offset, bkey_start_offset(k.k) << 9),
                    k.k->p.offset << 9);

            if (next_hole < k.k->p.offset << 9)
                break;
        } else {
            offset = max(offset, bkey_start_offset(k.k) << 9);
        }
    }
    bch2_trans_iter_exit(&trans, &iter);
err:
    if (ret == -EINTR)
        goto retry;

    bch2_trans_exit(&trans);
    if (ret)
        return ret;

    if (next_hole > isize)
        next_hole = isize;

    return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
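/*
 * SEEK_HOLE is the dual of SEEK_DATA: an empty slot in the extents btree
 * only counts as a hole if the page cache agrees - __page_hole_offset()
 * treats anything below SECTOR_DIRTY as hole, since a dirty page over an
 * unallocated extent is data that simply hasn't been written back yet.
 */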
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
    switch (whence) {
    case SEEK_SET:
    case SEEK_CUR:
    case SEEK_END:
        return generic_file_llseek(file, offset, whence);
    case SEEK_DATA:
        return bch2_seek_data(file, offset);
    case SEEK_HOLE:
        return bch2_seek_hole(file, offset);
    }

    return -EINVAL;
}
void bch2_fs_fsio_exit(struct bch_fs *c)
{
    bioset_exit(&c->dio_write_bioset);
    bioset_exit(&c->dio_read_bioset);
    bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
    int ret = 0;

    pr_verbose_init(c->opts, "");

    if (bioset_init(&c->writepage_bioset,
            4, offsetof(struct bch_writepage_io, op.wbio.bio),
            BIOSET_NEED_BVECS) ||
        bioset_init(&c->dio_read_bioset,
            4, offsetof(struct dio_read, rbio.bio),
            BIOSET_NEED_BVECS) ||
        bioset_init(&c->dio_write_bioset,
            4, offsetof(struct dio_write, op.wbio.bio),
            BIOSET_NEED_BVECS))
        ret = -ENOMEM;

    pr_verbose_init(c->opts, "ret %i", ret);
    return ret;
}

#endif /* NO_BCACHEFS_FS */