4 #include "alloc_foreground.h"
5 #include "btree_update.h"
19 #include <linux/aio.h>
20 #include <linux/backing-dev.h>
21 #include <linux/falloc.h>
22 #include <linux/migrate.h>
23 #include <linux/mmu_context.h>
24 #include <linux/pagevec.h>
25 #include <linux/sched/signal.h>
26 #include <linux/task_io_accounting_ops.h>
27 #include <linux/uio.h>
28 #include <linux/writeback.h>
30 #include <trace/events/bcachefs.h>
31 #include <trace/events/writeback.h>
37 struct bchfs_write_op {
38 struct bch_inode_info *inode;
45 struct bch_write_op op;
48 struct bch_writepage_io {
53 struct bchfs_write_op op;
63 struct quota_res quota_res;
66 struct iovec inline_vecs[2];
69 struct bchfs_write_op iop;
76 struct bch_read_bio rbio;
79 /* pagecache_block must be held */
80 static int write_invalidate_inode_pages_range(struct address_space *mapping,
81 loff_t start, loff_t end)
86 * XXX: the way this is currently implemented, we can spin if a process
87 * is continually redirtying a specific page
90 if (!mapping->nrpages &&
91 !mapping->nrexceptional)
94 ret = filemap_write_and_wait_range(mapping, start, end);
98 if (!mapping->nrpages)
101 ret = invalidate_inode_pages2_range(mapping,
104 } while (ret == -EBUSY);
111 #ifdef CONFIG_BCACHEFS_QUOTA
113 static void bch2_quota_reservation_put(struct bch_fs *c,
114 struct bch_inode_info *inode,
115 struct quota_res *res)
120 mutex_lock(&inode->ei_quota_lock);
121 BUG_ON(res->sectors > inode->ei_quota_reserved);
123 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
124 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
125 inode->ei_quota_reserved -= res->sectors;
126 mutex_unlock(&inode->ei_quota_lock);
131 static int bch2_quota_reservation_add(struct bch_fs *c,
132 struct bch_inode_info *inode,
133 struct quota_res *res,
139 mutex_lock(&inode->ei_quota_lock);
140 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
141 check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
143 inode->ei_quota_reserved += sectors;
144 res->sectors += sectors;
146 mutex_unlock(&inode->ei_quota_lock);
153 static void bch2_quota_reservation_put(struct bch_fs *c,
154 struct bch_inode_info *inode,
155 struct quota_res *res)
159 static int bch2_quota_reservation_add(struct bch_fs *c,
160 struct bch_inode_info *inode,
161 struct quota_res *res,
170 /* i_size updates: */
172 struct inode_new_size {
178 static int inode_set_size(struct bch_inode_info *inode,
179 struct bch_inode_unpacked *bi,
182 struct inode_new_size *s = p;
184 bi->bi_size = s->new_size;
185 if (s->fields & ATTR_ATIME)
186 bi->bi_atime = s->now;
187 if (s->fields & ATTR_MTIME)
188 bi->bi_mtime = s->now;
189 if (s->fields & ATTR_CTIME)
190 bi->bi_ctime = s->now;
195 static int __must_check bch2_write_inode_size(struct bch_fs *c,
196 struct bch_inode_info *inode,
197 loff_t new_size, unsigned fields)
199 struct inode_new_size s = {
200 .new_size = new_size,
201 .now = bch2_current_time(c),
205 return bch2_write_inode(c, inode, inode_set_size, &s, fields);
208 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
209 struct quota_res *quota_res, s64 sectors)
214 mutex_lock(&inode->ei_quota_lock);
215 #ifdef CONFIG_BCACHEFS_QUOTA
216 if (quota_res && sectors > 0) {
217 BUG_ON(sectors > quota_res->sectors);
218 BUG_ON(sectors > inode->ei_quota_reserved);
220 quota_res->sectors -= sectors;
221 inode->ei_quota_reserved -= sectors;
223 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
226 inode->v.i_blocks += sectors;
227 mutex_unlock(&inode->ei_quota_lock);
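/*
 * A note on the accounting above (summarizing the visible call sites):
 * callers that reserved quota up front (buffered writes) pass their
 * quota_res and the reservation is simply consumed; callers without one -
 * e.g. the writeback fixup in bch2_writepage_io_done() - fall through to
 * bch2_quota_acct() with KEY_TYPE_QUOTA_WARN, which, as the name suggests,
 * warns instead of failing when the quota is overrun.
 */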
230 /* normal i_size/i_sectors update machinery: */
232 static int sum_sector_overwrites(struct btree_trans *trans,
233 struct btree_iter *extent_iter,
234 struct bkey_i *new, bool *allocating,
237 struct btree_iter *iter;
242 iter = bch2_trans_copy_iter(trans, extent_iter);
244 return PTR_ERR(iter);
246 old = bch2_btree_iter_peek_slot(iter);
250 * it should not be possible to get an error here, since we're
251 * carefully not advancing past @new and thus whatever leaf node
252 * @iter currently points to:
254 BUG_ON(bkey_err(old));
258 bch2_bkey_nr_ptrs_allocated(old) <
259 bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new)))
262 *delta += (min(new->k.p.offset,
264 max(bkey_start_offset(&new->k),
265 bkey_start_offset(old.k))) *
266 (bkey_extent_is_allocation(&new->k) -
267 bkey_extent_is_allocation(old.k));
269 if (bkey_cmp(old.k->p, new->k.p) >= 0)
272 old = bch2_btree_iter_next_slot(iter);
275 bch2_trans_iter_free(trans, iter);
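/*
 * Worked example for the delta computation above: if @new is an allocating
 * extent covering sectors [0, 8) and the existing @old key is a hole (not
 * an allocation) covering [0, 16), then the overlap is
 * min(8, 16) - max(0, 0) = 8 sectors, the multiplier is (1 - 0), and
 * *delta += 8: eight sectors become newly allocated.
 */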
279 static int bch2_extent_update(struct btree_trans *trans,
280 struct bch_inode_info *inode,
281 struct disk_reservation *disk_res,
282 struct quota_res *quota_res,
283 struct btree_iter *extent_iter,
290 struct bch_fs *c = trans->c;
291 struct btree_iter *inode_iter = NULL;
292 struct bch_inode_unpacked inode_u;
293 struct bkey_inode_buf inode_p;
294 bool allocating = false;
295 bool extended = false;
296 bool inode_locked = false;
300 bch2_trans_begin_updates(trans);
302 ret = bch2_btree_iter_traverse(extent_iter);
306 bch2_extent_trim_atomic(k, extent_iter);
308 ret = sum_sector_overwrites(trans, extent_iter,
314 if (!may_allocate && allocating)
317 bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
319 new_i_size = min(k->k.p.offset << 9, new_i_size);
321 /* XXX: inode->i_size locking */
322 if (i_sectors_delta ||
323 new_i_size > inode->ei_inode.bi_size) {
324 if (c->opts.new_inode_updates) {
325 bch2_btree_trans_unlock(trans);
326 mutex_lock(&inode->ei_update_lock);
328 if (!bch2_btree_trans_relock(trans)) {
329 mutex_unlock(&inode->ei_update_lock);
335 if (!inode->ei_inode_update)
336 inode->ei_inode_update =
337 bch2_deferred_update_alloc(c,
338 BTREE_ID_INODES, 64);
340 inode_u = inode->ei_inode;
341 inode_u.bi_sectors += i_sectors_delta;
343 /* XXX: this is slightly suspect */
344 if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
345 new_i_size > inode_u.bi_size) {
346 inode_u.bi_size = new_i_size;
350 bch2_inode_pack(&inode_p, &inode_u);
351 bch2_trans_update(trans,
352 BTREE_INSERT_DEFERRED(inode->ei_inode_update,
353 &inode_p.inode.k_i));
355 inode_iter = bch2_trans_get_iter(trans,
357 POS(k->k.p.inode, 0),
358 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
359 if (IS_ERR(inode_iter))
360 return PTR_ERR(inode_iter);
362 ret = bch2_btree_iter_traverse(inode_iter);
366 inode_u = inode->ei_inode;
367 inode_u.bi_sectors += i_sectors_delta;
369 /* XXX: this is slightly suspect */
370 if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
371 new_i_size > inode_u.bi_size) {
372 inode_u.bi_size = new_i_size;
376 bch2_inode_pack(&inode_p, &inode_u);
377 bch2_trans_update(trans,
378 BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
382 ret = bch2_trans_commit(trans, disk_res,
383 &inode->ei_journal_seq,
386 BTREE_INSERT_NOUNLOCK|
387 BTREE_INSERT_USE_RESERVE);
391 inode->ei_inode.bi_sectors += i_sectors_delta;
393 EBUG_ON(i_sectors_delta &&
394 inode->ei_inode.bi_sectors != inode_u.bi_sectors);
397 inode->ei_inode.bi_size = new_i_size;
400 spin_lock(&inode->v.i_lock);
401 if (new_i_size > inode->v.i_size)
402 i_size_write(&inode->v, new_i_size);
403 spin_unlock(&inode->v.i_lock);
408 i_sectors_acct(c, inode, quota_res, i_sectors_delta);
411 *total_delta += i_sectors_delta;
413 if (!IS_ERR_OR_NULL(inode_iter))
414 bch2_trans_iter_put(trans, inode_iter);
416 mutex_unlock(&inode->ei_update_lock);
421 static int bchfs_write_index_update(struct bch_write_op *wop)
423 struct bch_fs *c = wop->c;
424 struct bchfs_write_op *op = container_of(wop,
425 struct bchfs_write_op, op);
426 struct quota_res *quota_res = op->is_dio
427 ? &container_of(op, struct dio_write, iop)->quota_res
429 struct bch_inode_info *inode = op->inode;
430 struct keylist *keys = &op->op.insert_keys;
431 struct bkey_i *k = bch2_keylist_front(keys);
432 struct btree_trans trans;
433 struct btree_iter *iter;
436 BUG_ON(k->k.p.inode != inode->v.i_ino);
438 bch2_trans_init(&trans, c);
439 bch2_trans_preload_iters(&trans);
441 iter = bch2_trans_get_iter(&trans,
443 bkey_start_pos(&k->k),
449 bkey_copy(&tmp.k, bch2_keylist_front(keys));
451 ret = bch2_extent_update(&trans, inode,
452 &wop->res, quota_res,
463 if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
464 bch2_cut_front(iter->pos, bch2_keylist_front(keys));
466 bch2_keylist_pop_front(keys);
467 } while (!bch2_keylist_empty(keys));
469 bch2_trans_exit(&trans);
474 static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
476 struct bch_inode_info *inode,
477 struct bch_io_opts opts,
481 op->sectors_added = 0;
484 op->new_i_size = U64_MAX;
486 bch2_write_op_init(&op->op, c, opts);
487 op->op.target = opts.foreground_target;
488 op->op.index_update_fn = bchfs_write_index_update;
489 op_journal_seq_set(&op->op, &inode->ei_journal_seq);
492 static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
494 struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
496 bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
502 /* stored in page->private: */
505 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
506 * almost protect it with the page lock, except that bch2_writepage_io_done has
507 * to update the sector counts (and from interrupt/bottom half context).
509 struct bch_page_state {
512 unsigned sectors:PAGE_SECTOR_SHIFT + 1;
514 /* Uncompressed, fully allocated replicas: */
515 unsigned nr_replicas:4;
517 /* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
518 unsigned replicas_reserved:4;
520 /* Owns PAGE_SECTORS sized quota reservation: */
521 unsigned quota_reserved:1;
524 * Number of sectors on disk - for i_blocks;
525 * this is the uncompressed size, not the compressed size:
527 unsigned dirty_sectors:PAGE_SECTOR_SHIFT + 1;
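	/*
	 * Sizing note, assuming 4k pages: PAGE_SECTOR_SHIFT is 3, so
	 * @sectors and @dirty_sectors are each 4 bits wide - they have to
	 * represent 0..PAGE_SECTORS (8) inclusive. The whole struct must
	 * fit in the unsigned long at page->private; page_state() below
	 * BUILD_BUG()s if it doesn't.
	 */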
534 #define page_state_cmpxchg(_ptr, _new, _expr) \
536 unsigned long _v = READ_ONCE((_ptr)->v); \
537 struct bch_page_state _old; \
540 _old.v = _new.v = _v; \
543 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
544 } while (_old.v != _new.v && \
545 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
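/*
 * Purely illustrative (nothing in this file calls it this way): the macro
 * above expands to a lockless read-modify-write loop. Open coded, clearing
 * the dirty count would look roughly like:
 *
 *	unsigned long v = READ_ONCE(ptr->v);
 *	struct bch_page_state old, new;
 *
 *	do {
 *		old.v = new.v = v;
 *		new.dirty_sectors = 0;
 *	} while (old.v != new.v &&
 *		 (v = cmpxchg(&ptr->v, old.v, new.v)) != old.v);
 *
 * i.e. the cmpxchg is only issued when the update actually changes the
 * word, and we retry if another thread modified it underneath us.
 */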
550 static inline struct bch_page_state *page_state(struct page *page)
552 struct bch_page_state *s = (void *) &page->private;
554 BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
556 if (!PagePrivate(page))
557 SetPagePrivate(page);
562 static inline unsigned page_res_sectors(struct bch_page_state s)
565 return s.replicas_reserved * PAGE_SECTORS;
568 static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
569 struct bch_page_state s)
571 struct disk_reservation res = { .sectors = page_res_sectors(s) };
572 struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };
574 bch2_quota_reservation_put(c, inode, &quota_res);
575 bch2_disk_reservation_put(c, &res);
578 static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
581 struct bch_page_state s;
583 EBUG_ON(!PageLocked(page));
585 s = page_state_cmpxchg(page_state(page), s, {
586 s.replicas_reserved = 0;
587 s.quota_reserved = 0;
590 __bch2_put_page_reservation(c, inode, s);
593 static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
594 struct page *page, bool check_enospc)
596 struct bch_page_state *s = page_state(page), new;
598 /* XXX: this should not be open coded */
599 unsigned nr_replicas = inode->ei_inode.bi_data_replicas
600 ? inode->ei_inode.bi_data_replicas - 1
601 : c->opts.data_replicas;
602 struct disk_reservation disk_res;
603 struct quota_res quota_res = { 0 };
606 EBUG_ON(!PageLocked(page));
608 if (s->replicas_reserved < nr_replicas) {
609 ret = bch2_disk_reservation_get(c, &disk_res, PAGE_SECTORS,
610 nr_replicas - s->replicas_reserved,
611 !check_enospc ? BCH_DISK_RESERVATION_NOFAIL : 0);
615 page_state_cmpxchg(s, new, ({
616 BUG_ON(new.replicas_reserved +
617 disk_res.nr_replicas != nr_replicas);
618 new.replicas_reserved += disk_res.nr_replicas;
622 if (!s->quota_reserved &&
623 s->sectors + s->dirty_sectors < PAGE_SECTORS) {
624 ret = bch2_quota_reservation_add(c, inode, &quota_res,
630 page_state_cmpxchg(s, new, ({
631 BUG_ON(new.quota_reserved);
632 new.quota_reserved = 1;
639 static void bch2_clear_page_bits(struct page *page)
641 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
642 struct bch_fs *c = inode->v.i_sb->s_fs_info;
643 struct bch_page_state s;
645 EBUG_ON(!PageLocked(page));
647 if (!PagePrivate(page))
650 s.v = xchg(&page_state(page)->v, 0);
651 ClearPagePrivate(page);
654 i_sectors_acct(c, inode, NULL, -s.dirty_sectors);
656 __bch2_put_page_reservation(c, inode, s);
659 int bch2_set_page_dirty(struct page *page)
661 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
662 struct bch_fs *c = inode->v.i_sb->s_fs_info;
663 struct quota_res quota_res = { 0 };
664 struct bch_page_state old, new;
666 old = page_state_cmpxchg(page_state(page), new,
667 new.dirty_sectors = PAGE_SECTORS - new.sectors;
668 new.quota_reserved = 0;
671 quota_res.sectors += old.quota_reserved * PAGE_SECTORS;
673 if (old.dirty_sectors != new.dirty_sectors)
674 i_sectors_acct(c, inode, &quota_res,
675 new.dirty_sectors - old.dirty_sectors);
676 bch2_quota_reservation_put(c, inode, &quota_res);
678 return __set_page_dirty_nobuffers(page);
681 int bch2_page_mkwrite(struct vm_fault *vmf)
683 struct page *page = vmf->page;
684 struct file *file = vmf->vma->vm_file;
685 struct bch_inode_info *inode = file_bch_inode(file);
686 struct address_space *mapping = inode->v.i_mapping;
687 struct bch_fs *c = inode->v.i_sb->s_fs_info;
688 int ret = VM_FAULT_LOCKED;
690 sb_start_pagefault(inode->v.i_sb);
691 file_update_time(file);
694 * Not strictly necessary, but helps avoid dio writes livelocking in
695 * write_invalidate_inode_pages_range() - can drop this if/when we get
696 * a write_invalidate_inode_pages_range() that works without dropping
697 * the page lock before invalidating the page
699 if (current->pagecache_lock != &mapping->add_lock)
700 pagecache_add_get(&mapping->add_lock);
703 if (page->mapping != mapping ||
704 page_offset(page) > i_size_read(&inode->v)) {
706 ret = VM_FAULT_NOPAGE;
710 if (bch2_get_page_reservation(c, inode, page, true)) {
712 ret = VM_FAULT_SIGBUS;
716 if (!PageDirty(page))
717 set_page_dirty(page);
718 wait_for_stable_page(page);
720 if (current->pagecache_lock != &mapping->add_lock)
721 pagecache_add_put(&mapping->add_lock);
722 sb_end_pagefault(inode->v.i_sb);
726 void bch2_invalidatepage(struct page *page, unsigned int offset,
729 EBUG_ON(!PageLocked(page));
730 EBUG_ON(PageWriteback(page));
732 if (offset || length < PAGE_SIZE)
735 bch2_clear_page_bits(page);
738 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
740 /* XXX: this can't take locks that are held while we allocate memory */
741 EBUG_ON(!PageLocked(page));
742 EBUG_ON(PageWriteback(page));
747 bch2_clear_page_bits(page);
751 #ifdef CONFIG_MIGRATION
752 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
753 struct page *page, enum migrate_mode mode)
757 EBUG_ON(!PageLocked(page));
758 EBUG_ON(!PageLocked(newpage));
760 ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
761 if (ret != MIGRATEPAGE_SUCCESS)
764 if (PagePrivate(page)) {
765 *page_state(newpage) = *page_state(page);
766 ClearPagePrivate(page);
769 migrate_page_copy(newpage, page);
770 return MIGRATEPAGE_SUCCESS;
774 /* readpages/writepages: */
776 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
778 sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
780 return bio->bi_vcnt < bio->bi_max_vecs &&
781 bio_end_sector(bio) == offset;
784 static int bio_add_page_contig(struct bio *bio, struct page *page)
786 sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
788 EBUG_ON(!bio->bi_max_vecs);
791 bio->bi_iter.bi_sector = offset;
792 else if (!bio_can_add_page_contig(bio, page))
795 __bio_add_page(bio, page, PAGE_SIZE, 0);
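/*
 * Example of the index -> sector arithmetic above, assuming 4k pages
 * (PAGE_SECTOR_SHIFT == 3): page->index 3 maps to sector 3 << 3 == 24, so a
 * bio whose bio_end_sector() is 24 can have that page appended contiguously.
 */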
801 static void bch2_readpages_end_io(struct bio *bio)
806 bio_for_each_segment_all(bv, bio, i) {
807 struct page *page = bv->bv_page;
809 if (!bio->bi_status) {
810 SetPageUptodate(page);
812 ClearPageUptodate(page);
821 static inline void page_state_init_for_read(struct page *page)
823 SetPagePrivate(page);
827 struct readpages_iter {
828 struct address_space *mapping;
836 static int readpages_iter_init(struct readpages_iter *iter,
837 struct address_space *mapping,
838 struct list_head *pages, unsigned nr_pages)
840 memset(iter, 0, sizeof(*iter));
842 iter->mapping = mapping;
843 iter->offset = list_last_entry(pages, struct page, lru)->index;
845 iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
849 while (!list_empty(pages)) {
850 struct page *page = list_last_entry(pages, struct page, lru);
852 prefetchw(&page->flags);
853 iter->pages[iter->nr_pages++] = page;
854 list_del(&page->lru);
860 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
866 BUG_ON(iter->idx > iter->nr_added);
867 BUG_ON(iter->nr_added > iter->nr_pages);
869 if (iter->idx < iter->nr_added)
873 if (iter->idx == iter->nr_pages)
876 ret = add_to_page_cache_lru_vec(iter->mapping,
877 iter->pages + iter->nr_added,
878 iter->nr_pages - iter->nr_added,
879 iter->offset + iter->nr_added,
884 page = iter->pages[iter->nr_added];
891 iter->nr_added += ret;
893 for (i = iter->idx; i < iter->nr_added; i++)
894 put_page(iter->pages[i]);
896 EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
898 page_state_init_for_read(iter->pages[iter->idx]);
899 return iter->pages[iter->idx];
902 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
904 struct bvec_iter iter;
906 unsigned nr_ptrs = bch2_bkey_nr_ptrs_allocated(k);
908 bio_for_each_segment(bv, bio, iter) {
909 /* brand new pages, don't need to be locked: */
911 struct bch_page_state *s = page_state(bv.bv_page);
913 /* sectors in @k from the start of this page: */
914 unsigned k_sectors = k.k->size - (iter.bi_sector - bkey_start_offset(k.k));
916 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
918 s->nr_replicas = page_sectors == PAGE_SECTORS
921 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
922 s->sectors += page_sectors;
926 static void readpage_bio_extend(struct readpages_iter *iter,
927 struct bio *bio, u64 offset,
930 while (bio_end_sector(bio) < offset &&
931 bio->bi_vcnt < bio->bi_max_vecs) {
932 pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
933 struct page *page = readpage_iter_next(iter);
937 if (iter->offset + iter->idx != page_offset)
945 page = xa_load(&iter->mapping->i_pages, page_offset);
946 if (page && !xa_is_value(page))
949 page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
953 page_state_init_for_read(page);
955 ret = add_to_page_cache_lru(page, iter->mapping,
956 page_offset, GFP_NOFS);
958 ClearPagePrivate(page);
966 __bio_add_page(bio, page, PAGE_SIZE, 0);
970 static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
971 struct bch_read_bio *rbio, u64 inum,
972 struct readpages_iter *readpages_iter)
974 struct bch_fs *c = trans->c;
975 struct bio *bio = &rbio->bio;
976 int flags = BCH_READ_RETRY_IF_STALE|
977 BCH_READ_MAY_PROMOTE;
980 rbio->start_time = local_clock();
987 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
989 k = bch2_btree_iter_peek_slot(iter);
993 int ret = btree_iter_err(iter);
995 bcache_io_error(c, bio, "btree IO error %i", ret);
1000 bkey_reassemble(&tmp.k, k);
1001 bch2_btree_trans_unlock(trans);
1002 k = bkey_i_to_s_c(&tmp.k);
1004 if (readpages_iter) {
1005 bool want_full_extent = false;
1007 if (bkey_extent_is_data(k.k)) {
1008 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1009 const union bch_extent_entry *i;
1010 struct extent_ptr_decoded p;
1012 extent_for_each_ptr_decode(e, p, i)
1013 want_full_extent |= ((p.crc.csum_type != 0) |
1014 (p.crc.compression_type != 0));
1017 readpage_bio_extend(readpages_iter,
1022 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
1023 bio->bi_iter.bi_sector) << 9;
1024 swap(bio->bi_iter.bi_size, bytes);
1026 if (bytes == bio->bi_iter.bi_size)
1027 flags |= BCH_READ_LAST_FRAGMENT;
1029 if (bkey_extent_is_allocation(k.k))
1030 bch2_add_page_sectors(bio, k);
1032 bch2_read_extent(c, rbio, k, flags);
1034 if (flags & BCH_READ_LAST_FRAGMENT)
1037 swap(bio->bi_iter.bi_size, bytes);
1038 bio_advance(bio, bytes);
1042 int bch2_readpages(struct file *file, struct address_space *mapping,
1043 struct list_head *pages, unsigned nr_pages)
1045 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1046 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1047 struct bch_io_opts opts = io_opts(c, inode);
1048 struct btree_trans trans;
1049 struct btree_iter *iter;
1051 struct readpages_iter readpages_iter;
1054 ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
1057 bch2_trans_init(&trans, c);
1059 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
1062 if (current->pagecache_lock != &mapping->add_lock)
1063 pagecache_add_get(&mapping->add_lock);
1065 while ((page = readpage_iter_next(&readpages_iter))) {
1066 pgoff_t index = readpages_iter.offset + readpages_iter.idx;
1067 unsigned n = min_t(unsigned,
1068 readpages_iter.nr_pages -
1071 struct bch_read_bio *rbio =
1072 rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1075 readpages_iter.idx++;
1077 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
1078 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
1079 rbio->bio.bi_end_io = bch2_readpages_end_io;
1080 __bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);
1082 bchfs_read(&trans, iter, rbio, inode->v.i_ino,
1086 if (current->pagecache_lock != &mapping->add_lock)
1087 pagecache_add_put(&mapping->add_lock);
1089 bch2_trans_exit(&trans);
1090 kfree(readpages_iter.pages);
1095 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1096 u64 inum, struct page *page)
1098 struct btree_trans trans;
1099 struct btree_iter *iter;
1101 page_state_init_for_read(page);
1103 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1104 bio_add_page_contig(&rbio->bio, page);
1106 bch2_trans_init(&trans, c);
1107 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
1110 bchfs_read(&trans, iter, rbio, inum, NULL);
1112 bch2_trans_exit(&trans);
1115 int bch2_readpage(struct file *file, struct page *page)
1117 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1118 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1119 struct bch_io_opts opts = io_opts(c, inode);
1120 struct bch_read_bio *rbio;
1122 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1123 rbio->bio.bi_end_io = bch2_readpages_end_io;
1125 __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1129 static void bch2_read_single_page_end_io(struct bio *bio)
1131 complete(bio->bi_private);
1134 static int bch2_read_single_page(struct page *page,
1135 struct address_space *mapping)
1137 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1138 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1139 struct bch_read_bio *rbio;
1141 DECLARE_COMPLETION_ONSTACK(done);
1143 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1145 rbio->bio.bi_private = &done;
1146 rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1148 __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1149 wait_for_completion(&done);
1151 ret = blk_status_to_errno(rbio->bio.bi_status);
1152 bio_put(&rbio->bio);
1157 SetPageUptodate(page);
1163 struct bch_writepage_state {
1164 struct bch_writepage_io *io;
1165 struct bch_io_opts opts;
1168 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1169 struct bch_inode_info *inode)
1171 return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
1174 static void bch2_writepage_io_free(struct closure *cl)
1176 struct bch_writepage_io *io = container_of(cl,
1177 struct bch_writepage_io, cl);
1179 bio_put(&io->op.op.wbio.bio);
1182 static void bch2_writepage_io_done(struct closure *cl)
1184 struct bch_writepage_io *io = container_of(cl,
1185 struct bch_writepage_io, cl);
1186 struct bch_fs *c = io->op.op.c;
1187 struct bio *bio = &io->op.op.wbio.bio;
1188 struct bio_vec *bvec;
1191 if (io->op.op.error) {
1192 bio_for_each_segment_all(bvec, bio, i)
1193 SetPageError(bvec->bv_page);
1194 set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
1198 * racing with fallocate can cause us to add fewer sectors than
1199 * expected - but we shouldn't add more sectors than expected:
1201 BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
1204 * (an error (due to going RO) halfway through a page can screw that up
1207 BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
1211 * PageWriteback is effectively our ref on the inode - fix up i_blocks
1212 * before calling end_page_writeback:
1214 if (io->op.sectors_added != io->new_sectors)
1215 i_sectors_acct(c, io->op.inode, NULL,
1216 io->op.sectors_added - (s64) io->new_sectors);
1218 bio_for_each_segment_all(bvec, bio, i)
1219 end_page_writeback(bvec->bv_page);
1221 closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1224 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1226 struct bch_writepage_io *io = w->io;
1229 closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
1230 continue_at(&io->cl, bch2_writepage_io_done, NULL);
1234 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1235 * possible, else allocating a new one:
1237 static void bch2_writepage_io_alloc(struct bch_fs *c,
1238 struct bch_writepage_state *w,
1239 struct bch_inode_info *inode,
1241 unsigned nr_replicas)
1243 struct bch_write_op *op;
1244 u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;
1246 w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1248 &c->writepage_bioset),
1249 struct bch_writepage_io, op.op.wbio.bio);
1251 closure_init(&w->io->cl, NULL);
1252 w->io->new_sectors = 0;
1253 bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
1255 op->nr_replicas = nr_replicas;
1256 op->res.nr_replicas = nr_replicas;
1257 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1258 op->pos = POS(inode->v.i_ino, offset);
1259 op->wbio.bio.bi_iter.bi_sector = offset;
1262 static int __bch2_writepage(struct page *page,
1263 struct writeback_control *wbc,
1266 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1267 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1268 struct bch_writepage_state *w = data;
1269 struct bch_page_state new, old;
1270 unsigned offset, nr_replicas_this_write;
1271 loff_t i_size = i_size_read(&inode->v);
1272 pgoff_t end_index = i_size >> PAGE_SHIFT;
1274 EBUG_ON(!PageUptodate(page));
1276 /* Is the page fully inside i_size? */
1277 if (page->index < end_index)
1280 /* Is the page fully outside i_size? (truncate in progress) */
1281 offset = i_size & (PAGE_SIZE - 1);
1282 if (page->index > end_index || !offset) {
1288 * The page straddles i_size. It must be zeroed out on each and every
1289 * writepage invocation because it may be mmapped. "A file is mapped
1290 * in multiples of the page size. For a file that is not a multiple of
1291 * the page size, the remaining memory is zeroed when mapped, and
1292 * writes to that region are not written out to the file."
1294 zero_user_segment(page, offset, PAGE_SIZE);
1296 EBUG_ON(!PageLocked(page));
1298 /* Before unlocking the page, transfer reservation to w->io: */
1299 old = page_state_cmpxchg(page_state(page), new, {
1301 * If we didn't get a reservation, we can only write out the
1302 * number of (fully allocated) replicas that currently exist,
1303 * and only if the entire page has been written:
1305 nr_replicas_this_write =
1307 new.replicas_reserved,
1308 (new.sectors == PAGE_SECTORS
1309 ? new.nr_replicas : 0));
1311 BUG_ON(!nr_replicas_this_write);
1313 new.nr_replicas = w->opts.compression
1315 : nr_replicas_this_write;
1317 new.replicas_reserved = 0;
1319 new.sectors += new.dirty_sectors;
1320 BUG_ON(new.sectors != PAGE_SECTORS);
1321 new.dirty_sectors = 0;
1324 BUG_ON(PageWriteback(page));
1325 set_page_writeback(page);
1329 (w->io->op.op.res.nr_replicas != nr_replicas_this_write ||
1330 !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
1331 bch2_writepage_do_io(w);
1334 bch2_writepage_io_alloc(c, w, inode, page,
1335 nr_replicas_this_write);
1337 w->io->new_sectors += new.sectors - old.sectors;
1339 BUG_ON(inode != w->io->op.inode);
1340 BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));
1342 w->io->op.op.res.sectors += old.replicas_reserved * PAGE_SECTORS;
1343 w->io->op.new_i_size = i_size;
1345 if (wbc->sync_mode == WB_SYNC_ALL)
1346 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1351 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1353 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1354 struct bch_writepage_state w =
1355 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1356 struct blk_plug plug;
1359 blk_start_plug(&plug);
1360 ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
1362 bch2_writepage_do_io(&w);
1363 blk_finish_plug(&plug);
1367 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1369 struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1370 struct bch_writepage_state w =
1371 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1374 ret = __bch2_writepage(page, wbc, &w);
1376 bch2_writepage_do_io(&w);
1381 /* buffered writes: */
1383 int bch2_write_begin(struct file *file, struct address_space *mapping,
1384 loff_t pos, unsigned len, unsigned flags,
1385 struct page **pagep, void **fsdata)
1387 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1388 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1389 pgoff_t index = pos >> PAGE_SHIFT;
1390 unsigned offset = pos & (PAGE_SIZE - 1);
1394 BUG_ON(inode_unhashed(&inode->v));
1396 /* Not strictly necessary - same reason as mkwrite(): */
1397 pagecache_add_get(&mapping->add_lock);
1399 page = grab_cache_page_write_begin(mapping, index, flags);
1403 if (PageUptodate(page))
1406 /* If we're writing the entire page, we don't need to read it in first: */
1407 if (len == PAGE_SIZE)
1410 if (!offset && pos + len >= inode->v.i_size) {
1411 zero_user_segment(page, len, PAGE_SIZE);
1412 flush_dcache_page(page);
1416 if (index > inode->v.i_size >> PAGE_SHIFT) {
1417 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1418 flush_dcache_page(page);
1422 ret = bch2_read_single_page(page, mapping);
1426 ret = bch2_get_page_reservation(c, inode, page, true);
1428 if (!PageUptodate(page)) {
1430 * If the page hasn't been read in, we won't know if we
1431 * actually need a reservation - we don't actually need
1432 * to read here, we just need to check if the page is
1433 * fully backed by uncompressed data:
1448 pagecache_add_put(&mapping->add_lock);
1452 int bch2_write_end(struct file *file, struct address_space *mapping,
1453 loff_t pos, unsigned len, unsigned copied,
1454 struct page *page, void *fsdata)
1456 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1457 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1459 lockdep_assert_held(&inode->v.i_rwsem);
1461 if (unlikely(copied < len && !PageUptodate(page))) {
1463 * The page needs to be read in, but that would destroy
1464 * our partial write - simplest thing is to just force
1465 * userspace to redo the write:
1467 zero_user(page, 0, PAGE_SIZE);
1468 flush_dcache_page(page);
1472 spin_lock(&inode->v.i_lock);
1473 if (pos + copied > inode->v.i_size)
1474 i_size_write(&inode->v, pos + copied);
1475 spin_unlock(&inode->v.i_lock);
1478 if (!PageUptodate(page))
1479 SetPageUptodate(page);
1480 if (!PageDirty(page))
1481 set_page_dirty(page);
1483 inode->ei_last_dirtied = (unsigned long) current;
1485 bch2_put_page_reservation(c, inode, page);
1490 pagecache_add_put(&mapping->add_lock);
1495 #define WRITE_BATCH_PAGES 32
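/* i.e. with 4k pages, buffered writes are copied in batches of up to 128k */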
1497 static int __bch2_buffered_write(struct bch_inode_info *inode,
1498 struct address_space *mapping,
1499 struct iov_iter *iter,
1500 loff_t pos, unsigned len)
1502 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1503 struct page *pages[WRITE_BATCH_PAGES];
1504 unsigned long index = pos >> PAGE_SHIFT;
1505 unsigned offset = pos & (PAGE_SIZE - 1);
1506 unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1507 unsigned i, copied = 0, nr_pages_copied = 0;
1511 BUG_ON(nr_pages > ARRAY_SIZE(pages));
1513 for (i = 0; i < nr_pages; i++) {
1514 pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
1522 if (offset && !PageUptodate(pages[0])) {
1523 ret = bch2_read_single_page(pages[0], mapping);
1528 if ((pos + len) & (PAGE_SIZE - 1) &&
1529 !PageUptodate(pages[nr_pages - 1])) {
1530 if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
1531 zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
1533 ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
1539 for (i = 0; i < nr_pages; i++) {
1540 ret = bch2_get_page_reservation(c, inode, pages[i], true);
1542 if (ret && !PageUptodate(pages[i])) {
1543 ret = bch2_read_single_page(pages[i], mapping);
1547 ret = bch2_get_page_reservation(c, inode, pages[i], true);
1554 if (mapping_writably_mapped(mapping))
1555 for (i = 0; i < nr_pages; i++)
1556 flush_dcache_page(pages[i]);
1558 while (copied < len) {
1559 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1560 unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
1561 unsigned pg_bytes = min_t(unsigned, len - copied,
1562 PAGE_SIZE - pg_offset);
1563 unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
1564 iter, pg_offset, pg_bytes);
1569 flush_dcache_page(page);
1570 iov_iter_advance(iter, pg_copied);
1571 copied += pg_copied;
1577 nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
1578 inode->ei_last_dirtied = (unsigned long) current;
1580 spin_lock(&inode->v.i_lock);
1581 if (pos + copied > inode->v.i_size)
1582 i_size_write(&inode->v, pos + copied);
1583 spin_unlock(&inode->v.i_lock);
1586 ((offset + copied) & (PAGE_SIZE - 1))) {
1587 struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
1589 if (!PageUptodate(page)) {
1590 zero_user(page, 0, PAGE_SIZE);
1591 copied -= (offset + copied) & (PAGE_SIZE - 1);
1595 for (i = 0; i < nr_pages_copied; i++) {
1596 if (!PageUptodate(pages[i]))
1597 SetPageUptodate(pages[i]);
1598 if (!PageDirty(pages[i]))
1599 set_page_dirty(pages[i]);
1600 unlock_page(pages[i]);
1604 for (i = nr_pages_copied; i < nr_pages; i++) {
1605 if (!PageDirty(pages[i]))
1606 bch2_put_page_reservation(c, inode, pages[i]);
1607 unlock_page(pages[i]);
1611 return copied ?: ret;
1614 static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
1616 struct file *file = iocb->ki_filp;
1617 struct address_space *mapping = file->f_mapping;
1618 struct bch_inode_info *inode = file_bch_inode(file);
1619 loff_t pos = iocb->ki_pos;
1620 ssize_t written = 0;
1623 pagecache_add_get(&mapping->add_lock);
1626 unsigned offset = pos & (PAGE_SIZE - 1);
1627 unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
1628 PAGE_SIZE * WRITE_BATCH_PAGES - offset);
1631 * Bring in the user page that we will copy from _first_.
1632 * Otherwise there's a nasty deadlock on copying from the
1633 * same page as we're writing to, without it being marked up-to-date.
1636 * Not only is this an optimisation, but it is also required
1637 * to check that the address is actually valid, when atomic
1638 * usercopies are used, below.
1640 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1641 bytes = min_t(unsigned long, iov_iter_count(iter),
1642 PAGE_SIZE - offset);
1644 if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
1650 if (unlikely(fatal_signal_pending(current))) {
1655 ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
1656 if (unlikely(ret < 0))
1661 if (unlikely(ret == 0)) {
1663 * If we were unable to copy any data at all, we must
1664 * fall back to a single segment length write.
1666 * If we didn't fall back here, we could livelock
1667 * because not all segments in the iov can be copied at
1668 * once without a pagefault.
1670 bytes = min_t(unsigned long, PAGE_SIZE - offset,
1671 iov_iter_single_seg_count(iter));
1677 balance_dirty_pages_ratelimited(mapping);
1678 } while (iov_iter_count(iter));
1680 pagecache_add_put(&mapping->add_lock);
1682 return written ? written : ret;
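/*
 * The loop above closely parallels generic_perform_write(): fault the
 * source pages in first, copy with an atomic (non-faulting) usercopy, and
 * shrink the copy to a single iov segment if no progress was made - which
 * is what guarantees we can't livelock.
 */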
1685 /* O_DIRECT reads */
1687 static void bch2_dio_read_complete(struct closure *cl)
1689 struct dio_read *dio = container_of(cl, struct dio_read, cl);
1691 dio->req->ki_complete(dio->req, dio->ret, 0);
1692 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1695 static void bch2_direct_IO_read_endio(struct bio *bio)
1697 struct dio_read *dio = bio->bi_private;
1700 dio->ret = blk_status_to_errno(bio->bi_status);
1702 closure_put(&dio->cl);
1705 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1707 bch2_direct_IO_read_endio(bio);
1708 bio_check_pages_dirty(bio); /* transfers ownership */
1711 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1713 struct file *file = req->ki_filp;
1714 struct bch_inode_info *inode = file_bch_inode(file);
1715 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1716 struct bch_io_opts opts = io_opts(c, inode);
1717 struct dio_read *dio;
1719 loff_t offset = req->ki_pos;
1720 bool sync = is_sync_kiocb(req);
1724 if ((offset|iter->count) & (block_bytes(c) - 1))
1727 ret = min_t(loff_t, iter->count,
1728 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1733 shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
1734 iter->count -= shorten;
1736 bio = bio_alloc_bioset(GFP_KERNEL,
1737 iov_iter_npages(iter, BIO_MAX_PAGES),
1738 &c->dio_read_bioset);
1740 bio->bi_end_io = bch2_direct_IO_read_endio;
1742 dio = container_of(bio, struct dio_read, rbio.bio);
1743 closure_init(&dio->cl, NULL);
1746 * this is a _really_ horrible hack just to avoid an atomic sub at the end:
1750 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1751 atomic_set(&dio->cl.remaining,
1752 CLOSURE_REMAINING_INITIALIZER -
1754 CLOSURE_DESTRUCTOR);
1756 atomic_set(&dio->cl.remaining,
1757 CLOSURE_REMAINING_INITIALIZER + 1);
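	/*
	 * i.e. the async case arms bch2_dio_read_complete() as the closure's
	 * destructor, while the sync case holds the extra ref itself and
	 * tears the closure down manually via closure_sync() and
	 * closure_debug_destroy() below.
	 */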
1764 while (iter->count) {
1765 bio = bio_alloc_bioset(GFP_KERNEL,
1766 iov_iter_npages(iter, BIO_MAX_PAGES),
1768 bio->bi_end_io = bch2_direct_IO_read_split_endio;
1770 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1771 bio->bi_iter.bi_sector = offset >> 9;
1772 bio->bi_private = dio;
1774 ret = bio_iov_iter_get_pages(bio, iter);
1776 /* XXX: fault inject this path */
1777 bio->bi_status = BLK_STS_RESOURCE;
1782 offset += bio->bi_iter.bi_size;
1783 bio_set_pages_dirty(bio);
1786 closure_get(&dio->cl);
1788 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1791 iter->count += shorten;
1794 closure_sync(&dio->cl);
1795 closure_debug_destroy(&dio->cl);
1797 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1800 return -EIOCBQUEUED;
1804 /* O_DIRECT writes */
1806 static void bch2_dio_write_loop_async(struct closure *);
1808 static long bch2_dio_write_loop(struct dio_write *dio)
1810 bool kthread = (current->flags & PF_KTHREAD) != 0;
1811 struct kiocb *req = dio->req;
1812 struct address_space *mapping = req->ki_filp->f_mapping;
1813 struct bch_inode_info *inode = dio->iop.inode;
1814 struct bio *bio = &dio->iop.op.wbio.bio;
1824 inode_dio_begin(&inode->v);
1825 __pagecache_block_get(&mapping->add_lock);
1827 /* Write and invalidate pagecache range that we're writing to: */
1828 offset = req->ki_pos + (dio->iop.op.written << 9);
1829 ret = write_invalidate_inode_pages_range(mapping,
1831 offset + iov_iter_count(&dio->iter) - 1);
1836 offset = req->ki_pos + (dio->iop.op.written << 9);
1838 BUG_ON(current->pagecache_lock);
1839 current->pagecache_lock = &mapping->add_lock;
1843 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1847 current->pagecache_lock = NULL;
1849 if (unlikely(ret < 0))
1852 /* gup might have faulted pages back in: */
1853 ret = write_invalidate_inode_pages_range(mapping,
1855 offset + bio->bi_iter.bi_size - 1);
1859 dio->iop.op.pos = POS(inode->v.i_ino, offset >> 9);
1861 task_io_account_write(bio->bi_iter.bi_size);
1863 closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1865 if (!dio->sync && !dio->loop && dio->iter.count) {
1866 struct iovec *iov = dio->inline_vecs;
1868 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1869 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1871 if (unlikely(!iov)) {
1872 dio->iop.op.error = -ENOMEM;
1876 dio->free_iov = true;
1879 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1880 dio->iter.iov = iov;
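			/*
			 * The caller's iovec may live on its stack, which is
			 * gone once we return -EIOCBQUEUED - so before going
			 * async we take a copy: inline in dio->inline_vecs
			 * when it fits, otherwise a kmalloc'd copy freed at
			 * the end of the loop (dio->free_iov).
			 */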
1886 continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
1887 return -EIOCBQUEUED;
1890 closure_sync(&dio->cl);
1892 bio_for_each_segment_all(bv, bio, i)
1893 put_page(bv->bv_page);
1894 if (!dio->iter.count || dio->iop.op.error)
1899 ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
1901 __pagecache_block_put(&mapping->add_lock);
1902 bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
1903 bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);
1906 kfree(dio->iter.iov);
1908 closure_debug_destroy(&dio->cl);
1913 /* inode->i_dio_count is our ref on inode and thus bch_fs */
1914 inode_dio_end(&inode->v);
1917 req->ki_complete(req, ret, 0);
1923 static void bch2_dio_write_loop_async(struct closure *cl)
1925 struct dio_write *dio = container_of(cl, struct dio_write, cl);
1927 bch2_dio_write_loop(dio);
1930 static int bch2_direct_IO_write(struct kiocb *req,
1931 struct iov_iter *iter,
1934 struct file *file = req->ki_filp;
1935 struct bch_inode_info *inode = file_bch_inode(file);
1936 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1937 struct dio_write *dio;
1941 lockdep_assert_held(&inode->v.i_rwsem);
1943 if (unlikely(!iter->count))
1946 if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
1949 bio = bio_alloc_bioset(GFP_KERNEL,
1950 iov_iter_npages(iter, BIO_MAX_PAGES),
1951 &c->dio_write_bioset);
1952 dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1953 closure_init(&dio->cl, NULL);
1955 dio->mm = current->mm;
1957 dio->sync = is_sync_kiocb(req) ||
1958 req->ki_pos + iter->count > inode->v.i_size;
1959 dio->free_iov = false;
1960 dio->quota_res.sectors = 0;
1962 bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
1963 dio->iop.op.write_point = writepoint_hashed((unsigned long) current);
1964 dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
1966 if ((req->ki_flags & IOCB_DSYNC) &&
1967 !c->opts.journal_flush_disabled)
1968 dio->iop.op.flags |= BCH_WRITE_FLUSH;
1970 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
1971 iter->count >> 9, true);
1975 dio->iop.op.nr_replicas = dio->iop.op.opts.data_replicas;
1977 ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
1978 dio->iop.op.opts.data_replicas, 0);
1979 if (unlikely(ret)) {
1980 if (!bch2_check_range_allocated(c, POS(inode->v.i_ino,
1983 dio->iop.op.opts.data_replicas))
1986 dio->iop.unalloc = true;
1989 return bch2_dio_write_loop(dio);
1991 bch2_disk_reservation_put(c, &dio->iop.op.res);
1992 bch2_quota_reservation_put(c, inode, &dio->quota_res);
1993 closure_debug_destroy(&dio->cl);
1998 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
2000 struct blk_plug plug;
2003 blk_start_plug(&plug);
2004 ret = iov_iter_rw(iter) == WRITE
2005 ? bch2_direct_IO_write(req, iter, false)
2006 : bch2_direct_IO_read(req, iter);
2007 blk_finish_plug(&plug);
2013 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
2015 return bch2_direct_IO_write(iocb, iter, true);
2018 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2020 struct file *file = iocb->ki_filp;
2021 struct bch_inode_info *inode = file_bch_inode(file);
2024 /* We can write back this queue in page reclaim */
2025 current->backing_dev_info = inode_to_bdi(&inode->v);
2026 ret = file_remove_privs(file);
2030 ret = file_update_time(file);
2034 ret = iocb->ki_flags & IOCB_DIRECT
2035 ? bch2_direct_write(iocb, from)
2036 : bch2_buffered_write(iocb, from);
2038 if (likely(ret > 0))
2039 iocb->ki_pos += ret;
2041 current->backing_dev_info = NULL;
2045 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
2047 struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
2048 bool direct = iocb->ki_flags & IOCB_DIRECT;
2051 inode_lock(&inode->v);
2052 ret = generic_write_checks(iocb, from);
2054 ret = __bch2_write_iter(iocb, from);
2055 inode_unlock(&inode->v);
2057 if (ret > 0 && !direct)
2058 ret = generic_write_sync(iocb, ret);
2065 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2067 struct bch_inode_info *inode = file_bch_inode(file);
2068 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2071 ret = file_write_and_wait_range(file, start, end);
2075 if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
2078 ret = sync_inode_metadata(&inode->v, 1);
2082 if (c->opts.journal_flush_disabled)
2085 ret = bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
2086 ret2 = file_check_and_advance_wb_err(file);
2093 static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
2094 u64 start_offset, u64 end_offset, u64 *journal_seq)
2096 struct bpos start = POS(inode->v.i_ino, start_offset);
2097 struct bpos end = POS(inode->v.i_ino, end_offset);
2098 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2099 struct btree_trans trans;
2100 struct btree_iter *iter;
2104 bch2_trans_init(&trans, c);
2105 bch2_trans_preload_iters(&trans);
2107 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
2110 while ((k = bch2_btree_iter_peek(iter)).k &&
2111 !(ret = bkey_err(k)) &&
2112 bkey_cmp(iter->pos, end) < 0) {
2113 struct disk_reservation disk_res =
2114 bch2_disk_reservation_init(c, 0);
2115 struct bkey_i delete;
2117 bkey_init(&delete.k);
2118 delete.k.p = iter->pos;
2120 /* create the biggest key we can */
2121 bch2_key_resize(&delete.k, max_sectors);
2122 bch2_cut_back(end, &delete.k);
2124 ret = bch2_extent_update(&trans, inode,
2125 &disk_res, NULL, iter, &delete,
2126 0, true, true, NULL);
2127 bch2_disk_reservation_put(c, &disk_res);
2134 bch2_trans_cond_resched(&trans);
2137 bch2_trans_exit(&trans);
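/*
 * Each delete key above is capped at max_sectors - KEY_SIZE_MAX rounded
 * down to a multiple of the filesystem block size - so a large punch runs
 * as a sequence of bounded transactions, with bch2_trans_cond_resched()
 * keeping us preemptible in between.
 */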
2142 static inline int range_has_data(struct bch_fs *c,
2146 struct btree_trans trans;
2147 struct btree_iter *iter;
2151 bch2_trans_init(&trans, c);
2153 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
2154 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2157 if (bkey_extent_is_data(k.k)) {
2163 return bch2_trans_exit(&trans) ?: ret;
2166 static int __bch2_truncate_page(struct bch_inode_info *inode,
2167 pgoff_t index, loff_t start, loff_t end)
2169 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2170 struct address_space *mapping = inode->v.i_mapping;
2171 unsigned start_offset = start & (PAGE_SIZE - 1);
2172 unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2176 /* Page boundary? Nothing to do */
2177 if (!((index == start >> PAGE_SHIFT && start_offset) ||
2178 (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2182 if (index << PAGE_SHIFT >= inode->v.i_size)
2185 page = find_lock_page(mapping, index);
2188 * XXX: we're doing two index lookups when we end up reading the page:
2191 ret = range_has_data(c,
2192 POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
2193 POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
2197 page = find_or_create_page(mapping, index, GFP_KERNEL);
2198 if (unlikely(!page)) {
2204 if (!PageUptodate(page)) {
2205 ret = bch2_read_single_page(page, mapping);
2211 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2213 * XXX: because we aren't currently tracking whether the page has actual
2214 * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
2216 ret = bch2_get_page_reservation(c, inode, page, false);
2219 if (index == start >> PAGE_SHIFT &&
2220 index == end >> PAGE_SHIFT)
2221 zero_user_segment(page, start_offset, end_offset);
2222 else if (index == start >> PAGE_SHIFT)
2223 zero_user_segment(page, start_offset, PAGE_SIZE);
2224 else if (index == end >> PAGE_SHIFT)
2225 zero_user_segment(page, 0, end_offset);
2227 if (!PageDirty(page))
2228 set_page_dirty(page);
2236 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2238 return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2239 from, from + PAGE_SIZE);
2242 static int bch2_extend(struct bch_inode_info *inode, struct iattr *iattr)
2244 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2245 struct address_space *mapping = inode->v.i_mapping;
2248 ret = filemap_write_and_wait_range(mapping,
2249 inode->ei_inode.bi_size, S64_MAX);
2253 truncate_setsize(&inode->v, iattr->ia_size);
2254 setattr_copy(&inode->v, iattr);
2256 mutex_lock(&inode->ei_update_lock);
2257 ret = bch2_write_inode_size(c, inode, inode->v.i_size,
2258 ATTR_MTIME|ATTR_CTIME);
2259 mutex_unlock(&inode->ei_update_lock);
2264 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
2265 struct bch_inode_unpacked *bi,
2268 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2270 bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
2271 bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
2275 static int bch2_truncate_start_fn(struct bch_inode_info *inode,
2276 struct bch_inode_unpacked *bi, void *p)
2278 u64 *new_i_size = p;
2280 bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
2281 bi->bi_size = *new_i_size;
2285 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2287 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2288 struct address_space *mapping = inode->v.i_mapping;
2289 u64 new_i_size = iattr->ia_size;
2293 inode_dio_wait(&inode->v);
2294 pagecache_block_get(&mapping->add_lock);
2296 BUG_ON(inode->v.i_size < inode->ei_inode.bi_size);
2298 shrink = iattr->ia_size <= inode->v.i_size;
2301 ret = bch2_extend(inode, iattr);
2305 ret = bch2_truncate_page(inode, iattr->ia_size);
2309 if (iattr->ia_size > inode->ei_inode.bi_size)
2310 ret = filemap_write_and_wait_range(mapping,
2311 inode->ei_inode.bi_size,
2312 iattr->ia_size - 1);
2313 else if (iattr->ia_size & (PAGE_SIZE - 1))
2314 ret = filemap_write_and_wait_range(mapping,
2315 round_down(iattr->ia_size, PAGE_SIZE),
2316 iattr->ia_size - 1);
2320 mutex_lock(&inode->ei_update_lock);
2321 ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
2323 mutex_unlock(&inode->ei_update_lock);
2328 truncate_setsize(&inode->v, iattr->ia_size);
2331 * XXX: need a comment explaining why PAGE_SIZE and not block_bytes()
2334 ret = __bch2_fpunch(c, inode,
2335 round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2336 U64_MAX, &inode->ei_journal_seq);
2340 setattr_copy(&inode->v, iattr);
2342 mutex_lock(&inode->ei_update_lock);
2343 ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
2344 ATTR_MTIME|ATTR_CTIME);
2345 mutex_unlock(&inode->ei_update_lock);
2347 pagecache_block_put(&mapping->add_lock);
2353 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2355 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2356 struct address_space *mapping = inode->v.i_mapping;
2357 u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2358 u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2361 inode_lock(&inode->v);
2362 inode_dio_wait(&inode->v);
2363 pagecache_block_get(&mapping->add_lock);
2365 ret = __bch2_truncate_page(inode,
2366 offset >> PAGE_SHIFT,
2367 offset, offset + len);
2371 if (offset >> PAGE_SHIFT !=
2372 (offset + len) >> PAGE_SHIFT) {
2373 ret = __bch2_truncate_page(inode,
2374 (offset + len) >> PAGE_SHIFT,
2375 offset, offset + len);
2380 truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2382 if (discard_start < discard_end)
2383 ret = __bch2_fpunch(c, inode, discard_start, discard_end,
2384 &inode->ei_journal_seq);
2386 pagecache_block_put(&mapping->add_lock);
2387 inode_unlock(&inode->v);
2392 static long bch2_fcollapse(struct bch_inode_info *inode,
2393 loff_t offset, loff_t len)
2395 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2396 struct address_space *mapping = inode->v.i_mapping;
2397 struct btree_trans trans;
2398 struct btree_iter *src, *dst;
2399 BKEY_PADDED(k) copy;
2404 if ((offset | len) & (block_bytes(c) - 1))
2407 bch2_trans_init(&trans, c);
2408 bch2_trans_preload_iters(&trans);
2411 * We need i_mutex to keep the page cache consistent with the extents
2412 * btree, and the btree consistent with i_size - we don't need outside
2413 * locking for the extents btree itself, because we're using linked iterators:
2416 inode_lock(&inode->v);
2417 inode_dio_wait(&inode->v);
2418 pagecache_block_get(&mapping->add_lock);
2421 if (offset + len >= inode->v.i_size)
2424 if (inode->v.i_size < len)
2427 new_size = inode->v.i_size - len;
2429 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2433 dst = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2434 POS(inode->v.i_ino, offset >> 9),
2435 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2436 BUG_ON(IS_ERR_OR_NULL(dst));
2438 src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2439 POS_MIN, BTREE_ITER_SLOTS);
2440 BUG_ON(IS_ERR_OR_NULL(src));
2442 while (bkey_cmp(dst->pos,
2444 round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2445 struct disk_reservation disk_res;
2447 ret = bch2_btree_iter_traverse(dst);
2451 bch2_btree_iter_set_pos(src,
2452 POS(dst->pos.inode, dst->pos.offset + (len >> 9)));
2454 k = bch2_btree_iter_peek_slot(src);
2455 if ((ret = bkey_err(k)))
2458 bkey_reassemble(&copy.k, k);
2460 bch2_cut_front(src->pos, &copy.k);
2461 copy.k.k.p.offset -= len >> 9;
2463 bch2_extent_trim_atomic(&copy.k, dst);
2465 BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
2467 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2468 bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
2469 BCH_DISK_RESERVATION_NOFAIL);
2472 ret = bch2_extent_update(&trans, inode,
2475 0, true, true, NULL);
2476 bch2_disk_reservation_put(c, &disk_res);
2483 * XXX: if we error here we've left data with multiple
2484 * pointers... which isn't a _super_ serious problem...
2487 bch2_trans_cond_resched(&trans);
2489 bch2_trans_unlock(&trans);
2491 ret = __bch2_fpunch(c, inode,
2492 round_up(new_size, block_bytes(c)) >> 9,
2493 U64_MAX, &inode->ei_journal_seq);
2497 i_size_write(&inode->v, new_size);
2498 mutex_lock(&inode->ei_update_lock);
2499 ret = bch2_write_inode_size(c, inode, new_size,
2500 ATTR_MTIME|ATTR_CTIME);
2501 mutex_unlock(&inode->ei_update_lock);
2503 bch2_trans_exit(&trans);
2504 pagecache_block_put(&mapping->add_lock);
2505 inode_unlock(&inode->v);
2509 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2510 loff_t offset, loff_t len)
2512 struct address_space *mapping = inode->v.i_mapping;
2513 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2514 struct btree_trans trans;
2515 struct btree_iter *iter;
2516 struct bpos end_pos;
2517 loff_t block_start, block_end;
2518 loff_t end = offset + len;
2520 unsigned replicas = io_opts(c, inode).data_replicas;
2523 bch2_trans_init(&trans, c);
2524 bch2_trans_preload_iters(&trans);
2526 inode_lock(&inode->v);
2527 inode_dio_wait(&inode->v);
2528 pagecache_block_get(&mapping->add_lock);
2530 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2531 ret = inode_newsize_ok(&inode->v, end);
2536 if (mode & FALLOC_FL_ZERO_RANGE) {
2537 ret = __bch2_truncate_page(inode,
2538 offset >> PAGE_SHIFT,
2542 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2543 ret = __bch2_truncate_page(inode,
2550 truncate_pagecache_range(&inode->v, offset, end - 1);
2552 block_start = round_up(offset, PAGE_SIZE);
2553 block_end = round_down(end, PAGE_SIZE);
2555 block_start = round_down(offset, PAGE_SIZE);
2556 block_end = round_up(end, PAGE_SIZE);
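		/*
		 * i.e. for FALLOC_FL_ZERO_RANGE we round inward - the partial
		 * head and tail pages were already zeroed by
		 * __bch2_truncate_page() above - while for a plain fallocate
		 * we round outward to whole pages so the entire requested
		 * range is covered by the reservation.
		 */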
2559 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2560 POS(inode->v.i_ino, block_start >> 9),
2561 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2562 end_pos = POS(inode->v.i_ino, block_end >> 9);
2564 while (bkey_cmp(iter->pos, end_pos) < 0) {
2565 struct disk_reservation disk_res = { 0 };
2566 struct quota_res quota_res = { 0 };
2567 struct bkey_i_reservation reservation;
2570 k = bch2_btree_iter_peek_slot(iter);
2571 if ((ret = bkey_err(k)))
2574 /* already reserved */
2575 if (k.k->type == KEY_TYPE_reservation &&
2576 bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2577 bch2_btree_iter_next_slot(iter);
2581 if (bkey_extent_is_data(k.k) &&
2582 !(mode & FALLOC_FL_ZERO_RANGE)) {
2583 bch2_btree_iter_next_slot(iter);
2587 bkey_reservation_init(&reservation.k_i);
2588 reservation.k.type = KEY_TYPE_reservation;
2589 reservation.k.p = k.k->p;
2590 reservation.k.size = k.k->size;
2592 bch2_cut_front(iter->pos, &reservation.k_i);
2593 bch2_cut_back(end_pos, &reservation.k);
2595 sectors = reservation.k.size;
2596 reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);
2598 if (!bkey_extent_is_allocation(k.k)) {
2599 ret = bch2_quota_reservation_add(c, inode,
2606 if (reservation.v.nr_replicas < replicas ||
2607 bch2_extent_is_compressed(k)) {
2608 ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2613 reservation.v.nr_replicas = disk_res.nr_replicas;
2616 ret = bch2_extent_update(&trans, inode,
2617 &disk_res, &quota_res,
2618 iter, &reservation.k_i,
2619 0, true, true, NULL);
2621 bch2_quota_reservation_put(c, inode, &quota_res);
2622 bch2_disk_reservation_put(c, &disk_res);
2628 bch2_trans_unlock(&trans);
2630 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2631 end > inode->v.i_size) {
2632 i_size_write(&inode->v, end);
2634 mutex_lock(&inode->ei_update_lock);
2635 ret = bch2_write_inode_size(c, inode, inode->v.i_size, 0);
2636 mutex_unlock(&inode->ei_update_lock);
2640 if ((mode & FALLOC_FL_KEEP_SIZE) &&
2641 (mode & FALLOC_FL_ZERO_RANGE) &&
2642 inode->ei_inode.bi_size != inode->v.i_size) {
2643 /* sync appends: */
2644 ret = filemap_write_and_wait_range(mapping,
2645 inode->ei_inode.bi_size, S64_MAX);
2649 if (inode->ei_inode.bi_size != inode->v.i_size) {
2650 mutex_lock(&inode->ei_update_lock);
2651 ret = bch2_write_inode_size(c, inode,
2652 inode->v.i_size, 0);
2653 mutex_unlock(&inode->ei_update_lock);
2657 bch2_trans_exit(&trans);
2658 pagecache_block_put(&mapping->add_lock);
2659 inode_unlock(&inode->v);
2663 long bch2_fallocate_dispatch(struct file *file, int mode,
2664 loff_t offset, loff_t len)
2666 struct bch_inode_info *inode = file_bch_inode(file);
2668 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2669 return bch2_fallocate(inode, mode, offset, len);
2671 if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2672 return bch2_fpunch(inode, offset, len);
2674 if (mode == FALLOC_FL_COLLAPSE_RANGE)
2675 return bch2_fcollapse(inode, offset, len);
2682 static bool page_is_data(struct page *page)
2684 EBUG_ON(!PageLocked(page));
2686 /* XXX: should only have to check PageDirty */
2687 return PagePrivate(page) &&
2688 (page_state(page)->sectors ||
2689 page_state(page)->dirty_sectors);
2692 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2693 loff_t start_offset,
2696 struct address_space *mapping = vinode->i_mapping;
2700 for (index = start_offset >> PAGE_SHIFT;
2701 index < end_offset >> PAGE_SHIFT;
2703 if (find_get_pages(mapping, &index, 1, &page)) {
2706 if (page_is_data(page))
2710 ((loff_t) index) << PAGE_SHIFT));
2721 static loff_t bch2_seek_data(struct file *file, u64 offset)
2723 struct bch_inode_info *inode = file_bch_inode(file);
2724 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2725 struct btree_trans trans;
2726 struct btree_iter *iter;
2728 u64 isize, next_data = MAX_LFS_FILESIZE;
2731 isize = i_size_read(&inode->v);
2732 if (offset >= isize)
2735 bch2_trans_init(&trans, c);
2737 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2738 POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
2739 if (k.k->p.inode != inode->v.i_ino) {
2741 } else if (bkey_extent_is_data(k.k)) {
2742 next_data = max(offset, bkey_start_offset(k.k) << 9);
2744 } else if (k.k->p.offset >> 9 > isize)
2748 ret = bch2_trans_exit(&trans) ?: ret;
2752 if (next_data > offset)
2753 next_data = bch2_next_pagecache_data(&inode->v,
2756 if (next_data > isize)
2759 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
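/*
 * SEEK_DATA has to consult both sources of truth: the extents btree and
 * dirty pagecache that hasn't been written back yet - hence the
 * bch2_next_pagecache_data() pass over [offset, next_data) above.
 */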
2762 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2767 page = find_lock_entry(mapping, index);
2768 if (!page || xa_is_value(page))
2771 ret = page_is_data(page);
2777 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2778 loff_t start_offset,
2781 struct address_space *mapping = vinode->i_mapping;
2784 for (index = start_offset >> PAGE_SHIFT;
2785 index < end_offset >> PAGE_SHIFT;
2787 if (!page_slot_is_data(mapping, index))
2788 end_offset = max(start_offset,
2789 ((loff_t) index) << PAGE_SHIFT);
2794 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2796 struct bch_inode_info *inode = file_bch_inode(file);
2797 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2798 struct btree_trans trans;
2799 struct btree_iter *iter;
2801 u64 isize, next_hole = MAX_LFS_FILESIZE;
2804 isize = i_size_read(&inode->v);
2805 if (offset >= isize)
2808 bch2_trans_init(&trans, c);
2810 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2811 POS(inode->v.i_ino, offset >> 9),
2812 BTREE_ITER_SLOTS, k, ret) {
2813 if (k.k->p.inode != inode->v.i_ino) {
2814 next_hole = bch2_next_pagecache_hole(&inode->v,
2815 offset, MAX_LFS_FILESIZE);
2817 } else if (!bkey_extent_is_data(k.k)) {
2818 next_hole = bch2_next_pagecache_hole(&inode->v,
2819 max(offset, bkey_start_offset(k.k) << 9),
2820 k.k->p.offset << 9);
2822 if (next_hole < k.k->p.offset << 9)
2825 offset = max(offset, bkey_start_offset(k.k) << 9);
2829 ret = bch2_trans_exit(&trans) ?: ret;
2833 if (next_hole > isize)
2836 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2839 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2845 return generic_file_llseek(file, offset, whence);
2847 return bch2_seek_data(file, offset);
2849 return bch2_seek_hole(file, offset);
2855 void bch2_fs_fsio_exit(struct bch_fs *c)
2857 bioset_exit(&c->dio_write_bioset);
2858 bioset_exit(&c->dio_read_bioset);
2859 bioset_exit(&c->writepage_bioset);
2862 int bch2_fs_fsio_init(struct bch_fs *c)
2866 pr_verbose_init(c->opts, "");
2868 if (bioset_init(&c->writepage_bioset,
2869 4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
2870 BIOSET_NEED_BVECS) ||
2871 bioset_init(&c->dio_read_bioset,
2872 4, offsetof(struct dio_read, rbio.bio),
2873 BIOSET_NEED_BVECS) ||
2874 bioset_init(&c->dio_write_bioset,
2875 4, offsetof(struct dio_write, iop.op.wbio.bio),
2879 pr_verbose_init(c->opts, "ret %i", ret);
2883 #endif /* NO_BCACHEFS_FS */