#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <trace/events/writeback.h>

struct bio_set *bch2_writepage_bioset;
struct bio_set *bch2_dio_read_bioset;
struct bio_set *bch2_dio_write_bioset;
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
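/*
 * Illustrative sketch (not in the original): the intended call pattern, as
 * used by the O_DIRECT write path further down - flush and drop the cached
 * pages for the range being written while holding pagecache_block. The
 * function name is hypothetical:
 */
static int __maybe_unused example_invalidate_for_dio(struct address_space *mapping,
						     loff_t pos, size_t count)
{
	int ret;

	pagecache_block_get(&mapping->add_lock);
	ret = write_invalidate_inode_pages_range(mapping, pos,
						 pos + count - 1);
	pagecache_block_put(&mapping->add_lock);

	return ret;
}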
static int inode_set_size(struct bch_inode_info *ei,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	loff_t *new_i_size = p;

	lockdep_assert_held(&ei->update_lock);

	bi->i_size = *new_i_size;

	if (atomic_long_read(&ei->i_size_dirty_count))
		bi->i_flags |= BCH_INODE_I_SIZE_DIRTY;
	else
		bi->i_flags &= ~BCH_INODE_I_SIZE_DIRTY;

	return 0;
}

static int __must_check bch2_write_inode_size(struct bch_fs *c,
					      struct bch_inode_info *ei,
					      loff_t new_size)
{
	return __bch2_write_inode(c, ei, inode_set_size, &new_size);
}
static inline void i_size_dirty_put(struct bch_inode_info *ei)
{
	atomic_long_dec_bug(&ei->i_size_dirty_count);
}

static inline void i_size_dirty_get(struct bch_inode_info *ei)
{
	lockdep_assert_held(&ei->vfs_inode.i_rwsem);

	atomic_long_inc(&ei->i_size_dirty_count);
}
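/*
 * Usage sketch (illustrative, mirroring bch2_truncate() below): the dirty
 * count is raised before writing an i_size that the extents btree doesn't
 * match yet, and dropped once they're consistent again:
 *
 *	mutex_lock(&ei->update_lock);
 *	i_size_dirty_get(ei);
 *	ret = bch2_write_inode_size(c, ei, inode->i_size);
 *	mutex_unlock(&ei->update_lock);
 *
 *	... update/discard extents ...
 *
 *	mutex_lock(&ei->update_lock);
 *	i_size_dirty_put(ei);
 *	ret = bch2_write_inode_size(c, ei, inode->i_size);
 *	mutex_unlock(&ei->update_lock);
 */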
/* i_sectors accounting: */
static enum extent_insert_hook_ret
i_sectors_hook_fn(struct extent_insert_hook *hook,
		  struct bpos committed_pos,
		  struct bpos next_pos,
		  struct bkey_s_c k,
		  const struct bkey_i *insert)
{
	struct i_sectors_hook *h = container_of(hook,
				struct i_sectors_hook, hook);
	s64 sectors = next_pos.offset - committed_pos.offset;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));

	EBUG_ON(!(h->ei->i_flags & BCH_INODE_I_SECTORS_DIRTY));
	EBUG_ON(!atomic_long_read(&h->ei->i_sectors_dirty_count));

	h->sectors += sectors * sign;

	return BTREE_HOOK_DO_INSERT;
}
static int inode_set_i_sectors_dirty(struct bch_inode_info *ei,
				     struct bch_inode_unpacked *bi, void *p)
{
	BUG_ON(bi->i_flags & BCH_INODE_I_SECTORS_DIRTY);

	bi->i_flags |= BCH_INODE_I_SECTORS_DIRTY;
	return 0;
}

static int inode_clear_i_sectors_dirty(struct bch_inode_info *ei,
				       struct bch_inode_unpacked *bi,
				       void *p)
{
	BUG_ON(!(bi->i_flags & BCH_INODE_I_SECTORS_DIRTY));

	bi->i_sectors = atomic64_read(&ei->i_sectors);
	bi->i_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
	return 0;
}
static void i_sectors_dirty_put(struct bch_inode_info *ei,
				struct i_sectors_hook *h)
{
	struct inode *inode = &ei->vfs_inode;

	if (h->sectors) {
		spin_lock(&inode->i_lock);
		inode->i_blocks += h->sectors;
		spin_unlock(&inode->i_lock);

		atomic64_add(h->sectors, &ei->i_sectors);
		EBUG_ON(atomic64_read(&ei->i_sectors) < 0);
	}

	EBUG_ON(atomic_long_read(&ei->i_sectors_dirty_count) <= 0);

	mutex_lock(&ei->update_lock);

	if (atomic_long_dec_and_test(&ei->i_sectors_dirty_count)) {
		struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;
		int ret = __bch2_write_inode(c, ei,
					     inode_clear_i_sectors_dirty, NULL);

		ret = ret; /* error here is deliberately ignored */
	}

	mutex_unlock(&ei->update_lock);
}
static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei,
					    struct i_sectors_hook *h)
{
	int ret = 0;

	h->hook.fn	= i_sectors_hook_fn;
	h->sectors	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	h->ei		= ei;
#endif

	if (atomic_long_inc_not_zero(&ei->i_sectors_dirty_count))
		return 0;

	mutex_lock(&ei->update_lock);

	if (!(ei->i_flags & BCH_INODE_I_SECTORS_DIRTY)) {
		struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;

		ret = __bch2_write_inode(c, ei, inode_set_i_sectors_dirty, NULL);
	}

	if (!ret)
		atomic_long_inc(&ei->i_sectors_dirty_count);

	mutex_unlock(&ei->update_lock);

	return ret;
}
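/*
 * Usage sketch (illustrative, mirroring bch2_fpunch() below): every operation
 * that changes i_sectors brackets its btree updates with
 * i_sectors_dirty_get()/i_sectors_dirty_put(), passing &hook.hook into the
 * btree insert so i_sectors_hook_fn() can tally the delta:
 *
 *	struct i_sectors_hook i_sectors_hook;
 *	int ret;
 *
 *	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
 *	if (ret)
 *		return ret;
 *
 *	ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, ...);
 *
 *	i_sectors_dirty_put(ei, &i_sectors_hook);
 */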
struct bchfs_extent_trans_hook {
	struct bchfs_write_op		*op;
	struct extent_insert_hook	hook;

	struct bch_inode_unpacked	inode_u;
	struct bkey_inode_buf		inode_p;

	bool				need_inode_update;
};
static enum extent_insert_hook_ret
bchfs_extent_update_hook(struct extent_insert_hook *hook,
			 struct bpos committed_pos,
			 struct bpos next_pos,
			 struct bkey_s_c k,
			 const struct bkey_i *insert)
{
	struct bchfs_extent_trans_hook *h = container_of(hook,
				struct bchfs_extent_trans_hook, hook);
	struct bch_inode_info *ei = h->op->ei;
	struct inode *inode = &ei->vfs_inode;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));
	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
	bool do_pack = false;

	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));

	/* XXX: ei->i_size locking */
	if (offset > ei->i_size) {
		BUG_ON(ei->i_flags & BCH_INODE_I_SIZE_DIRTY);

		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		h->inode_u.i_size = offset;
		do_pack = true;

		ei->i_size = offset;

		if (h->op->is_dio)
			i_size_write(inode, offset);
	}

	if (sectors) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		h->inode_u.i_sectors += sectors;
		do_pack = true;

		atomic64_add(sectors, &ei->i_sectors);

		h->op->sectors_added += sectors;

		if (h->op->is_dio) {
			spin_lock(&inode->i_lock);
			inode->i_blocks += sectors;
			spin_unlock(&inode->i_lock);
		}
	}

	if (do_pack)
		bch2_inode_pack(&h->inode_p, &h->inode_u);

	return BTREE_HOOK_DO_INSERT;
}
static int bchfs_write_index_update(struct bch_write_op *wop)
{
	struct bchfs_write_op *op = container_of(wop,
				struct bchfs_write_op, op);
	struct keylist *keys = &op->op.insert_keys;
	struct btree_iter extent_iter, inode_iter;
	struct bchfs_extent_trans_hook hook;
	struct bkey_i *k = bch2_keylist_front(keys);
	int ret;

	BUG_ON(k->k.p.inode != op->ei->vfs_inode.i_ino);

	bch2_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS,
				    bkey_start_pos(&bch2_keylist_front(keys)->k));
	bch2_btree_iter_init_intent(&inode_iter, wop->c, BTREE_ID_INODES,
				    POS(extent_iter.pos.inode, 0));

	hook.op			= op;
	hook.hook.fn		= bchfs_extent_update_hook;
	hook.need_inode_update	= false;

	do {
		ret = bch2_btree_iter_traverse(&extent_iter);
		if (ret)
			break;

		/* XXX: ei->i_size locking */
		k = bch2_keylist_front(keys);
		if (min(k->k.p.offset << 9, op->new_i_size) > op->ei->i_size)
			hook.need_inode_update = true;

		if (hook.need_inode_update) {
			struct bkey_s_c inode;

			if (!btree_iter_linked(&inode_iter))
				bch2_btree_iter_link(&extent_iter, &inode_iter);

			inode = bch2_btree_iter_peek_with_holes(&inode_iter);
			if ((ret = btree_iter_err(inode)))
				break;

			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
				      "inode %llu not found when updating",
				      extent_iter.pos.inode)) {
				ret = -ENOENT;
				break;
			}

			if (WARN_ONCE(bkey_bytes(inode.k) >
				      sizeof(hook.inode_p),
				      "inode %llu too big (%zu bytes, buf %zu)",
				      extent_iter.pos.inode,
				      bkey_bytes(inode.k),
				      sizeof(hook.inode_p))) {
				ret = -ENOENT;
				break;
			}

			bkey_reassemble(&hook.inode_p.inode.k_i, inode);

			ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
						&hook.inode_u);
			if (WARN_ONCE(ret,
				      "error %i unpacking inode %llu",
				      ret, extent_iter.pos.inode)) {
				ret = -ENOENT;
				break;
			}

			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k),
					BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
							&hook.inode_p.inode.k_i, 2));
		} else {
			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k));
		}

		if (ret == -EINTR)
			continue;
		if (ret)
			break;

		bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));

	bch2_btree_iter_unlock(&extent_iter);
	bch2_btree_iter_unlock(&inode_iter);

	return ret;
}
/* stored in page->private: */

/*
 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
 * almost protect it with the page lock, except that bch2_writepage_io_done has
 * to update the sector counts (and from interrupt/bottom half context).
 */
struct bch_page_state {
union { struct {
	/*
	 * page is _fully_ written on disk, and not compressed - which means to
	 * write this page we don't have to reserve space (the new write will
	 * never take up more space on disk than what it's overwriting)
	 */
	unsigned		allocated:1;

	/* Owns PAGE_SECTORS sized reservation: */
	unsigned		reserved:1,
				nr_replicas:4;

	/*
	 * Number of sectors on disk - for i_blocks
	 * Uncompressed size, not compressed size:
	 */
	unsigned		sectors:PAGE_SECTOR_SHIFT + 1,
				dirty_sectors:PAGE_SECTOR_SHIFT + 1;
};
	/* for cmpxchg: */
	unsigned long		v;
};
};
#define page_state_cmpxchg(_ptr, _new, _expr)				\
({									\
	unsigned long _v = READ_ONCE((_ptr)->v);			\
	struct bch_page_state _old;					\
									\
	do {								\
		_old.v = _new.v = _v;					\
		_expr;							\
									\
		EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
	} while (_old.v != _new.v &&					\
		 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v);	\
									\
	_old;								\
})
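/*
 * Usage sketch (illustrative, mirroring bch2_get_page_reservation() below):
 * _expr edits _new and may run multiple times if the cmpxchg races; the
 * macro evaluates to the old state, so the caller can tell whether it was
 * the one that made the transition:
 *
 *	old = page_state_cmpxchg(page_state(page), new, {
 *		new.reserved	= 1;
 *		new.nr_replicas	= res.nr_replicas;
 *	});
 *	if (!old.reserved)
 *		... we installed the reservation ...
 */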
static inline struct bch_page_state *page_state(struct page *page)
{
	struct bch_page_state *s = (void *) &page->private;

	BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));

	if (!PagePrivate(page))
		SetPagePrivate(page);

	return s;
}
static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
{
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	s = page_state_cmpxchg(page_state(page), s, {
		res.nr_replicas = s.nr_replicas;
		s.reserved = 0;
	});

	if (s.reserved)
		bch2_disk_reservation_put(c, &res);
}
static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
				     bool check_enospc)
{
	struct bch_page_state *s = page_state(page), new;
	struct disk_reservation res;
	int ret;

	BUG_ON(s->allocated && s->sectors != PAGE_SECTORS);

	if (s->allocated || s->reserved)
		return 0;

	ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
					? BCH_DISK_RESERVATION_NOFAIL : 0);
	if (ret)
		return ret;

	page_state_cmpxchg(s, new, {
		if (new.reserved) {
			bch2_disk_reservation_put(c, &res);
			break;
		}
		new.reserved	= 1;
		new.nr_replicas	= res.nr_replicas;
	});

	return 0;
}
static void bch2_clear_page_bits(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	if (!PagePrivate(page))
		return;

	s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
	ClearPagePrivate(page);

	if (s.dirty_sectors) {
		spin_lock(&inode->i_lock);
		inode->i_blocks -= s.dirty_sectors;
		spin_unlock(&inode->i_lock);
	}

	if (s.reserved)
		bch2_disk_reservation_put(c, &res);
}
int bch2_set_page_dirty(struct page *page)
{
	struct bch_page_state old, new;

	old = page_state_cmpxchg(page_state(page), new,
		new.dirty_sectors = PAGE_SECTORS - new.sectors;
	);

	if (old.dirty_sectors != new.dirty_sectors) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		inode->i_blocks += new.dirty_sectors - old.dirty_sectors;
		spin_unlock(&inode->i_lock);
	}

	return __set_page_dirty_nobuffers(page);
}
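/*
 * Worked example (editorial): with 4k pages, PAGE_SECTORS == 8; a page that
 * already has 2 sectors written on disk (sectors == 2) and is then dirtied
 * gets dirty_sectors = 8 - 2 = 6, so i_blocks grows only by the sectors not
 * already accounted for by the on-disk extent.
 */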
/* readpages/writepages: */

static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);

	return bio->bi_vcnt < bio->bi_max_vecs &&
		bio_end_sector(bio) == offset;
}

static void __bio_add_page(struct bio *bio, struct page *page)
{
	bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	bio->bi_iter.bi_size += PAGE_SIZE;
}

static int bio_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);

	BUG_ON(!bio->bi_max_vecs);

	if (!bio->bi_vcnt)
		bio->bi_iter.bi_sector = offset;
	else if (!bio_can_add_page_contig(bio, page))
		return -1;

	__bio_add_page(bio, page);
	return 0;
}
static void bch2_readpages_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_page(page);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	struct list_head	pages;
	unsigned		nr_pages;
};

static int readpage_add_page(struct readpages_iter *iter, struct page *page)
{
	struct bch_page_state *s = page_state(page);
	int ret;

	/* initially assume fully allocated; holes are discovered on read: */
	s->allocated	= 1;
	s->sectors	= 0;

	prefetchw(&page->flags);
	ret = add_to_page_cache_lru(page, iter->mapping,
				    page->index, GFP_NOFS);
	put_page(page);
	return ret;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	while (iter->nr_pages) {
		struct page *page =
			list_last_entry(&iter->pages, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		iter->nr_pages--;

		if (!readpage_add_page(iter, page))
			return page;
	}

	return NULL;
}

#define for_each_readpage_page(_iter, _page)				\
	for (;								\
	     ((_page) = __readpage_next_page(&(_iter)));)		\
static void bch2_mark_pages_unalloc(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter)
		page_state(bv.bv_page)->allocated = 0;
}

static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = page_state(bv.bv_page);

		/* sectors in @k from the start of this page: */
		unsigned k_sectors = k.k->size - (iter.bi_sector -
						  bkey_start_offset(k.k));
		unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);

		if (!s->sectors)
			s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);
		else
			s->nr_replicas = min_t(unsigned, s->nr_replicas,
					       bch2_extent_nr_dirty_ptrs(k));

		BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
		s->sectors += page_sectors;
	}
}
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio, u64 offset,
				bool get_more)
{
	struct page *page;
	pgoff_t page_offset;
	int ret;

	while (bio_end_sector(bio) < offset &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;

		if (iter->nr_pages) {
			page = list_last_entry(&iter->pages, struct page, lru);
			if (page->index != page_offset)
				break;

			list_del(&page->lru);
			iter->nr_pages--;
		} else if (get_more) {
			page = radix_tree_lookup(&iter->mapping->page_tree,
						 page_offset);
			if (page && !radix_tree_exceptional_entry(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			page->index = page_offset;
			ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
		} else {
			break;
		}

		ret = readpage_add_page(iter, page);
		if (ret)
			break;

		__bio_add_page(bio, page);
	}

	SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
}
static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inode,
		       struct readpages_iter *readpages_iter)
{
	struct bio *bio = &rbio->bio;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_PROMOTE|
		BCH_READ_MAY_REUSE_BIO;

	while (1) {
		struct extent_pick_ptr pick;
		BKEY_PADDED(k) tmp;
		struct bkey_s_c k;
		unsigned bytes;
		bool is_last;

		bch2_btree_iter_set_pos(iter, POS(inode, bio->bi_iter.bi_sector));

		k = bch2_btree_iter_peek_with_holes(iter);
		if (IS_ERR_OR_NULL(k.k)) {
			int ret = bch2_btree_iter_unlock(iter);

			bcache_io_error(c, bio, "btree IO error %i", ret);
			bio_endio(bio);
			return;
		}

		bkey_reassemble(&tmp.k, k);
		bch2_btree_iter_unlock(iter);
		k = bkey_i_to_s_c(&tmp.k);

		bch2_extent_pick_ptr(c, k, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");
			bio_endio(bio);
			return;
		}

		if (readpages_iter)
			readpage_bio_extend(readpages_iter,
					    bio, k.k->p.offset,
					    pick.ca &&
					    (pick.crc.csum_type ||
					     pick.crc.compression_type));

		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
			 bio->bi_iter.bi_sector) << 9;
		is_last = bytes == bio->bi_iter.bi_size;
		swap(bio->bi_iter.bi_size, bytes);

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(bio, k);

		if (!bkey_extent_is_allocation(k.k) ||
		    bkey_extent_is_compressed(k))
			bch2_mark_pages_unalloc(bio);

		if (is_last)
			flags |= BCH_READ_IS_LAST;

		if (pick.ca) {
			PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
				c->prio_clock[READ].hand;

			bch2_read_extent(c, rbio, k, &pick, flags);
			flags &= ~BCH_READ_MAY_REUSE_BIO;
		} else {
			zero_fill_bio(bio);

			if (is_last)
				bio_endio(bio);
		}

		if (is_last)
			return;

		swap(bio->bi_iter.bi_size, bytes);
		bio_advance(bio, bytes);
	}
}
int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct btree_iter iter;
	struct page *page;
	struct readpages_iter readpages_iter = {
		.mapping = mapping, .nr_pages = nr_pages
	};

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);

	INIT_LIST_HEAD(&readpages_iter.pages);
	list_add(&readpages_iter.pages, pages);
	list_del_init(pages);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		unsigned n = max(min_t(unsigned, readpages_iter.nr_pages + 1,
				       BIO_MAX_PAGES),
				 BCH_ENCODED_EXTENT_MAX >> PAGE_SECTOR_SHIFT);

		struct bch_read_bio *rbio =
			container_of(bio_alloc_bioset(GFP_NOFS, n,
						      &c->bio_read),
				     struct bch_read_bio, bio);

		rbio->bio.bi_end_io = bch2_readpages_end_io;
		bio_add_page_contig(&rbio->bio, page);
		bchfs_read(c, &iter, rbio, inode->i_ino, &readpages_iter);
	}

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

	return 0;
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inode, struct page *page)
{
	struct btree_iter iter;

	/*
	 * Initialize page state:
	 * If a page is partly allocated and partly a hole, we want it to be
	 * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages
	 * allocated and then mark them unallocated as we find holes:
	 *
	 * Note that the bio hasn't been split yet - it's the only bio that
	 * points to these pages. As we walk extents and split @bio, that
	 * won't necessarily be true, the splits won't necessarily be on page
	 * boundaries:
	 */
	struct bch_page_state *s = page_state(page);

	EBUG_ON(s->reserved);
	s->allocated	= 1;
	s->sectors	= 0;

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	bio_add_page_contig(&rbio->bio, page);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
	bchfs_read(c, &iter, rbio, inode, NULL);
}
int bch2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct bch_read_bio *rbio;

	rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
					     &c->bio_read),
			    struct bch_read_bio, bio);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->i_ino, page);
	return 0;
}
struct bch_writepage_state {
	struct bch_writepage_io	*io;
};

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_put(bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.op.c;
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bvec;
	unsigned i;

	atomic_sub(bio->bi_vcnt, &c->writeback_pages);
	wake_up(&c->writeback_wait);

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (io->op.op.error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}

		if (io->op.op.written >= PAGE_SECTORS) {
			struct bch_page_state old, new;

			old = page_state_cmpxchg(page_state(page), new, {
				new.sectors = PAGE_SECTORS;
				new.dirty_sectors = 0;
			});

			io->op.sectors_added -= old.dirty_sectors;
			io->op.op.written -= PAGE_SECTORS;
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 *
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 */
	BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	if (io->op.sectors_added) {
		struct inode *inode = &io->op.ei->vfs_inode;

		spin_lock(&inode->i_lock);
		inode->i_blocks += io->op.sectors_added;
		spin_unlock(&inode->i_lock);
	}

	bio_for_each_segment_all(bvec, bio, i)
		end_page_writeback(bvec->bv_page);

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	atomic_add(io->bio.bio.bi_vcnt, &io->op.op.c->writeback_pages);

	io->op.op.pos.offset = io->bio.bio.bi_iter.bi_sector;

	closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *ei,
				    struct page *page)
{
	u64 inum = ei->vfs_inode.i_ino;
	unsigned nr_replicas = page_state(page)->nr_replicas;

	EBUG_ON(!nr_replicas);
	/* XXX: disk_reservation->gen isn't plumbed through */
alloc_io:
	if (!w->io) {
		w->io = container_of(bio_alloc_bioset(GFP_NOFS,
						      BIO_MAX_PAGES,
						      bch2_writepage_bioset),
				     struct bch_writepage_io, bio.bio);

		closure_init(&w->io->cl, NULL);
		w->io->op.ei		= ei;
		w->io->op.sectors_added	= 0;
		w->io->op.is_dio	= false;
		bch2_write_op_init(&w->io->op.op, c, &w->io->bio,
				   (struct disk_reservation) {
					.nr_replicas = c->opts.data_replicas,
				   },
				   foreground_write_point(c, inum),
				   POS(inum, 0),
				   &ei->journal_seq, 0);
		w->io->op.op.index_update_fn = bchfs_write_index_update;
	}

	if (w->io->op.op.res.nr_replicas != nr_replicas ||
	    bio_add_page_contig(&w->io->bio.bio, page)) {
		bch2_writepage_do_io(w);
		goto alloc_io;
	}

	/*
	 * We shouldn't ever be handed pages for multiple inodes in a single
	 * pass - right?
	 */
	BUG_ON(ei != w->io->op.ei);
}
static int __bch2_writepage(struct bch_fs *c, struct page *page,
			    struct writeback_control *wbc,
			    struct bch_writepage_state *w)
{
	struct inode *inode = page->mapping->host;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_page_state new, old;
	unsigned offset;
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	bch2_writepage_io_alloc(c, w, ei, page);

	/* while page is locked: */
	w->io->op.new_i_size = i_size;

	if (wbc->sync_mode == WB_SYNC_ALL)
		w->io->bio.bio.bi_opf |= WRITE_SYNC;

	/* Before unlocking the page, transfer reservation to w->io: */
	old = page_state_cmpxchg(page_state(page), new, {
		EBUG_ON(!new.reserved &&
			(new.sectors != PAGE_SECTORS ||
			 !new.allocated));

		if (new.allocated &&
		    w->io->op.op.compression_type != BCH_COMPRESSION_NONE)
			new.allocated = 0;
		else if (!new.reserved)
			goto out;
		new.reserved = 0;
	});

	w->io->op.op.res.sectors += PAGE_SECTORS *
		(old.reserved - new.reserved) *
		old.nr_replicas;
out:
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	struct pagecache_iter iter;
	struct page *page;
	int ret = 0;
	int done = 0;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	done_index = index;

	for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
		done_index = page->index;

		if (w.io &&
		    !bio_can_add_page_contig(&w.io->bio.bio, page))
			bch2_writepage_do_io(&w);

		if (!w.io &&
		    atomic_read(&c->writeback_pages) >=
		    c->writeback_pages_max) {
			/* don't sleep with pages pinned: */
			pagecache_iter_release(&iter);

			__wait_event(c->writeback_wait,
				     atomic_read(&c->writeback_pages) <
				     c->writeback_pages_max);
		}

		lock_page(page);

		/*
		 * Page truncated or invalidated. We can freely skip it
		 * then, even for data integrity operations: the page
		 * has disappeared concurrently, so there could be no
		 * real expectation of this data integrity operation
		 * even if there is now a new, dirty page at the same
		 * pagecache address.
		 */
		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
		ret = __bch2_writepage(c, page, wbc, &w);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				done_index = page->index + 1;
				done = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE) {
			done = 1;
			break;
		}
	}
	pagecache_iter_release(&iter);

	if (w.io)
		bch2_writepage_do_io(&w);

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	int ret;

	ret = __bch2_writepage(c, page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
					     &c->bio_read),
			    struct bch_read_bio, bio);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->i_ino, page);
	wait_for_completion(&done);

	ret = rbio->bio.bi_error;
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	BUG_ON(inode_unhashed(mapping->host));

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_get_page_reservation(c, page, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	pagecache_add_put(&mapping->add_lock);
	return ret;
}
int bch2_write_end(struct file *filp, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;

	lockdep_assert_held(&inode->i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);
		if (!PageDirty(page))
			set_page_dirty(page);
	} else {
		bch2_put_page_reservation(c, page);
	}

	unlock_page(page);
	put_page(page);
	pagecache_add_put(&mapping->add_lock);

	return copied;
}
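/*
 * Note (editorial): in the buffered write path, generic_perform_write()
 * (called from __bch2_write_iter() below) drives these two hooks for every
 * page: bch2_write_begin() pins the page and takes a disk reservation,
 * userspace data is copied in, then bch2_write_end() updates i_size and
 * marks the page dirty - dropping the reservation only if nothing was
 * actually copied.
 */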
static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_error)
		dio->ret = bio->bi_error;

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}

static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req,
			       struct file *file, struct inode *inode,
			       struct iov_iter *iter, loff_t offset)
{
	struct dio_read *dio;
	struct bio *bio;
	bool sync = is_sync_kiocb(req);
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(inode) - offset));
	iov_iter_truncate(iter, round_up(ret, block_bytes(c)));

	if (!ret)
		return ret;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch2_dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_get_user_pages(bio, iter, 1);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_error = ret;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, container_of(bio,
				struct bch_read_bio, bio),
			  inode->i_ino);
	}

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
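/*
 * Note on the "horrible hack" above (editorial): the closure's refcount is
 * pre-biased at submission so that the final closure_put() from the last
 * split bio either runs bch2_dio_read_complete() directly (async case, via
 * the destructor bit) or drops the count the submitter is waiting on (sync
 * case, via the extra +1), so completion never needs a separate atomic_sub
 * at the end of submission - matching the original comment's stated intent.
 */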
static long __bch2_dio_write_complete(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_inode;
	long ret = dio->error ?: dio->written;

	bch2_disk_reservation_put(dio->c, &dio->res);

	__pagecache_block_put(&mapping->add_lock);
	inode_dio_end(inode);

	if (dio->iovec && dio->iovec != dio->inline_vecs)
		kfree(dio->iovec);

	bio_put(&dio->bio.bio);
	return ret;
}

static void bch2_dio_write_complete(struct closure *cl)
{
	struct dio_write *dio = container_of(cl, struct dio_write, cl);
	struct kiocb *req = dio->req;

	req->ki_complete(req, __bch2_dio_write_complete(dio), 0);
}

static void bch2_dio_write_done(struct dio_write *dio)
{
	struct bio_vec *bv;
	int i;

	dio->written += dio->iop.op.written << 9;

	if (dio->iop.op.error)
		dio->error = dio->iop.op.error;

	bio_for_each_segment_all(bv, &dio->bio.bio, i)
		put_page(bv->bv_page);

	if (dio->iter.count)
		bio_reset(&dio->bio.bio);
}
static void bch2_do_direct_IO_write(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct inode *inode = file->f_inode;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bio *bio = &dio->bio.bio;
	unsigned flags = 0;
	int ret;

	if ((dio->req->ki_flags & IOCB_DSYNC) &&
	    !dio->c->opts.journal_flush_disabled)
		flags |= BCH_WRITE_FLUSH;

	bio->bi_iter.bi_sector = (dio->offset + dio->written) >> 9;

	ret = bio_get_user_pages(bio, &dio->iter, 0);
	if (ret < 0) {
		/*
		 * these didn't get initialized, but bch2_dio_write_done() will
		 * look at them:
		 */
		dio->iop.op.error = 0;
		dio->iop.op.written = 0;
		dio->error = ret;
		return;
	}

	dio->iop.ei		= ei;
	dio->iop.sectors_added	= 0;
	dio->iop.is_dio		= true;
	dio->iop.new_i_size	= U64_MAX;
	bch2_write_op_init(&dio->iop.op, dio->c, &dio->bio,
			   dio->res,
			   foreground_write_point(dio->c, inode->i_ino),
			   POS(inode->i_ino, bio->bi_iter.bi_sector),
			   &ei->journal_seq, flags);
	dio->iop.op.index_update_fn = bchfs_write_index_update;

	dio->res.sectors -= bio_sectors(bio);
	dio->iop.op.res.sectors = bio_sectors(bio);

	task_io_account_write(bio->bi_iter.bi_size);

	closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
}
static void bch2_dio_write_loop_async(struct closure *cl)
{
	struct dio_write *dio =
		container_of(cl, struct dio_write, cl);
	struct address_space *mapping = dio->req->ki_filp->f_mapping;

	bch2_dio_write_done(dio);

	if (dio->iter.count && !dio->error) {
		use_mm(dio->mm);
		pagecache_block_get(&mapping->add_lock);

		bch2_do_direct_IO_write(dio);

		pagecache_block_put(&mapping->add_lock);
		unuse_mm(dio->mm);

		continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
	} else {
#if 0
		closure_return_with_destructor(cl, bch2_dio_write_complete);
#else
		closure_debug_destroy(cl);
		bch2_dio_write_complete(cl);
#endif
	}
}
static int bch2_direct_IO_write(struct bch_fs *c, struct kiocb *req,
				struct file *file, struct inode *inode,
				struct iov_iter *iter, loff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct dio_write *dio;
	struct bio *bio;
	ssize_t ret;
	bool sync = is_sync_kiocb(req);

	lockdep_assert_held(&inode->i_rwsem);

	if (unlikely(!iter->count))
		return 0;

	if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch2_dio_write_bioset);
	dio = container_of(bio, struct dio_write, bio.bio);
	dio->req	= req;
	dio->c		= c;
	dio->written	= 0;
	dio->error	= 0;
	dio->offset	= offset;
	dio->iovec	= NULL;
	dio->iter	= *iter;
	dio->mm		= current->mm;
	closure_init(&dio->cl, NULL);

	if (offset + iter->count > inode->i_size)
		sync = true;

	/*
	 * XXX: we shouldn't return -ENOSPC if we're overwriting existing data -
	 * if getting a reservation fails we should check if we are doing an
	 * overwrite.
	 *
	 * Have to then guard against racing with truncate (deleting data that
	 * we would have been overwriting)
	 */
	ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
	if (unlikely(ret)) {
		closure_debug_destroy(&dio->cl);
		bio_put(bio);
		return ret;
	}

	inode_dio_begin(inode);
	__pagecache_block_get(&mapping->add_lock);

	if (sync) {
		do {
			bch2_do_direct_IO_write(dio);

			closure_sync(&dio->cl);
			bch2_dio_write_done(dio);
		} while (dio->iter.count && !dio->error);

		closure_debug_destroy(&dio->cl);
		return __bch2_dio_write_complete(dio);
	} else {
		bch2_do_direct_IO_write(dio);

		if (dio->iter.count && !dio->error) {
			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				dio->iovec = kmalloc(dio->iter.nr_segs *
						     sizeof(struct iovec),
						     GFP_KERNEL);
				if (!dio->iovec)
					dio->error = -ENOMEM;
			} else {
				dio->iovec = dio->inline_vecs;
			}

			memcpy(dio->iovec,
			       dio->iter.iov,
			       dio->iter.nr_segs * sizeof(struct iovec));
			dio->iter.iov = dio->iovec;
		}

		continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL);
		return -EIOCBQUEUED;
	}
}
ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct inode *inode = file->f_inode;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct blk_plug plug;
	ssize_t ret;

	blk_start_plug(&plug);
	ret = ((iov_iter_rw(iter) == WRITE)
		? bch2_direct_IO_write
		: bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
	blk_finish_plug(&plug);

	return ret;
}
static ssize_t
bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_inode;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct address_space *mapping = file->f_mapping;
	loff_t pos = iocb->ki_pos;
	ssize_t ret;

	pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	ret = write_invalidate_inode_pages_range(file->f_mapping, pos,
					pos + iov_iter_count(iter) - 1);
	if (unlikely(ret))
		goto err;

	ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos);
err:
	pagecache_block_put(&mapping->add_lock);

	return ret;
}
static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	ret = file_remove_privs(file);
	if (ret)
		goto out;

	ret = file_update_time(file);
	if (ret)
		goto out;

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch2_direct_write(iocb, from)
		: generic_perform_write(file, from, iocb->ki_pos);

	if (likely(ret > 0))
		iocb->ki_pos += ret;
out:
	current->backing_dev_info = NULL;
	return ret;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	bool direct = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __bch2_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
int bch2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	lock_page(page);
	if (page->mapping != mapping ||
	    page_offset(page) > i_size_read(inode)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (bch2_get_page_reservation(c, page, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (!PageDirty(page))
		set_page_dirty(page);
	wait_for_stable_page(page);
out:
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		*page_state(newpage) = *page_state(page);
		ClearPagePrivate(page);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_fs *c = inode->i_sb->s_fs_info;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	return bch2_journal_flush_seq(&c->journal, ei->journal_seq);
}
static int __bch2_truncate_page(struct address_space *mapping,
				pgoff_t index, loff_t start, loff_t end)
{
	struct inode *inode = mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		struct btree_iter iter;
		struct bkey_s_c k = bkey_s_c_null;

		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
				   POS(inode->i_ino,
				       index << (PAGE_SHIFT - 9)), k) {
			if (bkey_cmp(bkey_start_pos(k.k),
				     POS(inode->i_ino,
					 (index + 1) << (PAGE_SHIFT - 9))) >= 0)
				break;

			if (k.k->type != KEY_TYPE_DISCARD &&
			    k.k->type != BCH_RESERVATION) {
				bch2_btree_iter_unlock(&iter);
				goto create;
			}
		}
		bch2_btree_iter_unlock(&iter);
		return 0;
create:
		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
	 */
	ret = bch2_get_page_reservation(c, page, false);
	BUG_ON(ret);

	if (index == start >> PAGE_SHIFT &&
	    index == end >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, end_offset);
	else if (index == start >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, PAGE_SIZE);
	else if (index == end >> PAGE_SHIFT)
		zero_user_segment(page, 0, end_offset);

	if (!PageDirty(page))
		set_page_dirty(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct address_space *mapping, loff_t from)
{
	return __bch2_truncate_page(mapping, from >> PAGE_SHIFT,
				    from, from + PAGE_SIZE);
}
int bch2_truncate(struct inode *inode, struct iattr *iattr)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_fs *c = inode->i_sb->s_fs_info;
	bool shrink = iattr->ia_size <= inode->i_size;
	int ret = 0;

	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	truncate_setsize(inode, iattr->ia_size);

	/* sync appends.. */
	/* XXX what protects ei->i_size? */
	if (iattr->ia_size > ei->i_size)
		ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
	if (ret)
		goto err_put_pagecache;

	mutex_lock(&ei->update_lock);
	i_size_dirty_get(ei);
	ret = bch2_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	if (unlikely(ret))
		goto err;

	/*
	 * There might be persistent reservations (from fallocate())
	 * above i_size, which bch2_inode_truncate() will discard - we're
	 * only supposed to discard them if we're doing a real truncate
	 * here (new i_size < current i_size):
	 */
	if (shrink) {
		struct i_sectors_hook i_sectors_hook;

		ret = i_sectors_dirty_get(ei, &i_sectors_hook);
		if (unlikely(ret))
			goto err;

		ret = bch2_truncate_page(inode->i_mapping, iattr->ia_size);
		if (unlikely(ret)) {
			i_sectors_dirty_put(ei, &i_sectors_hook);
			goto err;
		}

		ret = bch2_inode_truncate(c, inode->i_ino,
					  round_up(iattr->ia_size, PAGE_SIZE) >> 9,
					  &i_sectors_hook.hook,
					  &ei->journal_seq);

		i_sectors_dirty_put(ei, &i_sectors_hook);

		if (unlikely(ret))
			goto err;
	}

	mutex_lock(&ei->update_lock);
	setattr_copy(inode, iattr);
	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);

	/* clear I_SIZE_DIRTY: */
	i_size_dirty_put(ei);
	ret = bch2_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	pagecache_block_put(&mapping->add_lock);

	return ret;
err:
	i_size_dirty_put(ei);
err_put_pagecache:
	pagecache_block_put(&mapping->add_lock);
	return ret;
}
static long bch2_fpunch(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_fs *c = inode->i_sb->s_fs_info;
	u64 ino = inode->i_ino;
	u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
	u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
	int ret = 0;

	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch2_truncate_page(inode->i_mapping,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto out;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode->i_mapping,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto out;
	}

	truncate_pagecache_range(inode, offset, offset + len - 1);

	if (discard_start < discard_end) {
		struct disk_reservation disk_res;
		struct i_sectors_hook i_sectors_hook;

		BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));

		ret = i_sectors_dirty_get(ei, &i_sectors_hook);
		if (unlikely(ret))
			goto out;

		ret = bch2_discard(c,
				   POS(ino, discard_start),
				   POS(ino, discard_end),
				   ZERO_VERSION,
				   &disk_res,
				   &i_sectors_hook.hook,
				   &ei->journal_seq);

		i_sectors_dirty_put(ei, &i_sectors_hook);
		bch2_disk_reservation_put(c, &disk_res);
	}
out:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return ret;
}
static long bch2_fcollapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct btree_iter src;
	struct btree_iter dst;
	BKEY_PADDED(k) copy;
	struct bkey_s_c k;
	struct i_sectors_hook i_sectors_hook;
	loff_t new_size;
	int ret;

	if ((offset | len) & (PAGE_SIZE - 1))
		return -EINVAL;

	bch2_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS,
				    POS(inode->i_ino, offset >> 9));
	/* position will be set from dst iter's position: */
	bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN);
	bch2_btree_iter_link(&src, &dst);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	ret = -EINVAL;
	if (offset + len >= inode->i_size)
		goto err;

	if (inode->i_size < len)
		goto err;

	new_size = inode->i_size - len;

	ret = write_invalidate_inode_pages_range(inode->i_mapping,
						 offset, LLONG_MAX);
	if (ret)
		goto err;

	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
	if (ret)
		goto err;

	while (bkey_cmp(dst.pos,
			POS(inode->i_ino,
			    round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
		struct disk_reservation disk_res;

		bch2_btree_iter_set_pos(&src,
			POS(dst.pos.inode, dst.pos.offset + (len >> 9)));

		ret = bch2_btree_iter_traverse(&dst);
		if (ret)
			goto btree_iter_err;

		k = bch2_btree_iter_peek_with_holes(&src);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		bkey_reassemble(&copy.k, k);

		if (bkey_deleted(&copy.k.k))
			copy.k.k.type = KEY_TYPE_DISCARD;

		bch2_cut_front(src.pos, &copy.k);
		copy.k.k.p.offset -= len >> 9;

		BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));

		ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
						BCH_DISK_RESERVATION_NOFAIL);
		BUG_ON(ret);

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					   &ei->journal_seq,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&dst, &copy.k));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_unwind;

		bch2_btree_iter_cond_resched(&src);
	}

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);

	ret = bch2_inode_truncate(c, inode->i_ino,
				  round_up(new_size, PAGE_SIZE) >> 9,
				  &i_sectors_hook.hook,
				  &ei->journal_seq);
	if (ret)
		goto err_unwind;

	i_sectors_dirty_put(ei, &i_sectors_hook);

	mutex_lock(&ei->update_lock);
	i_size_write(inode, new_size);
	ret = bch2_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return ret;
err_unwind:
	/*
	 * XXX: we've left data with multiple pointers... which isn't a _super_
	 * serious problem...
	 */
	i_sectors_dirty_put(ei, &i_sectors_hook);
err:
	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);
	return ret;
}
static long bch2_fallocate(struct inode *inode, int mode,
			   loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct i_sectors_hook i_sectors_hook;
	struct btree_iter iter;
	struct bpos end;
	loff_t block_start, block_end;
	loff_t new_size = offset + len;
	unsigned sectors;
	unsigned replicas = READ_ONCE(c->opts.data_replicas);
	int ret;

	bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN);

	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->i_size) {
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto err;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch2_truncate_page(inode->i_mapping,
					   offset >> PAGE_SHIFT,
					   offset, offset + len);

		if (!ret &&
		    offset >> PAGE_SHIFT !=
		    (offset + len) >> PAGE_SHIFT)
			ret = __bch2_truncate_page(inode->i_mapping,
						   (offset + len) >> PAGE_SHIFT,
						   offset, offset + len);

		if (unlikely(ret))
			goto err;

		truncate_pagecache_range(inode, offset, offset + len - 1);

		block_start	= round_up(offset, PAGE_SIZE);
		block_end	= round_down(offset + len, PAGE_SIZE);
	} else {
		block_start	= round_down(offset, PAGE_SIZE);
		block_end	= round_up(offset + len, PAGE_SIZE);
	}

	bch2_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9));
	end = POS(inode->i_ino, block_end >> 9);

	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
	if (unlikely(ret))
		goto err;

	while (bkey_cmp(iter.pos, end) < 0) {
		struct disk_reservation disk_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;

		k = bch2_btree_iter_peek_with_holes(&iter);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		/* already reserved */
		if (k.k->type == BCH_RESERVATION &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_advance_pos(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k)) {
			if (!(mode & FALLOC_FL_ZERO_RANGE)) {
				bch2_btree_iter_advance_pos(&iter);
				continue;
			}
		}

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type	= BCH_RESERVATION;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch2_cut_front(iter.pos, &reservation.k_i);
		bch2_cut_back(end, &reservation.k);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);

		if (reservation.v.nr_replicas < replicas ||
		    bkey_extent_is_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res,
							sectors, 0);
			if (ret)
				goto err_put_sectors_dirty;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					   &ei->journal_seq,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
	}
	bch2_btree_iter_unlock(&iter);

	i_sectors_dirty_put(ei, &i_sectors_hook);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->i_size) {
		i_size_write(inode, new_size);

		mutex_lock(&ei->update_lock);
		ret = bch2_write_inode_size(c, ei, inode->i_size);
		mutex_unlock(&ei->update_lock);
	}

	if ((mode & FALLOC_FL_KEEP_SIZE) &&
	    (mode & FALLOC_FL_ZERO_RANGE) &&
	    ei->i_size != inode->i_size) {
		/* sync appends.. */
		ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
		if (ret)
			goto err;

		if (ei->i_size != inode->i_size) {
			mutex_lock(&ei->update_lock);
			ret = bch2_write_inode_size(c, ei, inode->i_size);
			mutex_unlock(&ei->update_lock);
		}
	}

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return 0;
err_put_sectors_dirty:
	i_sectors_dirty_put(ei, &i_sectors_hook);
err:
	bch2_btree_iter_unlock(&iter);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);
	return ret;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bch2_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bch2_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bch2_fcollapse(inode, offset, len);

	return -EOPNOTSUPP;
}
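/*
 * Usage sketch (illustrative): the mode combinations accepted above map to
 * the usual fallocate(2) calls, e.g.:
 *
 *	fallocate(fd, 0, off, len);                          // bch2_fallocate()
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);       // bch2_fallocate()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, off, len);        // bch2_fpunch()
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);   // bch2_fcollapse()
 */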
static bool page_is_data(struct page *page)
{
	/* XXX: should only have to check PageDirty */
	return PagePrivate(page) &&
		(page_state(page)->sectors ||
		 page_state(page)->dirty_sectors);
}

static loff_t bch2_next_pagecache_data(struct inode *inode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++) {
		if (find_get_pages(mapping, index, 1, &page)) {
			lock_page(page);
			index = page->index;

			if (page_is_data(page))
				end_offset =
					min(end_offset,
					    max(start_offset,
						((loff_t) index) << PAGE_SHIFT));
			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct inode *inode = file->f_mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(inode);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->i_ino, offset >> 9), k) {
		if (k.k->p.inode != inode->i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset << 9 > isize)
			break;
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_next_pagecache_data(inode, offset, next_data);

	if (next_data > isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	bool ret;

	page = find_lock_entry(mapping, index);
	if (!page || radix_tree_exception(page))
		return false;

	ret = page_is_data(page);
	unlock_page(page);
	put_page(page);

	return ret;
}

static loff_t bch2_next_pagecache_hole(struct inode *inode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++)
		if (!page_slot_is_data(mapping, index))
			end_offset = max(start_offset,
					 ((loff_t) index) << PAGE_SHIFT);

	return end_offset;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct inode *inode = file->f_mapping->host;
	struct bch_fs *c = inode->i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(inode);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
				      POS(inode->i_ino, offset >> 9), k) {
		if (k.k->p.inode != inode->i_ino) {
			next_hole = bch2_next_pagecache_hole(inode,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_next_pagecache_hole(inode,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch2_seek_data(file, offset);
	case SEEK_HOLE:
		return bch2_seek_hole(file, offset);
	default:
		return -EINVAL;
	}
}