#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>

struct bio_set *bch2_writepage_bioset;
struct bio_set *bch2_dio_read_bioset;
struct bio_set *bch2_dio_write_bioset;

/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a
	 * process is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}

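/*
 * The retry loop above relies on invalidate_inode_pages2_range()
 * returning -EBUSY when a page couldn't be invalidated (e.g. it was
 * redirtied while we were flushing), so each -EBUSY round trips back
 * through another flush.
 */
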
static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	loff_t *new_i_size = p;

	lockdep_assert_held(&inode->ei_update_lock);

	bi->bi_size = *new_i_size;

	if (atomic_long_read(&inode->ei_size_dirty_count))
		bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	else
		bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;

	return 0;
}

static int __must_check bch2_write_inode_size(struct bch_fs *c,
					      struct bch_inode_info *inode,
					      loff_t new_size)
{
	return __bch2_write_inode(c, inode, inode_set_size, &new_size);
}

static inline void i_size_dirty_put(struct bch_inode_info *inode)
{
	atomic_long_dec_bug(&inode->ei_size_dirty_count);
}

static inline void i_size_dirty_get(struct bch_inode_info *inode)
{
	lockdep_assert_held(&inode->v.i_rwsem);

	atomic_long_inc(&inode->ei_size_dirty_count);
}

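/*
 * Usage sketch for the i_size dirty refcount (cf. bch2_truncate()
 * below): take the count before an operation that can leave the
 * on-disk i_size inconsistent, write the inode, then drop it:
 *
 *	mutex_lock(&inode->ei_update_lock);
 *	i_size_dirty_get(inode);
 *	ret = bch2_write_inode_size(c, inode, inode->v.i_size);
 *	mutex_unlock(&inode->ei_update_lock);
 *	...
 *	i_size_dirty_put(inode);
 *
 * While the count is nonzero, inode_set_size() persists
 * BCH_INODE_I_SIZE_DIRTY so recovery knows not to trust i_size.
 */
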
/* i_sectors accounting: */

static enum extent_insert_hook_ret
i_sectors_hook_fn(struct extent_insert_hook *hook,
		  struct bpos committed_pos,
		  struct bpos next_pos,
		  struct bkey_s_c k,
		  const struct bkey_i *insert)
{
	struct i_sectors_hook *h = container_of(hook,
				struct i_sectors_hook, hook);
	s64 sectors = next_pos.offset - committed_pos.offset;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));

	EBUG_ON(!(h->inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY));
	EBUG_ON(!atomic_long_read(&h->inode->ei_sectors_dirty_count));

	h->sectors += sectors * sign;

	return BTREE_HOOK_DO_INSERT;
}

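/*
 * Worked example: overwriting a hole (k.k == NULL) with an allocating
 * extent gives sign = 1 - 0 = 1, crediting the committed range;
 * replacing an allocation with a discard gives sign = 0 - 1 = -1,
 * debiting the same number of sectors. Overwriting like with like is a
 * no-op (sign = 0).
 */
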
static int inode_set_i_sectors_dirty(struct bch_inode_info *inode,
				     struct bch_inode_unpacked *bi, void *p)
{
	BUG_ON(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY);

	bi->bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
	return 0;
}

static int inode_clear_i_sectors_dirty(struct bch_inode_info *inode,
				       struct bch_inode_unpacked *bi,
				       void *p)
{
	BUG_ON(!(bi->bi_flags & BCH_INODE_I_SECTORS_DIRTY));

	bi->bi_sectors = atomic64_read(&inode->ei_sectors);
	bi->bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
	return 0;
}

static void i_sectors_dirty_put(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct i_sectors_hook *h)
{
	if (h->sectors) {
		spin_lock(&inode->v.i_lock);
		inode->v.i_blocks += h->sectors;
		spin_unlock(&inode->v.i_lock);

		atomic64_add(h->sectors, &inode->ei_sectors);
		EBUG_ON(atomic64_read(&inode->ei_sectors) < 0);
	}

	EBUG_ON(atomic_long_read(&inode->ei_sectors_dirty_count) <= 0);

	mutex_lock(&inode->ei_update_lock);

	if (atomic_long_dec_and_test(&inode->ei_sectors_dirty_count)) {
		int ret = __bch2_write_inode(c, inode,
					     inode_clear_i_sectors_dirty, NULL);
	}

	mutex_unlock(&inode->ei_update_lock);
}

static int __must_check i_sectors_dirty_get(struct bch_fs *c,
					    struct bch_inode_info *inode,
					    struct i_sectors_hook *h)
{
	int ret = 0;

	h->hook.fn = i_sectors_hook_fn;
	h->sectors = 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	h->inode = inode;
#endif

	if (atomic_long_inc_not_zero(&inode->ei_sectors_dirty_count))
		return 0;

	mutex_lock(&inode->ei_update_lock);

	if (!(inode->ei_flags & BCH_INODE_I_SECTORS_DIRTY))
		ret = __bch2_write_inode(c, inode, inode_set_i_sectors_dirty,
					 NULL);
	if (!ret)
		atomic_long_inc(&inode->ei_sectors_dirty_count);

	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

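/*
 * Usage sketch (mirrors bch2_fpunch() below): pair every successful
 * i_sectors_dirty_get() with an i_sectors_dirty_put() once the btree
 * update that used &h->hook has finished:
 *
 *	struct i_sectors_hook i_sectors_hook;
 *
 *	ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
 *	if (unlikely(ret))
 *		return ret;
 *
 *	ret = bch2_btree_delete_range(..., &i_sectors_hook.hook, ...);
 *
 *	i_sectors_dirty_put(c, inode, &i_sectors_hook);
 *
 * The hook accumulates the sector delta; the final put folds it into
 * i_blocks/ei_sectors and clears BCH_INODE_I_SECTORS_DIRTY.
 */
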
struct bchfs_extent_trans_hook {
	struct bchfs_write_op		*op;
	struct extent_insert_hook	hook;

	struct bch_inode_unpacked	inode_u;
	struct bkey_inode_buf		inode_p;

	bool				need_inode_update;
};

static enum extent_insert_hook_ret
bchfs_extent_update_hook(struct extent_insert_hook *hook,
			 struct bpos committed_pos,
			 struct bpos next_pos,
			 struct bkey_s_c k,
			 const struct bkey_i *insert)
{
	struct bchfs_extent_trans_hook *h = container_of(hook,
				struct bchfs_extent_trans_hook, hook);
	struct bch_inode_info *inode = h->op->inode;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));
	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
	bool do_pack = false;

	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));

	/* XXX: inode->i_size locking */
	if (offset > inode->ei_size) {
		BUG_ON(inode->ei_flags & BCH_INODE_I_SIZE_DIRTY);

		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		h->inode_u.bi_size = offset;
		do_pack = true;

		inode->ei_size = offset;

		if (h->op->is_dio)
			i_size_write(&inode->v, offset);
	}

	if (sectors) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		h->inode_u.bi_sectors += sectors;
		do_pack = true;

		atomic64_add(sectors, &inode->ei_sectors);

		h->op->sectors_added += sectors;

		if (h->op->is_dio) {
			spin_lock(&inode->v.i_lock);
			inode->v.i_blocks += sectors;
			spin_unlock(&inode->v.i_lock);
		}
	}

	if (do_pack)
		bch2_inode_pack(&h->inode_p, &h->inode_u);

	return BTREE_HOOK_DO_INSERT;
}

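/*
 * Note the two-phase pattern above: the first pass through the hook
 * runs without the inode loaded; if it discovers the inode itself needs
 * updating it returns BTREE_HOOK_RESTART_TRANS, and
 * bchfs_write_index_update() retries with need_inode_update set so the
 * extent and the inode can be committed in one atomic btree update.
 */
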
static int bchfs_write_index_update(struct bch_write_op *wop)
{
	struct bchfs_write_op *op = container_of(wop,
				struct bchfs_write_op, op);
	struct keylist *keys = &op->op.insert_keys;
	struct btree_iter extent_iter, inode_iter;
	struct bchfs_extent_trans_hook hook;
	struct bkey_i *k = bch2_keylist_front(keys);
	int ret;

	BUG_ON(k->k.p.inode != op->inode->v.i_ino);

	bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_INTENT);
	bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
			     POS(extent_iter.pos.inode, 0),
			     BTREE_ITER_INTENT);

	hook.hook.fn		= bchfs_extent_update_hook;
	hook.need_inode_update	= false;

	do {
		ret = bch2_btree_iter_traverse(&extent_iter);
		if (ret)
			goto err;

		/* XXX: inode->i_size locking */
		k = bch2_keylist_front(keys);
		if (min(k->k.p.offset << 9, op->new_i_size) > op->inode->ei_size)
			hook.need_inode_update = true;

		if (hook.need_inode_update) {
			struct bkey_s_c inode;

			if (!btree_iter_linked(&inode_iter))
				bch2_btree_iter_link(&extent_iter, &inode_iter);

			inode = bch2_btree_iter_peek_with_holes(&inode_iter);
			if ((ret = btree_iter_err(inode)))
				goto err;

			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
				      "inode %llu not found when updating",
				      extent_iter.pos.inode)) {
				ret = -ENOENT;
				goto err;
			}

			if (WARN_ONCE(bkey_bytes(inode.k) >
				      sizeof(hook.inode_p),
				      "inode %llu too big (%zu bytes, buf %zu)",
				      extent_iter.pos.inode,
				      bkey_bytes(inode.k),
				      sizeof(hook.inode_p))) {
				ret = -ENOENT;
				goto err;
			}

			bkey_reassemble(&hook.inode_p.inode.k_i, inode);
			ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
						&hook.inode_u);
			if (WARN_ONCE(ret,
				      "error %i unpacking inode %llu",
				      ret, extent_iter.pos.inode)) {
				ret = -ENOENT;
				goto err;
			}

			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k),
					BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
						&hook.inode_p.inode.k_i, 2));
		} else {
			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k));
		}
err:
		if (ret == -EINTR)
			continue;
		if (ret)
			break;

		bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));

	bch2_btree_iter_unlock(&extent_iter);
	bch2_btree_iter_unlock(&inode_iter);

	return ret;
}

/* stored in page->private: */

/*
 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
 * almost protect it with the page lock, except that bch2_writepage_io_done has
 * to update the sector counts (and does so from interrupt/bottom half context).
 */
struct bch_page_state {
union { struct {
	/*
	 * page is _fully_ written on disk, and not compressed - which means to
	 * write this page we don't have to reserve space (the new write will
	 * never take up more space on disk than what it's overwriting)
	 */
	unsigned		allocated:1;

	/* Owns PAGE_SECTORS sized reservation: */
	unsigned		reserved:1;
	unsigned		nr_replicas:4;

	/*
	 * Number of sectors on disk - for i_blocks
	 * Uncompressed size, not compressed size:
	 */
	unsigned		sectors:PAGE_SECTOR_SHIFT + 1;
	unsigned		dirty_sectors:PAGE_SECTOR_SHIFT + 1;
};
	/* for cmpxchg: */
	unsigned long		v;
};
};

#define page_state_cmpxchg(_ptr, _new, _expr)				\
({									\
	unsigned long _v = READ_ONCE((_ptr)->v);			\
	struct bch_page_state _old;					\
									\
	do {								\
		_old.v = _new.v = _v;					\
		_expr;							\
									\
		EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
	} while (_old.v != _new.v &&					\
		 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v);	\
									\
	_old;								\
})

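/*
 * Example use of the macro (cf. bch2_set_page_dirty() below) - _expr
 * edits @_new, and the do/while retries the whole-word cmpxchg until no
 * concurrent update raced with us:
 *
 *	struct bch_page_state old, new;
 *
 *	old = page_state_cmpxchg(page_state(page), new, {
 *		new.dirty_sectors = PAGE_SECTORS - new.sectors;
 *	});
 */
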
static inline struct bch_page_state *page_state(struct page *page)
{
	struct bch_page_state *s = (void *) &page->private;

	BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));

	if (!PagePrivate(page))
		SetPagePrivate(page);

	return s;
}

static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
{
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	s = page_state_cmpxchg(page_state(page), s, {
		if (!s.reserved)
			return;
		s.reserved = 0;
	});

	bch2_disk_reservation_put(c, &res);
}

static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
				     bool check_enospc)
{
	struct bch_page_state *s = page_state(page), new;
	struct disk_reservation res;
	int ret;

	BUG_ON(s->allocated && s->sectors != PAGE_SECTORS);

	if (s->allocated || s->reserved)
		return 0;

	ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
					? BCH_DISK_RESERVATION_NOFAIL : 0);
	if (ret)
		return ret;

	page_state_cmpxchg(s, new, {
		if (new.reserved) {
			bch2_disk_reservation_put(c, &res);
			return 0;
		}
		new.reserved	= 1;
		new.nr_replicas	= res.nr_replicas;
	});

	return 0;
}

static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	if (!PagePrivate(page))
		return;

	s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
	ClearPagePrivate(page);

	if (s.dirty_sectors) {
		spin_lock(&inode->v.i_lock);
		inode->v.i_blocks -= s.dirty_sectors;
		spin_unlock(&inode->v.i_lock);
	}

	if (s.reserved)
		bch2_disk_reservation_put(c, &res);
}

int bch2_set_page_dirty(struct page *page)
{
	struct bch_page_state old, new;

	old = page_state_cmpxchg(page_state(page), new,
		new.dirty_sectors = PAGE_SECTORS - new.sectors;
	);

	if (old.dirty_sectors != new.dirty_sectors) {
		struct bch_inode_info *inode = to_bch_ei(page->mapping->host);

		spin_lock(&inode->v.i_lock);
		inode->v.i_blocks += new.dirty_sectors - old.dirty_sectors;
		spin_unlock(&inode->v.i_lock);
	}

	return __set_page_dirty_nobuffers(page);
}

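/*
 * i_blocks counts both written and dirty sectors: a freshly dirtied
 * page only credits the sectors not already accounted as written
 * (PAGE_SECTORS - new.sectors), so redirtying a page never
 * double-counts.
 */
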
/* readpages/writepages: */

static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	return bio->bi_vcnt < bio->bi_max_vecs &&
		bio_end_sector(bio) == offset;
}

static void __bio_add_page(struct bio *bio, struct page *page)
{
	bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	bio->bi_iter.bi_size += PAGE_SIZE;
}

static int bio_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	BUG_ON(!bio->bi_max_vecs);

	if (!bio->bi_vcnt)
		bio->bi_iter.bi_sector = offset;
	else if (!bio_can_add_page_contig(bio, page))
		return -1;

	__bio_add_page(bio, page);
	return 0;
}

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

struct readpages_iter {
	struct address_space	*mapping;
	struct list_head	pages;
	unsigned		nr_pages;
};

static int readpage_add_page(struct readpages_iter *iter, struct page *page)
{
	struct bch_page_state *s = page_state(page);
	int ret;

	prefetchw(&page->flags);
	ret = add_to_page_cache_lru(page, iter->mapping,
				    page->index, GFP_NOFS);
	if (!ret)
		iter->nr_pages--;

	return ret;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	while (iter->nr_pages) {
		struct page *page =
			list_last_entry(&iter->pages, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!readpage_add_page(iter, page))
			return page;
	}

	return NULL;
}

#define for_each_readpage_page(_iter, _page)				\
	for (;								\
	     ((_page) = __readpage_next_page(&(_iter)));)

static void bch2_mark_pages_unalloc(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter)
		page_state(bv.bv_page)->allocated = 0;
}

static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = page_state(bv.bv_page);

		/* sectors in @k from the start of this page: */
		unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);

		unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);

		if (!s->sectors)
			s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);
		else
			s->nr_replicas = min_t(unsigned, s->nr_replicas,
					       bch2_extent_nr_dirty_ptrs(k));

		BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
		s->sectors += page_sectors;
	}
}

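/*
 * Example: a page backed by two extents with 2 and 1 dirty pointers
 * respectively ends up with nr_replicas = min(2, 1) = 1 - the page's
 * effective replication is that of its least-replicated extent.
 */
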
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio, u64 offset,
				bool get_more)
{
	struct page *page;
	pgoff_t page_offset;
	int ret;

	while (bio_end_sector(bio) < offset &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;

		if (iter->nr_pages) {
			page = list_last_entry(&iter->pages, struct page, lru);
			if (page->index != page_offset)
				break;

			list_del(&page->lru);
		} else if (get_more) {
			page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);

			if (page && !radix_tree_exceptional_entry(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			page->index = page_offset;
			ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
		} else {
			break;
		}

		ret = readpage_add_page(iter, page);
		if (ret)
			break;

		__bio_add_page(bio, page);
	}

	SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
}

static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bio *bio = &rbio->bio;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;

	while (1) {
		struct extent_pick_ptr pick;
		BKEY_PADDED(k) tmp;
		struct bkey_s_c k;
		unsigned bytes;
		bool is_last;

		bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));

		k = bch2_btree_iter_peek_with_holes(iter);
		BUG_ON(!k.k);

		if (IS_ERR(k.k)) {
			int ret = bch2_btree_iter_unlock(iter);

			bcache_io_error(c, bio, "btree IO error %i", ret);
			bio_endio(bio);
			return;
		}

		bkey_reassemble(&tmp.k, k);
		bch2_btree_iter_unlock(iter);
		k = bkey_i_to_s_c(&tmp.k);

		bch2_extent_pick_ptr(c, k, NULL, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");
			bio_endio(bio);
			return;
		}

		if (readpages_iter)
			readpage_bio_extend(readpages_iter,
					    bio, k.k->p.offset << 9,
					    pick.ca &&
					    (pick.crc.csum_type ||
					     pick.crc.compression_type));

		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
			 bio->bi_iter.bi_sector) << 9;
		is_last = bytes == bio->bi_iter.bi_size;
		swap(bio->bi_iter.bi_size, bytes);

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(bio, k);

		if (!bkey_extent_is_allocation(k.k) ||
		    bkey_extent_is_compressed(k))
			bch2_mark_pages_unalloc(bio);

		if (!is_last) {
			bio_inc_remaining(&rbio->bio);
			flags |= BCH_READ_MUST_CLONE;
			trace_read_split(&rbio->bio);
		}

		bch2_read_extent(c, rbio, k, &pick, flags);

		if (is_last)
			return;

		swap(bio->bi_iter.bi_size, bytes);
		bio_advance(bio, bytes);
	}
}

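/*
 * The swap()/submit/swap()/bio_advance() dance above temporarily clamps
 * bi_size so bch2_read_extent() only sees the part of the bio covered
 * by this extent, then restores the remaining size and advances past
 * this extent to start the next read.
 */
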
int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;
	struct page *page;
	struct readpages_iter readpages_iter = {
		.mapping = mapping, .nr_pages = nr_pages
	};

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);

	INIT_LIST_HEAD(&readpages_iter.pages);
	list_add(&readpages_iter.pages, pages);
	list_del_init(pages);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		unsigned n = max_t(unsigned,
				   min_t(unsigned, readpages_iter.nr_pages + 1,
					 BIO_MAX_PAGES),
				   c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);

		struct bch_read_bio *rbio =
			to_rbio(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read));

		rbio->bio.bi_end_io = bch2_readpages_end_io;
		bio_add_page_contig(&rbio->bio, page);
		bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
	}

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

	return 0;
}

static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_iter iter;

	/*
	 * Initialize page state:
	 * If a page is partly allocated and partly a hole, we want it to be
	 * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages
	 * allocated and then mark them unallocated as we find holes:
	 *
	 * Note that the bio hasn't been split yet - it's the only bio that
	 * points to these pages. As we walk extents and split @bio, that
	 * won't necessarily be true anymore - the splits won't necessarily
	 * be on page boundaries:
	 */
	struct bch_page_state *s = page_state(page);

	EBUG_ON(s->reserved);
	s->allocated = 1;
	s->sectors = 0;

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	bio_add_page_contig(&rbio->bio, page);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
	bchfs_read(c, &iter, rbio, inum, NULL);
}

int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;

	rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read));
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}

struct bch_writepage_state {
	struct bch_writepage_io	*io;
};

static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.op.wbio.bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.op.c;
	struct bio *bio = &io->op.op.wbio.bio;
	struct bio_vec *bvec;
	int i;

	atomic_sub(bio->bi_vcnt, &c->writeback_pages);
	wake_up(&c->writeback_wait);

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (io->op.op.error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}

		if (io->op.op.written >= PAGE_SECTORS) {
			struct bch_page_state old, new;

			old = page_state_cmpxchg(page_state(page), new, {
				new.sectors = PAGE_SECTORS;
				new.dirty_sectors = 0;
			});

			io->op.sectors_added -= old.dirty_sectors;
			io->op.op.written -= PAGE_SECTORS;
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 * (an error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 */
	BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	if (io->op.sectors_added) {
		struct bch_inode_info *inode = io->op.inode;

		spin_lock(&inode->v.i_lock);
		inode->v.i_blocks += io->op.sectors_added;
		spin_unlock(&inode->v.i_lock);
	}

	bio_for_each_segment_all(bvec, bio, i)
		end_page_writeback(bvec->bv_page);

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;
	struct bio *bio = &io->op.op.wbio.bio;

	w->io = NULL;
	atomic_add(bio->bi_vcnt, &io->op.op.c->writeback_pages);

	io->op.op.pos.offset = bio->bi_iter.bi_sector;

	closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}

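/*
 * Writeback throttling sketch: the page count added to
 * c->writeback_pages here is dropped by bch2_writepage_io_done(), which
 * also wakes c->writeback_wait; bch2_writepages() checks the counter
 * against c->writeback_pages_max before building more io.
 */
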
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    struct page *page)
{
	u64 inum = inode->v.i_ino;
	unsigned nr_replicas = page_state(page)->nr_replicas;

	EBUG_ON(!nr_replicas);
	/* XXX: disk_reservation->gen isn't plumbed through */

	if (!w->io) {
alloc_io:
		w->io = container_of(bio_alloc_bioset(GFP_NOFS,
						      BIO_MAX_PAGES,
						      bch2_writepage_bioset),
				     struct bch_writepage_io, op.op.wbio.bio);

		closure_init(&w->io->cl, NULL);
		w->io->op.inode		= inode;
		w->io->op.sectors_added	= 0;
		w->io->op.is_dio	= false;
		bch2_write_op_init(&w->io->op.op, c,
				   (struct disk_reservation) {
					.nr_replicas = c->opts.data_replicas,
				   },
				   c->fastest_devs,
				   inode->ei_last_dirtied,
				   POS(inum, 0),
				   &inode->ei_journal_seq,
				   BCH_WRITE_THROTTLE);
		w->io->op.op.index_update_fn = bchfs_write_index_update;
	}

	if (w->io->op.op.res.nr_replicas != nr_replicas ||
	    bio_add_page_contig(&w->io->op.op.wbio.bio, page)) {
		bch2_writepage_do_io(w);
		goto alloc_io;
	}

	/*
	 * We shouldn't ever be handed pages for multiple inodes in a single
	 * pass:
	 */
	BUG_ON(inode != w->io->op.inode);
}

static int __bch2_writepage(struct bch_fs *c, struct page *page,
			    struct writeback_control *wbc,
			    struct bch_writepage_state *w)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_page_state new, old;
	unsigned offset;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	bch2_writepage_io_alloc(c, w, inode, page);

	/* while page is locked: */
	w->io->op.new_i_size = i_size;

	if (wbc->sync_mode == WB_SYNC_ALL)
		w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;

	/* Before unlocking the page, transfer reservation to w->io: */
	old = page_state_cmpxchg(page_state(page), new, {
		EBUG_ON(!new.reserved &&
			(new.sectors != PAGE_SECTORS ||
			 !new.allocated));

		if (new.allocated &&
		    w->io->op.op.compression_type != BCH_COMPRESSION_NONE)
			new.allocated = 0;
		else if (!new.reserved)
			goto out;
		new.reserved = 0;
	});

	w->io->op.op.res.sectors += PAGE_SECTORS *
		(old.reserved - new.reserved) *
		old.nr_replicas;
out:
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	return 0;
}

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	struct pagecache_iter iter;
	struct page *page;
	int ret = 0;
	int done = 0;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
retry:
	done_index = index;

	for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
		done_index = page->index;

		if (w.io &&
		    !bio_can_add_page_contig(&w.io->op.op.wbio.bio, page))
			bch2_writepage_do_io(&w);

		if (!w.io &&
		    atomic_read(&c->writeback_pages) >=
		    c->writeback_pages_max) {
			/* don't sleep with pages pinned: */
			pagecache_iter_release(&iter);

			__wait_event(c->writeback_wait,
				     atomic_read(&c->writeback_pages) <
				     c->writeback_pages_max);
		}

		lock_page(page);

		/*
		 * Page truncated or invalidated. We can freely skip it
		 * then, even for data integrity operations: the page
		 * has disappeared concurrently, so there could be no
		 * real expectation of this data integrity operation
		 * even if there is now a new, dirty page at the same
		 * pagecache address.
		 */
		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
		ret = __bch2_writepage(c, page, wbc, &w);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				done_index = page->index + 1;
				done = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE) {
			done = 1;
			break;
		}
	}
	pagecache_iter_release(&iter);

	if (w.io)
		bch2_writepage_do_io(&w);

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	int ret;

	ret = __bch2_writepage(c, page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}

static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = to_rbio(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = rbio->bio.bi_error;
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	BUG_ON(inode_unhashed(&inode->v));

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_get_page_reservation(c, page, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	pagecache_add_put(&mapping->add_lock);
	return ret;
}

int bch2_write_end(struct file *filp, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);
		if (!PageDirty(page))
			set_page_dirty(page);

		inode->ei_last_dirtied = (unsigned long) current;
	} else {
		bch2_put_page_reservation(c, page);
	}

	unlock_page(page);
	put_page(page);
	pagecache_add_put(&mapping->add_lock);

	return copied;
}

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_error)
		dio->ret = bio->bi_error;

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio); /* transfers ownership */
}

static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req,
			       struct file *file, struct bch_inode_info *inode,
			       struct iov_iter *iter, loff_t offset)
{
	struct dio_read *dio;
	struct bio *bio;
	bool sync = is_sync_kiocb(req);
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));
	iov_iter_truncate(iter, round_up(ret, block_bytes(c)));

	if (!ret)
		return ret;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch2_dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

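	/*
	 * The two atomic_set()s above pre-bias the closure's refcount: the
	 * sync case holds one extra ref so closure_sync() below waits for
	 * all split bios, while the async case arranges for the final put
	 * to run bch2_dio_read_complete() as the closure's destructor -
	 * avoiding an extra atomic in the common path, per the comment
	 * above.
	 */
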
	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_error = ret;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, to_rbio(bio), inode->v.i_ino);
	}

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}

static long __bch2_dio_write_complete(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	long ret = dio->error ?: dio->written;

	bch2_disk_reservation_put(dio->c, &dio->res);

	__pagecache_block_put(&mapping->add_lock);
	inode_dio_end(&inode->v);

	if (dio->iovec && dio->iovec != dio->inline_vecs)
		kfree(dio->iovec);

	bio_put(&dio->iop.op.wbio.bio);
	return ret;
}

static void bch2_dio_write_complete(struct closure *cl)
{
	struct dio_write *dio = container_of(cl, struct dio_write, cl);
	struct kiocb *req = dio->req;

	req->ki_complete(req, __bch2_dio_write_complete(dio), 0);
}

static void bch2_dio_write_done(struct dio_write *dio)
{
	struct bio_vec *bv;
	int i;

	dio->written += dio->iop.op.written << 9;

	if (dio->iop.op.error)
		dio->error = dio->iop.op.error;

	bio_for_each_segment_all(bv, &dio->iop.op.wbio.bio, i)
		put_page(bv->bv_page);

	if (dio->iter.count)
		bio_reset(&dio->iop.op.wbio.bio);
}

static void bch2_do_direct_IO_write(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bio *bio = &dio->iop.op.wbio.bio;
	unsigned flags = 0;
	int ret;

	if ((dio->req->ki_flags & IOCB_DSYNC) &&
	    !dio->c->opts.journal_flush_disabled)
		flags |= BCH_WRITE_FLUSH;

	ret = bio_iov_iter_get_pages(bio, &dio->iter);
	if (ret < 0) {
		/*
		 * these didn't get initialized, but bch2_dio_write_done() will
		 * look at them:
		 */
		dio->iop.op.error = 0;
		dio->iop.op.written = 0;
		dio->error = ret;
		return;
	}

	dio->iop.inode		= inode;
	dio->iop.sectors_added	= 0;
	dio->iop.is_dio		= true;
	dio->iop.new_i_size	= U64_MAX;
	bch2_write_op_init(&dio->iop.op, dio->c, dio->res,
			   dio->c->fastest_devs,
			   (unsigned long) dio->task,
			   POS(inode->v.i_ino, (dio->offset + dio->written) >> 9),
			   &inode->ei_journal_seq,
			   flags|BCH_WRITE_THROTTLE);
	dio->iop.op.index_update_fn = bchfs_write_index_update;

	dio->res.sectors -= bio_sectors(bio);
	dio->iop.op.res.sectors = bio_sectors(bio);

	task_io_account_write(bio->bi_iter.bi_size);

	closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
}

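/*
 * Reservation flow: bch2_direct_IO_write() reserves iter->count >> 9
 * sectors up front; each pass here moves bio_sectors(bio) of that
 * reservation onto the write op, so whatever is left after the last
 * pass is released by bch2_disk_reservation_put() in
 * __bch2_dio_write_complete().
 */
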
static void bch2_dio_write_loop_async(struct closure *cl)
{
	struct dio_write *dio =
		container_of(cl, struct dio_write, cl);
	struct address_space *mapping = dio->req->ki_filp->f_mapping;

	bch2_dio_write_done(dio);

	if (dio->iter.count && !dio->error) {
		use_mm(dio->task->mm);
		pagecache_block_get(&mapping->add_lock);

		bch2_do_direct_IO_write(dio);

		pagecache_block_put(&mapping->add_lock);
		unuse_mm(dio->task->mm);

		continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
	} else {
#if 0
		closure_return_with_destructor(cl, bch2_dio_write_complete);
#else
		closure_debug_destroy(cl);
		bch2_dio_write_complete(cl);
#endif
	}
}

static int bch2_direct_IO_write(struct bch_fs *c,
				struct kiocb *req, struct file *file,
				struct bch_inode_info *inode,
				struct iov_iter *iter, loff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct dio_write *dio;
	struct bio *bio;
	ssize_t ret;
	bool sync = is_sync_kiocb(req);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(!iter->count))
		return 0;

	if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch2_dio_write_bioset);
	dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
	dio->req	= req;
	dio->c		= c;
	dio->written	= 0;
	dio->error	= 0;
	dio->offset	= offset;
	dio->iovec	= NULL;
	dio->iter	= *iter;
	dio->task	= current;
	closure_init(&dio->cl, NULL);

	if (offset + iter->count > inode->v.i_size)
		sync = true;

	/*
	 * XXX: we shouldn't return -ENOSPC if we're overwriting existing data -
	 * if getting a reservation fails we should check if we are doing an
	 * overwrite.
	 *
	 * Have to then guard against racing with truncate (deleting data that
	 * we would have been overwriting)
	 */
	ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
	if (unlikely(ret)) {
		closure_debug_destroy(&dio->cl);
		bio_put(bio);
		return ret;
	}

	inode_dio_begin(&inode->v);
	__pagecache_block_get(&mapping->add_lock);

	if (sync) {
		do {
			bch2_do_direct_IO_write(dio);

			closure_sync(&dio->cl);
			bch2_dio_write_done(dio);
		} while (dio->iter.count && !dio->error);

		closure_debug_destroy(&dio->cl);
		return __bch2_dio_write_complete(dio);
	} else {
		bch2_do_direct_IO_write(dio);

		if (dio->iter.count && !dio->error) {
			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				dio->iovec = kmalloc(dio->iter.nr_segs *
						     sizeof(struct iovec),
						     GFP_KERNEL);
				if (!dio->iovec)
					dio->error = -ENOMEM;
			} else {
				dio->iovec = dio->inline_vecs;
			}

			memcpy(dio->iovec,
			       dio->iter.iov,
			       dio->iter.nr_segs * sizeof(struct iovec));
			dio->iter.iov = dio->iovec;
		}

		continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL);
		return -EIOCBQUEUED;
	}
}

ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct blk_plug plug;
	ssize_t ret;

	blk_start_plug(&plug);
	ret = ((iov_iter_rw(iter) == WRITE)
	       ? bch2_direct_IO_write
	       : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
	blk_finish_plug(&plug);

	return ret;
}

static ssize_t
bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = file->f_mapping;
	loff_t pos = iocb->ki_pos;
	ssize_t ret;

	pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	ret = write_invalidate_inode_pages_range(file->f_mapping, pos,
						 pos + iov_iter_count(iter) - 1);
	if (unlikely(ret))
		goto err;

	ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos);
err:
	pagecache_block_put(&mapping->add_lock);

	return ret;
}

static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	ret = file_remove_privs(file);
	if (ret)
		goto out;

	ret = file_update_time(file);
	if (ret)
		goto out;

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch2_direct_write(iocb, from)
		: generic_perform_write(file, from, iocb->ki_pos);

	if (likely(ret > 0))
		iocb->ki_pos += ret;
out:
	current->backing_dev_info = NULL;
	return ret;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
	bool direct = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;

	inode_lock(&inode->v);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __bch2_write_iter(iocb, from);
	inode_unlock(&inode->v);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

int bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	lock_page(page);
	if (page->mapping != mapping ||
	    page_offset(page) > i_size_read(&inode->v)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (bch2_get_page_reservation(c, page, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (!PageDirty(page))
		set_page_dirty(page);
	wait_for_stable_page(page);
out:
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}

void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		*page_state(newpage) = *page_state(page);
		ClearPagePrivate(page);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);
	if (ret)
		return ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
}

static int __bch2_truncate_page(struct address_space *mapping,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		struct btree_iter iter;
		struct bkey_s_c k = bkey_s_c_null;

		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
				   POS(inode->v.i_ino,
				       index << PAGE_SECTOR_SHIFT), 0, k) {
			if (bkey_cmp(bkey_start_pos(k.k),
				     POS(inode->v.i_ino,
					 (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)
				break;

			if (k.k->type != KEY_TYPE_DISCARD &&
			    k.k->type != BCH_RESERVATION) {
				bch2_btree_iter_unlock(&iter);
				goto create;
			}
		}
		bch2_btree_iter_unlock(&iter);
		return 0;
create:
		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_reservation(c, page, false);
	BUG_ON(ret);

	if (index == start >> PAGE_SHIFT &&
	    index == end >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, end_offset);
	else if (index == start >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, PAGE_SIZE);
	else if (index == end >> PAGE_SHIFT)
		zero_user_segment(page, 0, end_offset);

	if (!PageDirty(page))
		set_page_dirty(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct address_space *mapping, loff_t from)
{
	return __bch2_truncate_page(mapping, from >> PAGE_SHIFT,
				    from, from + PAGE_SIZE);
}

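/*
 * Example: bch2_truncate_page(mapping, 0x1234) zeroes bytes
 * 0x234..PAGE_SIZE of the page at index 1; if 'from' is page aligned
 * (or the page has no data in the extents btree)
 * __bch2_truncate_page() returns without touching anything.
 */
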
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	bool shrink = iattr->ia_size <= inode->v.i_size;
	int ret = 0;

	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	truncate_setsize(&inode->v, iattr->ia_size);

	/* sync appends.. */
	/* XXX what protects inode->i_size? */
	if (iattr->ia_size > inode->ei_size)
		ret = filemap_write_and_wait_range(mapping,
						   inode->ei_size, S64_MAX);
	if (unlikely(ret))
		goto err_put_pagecache;

	mutex_lock(&inode->ei_update_lock);
	i_size_dirty_get(inode);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	/*
	 * There might be persistent reservations (from fallocate())
	 * above i_size, which bch2_inode_truncate() will discard - we're
	 * only supposed to discard them if we're doing a real truncate
	 * here (new i_size < current i_size):
	 */
	if (shrink) {
		struct i_sectors_hook i_sectors_hook;

		ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
		if (unlikely(ret))
			goto err;

		ret = bch2_truncate_page(inode->v.i_mapping, iattr->ia_size);
		if (unlikely(ret)) {
			i_sectors_dirty_put(c, inode, &i_sectors_hook);
			goto err;
		}

		ret = bch2_inode_truncate(c, inode->v.i_ino,
					  round_up(iattr->ia_size, PAGE_SIZE) >> 9,
					  &i_sectors_hook.hook,
					  &inode->ei_journal_seq);

		i_sectors_dirty_put(c, inode, &i_sectors_hook);

		if (unlikely(ret))
			goto err;
	}

	mutex_lock(&inode->ei_update_lock);
	setattr_copy(&inode->v, iattr);
	inode->v.i_mtime = inode->v.i_ctime = current_fs_time(inode->v.i_sb);

	/* clear I_SIZE_DIRTY: */
	i_size_dirty_put(inode);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size);
	mutex_unlock(&inode->ei_update_lock);

err_put_pagecache:
	pagecache_block_put(&mapping->add_lock);
	return ret;
err:
	mutex_lock(&inode->ei_update_lock);
	i_size_dirty_put(inode);
	mutex_unlock(&inode->ei_update_lock);
	goto err_put_pagecache;
}

static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	u64 ino = inode->v.i_ino;
	u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
	u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch2_truncate_page(mapping,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto out;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(mapping,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto out;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		struct disk_reservation disk_res;
		struct i_sectors_hook i_sectors_hook;

		BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));

		ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
		if (unlikely(ret))
			goto out;

		ret = bch2_btree_delete_range(c,
					      BTREE_ID_EXTENTS,
					      POS(ino, discard_start),
					      POS(ino, discard_end),
					      ZERO_VERSION,
					      &disk_res,
					      &i_sectors_hook.hook,
					      &inode->ei_journal_seq);

		i_sectors_dirty_put(c, inode, &i_sectors_hook);
		bch2_disk_reservation_put(c, &disk_res);
	}
out:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;
}

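/*
 * Example: bch2_fpunch(inode, 1K, 9K) with 4K pages zeroes the
 * partially covered head and tail pages via __bch2_truncate_page() and
 * only deletes the fully covered extents in [4K, 8K) - discard_start =
 * round_up(1K, 4K) >> 9, discard_end = round_down(10K, 4K) >> 9.
 */
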
static long bch2_fcollapse(struct bch_inode_info *inode,
			   loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct btree_iter src;
	struct btree_iter dst;
	BKEY_PADDED(k) copy;
	struct bkey_s_c k;
	struct i_sectors_hook i_sectors_hook;
	loff_t new_size;
	int ret;

	if ((offset | len) & (PAGE_SIZE - 1))
		return -EINVAL;

	bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
			     POS(inode->v.i_ino, offset >> 9),
			     BTREE_ITER_INTENT);
	/* position will be set from dst iter's position: */
	bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0);
	bch2_btree_iter_link(&src, &dst);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = -EINVAL;
	if (offset + len >= inode->v.i_size)
		goto err;

	if (inode->v.i_size < len)
		goto err;

	new_size = inode->v.i_size - len;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		goto err;

	ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
	if (ret)
		goto err;

	while (bkey_cmp(dst.pos,
			POS(inode->v.i_ino,
			    round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
		struct disk_reservation disk_res;

		bch2_btree_iter_set_pos(&src,
			POS(dst.pos.inode, dst.pos.offset + (len >> 9)));

		ret = bch2_btree_iter_traverse(&dst);
		if (ret)
			goto btree_iter_err;

		k = bch2_btree_iter_peek_with_holes(&src);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		bkey_reassemble(&copy.k, k);

		if (bkey_deleted(&copy.k.k))
			copy.k.k.type = KEY_TYPE_DISCARD;

		bch2_cut_front(src.pos, &copy.k);
		copy.k.k.p.offset -= len >> 9;

		BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));

		ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
						BCH_DISK_RESERVATION_NOFAIL);
		BUG_ON(ret);

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					   &inode->ei_journal_seq,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&dst, &copy.k));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_unwind;

		bch2_btree_iter_cond_resched(&src);
	}

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);

	ret = bch2_inode_truncate(c, inode->v.i_ino,
				  round_up(new_size, PAGE_SIZE) >> 9,
				  &i_sectors_hook.hook,
				  &inode->ei_journal_seq);
	if (ret)
		goto err_unwind;

	i_sectors_dirty_put(c, inode, &i_sectors_hook);

	mutex_lock(&inode->ei_update_lock);
	i_size_write(&inode->v, new_size);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size);
	mutex_unlock(&inode->ei_update_lock);

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;
err_unwind:
	/*
	 * XXX: we've left data with multiple pointers... which isn't a _super_
	 * serious problem...
	 */
	i_sectors_dirty_put(c, inode, &i_sectors_hook);
err:
	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
	return ret;
}

static long bch2_fallocate(struct bch_inode_info *inode, int mode,
			   loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct i_sectors_hook i_sectors_hook;
	struct btree_iter iter;
	struct bpos end;
	loff_t block_start, block_end;
	loff_t new_size = offset + len;
	unsigned sectors;
	unsigned replicas = READ_ONCE(c->opts.data_replicas);
	int ret;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_INTENT);

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, new_size);
		if (ret)
			goto err;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch2_truncate_page(mapping,
					   offset >> PAGE_SHIFT,
					   offset, offset + len);

		if (!ret &&
		    offset >> PAGE_SHIFT !=
		    (offset + len) >> PAGE_SHIFT)
			ret = __bch2_truncate_page(mapping,
						   (offset + len) >> PAGE_SHIFT,
						   offset, offset + len);

		if (unlikely(ret))
			goto err;

		truncate_pagecache_range(&inode->v, offset, offset + len - 1);

		block_start	= round_up(offset, PAGE_SIZE);
		block_end	= round_down(offset + len, PAGE_SIZE);
	} else {
		block_start	= round_down(offset, PAGE_SIZE);
		block_end	= round_up(offset + len, PAGE_SIZE);
	}

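	/*
	 * Rounding rationale: with ZERO_RANGE the partial head/tail pages
	 * were zeroed above, so reservations are only needed for fully
	 * covered pages (round inward); plain fallocate must cover every
	 * byte of the range, so it rounds outward.
	 */
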
	bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
	end = POS(inode->v.i_ino, block_end >> 9);

	ret = i_sectors_dirty_get(c, inode, &i_sectors_hook);
	if (unlikely(ret))
		goto err;

	while (bkey_cmp(iter.pos, end) < 0) {
		struct disk_reservation disk_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;

		k = bch2_btree_iter_peek_with_holes(&iter);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		/* already reserved */
		if (k.k->type == BCH_RESERVATION &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_advance_pos(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k)) {
			if (!(mode & FALLOC_FL_ZERO_RANGE)) {
				bch2_btree_iter_advance_pos(&iter);
				continue;
			}
		}

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type	= BCH_RESERVATION;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch2_cut_front(iter.pos, &reservation.k_i);
		bch2_cut_back(end, &reservation.k);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);

		if (reservation.v.nr_replicas < replicas ||
		    bkey_extent_is_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res,
							sectors, 0);
			if (unlikely(ret))
				goto err_put_sectors_dirty;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					   &inode->ei_journal_seq,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
	}
	bch2_btree_iter_unlock(&iter);

	i_sectors_dirty_put(c, inode, &i_sectors_hook);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->v.i_size) {
		i_size_write(&inode->v, new_size);

		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, inode->v.i_size);
		mutex_unlock(&inode->ei_update_lock);
	}

	if ((mode & FALLOC_FL_KEEP_SIZE) &&
	    (mode & FALLOC_FL_ZERO_RANGE) &&
	    inode->ei_size != inode->v.i_size) {
		/* sync appends.. */
		ret = filemap_write_and_wait_range(mapping,
						   inode->ei_size, S64_MAX);
		if (ret)
			goto err;

		if (inode->ei_size != inode->v.i_size) {
			mutex_lock(&inode->ei_update_lock);
			ret = bch2_write_inode_size(c, inode, inode->v.i_size);
			mutex_unlock(&inode->ei_update_lock);
		}
	}

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;

err_put_sectors_dirty:
	i_sectors_dirty_put(c, inode, &i_sectors_hook);
err:
	bch2_btree_iter_unlock(&iter);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
	return ret;
}

long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bch2_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bch2_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bch2_fcollapse(inode, offset, len);

	return -EOPNOTSUPP;
}

static bool page_is_data(struct page *page)
{
	/* XXX: should only have to check PageDirty */
	return PagePrivate(page) &&
		(page_state(page)->sectors ||
		 page_state(page)->dirty_sectors);
}

static loff_t bch2_next_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++) {
		if (find_get_pages(mapping, index, 1, &page)) {
			lock_page(page);
			index = page->index;

			if (page_is_data(page))
				end_offset =
					min(end_offset,
					    max(start_offset,
						((loff_t) index) << PAGE_SHIFT));
			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}

static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9), 0, k) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_next_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data > isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}

static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	bool ret;

	page = find_lock_entry(mapping, index);
	if (!page || radix_tree_exception(page))
		return false;

	ret = page_is_data(page);
	unlock_page(page);
	put_page(page);

	return ret;
}

static loff_t bch2_next_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++) {
		if (!page_slot_is_data(mapping, index)) {
			end_offset = max(start_offset,
					 ((loff_t) index) << PAGE_SHIFT);
			break;
		}
	}

	return end_offset;
}

static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9),
			   BTREE_ITER_WITH_HOLES, k) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch2_seek_data(file, offset);
	case SEEK_HOLE:
		return bch2_seek_hole(file, offset);
	}

	return -EINVAL;
}

#endif /* NO_BCACHEFS_FS */