#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <trace/events/writeback.h>

struct bio_set *bch_writepage_bioset;
struct bio_set *bch_dio_read_bioset;
struct bio_set *bch_dio_write_bioset;
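/*
 * Illustrative sketch (an assumption - the actual allocation lives elsewhere,
 * e.g. in the init path): these biosets are expected to be created with front
 * padding, so that allocating a bio also allocates the structure embedding it:
 *
 *	bch_writepage_bioset =
 *		bioset_create(4, offsetof(struct bch_writepage_io, bio.bio));
 *
 * The pool size of 4 is a made-up value for illustration.
 */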
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
static int inode_set_size(struct bch_inode_info *ei, struct bch_inode *bi,
			  void *p)
{
	loff_t *new_i_size = p;
	unsigned i_flags = le32_to_cpu(bi->i_flags);

	lockdep_assert_held(&ei->update_lock);

	bi->i_size = cpu_to_le64(*new_i_size);

	if (atomic_long_read(&ei->i_size_dirty_count))
		i_flags |= BCH_INODE_I_SIZE_DIRTY;
	else
		i_flags &= ~BCH_INODE_I_SIZE_DIRTY;

	bi->i_flags = cpu_to_le32(i_flags);

	return 0;
}
static int __must_check bch_write_inode_size(struct cache_set *c,
					     struct bch_inode_info *ei,
					     loff_t new_size)
{
	return __bch_write_inode(c, ei, inode_set_size, &new_size);
}
static inline void i_size_dirty_put(struct bch_inode_info *ei)
{
	atomic_long_dec_bug(&ei->i_size_dirty_count);
}
static inline void i_size_dirty_get(struct bch_inode_info *ei)
{
	lockdep_assert_held(&ei->vfs_inode.i_rwsem);

	atomic_long_inc(&ei->i_size_dirty_count);
}
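/*
 * Sketch of the intended pairing (mirrors bch_truncate() below): an operation
 * that changes i_size holds an i_size_dirty ref across the update, so
 * BCH_INODE_I_SIZE_DIRTY stays set in the on-disk inode until the new size
 * has been fully persisted:
 *
 *	i_size_dirty_get(ei);
 *	ret = bch_write_inode_size(c, ei, new_size);
 *	... do the actual truncate/extend ...
 *	i_size_dirty_put(ei);
 *	ret = bch_write_inode_size(c, ei, new_size);
 */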
/* i_sectors accounting: */

static enum extent_insert_hook_ret
i_sectors_hook_fn(struct extent_insert_hook *hook,
		  struct bpos committed_pos,
		  struct bpos next_pos,
		  struct bkey_s_c k,
		  const struct bkey_i *insert)
{
	struct i_sectors_hook *h = container_of(hook,
				struct i_sectors_hook, hook);
	s64 sectors = next_pos.offset - committed_pos.offset;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));

	EBUG_ON(!(h->ei->i_flags & BCH_INODE_I_SECTORS_DIRTY));
	EBUG_ON(!atomic_long_read(&h->ei->i_sectors_dirty_count));

	h->sectors += sectors * sign;

	return BTREE_HOOK_DO_INSERT;
}
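/*
 * Worked example of the sign computation above: overwriting an 8-sector hole
 * with an allocating extent gives sign = 1 - 0 = 1, so h->sectors grows by 8;
 * replacing allocated data with a discard gives sign = 0 - 1 = -1; an
 * allocation-over-allocation overwrite gives sign = 0 and no accounting
 * change.
 */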
static int inode_set_i_sectors_dirty(struct bch_inode_info *ei,
				     struct bch_inode *bi, void *p)
{
	BUG_ON(le32_to_cpu(bi->i_flags) & BCH_INODE_I_SECTORS_DIRTY);

	bi->i_flags = cpu_to_le32(le32_to_cpu(bi->i_flags)|
				  BCH_INODE_I_SECTORS_DIRTY);
	return 0;
}
static int inode_clear_i_sectors_dirty(struct bch_inode_info *ei,
				       struct bch_inode *bi, void *p)
{
	BUG_ON(!(le32_to_cpu(bi->i_flags) & BCH_INODE_I_SECTORS_DIRTY));

	bi->i_sectors = cpu_to_le64(atomic64_read(&ei->i_sectors));
	bi->i_flags = cpu_to_le32(le32_to_cpu(bi->i_flags) &
				  ~BCH_INODE_I_SECTORS_DIRTY);
	return 0;
}
static void i_sectors_dirty_put(struct bch_inode_info *ei,
				struct i_sectors_hook *h)
{
	struct inode *inode = &ei->vfs_inode;

	if (h->sectors) {
		spin_lock(&inode->i_lock);
		inode->i_blocks += h->sectors;
		spin_unlock(&inode->i_lock);

		atomic64_add(h->sectors, &ei->i_sectors);
		EBUG_ON(atomic64_read(&ei->i_sectors) < 0);
	}

	EBUG_ON(atomic_long_read(&ei->i_sectors_dirty_count) <= 0);

	mutex_lock(&ei->update_lock);

	if (atomic_long_dec_and_test(&ei->i_sectors_dirty_count)) {
		struct cache_set *c = ei->vfs_inode.i_sb->s_fs_info;
		int ret = __bch_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL);
	}

	mutex_unlock(&ei->update_lock);
}
static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei,
					    struct i_sectors_hook *h)
{
	int ret = 0;

	h->hook.fn	= i_sectors_hook_fn;
	h->sectors	= 0;
#ifdef CONFIG_BCACHE_DEBUG
	h->ei		= ei;
#endif

	if (atomic_long_inc_not_zero(&ei->i_sectors_dirty_count))
		return 0;

	mutex_lock(&ei->update_lock);

	if (!(ei->i_flags & BCH_INODE_I_SECTORS_DIRTY)) {
		struct cache_set *c = ei->vfs_inode.i_sb->s_fs_info;

		ret = __bch_write_inode(c, ei, inode_set_i_sectors_dirty, NULL);
	}

	if (!ret)
		atomic_long_inc(&ei->i_sectors_dirty_count);

	mutex_unlock(&ei->update_lock);

	return ret;
}
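/*
 * Sketch of the usage pattern (mirrors bch_fpunch() below): take a ref, pass
 * the hook into the btree update so i_sectors_hook_fn() accumulates the
 * allocation delta, then drop the ref, which folds the delta into
 * i_blocks/i_sectors:
 *
 *	struct i_sectors_hook i_sectors_hook;
 *
 *	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
 *	if (ret)
 *		return ret;
 *	... btree updates passing &i_sectors_hook.hook ...
 *	i_sectors_dirty_put(ei, &i_sectors_hook);
 */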
struct bchfs_extent_trans_hook {
	struct bchfs_write_op		*op;
	struct extent_insert_hook	hook;
	struct bkey_i_inode		new_inode;
	bool				need_inode_update;
};
static enum extent_insert_hook_ret
bchfs_extent_update_hook(struct extent_insert_hook *hook,
			 struct bpos committed_pos,
			 struct bpos next_pos,
			 struct bkey_s_c k,
			 const struct bkey_i *insert)
{
	struct bchfs_extent_trans_hook *h = container_of(hook,
				struct bchfs_extent_trans_hook, hook);
	struct bch_inode_info *ei = h->op->ei;
	struct inode *inode = &ei->vfs_inode;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));
	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);

	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));

	/* XXX: ei->i_size locking */
	if (offset > ei->i_size) {
		BUG_ON(ei->i_flags & BCH_INODE_I_SIZE_DIRTY);

		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		h->new_inode.v.i_size = cpu_to_le64(offset);
		ei->i_size = offset;

		if (h->op->is_dio)
			i_size_write(inode, offset);
	}

	if (sectors) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_HOOK_RESTART_TRANS;
		}

		le64_add_cpu(&h->new_inode.v.i_sectors, sectors);
		atomic64_add(sectors, &ei->i_sectors);

		h->op->sectors_added += sectors;

		if (h->op->is_dio) {
			spin_lock(&inode->i_lock);
			inode->i_blocks += sectors;
			spin_unlock(&inode->i_lock);
		}
	}

	return BTREE_HOOK_DO_INSERT;
}
static int bchfs_write_index_update(struct bch_write_op *wop)
{
	struct bchfs_write_op *op = container_of(wop,
				struct bchfs_write_op, op);
	struct keylist *keys = &op->op.insert_keys;
	struct btree_iter extent_iter, inode_iter;
	struct bchfs_extent_trans_hook hook;
	struct bkey_i *k = bch_keylist_front(keys);
	int ret;

	BUG_ON(k->k.p.inode != op->ei->vfs_inode.i_ino);

	bch_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS,
				   bkey_start_pos(&bch_keylist_front(keys)->k));
	bch_btree_iter_init_intent(&inode_iter, wop->c, BTREE_ID_INODES,
				   POS(extent_iter.pos.inode, 0));

	do {
		hook.hook.fn = bchfs_extent_update_hook;
		hook.need_inode_update = false;

		ret = bch_btree_iter_traverse(&extent_iter);
		if (ret)
			break;

		/* XXX: ei->i_size locking */
		k = bch_keylist_front(keys);
		if (min(k->k.p.offset << 9, op->new_i_size) > op->ei->i_size)
			hook.need_inode_update = true;

		if (hook.need_inode_update) {
			struct bkey_s_c inode;

			if (!btree_iter_linked(&inode_iter))
				bch_btree_iter_link(&extent_iter, &inode_iter);

			inode = bch_btree_iter_peek_with_holes(&inode_iter);
			if ((ret = btree_iter_err(inode)))
				break;

			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
				      "inode %llu not found when updating",
				      extent_iter.pos.inode)) {
				ret = -ENOENT;
				break;
			}

			bkey_reassemble(&hook.new_inode.k_i, inode);

			ret = bch_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k),
					BTREE_INSERT_ENTRY(&inode_iter, &hook.new_inode.k_i));
		} else {
			ret = bch_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k));
		}

		if (ret == -EINTR)
			continue;
		if (ret)
			break;

		bch_keylist_pop_front(keys);
	} while (!bch_keylist_empty(keys));

	bch_btree_iter_unlock(&extent_iter);
	bch_btree_iter_unlock(&inode_iter);

	return ret;
}
/* stored in page->private: */

/*
 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we
 * could almost protect it with the page lock, except that
 * bch_writepage_io_done has to update the sector counts (and from
 * interrupt/bottom half context).
 */
struct bch_page_state {
	union { struct {
		/*
		 * BCH_PAGE_ALLOCATED: page is _fully_ written on disk, and not
		 * compressed - which means to write this page we don't have to
		 * reserve space (the new write will never take up more space
		 * on disk than what it's overwriting)
		 *
		 * BCH_PAGE_UNALLOCATED: page is not fully written on disk, or
		 * is compressed - before writing we have to reserve space with
		 * bch_reserve_sectors()
		 *
		 * BCH_PAGE_RESERVED: page has space reserved on disk
		 * (reservation will be consumed when the page is written).
		 */
		enum {
			BCH_PAGE_UNALLOCATED	= 0,
			BCH_PAGE_RESERVED,
			BCH_PAGE_ALLOCATED,
		}			alloc_state:2;

		/* Owns PAGE_SECTORS sized reservation: */
		unsigned		reserved:1;

		/*
		 * Number of sectors on disk - for i_blocks
		 * Uncompressed size, not compressed size:
		 */
		u8			sectors;
		u8			dirty_sectors;
	};
	/* for cmpxchg: */
	unsigned long			v;
	};
};
#define page_state_cmpxchg(_ptr, _new, _expr)				\
({									\
	unsigned long _v = READ_ONCE((_ptr)->v);			\
	struct bch_page_state _old;					\
									\
	do {								\
		_old.v = _new.v = _v;					\
		_expr;							\
									\
		EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
	} while (_old.v != _new.v &&					\
		 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v);	\
									\
	_old;								\
})
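/*
 * Example usage (a sketch; see bch_set_page_dirty() below for a real caller).
 * Because the cmpxchg loop retries on contention, _expr may be evaluated more
 * than once and must only modify _new:
 *
 *	struct bch_page_state old, new;
 *
 *	old = page_state_cmpxchg(page_state(page), new, {
 *		new.dirty_sectors = PAGE_SECTORS - new.sectors;
 *	});
 */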
static inline struct bch_page_state *page_state(struct page *page)
{
	struct bch_page_state *s = (void *) &page->private;

	BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));

	if (!PagePrivate(page))
		SetPagePrivate(page);

	return s;
}
static void bch_put_page_reservation(struct cache_set *c, struct page *page)
{
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	s = page_state_cmpxchg(page_state(page), s, {
		if (!s.reserved)
			return;
		s.reserved = 0;
	});

	bch_disk_reservation_put(c, &res);
}
static int bch_get_page_reservation(struct cache_set *c, struct page *page,
				    bool check_enospc)
{
	struct bch_page_state *s = page_state(page), new;
	struct disk_reservation res;
	int ret;

	BUG_ON(s->alloc_state == BCH_PAGE_ALLOCATED &&
	       s->sectors != PAGE_SECTORS);

	if (s->reserved ||
	    s->alloc_state == BCH_PAGE_ALLOCATED)
		return 0;

	ret = bch_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
				       ? BCH_DISK_RESERVATION_NOFAIL : 0);
	if (ret)
		return ret;

	page_state_cmpxchg(s, new, {
		if (new.reserved) {
			bch_disk_reservation_put(c, &res);
			return 0;
		}
		new.reserved = 1;
	});

	return 0;
}
static void bch_clear_page_bits(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	if (!PagePrivate(page))
		return;

	s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
	ClearPagePrivate(page);

	if (s.dirty_sectors) {
		spin_lock(&inode->i_lock);
		inode->i_blocks -= s.dirty_sectors;
		spin_unlock(&inode->i_lock);
	}

	if (s.reserved)
		bch_disk_reservation_put(c, &res);
}
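/*
 * Reservation lifecycle, in brief: bch_get_page_reservation() takes
 * PAGE_SECTORS of disk space at write_begin/mkwrite time, __bch_writepage()
 * transfers the reservation to the write op, and bch_clear_page_bits()
 * releases it if the page is invalidated before it ever gets written back.
 */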
int bch_set_page_dirty(struct page *page)
{
	struct bch_page_state old, new;

	old = page_state_cmpxchg(page_state(page), new,
		new.dirty_sectors = PAGE_SECTORS - new.sectors;
	);

	if (old.dirty_sectors != new.dirty_sectors) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		inode->i_blocks += new.dirty_sectors - old.dirty_sectors;
		spin_unlock(&inode->i_lock);
	}

	return __set_page_dirty_nobuffers(page);
}
/* readpages/writepages: */

static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);

	return bio->bi_vcnt < bio->bi_max_vecs &&
		bio_end_sector(bio) == offset;
}

static int bio_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9);

	BUG_ON(!bio->bi_max_vecs);

	if (!bio->bi_vcnt)
		bio->bi_iter.bi_sector = offset;
	else if (!bio_can_add_page_contig(bio, page))
		return -1;

	bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};

	bio->bi_iter.bi_size += PAGE_SIZE;

	return 0;
}
static void bch_readpages_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
		}

		unlock_page(page);
	}

	bio_put(bio);
}
static inline struct page *__readpage_next_page(struct address_space *mapping,
						struct list_head *pages,
						unsigned *nr_pages)
{
	struct page *page;
	int ret;

	while (*nr_pages) {
		page = list_entry(pages->prev, struct page, lru);
		prefetchw(&page->flags);
		list_del(&page->lru);

		ret = add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS);

		/* if add_to_page_cache_lru() succeeded, page is locked: */
		put_page(page);

		if (!ret)
			return page;

		(*nr_pages)--;
	}

	return NULL;
}

#define for_each_readpage_page(_mapping, _pages, _nr_pages, _page)	\
	for (;								\
	     ((_page) = __readpage_next_page(_mapping, _pages, &(_nr_pages)));\
	     (_nr_pages)--)
static void bch_mark_pages_unalloc(struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter)
		page_state(bv.bv_page)->alloc_state = BCH_PAGE_UNALLOCATED;
}
static void bch_add_page_sectors(struct bio *bio, const struct bkey *k)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = page_state(bv.bv_page);

		/* sectors in @k from the start of this page: */
		unsigned k_sectors = k->size -
			(iter.bi_sector - bkey_start_offset(k));
		unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);

		BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);

		s->sectors += page_sectors;
	}
}
static void bchfs_read(struct cache_set *c, struct bch_read_bio *rbio, u64 inode)
{
	struct bio *bio = &rbio->bio;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bio_vec *bv;
	unsigned i;
	int ret;

	bch_increment_clock(c, bio_sectors(bio), READ);

	/*
	 * Initialize page state:
	 * If a page is partly allocated and partly a hole, we want it to be
	 * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages
	 * allocated and then mark them unallocated as we find holes:
	 *
	 * Note that the bio hasn't been split yet - it's the only bio that
	 * points to these pages. As we walk extents and split @bio, that
	 * won't necessarily be true, and the splits won't necessarily be on
	 * page boundaries:
	 */
	bio_for_each_segment_all(bv, bio, i) {
		struct bch_page_state *s = page_state(bv->bv_page);

		EBUG_ON(s->reserved);

		s->alloc_state = BCH_PAGE_ALLOCATED;
		s->sectors = 0;
	}

	for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
				      POS(inode, bio->bi_iter.bi_sector), k) {
		BKEY_PADDED(k) tmp;
		struct extent_pick_ptr pick;
		unsigned bytes, sectors;
		bool is_last;

		bkey_reassemble(&tmp.k, k);
		bch_btree_iter_unlock(&iter);
		k = bkey_i_to_s_c(&tmp.k);

		if (!bkey_extent_is_allocation(k.k) ||
		    bkey_extent_is_compressed(c, k))
			bch_mark_pages_unalloc(bio);

		bch_extent_pick_ptr(c, k, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");
			bio_endio(bio);
			return;
		}

		sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
			bio->bi_iter.bi_sector;
		bytes = sectors << 9;
		is_last = bytes == bio->bi_iter.bi_size;
		swap(bio->bi_iter.bi_size, bytes);

		if (bkey_extent_is_allocation(k.k))
			bch_add_page_sectors(bio, k.k);

		if (pick.ca) {
			PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
				c->prio_clock[READ].hand;

			bch_read_extent(c, rbio, k, &pick,
					BCH_READ_RETRY_IF_STALE|
					(is_last ? BCH_READ_IS_LAST : 0));
		} else {
			zero_fill_bio_iter(bio, bio->bi_iter);

			if (is_last)
				bio_endio(bio);
		}

		if (is_last)
			return;

		swap(bio->bi_iter.bi_size, bytes);
		bio_advance(bio, bytes);
	}

	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node
	 */
	ret = bch_btree_iter_unlock(&iter);
	BUG_ON(!ret);
	bcache_io_error(c, bio, "btree IO error %i", ret);
	bio_endio(bio);
}
int bch_readpages(struct file *file, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct bch_read_bio *rbio = NULL;
	struct page *page;

	pr_debug("reading %u pages", nr_pages);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	for_each_readpage_page(mapping, pages, nr_pages, page) {
again:
		if (!rbio) {
			rbio = container_of(bio_alloc_bioset(GFP_NOFS,
						min_t(unsigned, nr_pages,
						      BIO_MAX_PAGES),
						&c->bio_read),
					struct bch_read_bio, bio);

			rbio->bio.bi_end_io = bch_readpages_end_io;
		}

		if (bio_add_page_contig(&rbio->bio, page)) {
			bchfs_read(c, rbio, inode->i_ino);
			rbio = NULL;
			goto again;
		}
	}

	if (rbio)
		bchfs_read(c, rbio, inode->i_ino);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

	return 0;
}
int bch_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct bch_read_bio *rbio;

	rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
					&c->bio_read),
			struct bch_read_bio, bio);
	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_end_io = bch_readpages_end_io;

	bio_add_page_contig(&rbio->bio, page);
	bchfs_read(c, rbio, inode->i_ino);

	return 0;
}
struct bch_writepage_state {
	struct bch_writepage_io *io;
};

static void bch_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_put(bio);
}
778 static void bch_writepage_io_done(struct closure *cl)
780 struct bch_writepage_io *io = container_of(cl,
781 struct bch_writepage_io, cl);
782 struct cache_set *c = io->op.op.c;
783 struct bio *bio = &io->bio.bio;
784 struct bio_vec *bvec;
787 atomic_sub(bio->bi_vcnt, &c->writeback_pages);
788 wake_up(&c->writeback_wait);
790 bio_for_each_segment_all(bvec, bio, i) {
791 struct page *page = bvec->bv_page;
793 if (io->op.op.error) {
796 set_bit(AS_EIO, &page->mapping->flags);
799 if (io->op.op.written >= PAGE_SECTORS) {
800 struct bch_page_state old, new;
802 old = page_state_cmpxchg(page_state(page), new, {
803 new.sectors = PAGE_SECTORS;
804 new.dirty_sectors = 0;
807 io->op.sectors_added -= old.dirty_sectors;
808 io->op.op.written -= PAGE_SECTORS;
813 * racing with fallocate can cause us to add fewer sectors than
814 * expected - but we shouldn't add more sectors than expected:
816 * (error (due to going RO) halfway through a page can screw that up
819 BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);
822 * PageWriteback is effectively our ref on the inode - fixup i_blocks
823 * before calling end_page_writeback:
825 if (io->op.sectors_added) {
826 struct inode *inode = &io->op.ei->vfs_inode;
828 spin_lock(&inode->i_lock);
829 inode->i_blocks += io->op.sectors_added;
830 spin_unlock(&inode->i_lock);
833 bio_for_each_segment_all(bvec, bio, i)
834 end_page_writeback(bvec->bv_page);
836 closure_return_with_destructor(&io->cl, bch_writepage_io_free);
static void bch_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	atomic_add(io->bio.bio.bi_vcnt, &io->op.op.c->writeback_pages);

	io->op.op.pos.offset = io->bio.bio.bi_iter.bi_sector;

	closure_call(&io->op.op.cl, bch_write, NULL, &io->cl);
	continue_at(&io->cl, bch_writepage_io_done, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch_writepage_io_alloc(struct cache_set *c,
				   struct bch_writepage_state *w,
				   struct bch_inode_info *ei,
				   struct page *page)
{
	u64 inum = ei->vfs_inode.i_ino;

	if (!w->io) {
alloc_io:
		w->io = container_of(bio_alloc_bioset(GFP_NOFS,
						BIO_MAX_PAGES,
						bch_writepage_bioset),
				struct bch_writepage_io, bio.bio);

		closure_init(&w->io->cl, NULL);
		w->io->op.ei		= ei;
		w->io->op.sectors_added	= 0;
		w->io->op.is_dio	= false;
		bch_write_op_init(&w->io->op.op, c, &w->io->bio,
				  (struct disk_reservation) {
					.nr_replicas = c->opts.data_replicas,
				  },
				  foreground_write_point(c, inum),
				  POS(inum, 0),
				  &ei->journal_seq, 0);
		w->io->op.op.index_update_fn = bchfs_write_index_update;
	}

	if (bio_add_page_contig(&w->io->bio.bio, page)) {
		bch_writepage_do_io(w);
		goto alloc_io;
	}

	/*
	 * We shouldn't ever be handed pages for multiple inodes in a single
	 * pass - right?
	 */
	BUG_ON(ei != w->io->op.ei);
}
static int __bch_writepage(struct cache_set *c, struct page *page,
			   struct writeback_control *wbc,
			   struct bch_writepage_state *w)
{
	struct inode *inode = page->mapping->host;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bch_page_state new, old;
	unsigned offset;
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	bch_writepage_io_alloc(c, w, ei, page);

	/* while page is locked: */
	w->io->op.new_i_size = i_size;

	if (wbc->sync_mode == WB_SYNC_ALL)
		w->io->bio.bio.bi_opf |= WRITE_SYNC;

	/* Before unlocking the page, transfer reservation to w->io: */
	old = page_state_cmpxchg(page_state(page), new, {
		BUG_ON(!new.reserved &&
		       (new.sectors != PAGE_SECTORS ||
			new.alloc_state != BCH_PAGE_ALLOCATED));

		if (new.alloc_state == BCH_PAGE_ALLOCATED &&
		    w->io->op.op.compression_type != BCH_COMPRESSION_NONE)
			new.alloc_state = BCH_PAGE_UNALLOCATED;
		else if (!new.reserved)
			new.alloc_state = BCH_PAGE_ALLOCATED;
		new.reserved = 0;
	});

	w->io->op.op.res.sectors += PAGE_SECTORS * (old.reserved - new.reserved);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	return 0;
}
int bch_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct cache_set *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	struct pagecache_iter iter;
	struct page *page;
	int ret = 0;
	int done = 0;
	int cycled;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	int tag;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	done_index = index;

	for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
		done_index = page->index;

		if (w.io &&
		    !bio_can_add_page_contig(&w.io->bio.bio, page))
			bch_writepage_do_io(&w);

		if (!w.io &&
		    atomic_read(&c->writeback_pages) >=
		    c->writeback_pages_max) {
			/* don't sleep with pages pinned: */
			pagecache_iter_release(&iter);

			__wait_event(c->writeback_wait,
				     atomic_read(&c->writeback_pages) <
				     c->writeback_pages_max);
		}

		lock_page(page);

		/*
		 * Page truncated or invalidated. We can freely skip it
		 * then, even for data integrity operations: the page
		 * has disappeared concurrently, so there could be no
		 * real expectation of this data integrity operation
		 * even if there is now a new, dirty page at the same
		 * pagecache address.
		 */
		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
		ret = __bch_writepage(c, page, wbc, &w);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				done_index = page->index + 1;
				done = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE) {
			done = 1;
			break;
		}
	}
	pagecache_iter_release(&iter);

	if (w.io)
		bch_writepage_do_io(&w);

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
int bch_writepage(struct page *page, struct writeback_control *wbc)
{
	struct cache_set *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w = { NULL };
	int ret;

	ret = __bch_writepage(c, page, wbc, &w);
	if (w.io)
		bch_writepage_do_io(&w);

	return ret;
}
static void bch_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch_read_single_page(struct page *page,
				struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
					&c->bio_read),
			struct bch_read_bio, bio);
	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch_read_single_page_end_io;
	bio_add_page_contig(&rbio->bio, page);

	bchfs_read(c, rbio, inode->i_ino);
	wait_for_completion(&done);

	ret = rbio->bio.bi_error;
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
int bch_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret;

	BUG_ON(inode_unhashed(mapping->host));

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch_get_page_reservation(c, page, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	pagecache_add_put(&mapping->add_lock);
	return ret;
}
int bch_write_end(struct file *filp, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;

	lockdep_assert_held(&inode->i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);
		if (!PageDirty(page))
			set_page_dirty(page);
	} else
		bch_put_page_reservation(c, page);

	unlock_page(page);
	put_page(page);
	pagecache_add_put(&mapping->add_lock);

	return copied;
}
static void bch_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_error)
		dio->ret = bio->bi_error;

	closure_put(&dio->cl);
}

static void bch_direct_IO_read_split_endio(struct bio *bio)
{
	bch_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}
static int bch_direct_IO_read(struct cache_set *c, struct kiocb *req,
			      struct file *file, struct inode *inode,
			      struct iov_iter *iter, loff_t offset)
{
	struct dio_read *dio;
	struct bio *bio;
	bool sync = is_sync_kiocb(req);
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(inode) - offset));
	iov_iter_truncate(iter, round_up(ret, block_bytes(c)));

	if (!ret)
		return ret;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch_dio_read_bioset);

	bio->bi_end_io = bch_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	if (!sync) {
		set_closure_fn(&dio->cl, bch_dio_read_complete, NULL);
		/*
		 * this is a _really_ horrible hack just to avoid an atomic sub
		 * at the end:
		 */
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_get_user_pages(bio, iter, 1);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_error = ret;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch_read(c, container_of(bio,
				struct bch_read_bio, bio),
			 inode->i_ino);
	}

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
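/*
 * Note: as with the write path below, -EIOCBQUEUED tells the caller that
 * completion is asynchronous - bch_dio_read_complete() will eventually call
 * dio->req->ki_complete().
 */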
static long __bch_dio_write_complete(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_inode;
	long ret = dio->error ?: dio->written;

	bch_disk_reservation_put(dio->c, &dio->res);

	__pagecache_block_put(&mapping->add_lock);
	inode_dio_end(inode);

	if (dio->iovec && dio->iovec != dio->inline_vecs)
		kfree(dio->iovec);

	bio_put(&dio->bio.bio);
	return ret;
}
static void bch_dio_write_complete(struct closure *cl)
{
	struct dio_write *dio = container_of(cl, struct dio_write, cl);
	struct kiocb *req = dio->req;

	req->ki_complete(req, __bch_dio_write_complete(dio), 0);
}
static void bch_dio_write_done(struct dio_write *dio)
{
	struct bio_vec *bv;
	int i;

	dio->written += dio->iop.op.written << 9;

	if (dio->iop.op.error)
		dio->error = dio->iop.op.error;

	bio_for_each_segment_all(bv, &dio->bio.bio, i)
		put_page(bv->bv_page);

	if (dio->iter.count)
		bio_reset(&dio->bio.bio);
}
static void bch_do_direct_IO_write(struct dio_write *dio)
{
	struct file *file = dio->req->ki_filp;
	struct inode *inode = file->f_inode;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct bio *bio = &dio->bio.bio;
	unsigned flags = 0;
	int ret;

	if ((dio->req->ki_flags & IOCB_DSYNC) &&
	    !dio->c->opts.journal_flush_disabled)
		flags |= BCH_WRITE_FLUSH;

	bio->bi_iter.bi_sector = (dio->offset + dio->written) >> 9;

	ret = bio_get_user_pages(bio, &dio->iter, 0);
	if (ret < 0) {
		/*
		 * these didn't get initialized, but bch_dio_write_done() will
		 * look at them:
		 */
		dio->iop.op.error = 0;
		dio->iop.op.written = 0;
		dio->error = ret;
		return;
	}

	dio->iop.ei		= ei;
	dio->iop.sectors_added	= 0;
	dio->iop.is_dio		= true;
	dio->iop.new_i_size	= U64_MAX;
	bch_write_op_init(&dio->iop.op, dio->c, &dio->bio,
			  dio->res,
			  foreground_write_point(dio->c, inode->i_ino),
			  POS(inode->i_ino, bio->bi_iter.bi_sector),
			  &ei->journal_seq, flags);
	dio->iop.op.index_update_fn = bchfs_write_index_update;

	dio->res.sectors -= bio_sectors(bio);
	dio->iop.op.res.sectors = bio_sectors(bio);

	task_io_account_write(bio->bi_iter.bi_size);

	closure_call(&dio->iop.op.cl, bch_write, NULL, &dio->cl);
}
static void bch_dio_write_loop_async(struct closure *cl)
{
	struct dio_write *dio =
		container_of(cl, struct dio_write, cl);
	struct address_space *mapping = dio->req->ki_filp->f_mapping;

	bch_dio_write_done(dio);

	if (dio->iter.count && !dio->error) {
		use_mm(dio->mm);
		pagecache_block_get(&mapping->add_lock);

		bch_do_direct_IO_write(dio);

		pagecache_block_put(&mapping->add_lock);
		unuse_mm(dio->mm);

		continue_at(&dio->cl, bch_dio_write_loop_async, NULL);
	} else {
#if 0
		closure_return_with_destructor(cl, bch_dio_write_complete);
#else
		closure_debug_destroy(cl);
		bch_dio_write_complete(cl);
#endif
	}
}
static int bch_direct_IO_write(struct cache_set *c, struct kiocb *req,
			       struct file *file, struct inode *inode,
			       struct iov_iter *iter, loff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct dio_write *dio;
	struct bio *bio;
	ssize_t ret;
	bool sync = is_sync_kiocb(req);

	lockdep_assert_held(&inode->i_rwsem);

	if (unlikely(!iter->count))
		return 0;

	if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       bch_dio_write_bioset);
	dio = container_of(bio, struct dio_write, bio.bio);
	dio->req	= req;
	dio->c		= c;
	dio->written	= 0;
	dio->error	= 0;
	dio->offset	= offset;
	dio->iovec	= NULL;
	dio->iter	= *iter;
	dio->mm		= current->mm;
	closure_init(&dio->cl, NULL);

	if (offset + iter->count > inode->i_size)
		sync = true;

	/*
	 * XXX: we shouldn't return -ENOSPC if we're overwriting existing data -
	 * if getting a reservation fails we should check if we are doing an
	 * overwrite.
	 *
	 * Have to then guard against racing with truncate (deleting data that
	 * we would have been overwriting)
	 */
	ret = bch_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
	if (unlikely(ret)) {
		closure_debug_destroy(&dio->cl);
		bio_put(bio);
		return ret;
	}

	inode_dio_begin(inode);
	__pagecache_block_get(&mapping->add_lock);

	if (sync) {
		do {
			bch_do_direct_IO_write(dio);

			closure_sync(&dio->cl);
			bch_dio_write_done(dio);
		} while (dio->iter.count && !dio->error);

		closure_debug_destroy(&dio->cl);
		return __bch_dio_write_complete(dio);
	} else {
		bch_do_direct_IO_write(dio);

		if (dio->iter.count && !dio->error) {
			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				dio->iovec = kmalloc(dio->iter.nr_segs *
						     sizeof(struct iovec),
						     GFP_KERNEL);
				if (!dio->iovec)
					dio->error = -ENOMEM;
			} else {
				dio->iovec = dio->inline_vecs;
			}

			memcpy(dio->iovec, dio->iter.iov,
			       dio->iter.nr_segs * sizeof(struct iovec));
			dio->iter.iov = dio->iovec;
		}

		continue_at_noreturn(&dio->cl, bch_dio_write_loop_async, NULL);
		return -EIOCBQUEUED;
	}
}
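/*
 * Note the sync/async split above: the synchronous path simply loops calling
 * bch_do_direct_IO_write() until the iter is drained, while the asynchronous
 * path must first copy the caller's iovec (which may disappear once we return
 * -EIOCBQUEUED) and then continues from bch_dio_write_loop_async().
 */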
ssize_t bch_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct inode *inode = file->f_inode;
	struct cache_set *c = inode->i_sb->s_fs_info;

	return ((iov_iter_rw(iter) == WRITE)
		? bch_direct_IO_write
		: bch_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
}

static ssize_t
bch_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_inode;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct address_space *mapping = file->f_mapping;
	loff_t pos = iocb->ki_pos;
	ssize_t ret;

	pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	ret = write_invalidate_inode_pages_range(file->f_mapping, pos,
					pos + iov_iter_count(iter) - 1);
	if (unlikely(ret))
		goto err;

	ret = bch_direct_IO_write(c, iocb, file, inode, iter, pos);
err:
	pagecache_block_put(&mapping->add_lock);

	return ret;
}
static ssize_t __bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	ret = file_remove_privs(file);
	if (ret)
		goto out;

	ret = file_update_time(file);
	if (ret)
		goto out;

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch_direct_write(iocb, from)
		: generic_perform_write(file, from, iocb->ki_pos);

	if (likely(ret > 0))
		iocb->ki_pos += ret;
out:
	current->backing_dev_info = NULL;
	return ret;
}
ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	bool direct = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __bch_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
int bch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct address_space *mapping = inode->i_mapping;
	struct cache_set *c = inode->i_sb->s_fs_info;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	lock_page(page);
	if (page->mapping != mapping ||
	    page_offset(page) > i_size_read(inode)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (bch_get_page_reservation(c, page, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (!PageDirty(page))
		set_page_dirty(page);
	wait_for_stable_page(page);
out:
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
void bch_invalidatepage(struct page *page, unsigned int offset,
			unsigned int length)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (offset || length < PAGE_SIZE)
		return;

	bch_clear_page_bits(page);
}

int bch_releasepage(struct page *page, gfp_t gfp_mask)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (PageDirty(page))
		return 0;

	bch_clear_page_bits(page);
	return 1;
}
#ifdef CONFIG_MIGRATION
int bch_migrate_page(struct address_space *mapping, struct page *newpage,
		     struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		*page_state(newpage) = *page_state(page);
		ClearPagePrivate(page);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
int bch_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct cache_set *c = inode->i_sb->s_fs_info;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	return bch_journal_flush_seq(&c->journal, ei->journal_seq);
}
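/*
 * Sketch of what this buys us: the write paths above record the journal
 * sequence number of their index updates in ei->journal_seq (passed as
 * &ei->journal_seq to bch_write_op_init()), so an fsync only has to wait for
 * that one sequence number to reach the disk rather than flushing the whole
 * journal.
 */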
static int __bch_truncate_page(struct address_space *mapping,
			       pgoff_t index, loff_t start, loff_t end)
{
	struct inode *inode = mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		struct btree_iter iter;
		struct bkey_s_c k = bkey_s_c_null;

		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
				   POS(inode->i_ino,
				       index << (PAGE_SHIFT - 9)), k) {
			if (bkey_cmp(bkey_start_pos(k.k),
				     POS(inode->i_ino,
					 (index + 1) << (PAGE_SHIFT - 9))) >= 0)
				break;

			if (k.k->type != KEY_TYPE_DISCARD &&
			    k.k->type != BCH_RESERVATION) {
				bch_btree_iter_unlock(&iter);
				goto create;
			}
		}
		bch_btree_iter_unlock(&iter);
		return 0;
create:
		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (!PageUptodate(page)) {
		ret = bch_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch_get_page_reservation(c, page, false);
	BUG_ON(ret);

	if (index == start >> PAGE_SHIFT &&
	    index == end >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, end_offset);
	else if (index == start >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, PAGE_SIZE);
	else if (index == end >> PAGE_SHIFT)
		zero_user_segment(page, 0, end_offset);

	if (!PageDirty(page))
		set_page_dirty(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}
static int bch_truncate_page(struct address_space *mapping, loff_t from)
{
	return __bch_truncate_page(mapping, from >> PAGE_SHIFT,
				   from, from + PAGE_SIZE);
}
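/*
 * Worked example (with 4k pages): punching bytes 100..4199 leaves two partial
 * pages, so callers invoke __bch_truncate_page() once per end:
 *
 *	__bch_truncate_page(mapping, 0, 100, 4200);	zeroes bytes 100..4095
 *	__bch_truncate_page(mapping, 1, 100, 4200);	zeroes bytes 4096..4199
 *
 * Whole pages in between are handled by truncate_pagecache_range() and
 * bch_discard() - see bch_fpunch() below.
 */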
int bch_truncate(struct inode *inode, struct iattr *iattr)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct cache_set *c = inode->i_sb->s_fs_info;
	bool shrink = iattr->ia_size <= inode->i_size;
	int ret = 0;

	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	truncate_setsize(inode, iattr->ia_size);

	/* sync appends.. */
	/* XXX what protects ei->i_size? */
	if (iattr->ia_size > ei->i_size)
		ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
	if (ret)
		goto err_put_pagecache;

	mutex_lock(&ei->update_lock);
	i_size_dirty_get(ei);
	ret = bch_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	if (unlikely(ret))
		goto err;

	/*
	 * There might be persistent reservations (from fallocate())
	 * above i_size, which bch_inode_truncate() will discard - we're
	 * only supposed to discard them if we're doing a real truncate
	 * here (new i_size < current i_size):
	 */
	if (shrink) {
		struct i_sectors_hook i_sectors_hook;

		ret = i_sectors_dirty_get(ei, &i_sectors_hook);
		if (unlikely(ret))
			goto err;

		ret = bch_truncate_page(inode->i_mapping, iattr->ia_size);
		if (unlikely(ret)) {
			i_sectors_dirty_put(ei, &i_sectors_hook);
			goto err;
		}

		ret = bch_inode_truncate(c, inode->i_ino,
					 round_up(iattr->ia_size, PAGE_SIZE) >> 9,
					 &i_sectors_hook.hook,
					 &ei->journal_seq);

		i_sectors_dirty_put(ei, &i_sectors_hook);

		if (unlikely(ret))
			goto err;
	}

	mutex_lock(&ei->update_lock);
	setattr_copy(inode, iattr);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	/* clear I_SIZE_DIRTY: */
	i_size_dirty_put(ei);
	ret = bch_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	pagecache_block_put(&mapping->add_lock);

	return ret;
err:
	i_size_dirty_put(ei);
err_put_pagecache:
	pagecache_block_put(&mapping->add_lock);
	return ret;
}
static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct cache_set *c = inode->i_sb->s_fs_info;
	u64 ino = inode->i_ino;
	u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
	u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
	int ret = 0;

	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch_truncate_page(inode->i_mapping,
				  offset >> PAGE_SHIFT,
				  offset, offset + len);
	if (unlikely(ret))
		goto out;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch_truncate_page(inode->i_mapping,
					  (offset + len) >> PAGE_SHIFT,
					  offset, offset + len);
		if (unlikely(ret))
			goto out;
	}

	truncate_pagecache_range(inode, offset, offset + len - 1);

	if (discard_start < discard_end) {
		struct disk_reservation disk_res;
		struct i_sectors_hook i_sectors_hook;

		BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));

		ret = i_sectors_dirty_get(ei, &i_sectors_hook);
		if (unlikely(ret))
			goto out;

		ret = bch_discard(c,
				  POS(ino, discard_start),
				  POS(ino, discard_end),
				  ZERO_VERSION,
				  &disk_res,
				  &i_sectors_hook.hook,
				  &ei->journal_seq);

		i_sectors_dirty_put(ei, &i_sectors_hook);
		bch_disk_reservation_put(c, &disk_res);
	}
out:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return ret;
}
static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct btree_iter src;
	struct btree_iter dst;
	BKEY_PADDED(k) copy;
	struct bkey_s_c k;
	struct i_sectors_hook i_sectors_hook;
	loff_t new_size;
	int ret;

	if ((offset | len) & (PAGE_SIZE - 1))
		return -EINVAL;

	bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS,
				   POS(inode->i_ino, offset >> 9));
	/* position will be set from dst iter's position: */
	bch_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN);
	bch_btree_iter_link(&src, &dst);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	ret = -EINVAL;
	if (offset + len >= inode->i_size)
		goto err;

	if (inode->i_size < len)
		goto err;

	new_size = inode->i_size - len;

	ret = write_invalidate_inode_pages_range(inode->i_mapping,
						 offset, LLONG_MAX);
	if (ret)
		goto err;

	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
	if (ret)
		goto err;

	while (bkey_cmp(dst.pos,
			POS(inode->i_ino,
			    round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
		struct disk_reservation disk_res;

		bch_btree_iter_set_pos(&src,
			POS(dst.pos.inode, dst.pos.offset + (len >> 9)));

		ret = bch_btree_iter_traverse(&dst);
		if (ret)
			goto btree_iter_err;

		k = bch_btree_iter_peek_with_holes(&src);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		bkey_reassemble(&copy.k, k);

		if (bkey_deleted(&copy.k.k))
			copy.k.k.type = KEY_TYPE_DISCARD;

		bch_cut_front(src.pos, &copy.k);
		copy.k.k.p.offset -= len >> 9;

		BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));

		ret = bch_disk_reservation_get(c, &disk_res, copy.k.k.size,
					       BCH_DISK_RESERVATION_NOFAIL);
		BUG_ON(ret);

		ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					  &ei->journal_seq,
					  BTREE_INSERT_ATOMIC|
					  BTREE_INSERT_NOFAIL,
					  BTREE_INSERT_ENTRY(&dst, &copy.k));
		bch_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_unwind;

		bch_btree_iter_cond_resched(&src);
	}

	bch_btree_iter_unlock(&src);
	bch_btree_iter_unlock(&dst);

	ret = bch_inode_truncate(c, inode->i_ino,
				 round_up(new_size, PAGE_SIZE) >> 9,
				 &i_sectors_hook.hook,
				 &ei->journal_seq);
	if (ret)
		goto err_unwind;

	i_sectors_dirty_put(ei, &i_sectors_hook);

	mutex_lock(&ei->update_lock);
	i_size_write(inode, new_size);
	ret = bch_write_inode_size(c, ei, inode->i_size);
	mutex_unlock(&ei->update_lock);

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return ret;
err_unwind:
	/*
	 * XXX: we've left data with multiple pointers... which isn't a _super_
	 * serious problem...
	 */
	i_sectors_dirty_put(ei, &i_sectors_hook);
err:
	bch_btree_iter_unlock(&src);
	bch_btree_iter_unlock(&dst);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);
	return ret;
}
static long bch_fallocate(struct inode *inode, int mode,
			  loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct bch_inode_info *ei = to_bch_ei(inode);
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct i_sectors_hook i_sectors_hook;
	struct btree_iter iter;
	struct bkey_i reservation;
	struct bkey_s_c k;
	struct bpos end;
	loff_t block_start, block_end;
	loff_t new_size = offset + len;
	unsigned sectors;
	int ret;

	bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN);

	inode_lock(inode);
	inode_dio_wait(inode);
	pagecache_block_get(&mapping->add_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->i_size) {
		ret = inode_newsize_ok(inode, new_size);
		if (ret)
			goto err;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch_truncate_page(inode->i_mapping,
					  offset >> PAGE_SHIFT,
					  offset, offset + len);

		if (!ret &&
		    offset >> PAGE_SHIFT !=
		    (offset + len) >> PAGE_SHIFT)
			ret = __bch_truncate_page(inode->i_mapping,
						  (offset + len) >> PAGE_SHIFT,
						  offset, offset + len);

		if (unlikely(ret))
			goto err;

		truncate_pagecache_range(inode, offset, offset + len - 1);

		block_start	= round_up(offset, PAGE_SIZE);
		block_end	= round_down(offset + len, PAGE_SIZE);
	} else {
		block_start	= round_down(offset, PAGE_SIZE);
		block_end	= round_up(offset + len, PAGE_SIZE);
	}

	bch_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9));
	end = POS(inode->i_ino, block_end >> 9);

	ret = i_sectors_dirty_get(ei, &i_sectors_hook);
	if (unlikely(ret))
		goto err;

	while (bkey_cmp(iter.pos, end) < 0) {
		struct disk_reservation disk_res = { 0 };

		k = bch_btree_iter_peek_with_holes(&iter);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		/* already reserved */
		if (k.k->type == BCH_RESERVATION) {
			bch_btree_iter_advance_pos(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k)) {
			if (!(mode & FALLOC_FL_ZERO_RANGE)) {
				bch_btree_iter_advance_pos(&iter);
				continue;
			}
		}

		bkey_init(&reservation.k);
		reservation.k.type	= BCH_RESERVATION;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch_cut_front(iter.pos, &reservation);
		bch_cut_back(end, &reservation.k);

		sectors = reservation.k.size;

		if (!bkey_extent_is_allocation(k.k) ||
		    bkey_extent_is_compressed(c, k)) {
			ret = bch_disk_reservation_get(c, &disk_res,
						       sectors, 0);
			if (ret)
				goto err_put_sectors_dirty;
		}

		ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					  &ei->journal_seq,
					  BTREE_INSERT_ATOMIC|
					  BTREE_INSERT_NOFAIL,
					  BTREE_INSERT_ENTRY(&iter, &reservation));
		bch_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
	}
	bch_btree_iter_unlock(&iter);

	i_sectors_dirty_put(ei, &i_sectors_hook);

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    new_size > inode->i_size) {
		i_size_write(inode, new_size);

		mutex_lock(&ei->update_lock);
		ret = bch_write_inode_size(c, ei, inode->i_size);
		mutex_unlock(&ei->update_lock);
	}

	if ((mode & FALLOC_FL_KEEP_SIZE) &&
	    (mode & FALLOC_FL_ZERO_RANGE) &&
	    ei->i_size != inode->i_size) {
		/* sync appends.. */
		ret = filemap_write_and_wait_range(mapping, ei->i_size, S64_MAX);
		if (ret)
			goto err;

		if (ei->i_size != inode->i_size) {
			mutex_lock(&ei->update_lock);
			ret = bch_write_inode_size(c, ei, inode->i_size);
			mutex_unlock(&ei->update_lock);
		}
	}

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);

	return 0;
err_put_sectors_dirty:
	i_sectors_dirty_put(ei, &i_sectors_hook);
err:
	bch_btree_iter_unlock(&iter);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(inode);
	return ret;
}
long bch_fallocate_dispatch(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bch_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bch_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bch_fcollapse(inode, offset, len);

	return -EOPNOTSUPP;
}
static bool page_is_data(struct page *page)
{
	/* XXX: should only have to check PageDirty */
	return PagePrivate(page) &&
		(page_state(page)->sectors ||
		 page_state(page)->dirty_sectors);
}
static loff_t bch_next_pagecache_data(struct inode *inode,
				      loff_t start_offset,
				      loff_t end_offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++) {
		if (find_get_pages(mapping, index, 1, &page)) {
			lock_page(page);
			index = page->index;

			if (page_is_data(page))
				end_offset =
					min(end_offset,
					    max(start_offset,
						((loff_t) index) << PAGE_SHIFT));
			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}
static loff_t bch_seek_data(struct file *file, u64 offset)
{
	struct inode *inode = file->f_mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(inode);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->i_ino, offset >> 9), k) {
		if (k.k->p.inode != inode->i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}

	ret = bch_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch_next_pagecache_data(inode, offset, next_data);

	if (next_data > isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
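/*
 * Note: dirty pagecache may not be reflected in the extents btree yet, which
 * is why the btree result above is clamped with bch_next_pagecache_data();
 * bch_seek_hole() below does the converse with bch_next_pagecache_hole().
 */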
static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	bool ret;

	page = find_lock_entry(mapping, index);
	if (!page || radix_tree_exception(page))
		return false;

	ret = page_is_data(page);
	unlock_page(page);
	put_page(page);

	return ret;
}
static loff_t bch_next_pagecache_hole(struct inode *inode,
				      loff_t start_offset,
				      loff_t end_offset)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++)
		if (!page_slot_is_data(mapping, index))
			end_offset = max(start_offset,
					 ((loff_t) index) << PAGE_SHIFT);

	return end_offset;
}
static loff_t bch_seek_hole(struct file *file, u64 offset)
{
	struct inode *inode = file->f_mapping->host;
	struct cache_set *c = inode->i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(inode);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
				      POS(inode->i_ino, offset >> 9), k) {
		if (k.k->p.inode != inode->i_ino) {
			next_hole = bch_next_pagecache_hole(inode,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch_next_pagecache_hole(inode,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
loff_t bch_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch_seek_data(file, offset);
	case SEEK_HOLE:
		return bch_seek_hole(file, offset);
	}

	return -EINVAL;
}