#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
struct i_sectors_hook {
	struct extent_insert_hook	hook;
	struct bch_inode_info		*inode;

struct bchfs_write_op {
	struct bch_inode_info		*inode;

	struct bch_write_op		op;

struct bch_writepage_io {
	struct bchfs_write_op		op;

	struct task_struct		*task;
	struct iovec			inline_vecs[2];

	struct bchfs_write_op		iop;

	struct bch_read_bio		rbio;
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	if (!mapping->nrpages &&
	    !mapping->nrexceptional)

	ret = filemap_write_and_wait_range(mapping, start, end);

	if (!mapping->nrpages)

	ret = invalidate_inode_pages2_range(mapping,
	} while (ret == -EBUSY);
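
/*
 * (Why the loop retries on -EBUSY - inferred from the calls above:
 * invalidate_inode_pages2_range() fails with -EBUSY when a page in the
 * range couldn't be invalidated, e.g. because it was redirtied after
 * filemap_write_and_wait_range() flushed it, so the range is written back
 * and invalidated again - hence the XXX about spinning on a page that is
 * continually redirtied.)
 */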
/* i_size updates: */

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
	loff_t *new_i_size = p;

	lockdep_assert_held(&inode->ei_update_lock);

	bi->bi_size = *new_i_size;

static int __must_check bch2_write_inode_size(struct bch_fs *c,
					      struct bch_inode_info *inode,
	return __bch2_write_inode(c, inode, inode_set_size, &new_size);

static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, int sectors)
	inode->v.i_blocks += sectors;
	bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);

static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, int sectors)
	mutex_lock(&inode->ei_update_lock);
	__i_sectors_acct(c, inode, sectors);
	mutex_unlock(&inode->ei_update_lock);

/* i_sectors accounting: */

static enum btree_insert_ret
i_sectors_hook_fn(struct extent_insert_hook *hook,
		  struct bpos committed_pos,
		  struct bpos next_pos,
		  const struct bkey_i *insert)
	struct i_sectors_hook *h = container_of(hook,
				struct i_sectors_hook, hook);
	s64 sectors = next_pos.offset - committed_pos.offset;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));

	EBUG_ON(!(h->inode->ei_inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY));

	h->sectors += sectors * sign;

	return BTREE_INSERT_OK;
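
/*
 * (Worked example of the sign arithmetic above: inserting an allocating
 * extent over a hole gives sign = 1 - 0 = 1, so i_sectors grows by
 * next_pos.offset - committed_pos.offset; allocation overwriting
 * allocation gives sign = 0, no change; a discard overwriting allocated
 * data gives sign = 0 - 1 = -1, shrinking i_sectors.)
 */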
static int i_sectors_dirty_finish_fn(struct bch_inode_info *inode,
				     struct bch_inode_unpacked *bi,
	struct i_sectors_hook *h = p;

	if (h->new_i_size != U64_MAX &&
	     h->new_i_size > bi->bi_size))
		bi->bi_size = h->new_i_size;
	bi->bi_sectors	+= h->sectors;
	bi->bi_flags	&= ~h->flags;

static int i_sectors_dirty_finish(struct bch_fs *c, struct i_sectors_hook *h)
	mutex_lock(&h->inode->ei_update_lock);

	if (h->new_i_size != U64_MAX)
		i_size_write(&h->inode->v, h->new_i_size);

	__i_sectors_acct(c, h->inode, h->sectors);

	ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_finish_fn, h);
	mutex_unlock(&h->inode->ei_update_lock);

static int i_sectors_dirty_start_fn(struct bch_inode_info *inode,
				    struct bch_inode_unpacked *bi, void *p)
	struct i_sectors_hook *h = p;

	if (h->flags & BCH_INODE_I_SIZE_DIRTY)
		bi->bi_size = h->new_i_size;

	bi->bi_flags |= h->flags;

static int i_sectors_dirty_start(struct bch_fs *c, struct i_sectors_hook *h)
	mutex_lock(&h->inode->ei_update_lock);
	ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_start_fn, h);
	mutex_unlock(&h->inode->ei_update_lock);

static inline struct i_sectors_hook
i_sectors_hook_init(struct bch_inode_info *inode, unsigned flags)
	return (struct i_sectors_hook) {
		.hook.fn	= i_sectors_hook_fn,
		.new_i_size	= U64_MAX,
		.flags		= flags|BCH_INODE_I_SECTORS_DIRTY,
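
/*
 * (Typical usage - a sketch mirroring bch2_truncate()/bch2_fpunch() below,
 * not a new API:
 *
 *	struct i_sectors_hook h = i_sectors_hook_init(inode, 0);
 *
 *	ret = i_sectors_dirty_start(c, &h);
 *	... btree updates, passing &h.hook as the extent insert hook ...
 *	ret = i_sectors_dirty_finish(c, &h) ?: ret;
 *
 * start() flags the on-disk inode's i_sectors as dirty; the hook then
 * accumulates the sector delta in h.sectors as extents are inserted, and
 * finish() applies that delta to the inode and clears the flags.)
 */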
/* normal i_size/i_sectors update machinery: */

struct bchfs_extent_trans_hook {
	struct bchfs_write_op		*op;
	struct extent_insert_hook	hook;

	struct bch_inode_unpacked	inode_u;
	struct bkey_inode_buf		inode_p;

	bool				need_inode_update;

static enum btree_insert_ret
bchfs_extent_update_hook(struct extent_insert_hook *hook,
			 struct bpos committed_pos,
			 struct bpos next_pos,
			 const struct bkey_i *insert)
	struct bchfs_extent_trans_hook *h = container_of(hook,
				struct bchfs_extent_trans_hook, hook);
	struct bch_inode_info *inode = h->op->inode;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));
	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
	bool do_pack = false;

	if (h->op->unalloc &&
	    !bch2_extent_is_fully_allocated(k))
		return BTREE_INSERT_ENOSPC;

	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));

	/* XXX: inode->i_size locking */
	if (offset > inode->ei_inode.bi_size) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_INSERT_NEED_TRAVERSE;

		BUG_ON(h->inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY);

		h->inode_u.bi_size = offset;

		inode->ei_inode.bi_size = offset;

		i_size_write(&inode->v, offset);

	if (!h->need_inode_update) {
		h->need_inode_update = true;
		return BTREE_INSERT_NEED_TRAVERSE;

	h->inode_u.bi_sectors += sectors;

	h->op->sectors_added += sectors;

	bch2_inode_pack(&h->inode_p, &h->inode_u);

	return BTREE_INSERT_OK;
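
/*
 * (The need_inode_update two-pass dance, as driven by
 * bchfs_write_index_update() below: on the first pass the hook notices the
 * inode itself must change, sets need_inode_update and returns
 * BTREE_INSERT_NEED_TRAVERSE; the caller then links the inode iterator and
 * retries, and on the second pass the hook fills in inode_u/inode_p so the
 * extent and the inode are updated in one atomic btree transaction.)
 */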
static int bchfs_write_index_update(struct bch_write_op *wop)
	struct bchfs_write_op *op = container_of(wop,
				struct bchfs_write_op, op);
	struct keylist *keys = &op->op.insert_keys;
	struct btree_iter extent_iter, inode_iter;
	struct bchfs_extent_trans_hook hook;
	struct bkey_i *k = bch2_keylist_front(keys);
	s64 orig_sectors_added = op->sectors_added;

	BUG_ON(k->k.p.inode != op->inode->v.i_ino);

	bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
	bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
			     POS(extent_iter.pos.inode, 0),

	hook.hook.fn		= bchfs_extent_update_hook;
	hook.need_inode_update	= false;

		ret = bch2_btree_iter_traverse(&extent_iter);

		/* XXX: inode->i_size locking */
		k = bch2_keylist_front(keys);
		if (min(k->k.p.offset << 9, op->new_i_size) >
		    op->inode->ei_inode.bi_size)
			hook.need_inode_update = true;

		if (hook.need_inode_update) {
			struct bkey_s_c inode;

			if (!btree_iter_linked(&inode_iter))
				bch2_btree_iter_link(&extent_iter, &inode_iter);

			inode = bch2_btree_iter_peek_with_holes(&inode_iter);
			if ((ret = btree_iter_err(inode)))

			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
				      "inode %llu not found when updating",
				      extent_iter.pos.inode)) {

			if (WARN_ONCE(bkey_bytes(inode.k) >
				      sizeof(hook.inode_p),
				      "inode %llu too big (%zu bytes, buf %zu)",
				      extent_iter.pos.inode,
				      sizeof(hook.inode_p))) {

			bkey_reassemble(&hook.inode_p.inode.k_i, inode);

			ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
				      "error %i unpacking inode %llu",
				      ret, extent_iter.pos.inode)) {

			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k),
					BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
						&hook.inode_p.inode.k_i, 2));

			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
					BTREE_INSERT_ENTRY(&extent_iter, k));

		BUG_ON(bkey_cmp(extent_iter.pos, bkey_start_pos(&k->k)));
		BUG_ON(!ret != !k->k.size);

		BUG_ON(bkey_cmp(extent_iter.pos, k->k.p) < 0);
		bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));

	bch2_btree_iter_unlock(&extent_iter);
	bch2_btree_iter_unlock(&inode_iter);

	i_sectors_acct(wop->c, op->inode,
		       op->sectors_added - orig_sectors_added);
static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
					struct bch_inode_info *inode,
					struct bch_io_opts opts,
	op->sectors_added	= 0;
	op->new_i_size		= U64_MAX;

	bch2_write_op_init(&op->op, c);
	op->op.csum_type	= bch2_data_checksum_type(c, opts.data_checksum);
	op->op.compression_type	= bch2_compression_opt_to_type(opts.compression);
	op->op.devs		= c->fastest_devs;
	op->op.index_update_fn	= bchfs_write_index_update;
	op_journal_seq_set(&op->op, &inode->ei_journal_seq);

static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
	struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);

	bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));

/* stored in page->private: */

/*
 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we
 * could almost protect it with the page lock, except that
 * bch2_writepage_io_done has to update the sector counts (and from
 * interrupt/bottom half context).
 */
struct bch_page_state {
	/*
	 * page is _fully_ written on disk, and not compressed - which means to
	 * write this page we don't have to reserve space (the new write will
	 * never take up more space on disk than what it's overwriting)
	 */
	unsigned		allocated:1;

	/* Owns PAGE_SECTORS sized reservation: */
	unsigned		nr_replicas:4;

	/*
	 * Number of sectors on disk - for i_blocks
	 * Uncompressed size, not compressed size:
	 */

#define page_state_cmpxchg(_ptr, _new, _expr)				\
	unsigned long _v = READ_ONCE((_ptr)->v);			\
	struct bch_page_state _old;					\
	_old.v = _new.v = _v;						\
	EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);	\
	} while (_old.v != _new.v &&					\
		 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v);
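
/*
 * (Usage sketch - this is exactly how bch2_set_page_dirty() below uses it:
 *
 *	old = page_state_cmpxchg(page_state(page), new,
 *		new.dirty_sectors = PAGE_SECTORS - new.sectors;
 *	);
 *
 * _expr edits @_new; the loop retries the cmpxchg until the new state is
 * published without a concurrent modification racing in between, and the
 * macro evaluates to the old state.)
 */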
static inline struct bch_page_state *page_state(struct page *page)
	struct bch_page_state *s = (void *) &page->private;

	BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));

	if (!PagePrivate(page))
		SetPagePrivate(page);

static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	s = page_state_cmpxchg(page_state(page), s, {

	bch2_disk_reservation_put(c, &res);

static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
	struct bch_page_state *s = page_state(page), new;
	struct disk_reservation res;

	BUG_ON(s->allocated && s->sectors != PAGE_SECTORS);

	if (s->allocated || s->reserved)

	ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
					? BCH_DISK_RESERVATION_NOFAIL : 0);

	page_state_cmpxchg(s, new, {
		bch2_disk_reservation_put(c, &res);
		new.nr_replicas = res.nr_replicas;

static void bch2_clear_page_bits(struct page *page)
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct disk_reservation res = { .sectors = PAGE_SECTORS };
	struct bch_page_state s;

	if (!PagePrivate(page))

	s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
	ClearPagePrivate(page);

	i_sectors_acct(c, inode, -s.dirty_sectors);

	bch2_disk_reservation_put(c, &res);

int bch2_set_page_dirty(struct page *page)
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state old, new;

	old = page_state_cmpxchg(page_state(page), new,
		new.dirty_sectors = PAGE_SECTORS - new.sectors;

	if (old.dirty_sectors != new.dirty_sectors)
		i_sectors_acct(c, inode, new.dirty_sectors - old.dirty_sectors);

	return __set_page_dirty_nobuffers(page);

/* readpages/writepages: */

static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	return bio->bi_vcnt < bio->bi_max_vecs &&
		bio_end_sector(bio) == offset;

static void __bio_add_page(struct bio *bio, struct page *page)
	bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
	bio->bi_iter.bi_size += PAGE_SIZE;

static int bio_add_page_contig(struct bio *bio, struct page *page)
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	BUG_ON(!bio->bi_max_vecs);

		bio->bi_iter.bi_sector = offset;
	else if (!bio_can_add_page_contig(bio, page))

	__bio_add_page(bio, page);

static void bch2_readpages_end_io(struct bio *bio)
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);

			ClearPageUptodate(page);

struct readpages_iter {
	struct address_space	*mapping;
	struct list_head	pages;

static int readpage_add_page(struct readpages_iter *iter, struct page *page)
	struct bch_page_state *s = page_state(page);

	prefetchw(&page->flags);
	ret = add_to_page_cache_lru(page, iter->mapping,
				    page->index, GFP_NOFS);

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
	while (iter->nr_pages) {
			list_last_entry(&iter->pages, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!readpage_add_page(iter, page))

#define for_each_readpage_page(_iter, _page)				\
	     ((_page) = __readpage_next_page(&(_iter)));)

static void bch2_mark_pages_unalloc(struct bio *bio)
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		page_state(bv.bv_page)->allocated = 0;

static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = page_state(bv.bv_page);

		/* sectors in @k from the start of this page: */
		unsigned k_sectors = k.k->size -
			(iter.bi_sector - bkey_start_offset(k.k));

		unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);

			s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);

			s->nr_replicas = min_t(unsigned, s->nr_replicas,
					       bch2_extent_nr_dirty_ptrs(k));

		BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
		s->sectors += page_sectors;
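
/*
 * (Arithmetic note: a bkey's p.offset is the extent's *end* position, so
 * the sectors of @k remaining from iter.bi_sector are
 * size - (bi_sector - start_offset) == p.offset - bi_sector. E.g. with 4K
 * pages, an extent ending 3 sectors into the current page contributes
 * min(8, 3) = 3 of the page's 8 sectors.)
 */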
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio, u64 offset,
	while (bio_end_sector(bio) < offset &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;

		if (iter->nr_pages) {
			page = list_last_entry(&iter->pages, struct page, lru);
			if (page->index != page_offset)

			list_del(&page->lru);
		} else if (get_more) {
			page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);

			if (page && !radix_tree_exceptional_entry(page))

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));

			page->index = page_offset;
			ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);

		ret = readpage_add_page(iter, page);

		__bio_add_page(bio, page);

	SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);

static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
	struct bio *bio = &rbio->bio;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;

		struct extent_pick_ptr pick;

		bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));

		k = bch2_btree_iter_peek_with_holes(iter);
			int ret = bch2_btree_iter_unlock(iter);

			bcache_io_error(c, bio, "btree IO error %i", ret);

		bkey_reassemble(&tmp.k, k);
		bch2_btree_iter_unlock(iter);
		k = bkey_i_to_s_c(&tmp.k);

		bch2_extent_pick_ptr(c, k, NULL, &pick);
		if (IS_ERR(pick.ca)) {
			bcache_io_error(c, bio, "no device to read from");

			readpage_bio_extend(readpages_iter,
					(pick.crc.csum_type ||
					 pick.crc.compression_type));

		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
			 bio->bi_iter.bi_sector) << 9;
		is_last = bytes == bio->bi_iter.bi_size;
		swap(bio->bi_iter.bi_size, bytes);

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(bio, k);

		if (!bch2_extent_is_fully_allocated(k))
			bch2_mark_pages_unalloc(bio);

			bio_inc_remaining(&rbio->bio);
			flags |= BCH_READ_MUST_CLONE;
			trace_read_split(&rbio->bio);

		bch2_read_extent(c, rbio, bkey_s_c_to_extent(k),

		swap(bio->bi_iter.bi_size, bytes);
		bio_advance(bio, bytes);
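
/*
 * (The swap() pair above is a windowing trick: bi_size is temporarily
 * clamped to just the bytes covered by the current extent, so
 * bch2_read_extent() sees a bio for exactly that range; afterwards the
 * full size is restored and the bio advanced past what was submitted.
 * When this isn't the last extent, bio_inc_remaining() and
 * BCH_READ_MUST_CLONE keep the parent bio alive so it can be submitted
 * again for the next extent.)
 */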
int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct btree_iter iter;

	struct readpages_iter readpages_iter = {
		.mapping = mapping, .nr_pages = nr_pages

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);

	INIT_LIST_HEAD(&readpages_iter.pages);
	list_add(&readpages_iter.pages, pages);
	list_del_init(pages);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		unsigned n = max_t(unsigned,
				   min_t(unsigned, readpages_iter.nr_pages + 1,
				   c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);

		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),

		rbio->bio.bi_end_io = bch2_readpages_end_io;
		bio_add_page_contig(&rbio->bio, page);
		bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
	struct btree_iter iter;

	/*
	 * Initialize page state:
	 * If a page is partly allocated and partly a hole, we want it to be
	 * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages
	 * allocated and then mark them unallocated as we find holes:
	 *
	 * Note that the bio hasn't been split yet - it's the only bio that
	 * points to these pages. As we walk extents and split @bio, that
	 * won't necessarily remain true, and the splits won't necessarily be
	 * on page boundaries:
	 */
	struct bch_page_state *s = page_state(page);

	EBUG_ON(s->reserved);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	bio_add_page_contig(&rbio->bio, page);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
	bchfs_read(c, &iter, rbio, inum, NULL);

int bch2_readpage(struct file *file, struct page *page)
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
					struct bch_inode_info *inode)
	return (struct bch_writepage_state) { .opts = io_opts(c, inode) };

static void bch2_writepage_io_free(struct closure *cl)
	struct bch_writepage_io *io = container_of(cl,
				struct bch_writepage_io, cl);

	bio_put(&io->op.op.wbio.bio);

static void bch2_writepage_io_done(struct closure *cl)
	struct bch_writepage_io *io = container_of(cl,
				struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.op.c;
	struct bio *bio = &io->op.op.wbio.bio;
	struct bio_vec *bvec;

	atomic_sub(bio->bi_vcnt, &c->writeback_pages);
	wake_up(&c->writeback_wait);

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (io->op.op.error) {
			set_bit(AS_EIO, &page->mapping->flags);

		if (io->op.op.written >= PAGE_SECTORS) {
			struct bch_page_state old, new;

			old = page_state_cmpxchg(page_state(page), new, {
				new.sectors = PAGE_SECTORS;
				new.dirty_sectors = 0;

			io->op.sectors_added -= old.dirty_sectors;
			io->op.op.written -= PAGE_SECTORS;

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 * (error (due to going RO) halfway through a page can screw that up)
	 */
	BUG_ON(io->op.sectors_added >= (s64) PAGE_SECTORS);

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	if (io->op.sectors_added)
		i_sectors_acct(c, io->op.inode, io->op.sectors_added);

	bio_for_each_segment_all(bvec, bio, i)
		end_page_writeback(bvec->bv_page);

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
static void bch2_writepage_do_io(struct bch_writepage_state *w)
	struct bch_writepage_io *io = w->io;
	struct bio *bio = &io->op.op.wbio.bio;

	atomic_add(bio->bi_vcnt, &io->op.op.c->writeback_pages);

	closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    struct bch_page_state s)
	struct bch_write_op *op;
	u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.op.wbio.bio);

	closure_init(&w->io->cl, NULL);

	bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
	op->nr_replicas		= s.nr_replicas;
	op->res.nr_replicas	= s.nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, offset);
	op->wbio.bio.bi_iter.bi_sector = offset;

static int __bch2_writepage(struct bch_fs *c, struct page *page,
			    struct writeback_control *wbc,
			    struct bch_writepage_state *w)
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_page_state new, old;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
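	/*
	 * (Worked example of the straddle case with 4K pages: i_size = 10000
	 * gives end_index = 2 and offset = 10000 & 4095 = 1808; page index 2
	 * straddles i_size, so bytes 1808..4095 are zeroed on every
	 * writepage call, since a concurrent mmap write may have dirtied
	 * them.)
	 */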
	/* Before unlocking the page, transfer reservation to w->io: */
	old = page_state_cmpxchg(page_state(page), new, {
		EBUG_ON(!new.reserved &&
			(new.sectors != PAGE_SECTORS ||

		if (new.allocated && w->opts.compression)

		else if (!new.reserved)

	    (w->io->op.op.res.nr_replicas != old.nr_replicas ||
	     !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
		bch2_writepage_do_io(w);

		bch2_writepage_io_alloc(c, w, inode, page, old);

	BUG_ON(inode != w->io->op.inode);
	BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));

	w->io->op.op.res.sectors += old.nr_replicas * PAGE_SECTORS;

	/* while page is locked: */
	w->io->op.new_i_size = i_size;

	if (wbc->sync_mode == WB_SYNC_ALL)
		w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct pagecache_iter iter;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t end;		/* Inclusive */
	int range_whole = 0;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;

		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)

		cycled = 1; /* ignore range_cyclic tests */

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;

		tag = PAGECACHE_TAG_DIRTY;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
		done_index = page->index;

		    !bio_can_add_page_contig(&w.io->op.op.wbio.bio, page))
			bch2_writepage_do_io(&w);

		    atomic_read(&c->writeback_pages) >=
		    c->writeback_pages_max) {
			/* don't sleep with pages pinned: */
			pagecache_iter_release(&iter);

			__wait_event(c->writeback_wait,
				     atomic_read(&c->writeback_pages) <
				     c->writeback_pages_max);

		/*
		 * Page truncated or invalidated. We can freely skip it
		 * then, even for data integrity operations: the page
		 * has disappeared concurrently, so there could be no
		 * real expectation of this data integrity operation
		 * even if there is now a new, dirty page at the same
		 * pagecache address.
		 */
		if (unlikely(page->mapping != mapping)) {

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			goto continue_unlock;

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
		ret = __bch2_writepage(c, page, wbc, &w);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {

			/*
			 * done_index is set past this page,
			 * so media errors will not choke
			 * background writeout for the entire
			 * file. This has consequences for
			 * range_cyclic semantics (ie. it may
			 * not be suitable for data integrity
			 * writeout).
			 */
			done_index = page->index + 1;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE) {

	pagecache_iter_release(&iter);

	bch2_writepage_do_io(&w);

	if (!cycled && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		end = writeback_index - 1;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;
int bch2_writepage(struct page *page, struct writeback_control *wbc)
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));

	ret = __bch2_writepage(c, page, wbc, &w);

	bch2_writepage_do_io(&w);

static void bch2_read_single_page_end_io(struct bio *bio)
	complete(bio->bi_private);

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;

	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	SetPageUptodate(page);
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);

	BUG_ON(inode_unhashed(&inode->v));

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);

	if (PageUptodate(page))

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);

	ret = bch2_read_single_page(page, mapping);

	ret = bch2_get_page_reservation(c, page, true);

	if (!PageUptodate(page)) {
		/*
		 * If the page hasn't been read in, we won't know if we
		 * actually need a reservation - we don't actually need
		 * to read here, we just need to check if the page is
		 * fully backed by uncompressed data:
		 */

	pagecache_add_put(&mapping->add_lock);

int bch2_write_end(struct file *filp, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);

	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page))
		set_page_dirty(page);

	inode->ei_last_dirtied = (unsigned long) current;

	bch2_put_page_reservation(c, page);

	pagecache_add_put(&mapping->add_lock);
static void bch2_dio_read_complete(struct closure *cl)
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */

static void bch2_direct_IO_read_endio(struct bio *bio)
	struct dio_read *dio = bio->bi_private;

	dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);

static void bch2_direct_IO_read_split_endio(struct bio *bio)
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct dio_read *dio;

	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);

	if ((offset|iter->count) & (block_bytes(c) - 1))

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));
	iov_iter_truncate(iter, round_up(ret, block_bytes(c)));

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_DESTRUCTOR);

		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
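
	/*
	 * (What the refcount hack buys us - inferred from the sync path at
	 * the end of this function: in the async case the closure's
	 * destructor slot is armed, so the final closure_put() from the last
	 * split bio's endio runs bch2_dio_read_complete() directly, in
	 * whatever context that bio completes. In the sync case one extra
	 * reference is held instead, so closure_sync() below waits for every
	 * split to complete before the kiocb is completed synchronously.)
	 */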
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
		bio->bi_end_io = bch2_direct_IO_read_split_endio;

		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_iov_iter_get_pages(bio, iter);

			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);

	closure_sync(&dio->cl);
	closure_debug_destroy(&dio->cl);

	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */

	return -EIOCBQUEUED;
static void bch2_dio_write_loop_async(struct closure *);

static long bch2_dio_write_loop(struct dio_write *dio)
	struct kiocb *req = dio->req;
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bio *bio = &dio->iop.op.wbio.bio;

	inode_dio_begin(&inode->v);
	__pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
				req->ki_pos + iov_iter_count(&dio->iter) - 1);

	BUG_ON(current->pagecache_lock);
	current->pagecache_lock = &mapping->add_lock;
	if (current != dio->task)
		use_mm(dio->task->mm);

	ret = bio_iov_iter_get_pages(bio, &dio->iter);

	if (current != dio->task)
		unuse_mm(dio->task->mm);
	current->pagecache_lock = NULL;

	if (unlikely(ret < 0))

	dio->iop.op.pos = POS(inode->v.i_ino,
			      (req->ki_pos >> 9) + dio->iop.op.written);

	task_io_account_write(bio->bi_iter.bi_size);

	closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);

	if (!dio->sync && !dio->loop && dio->iter.count) {
		struct iovec *iov = dio->inline_vecs;

		if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
			iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
			if (unlikely(!iov)) {
				dio->iop.op.error = -ENOMEM;

			dio->free_iov = true;

		memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
		dio->iter.iov = iov;

		continue_at_noreturn(&dio->cl,
				     bch2_dio_write_loop_async, NULL);
		return -EIOCBQUEUED;

	closure_sync(&dio->cl);

	bio_for_each_segment_all(bv, bio, i)
		put_page(bv->bv_page);

	if (!dio->iter.count || dio->iop.op.error)

	ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);

	__pagecache_block_put(&mapping->add_lock);
	inode_dio_end(&inode->v);
	bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);

	kfree(dio->iter.iov);

	closure_debug_destroy(&dio->cl);

	req->ki_complete(req, ret, 0);

static void bch2_dio_write_loop_async(struct closure *cl)
	struct dio_write *dio = container_of(cl, struct dio_write, cl);

	bch2_dio_write_loop(dio);
static int bch2_direct_IO_write(struct kiocb *req,
				struct iov_iter *iter,
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;

	loff_t offset = req->ki_pos;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(!iter->count))

	if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
	closure_init(&dio->cl, NULL);

	dio->task	= current;

	dio->sync	= is_sync_kiocb(req) ||
		offset + iter->count > inode->v.i_size;
	dio->free_iov	= false;

	bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
	dio->iop.op.write_point	= writepoint_hashed((unsigned long) dio->task);
	dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;

	if ((req->ki_flags & IOCB_DSYNC) &&
	    !c->opts.journal_flush_disabled)
		dio->iop.op.flags |= BCH_WRITE_FLUSH;

	ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9, 0);
	if (unlikely(ret)) {
		if (bch2_check_range_allocated(c, POS(inode->v.i_ino,

		dio->iop.unalloc = true;
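
	/*
	 * (The fallback above, in brief: if we can't reserve space for the
	 * whole write but the range being written is already fully allocated
	 * on disk, the overwrite can proceed without a reservation -
	 * dio->iop.unalloc makes bchfs_extent_update_hook() return
	 * BTREE_INSERT_ENOSPC if it ever sees a not-fully-allocated extent,
	 * keeping that path safe.)
	 */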
	dio->iop.op.nr_replicas	= dio->iop.op.res.nr_replicas;

	return bch2_dio_write_loop(dio);

	bch2_disk_reservation_put(c, &dio->iop.op.res);
	closure_debug_destroy(&dio->cl);

ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = iov_iter_rw(iter) == WRITE
		? bch2_direct_IO_write(req, iter, false)
		: bch2_direct_IO_read(req, iter);
	blk_finish_plug(&plug);

bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
	return bch2_direct_IO_write(iocb, iter, true);

static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	ret = file_remove_privs(file);

	ret = file_update_time(file);

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch2_direct_write(iocb, from)
		: generic_perform_write(file, from, iocb->ki_pos);

	if (likely(ret > 0))
		iocb->ki_pos += ret;

	current->backing_dev_info = NULL;

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
	bool direct = iocb->ki_flags & IOCB_DIRECT;

	inode_lock(&inode->v);
	ret = generic_write_checks(iocb, from);
	ret = __bch2_write_iter(iocb, from);
	inode_unlock(&inode->v);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);
int bch2_page_mkwrite(struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	if (page->mapping != mapping ||
	    page_offset(page) > i_size_read(&inode->v)) {
		ret = VM_FAULT_NOPAGE;

	if (bch2_get_page_reservation(c, page, true)) {
		ret = VM_FAULT_SIGBUS;

	if (!PageDirty(page))
		set_page_dirty(page);
	wait_for_stable_page(page);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->v.i_sb);

void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (offset || length < PAGE_SIZE)

	bch2_clear_page_bits(page);

int bch2_releasepage(struct page *page, gfp_t gfp_mask)
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (PageDirty(page))

	bch2_clear_page_bits(page);

#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)

	if (PagePrivate(page)) {
		*page_state(newpage) = *page_state(page);
		ClearPagePrivate(page);

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);

	if (c->opts.journal_flush_disabled)

	return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);

static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))

	if (index << PAGE_SHIFT >= inode->v.i_size)

	page = find_lock_page(mapping, index);
		struct btree_iter iter;
		struct bkey_s_c k = bkey_s_c_null;

		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
				   index << PAGE_SECTOR_SHIFT), 0, k) {
			if (bkey_cmp(bkey_start_pos(k.k),
				     (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)

			if (k.k->type != KEY_TYPE_DISCARD &&
			    k.k->type != BCH_RESERVATION) {
				bch2_btree_iter_unlock(&iter);

		bch2_btree_iter_unlock(&iter);

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has
	 * actual data in it (vs. just 0s, or only partially written) this is
	 * wrong. ick.
	 */
	ret = bch2_get_page_reservation(c, page, false);

	if (index == start >> PAGE_SHIFT &&
	    index == end >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, end_offset);
	else if (index == start >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, PAGE_SIZE);
	else if (index == end >> PAGE_SHIFT)
		zero_user_segment(page, 0, end_offset);

	if (!PageDirty(page))
		set_page_dirty(page);

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, from + PAGE_SIZE);
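
/*
 * (Boundary math by example, with 4K pages: from = 6000 gives index = 1,
 * start_offset = 6000 & 4095 = 1904, so bytes 1904..4095 of page 1 are
 * zeroed and the page dirtied; pages lying entirely inside or outside
 * [start, end) are skipped by the "Page boundary?" check above.)
 */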
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	bool shrink = iattr->ia_size <= inode->v.i_size;
	struct i_sectors_hook i_sectors_hook =
		i_sectors_hook_init(inode, BCH_INODE_I_SIZE_DIRTY);

	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	truncate_setsize(&inode->v, iattr->ia_size);

	/* sync appends.. */
	/* XXX what protects inode->i_size? */
	if (iattr->ia_size > inode->ei_inode.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode->ei_inode.bi_size, S64_MAX);

		goto err_put_pagecache;

	i_sectors_hook.new_i_size = iattr->ia_size;

	ret = i_sectors_dirty_start(c, &i_sectors_hook);

	/*
	 * There might be persistent reservations (from fallocate())
	 * above i_size, which bch2_inode_truncate() will discard - we're
	 * only supposed to discard them if we're doing a real truncate
	 * here (new i_size < current i_size):
	 */
	ret = bch2_truncate_page(inode, iattr->ia_size);

	ret = bch2_inode_truncate(c, inode->v.i_ino,
				  round_up(iattr->ia_size, PAGE_SIZE) >> 9,
				  &i_sectors_hook.hook,
				  &inode->ei_journal_seq);

	setattr_copy(&inode->v, iattr);
	inode->v.i_mtime = inode->v.i_ctime = current_time(&inode->v);

	/*
	 * On error - in particular, bch2_truncate_page() error - don't clear
	 * I_SIZE_DIRTY, as we've left data above i_size!:
	 */
	i_sectors_hook.flags &= ~BCH_INODE_I_SIZE_DIRTY;

	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	pagecache_block_put(&mapping->add_lock);
static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	u64 ino = inode->v.i_ino;
	u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
	u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		struct disk_reservation disk_res;
		struct i_sectors_hook i_sectors_hook =
			i_sectors_hook_init(inode, 0);

		ret = i_sectors_dirty_start(c, &i_sectors_hook);

		/*
		 * We need to pass in a disk reservation here because we might
		 * be splitting a compressed extent into two. This isn't a
		 * problem with truncate because truncate will never split an
		 * extent, only truncate it...
		 */
		ret = bch2_disk_reservation_get(c, &disk_res, 0, 0);

		ret = bch2_btree_delete_range(c,
				POS(ino, discard_start),
				POS(ino, discard_end),
				&i_sectors_hook.hook,
				&inode->ei_journal_seq);
		bch2_disk_reservation_put(c, &disk_res);

		ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
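
/*
 * (Rounding by example, with 4K pages: punching offset = 1000, len = 10000
 * gives discard_start = round_up(1000, 4096) >> 9 = 8 and discard_end =
 * round_down(11000, 4096) >> 9 = 16, so whole sectors 8..16 are deleted
 * from the extents btree, while the partial pages at either end were
 * zeroed above via __bch2_truncate_page().)
 */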
static long bch2_fcollapse(struct bch_inode_info *inode,
			   loff_t offset, loff_t len)
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct btree_iter src;
	struct btree_iter dst;
	BKEY_PADDED(k) copy;

	struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);

	if ((offset | len) & (PAGE_SIZE - 1))

	bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
			     POS(inode->v.i_ino, offset >> 9),
	/* position will be set from dst iter's position: */
	bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0);
	bch2_btree_iter_link(&src, &dst);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	if (offset + len >= inode->v.i_size)

	if (inode->v.i_size < len)

	new_size = inode->v.i_size - len;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);

	ret = i_sectors_dirty_start(c, &i_sectors_hook);

	while (bkey_cmp(dst.pos,
			round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
		struct disk_reservation disk_res;

		bch2_btree_iter_set_pos(&src,
			POS(dst.pos.inode, dst.pos.offset + (len >> 9)));

		ret = bch2_btree_iter_traverse(&dst);
			goto btree_iter_err;

		k = bch2_btree_iter_peek_with_holes(&src);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		bkey_reassemble(&copy.k, k);

		if (bkey_deleted(&copy.k.k))
			copy.k.k.type = KEY_TYPE_DISCARD;

		bch2_cut_front(src.pos, &copy.k);
		copy.k.k.p.offset -= len >> 9;

		BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));

		ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
						BCH_DISK_RESERVATION_NOFAIL);

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
				&inode->ei_journal_seq,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL,
				BTREE_INSERT_ENTRY(&dst, &copy.k));
		bch2_disk_reservation_put(c, &disk_res);

			goto err_put_sectors_dirty;

		/*
		 * XXX: if we error here we've left data with multiple
		 * pointers... which isn't a _super_ serious problem...
		 */

		bch2_btree_iter_cond_resched(&src);

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);

	ret = bch2_inode_truncate(c, inode->v.i_ino,
				  round_up(new_size, PAGE_SIZE) >> 9,
				  &i_sectors_hook.hook,
				  &inode->ei_journal_seq);
		goto err_put_sectors_dirty;

	i_size_write(&inode->v, new_size);
	i_sectors_hook.new_i_size = new_size;
err_put_sectors_dirty:
	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);
static long bch2_fallocate(struct bch_inode_info *inode, int mode,
			   loff_t offset, loff_t len)
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
	struct btree_iter iter;
	struct bpos end_pos;
	loff_t block_start, block_end;
	loff_t end = offset + len;

	unsigned replicas = READ_ONCE(c->opts.data_replicas);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch2_truncate_page(inode,
					   offset >> PAGE_SHIFT,

		    offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
			ret = __bch2_truncate_page(inode,

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start	= round_up(offset, PAGE_SIZE);
		block_end	= round_down(end, PAGE_SIZE);

		block_start	= round_down(offset, PAGE_SIZE);
		block_end	= round_up(end, PAGE_SIZE);
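
	/*
	 * (Why the rounding differs - an inference, not stated in the
	 * original: for ZERO_RANGE the partial head/tail pages were already
	 * zeroed in the page cache above, so only whole pages inside the
	 * range need btree reservations (round inward); plain fallocate must
	 * cover every block the byte range touches, so it rounds outward.)
	 */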
	bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
	end_pos = POS(inode->v.i_ino, block_end >> 9);

	ret = i_sectors_dirty_start(c, &i_sectors_hook);

	while (bkey_cmp(iter.pos, end_pos) < 0) {
		struct disk_reservation disk_res = { 0 };
		struct bkey_i_reservation reservation;

		k = bch2_btree_iter_peek_with_holes(&iter);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		/* already reserved */
		if (k.k->type == BCH_RESERVATION &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_advance_pos(&iter);

		if (bkey_extent_is_data(k.k)) {
			if (!(mode & FALLOC_FL_ZERO_RANGE)) {
				bch2_btree_iter_advance_pos(&iter);

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type	= BCH_RESERVATION;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch2_cut_front(iter.pos, &reservation.k_i);
		bch2_cut_back(end_pos, &reservation.k);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);

		if (reservation.v.nr_replicas < replicas ||
		    bch2_extent_is_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res,
				goto err_put_sectors_dirty;

			reservation.v.nr_replicas = disk_res.nr_replicas;

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
				&inode->ei_journal_seq,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL,
				BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
		bch2_disk_reservation_put(c, &disk_res);

		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
	bch2_btree_iter_unlock(&iter);

	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    end > inode->v.i_size) {
		i_size_write(&inode->v, end);

		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, inode->v.i_size);
		mutex_unlock(&inode->ei_update_lock);

	if ((mode & FALLOC_FL_KEEP_SIZE) &&
	    (mode & FALLOC_FL_ZERO_RANGE) &&
	    inode->ei_inode.bi_size != inode->v.i_size) {
		/* sync appends.. */
		ret = filemap_write_and_wait_range(mapping,
				inode->ei_inode.bi_size, S64_MAX);

		if (inode->ei_inode.bi_size != inode->v.i_size) {
			mutex_lock(&inode->ei_update_lock);
			ret = bch2_write_inode_size(c, inode, inode->v.i_size);
			mutex_unlock(&inode->ei_update_lock);

	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

err_put_sectors_dirty:
	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	bch2_btree_iter_unlock(&iter);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
	struct bch_inode_info *inode = file_bch_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bch2_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bch2_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bch2_fcollapse(inode, offset, len);
static bool page_is_data(struct page *page)
	/* XXX: should only have to check PageDirty */
	return PagePrivate(page) &&
		(page_state(page)->sectors ||
		 page_state(page)->dirty_sectors);

static loff_t bch2_next_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
	struct address_space *mapping = vinode->i_mapping;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
		if (find_get_pages(mapping, index, 1, &page)) {
			index = page->index;

			if (page_is_data(page))
				((loff_t) index) << PAGE_SHIFT));

static loff_t bch2_seek_data(struct file *file, u64 offset)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;

	u64 isize, next_data = MAX_LFS_FILESIZE;

	isize = i_size_read(&inode->v);
	if (offset >= isize)

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9), 0, k) {
		if (k.k->p.inode != inode->v.i_ino) {
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
		} else if (k.k->p.offset << 9 > isize)

	ret = bch2_btree_iter_unlock(&iter);

	if (next_data > offset)
		next_data = bch2_next_pagecache_data(&inode->v,

	if (next_data > isize)

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
	page = find_lock_entry(mapping, index);
	if (!page || radix_tree_exception(page))

	ret = page_is_data(page);

static loff_t bch2_next_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
	struct address_space *mapping = vinode->i_mapping;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
		if (!page_slot_is_data(mapping, index))
			end_offset = max(start_offset,
					 ((loff_t) index) << PAGE_SHIFT);

static loff_t bch2_seek_hole(struct file *file, u64 offset)
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;

	u64 isize, next_hole = MAX_LFS_FILESIZE;

	isize = i_size_read(&inode->v);
	if (offset >= isize)

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9),
			   BTREE_ITER_WITH_HOLES, k) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)

			offset = max(offset, bkey_start_offset(k.k) << 9);

	ret = bch2_btree_iter_unlock(&iter);

	if (next_hole > isize)

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
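
/*
 * (SEEK_HOLE consults both the extents btree and the page cache: dirty
 * pages may hold data not yet written to any extent, so a btree hole only
 * counts if bch2_next_pagecache_hole() finds no cached data covering it -
 * the mirror image of bch2_seek_data() above.)
 */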
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
		return generic_file_llseek(file, offset, whence);

		return bch2_seek_data(file, offset);

		return bch2_seek_hole(file, offset);

void bch2_fs_fsio_exit(struct bch_fs *c)
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);

int bch2_fs_fsio_init(struct bch_fs *c)
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, iop.op.wbio.bio),

#endif /* NO_BCACHEFS_FS */