#include "btree_update.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
struct i_sectors_hook {
	struct extent_insert_hook	hook;
	struct bch_inode_info		*inode;
	struct quota_res		quota_res;
	s64				sectors;
	u64				new_i_size;
	unsigned			flags;
};
struct bchfs_write_op {
	struct bch_inode_info		*inode;
	s64				sectors_added;
	bool				is_dio;
	bool				unalloc;
	u64				new_i_size;

	/* must be last: */
	struct bch_write_op		op;
};

struct bch_writepage_io {
	struct closure			cl;
	u64				new_sectors;

	/* must be last: */
	struct bchfs_write_op		op;
};
struct dio_write {
	struct closure			cl;
	struct kiocb			*req;
	struct task_struct		*task;
	bool				loop;
	bool				sync;
	bool				free_iov;
	struct quota_res		quota_res;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bchfs_write_op		iop;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;
	struct bch_read_bio		rbio;
};
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a
	 * process is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages &&
		    !mapping->nrexceptional)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), BCH_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? BCH_QUOTA_PREALLOC : BCH_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}
#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
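/*
 * Illustrative sketch, not part of the original file (the example_* name is
 * hypothetical): a caller pairs bch2_quota_reservation_add() with
 * bch2_quota_reservation_put() - the reservation travels in a struct
 * quota_res and is either consumed by i_sectors_acct() below or released
 * again on error:
 */
static int __maybe_unused example_quota_reserve(struct bch_fs *c,
						struct bch_inode_info *inode,
						unsigned sectors)
{
	struct quota_res res = { 0 };
	int ret;

	ret = bch2_quota_reservation_add(c, inode, &res, sectors, true);
	if (ret)
		return ret;	/* over quota */

	/* ... do I/O that may consume part of the reservation ... */

	bch2_quota_reservation_put(c, inode, &res);
	return 0;
}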
/* i_size updates: */

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	loff_t *new_i_size = p;

	lockdep_assert_held(&inode->ei_update_lock);

	bi->bi_size = *new_i_size;
	return 0;
}

static int __must_check bch2_write_inode_size(struct bch_fs *c,
					      struct bch_inode_info *inode,
					      loff_t new_size)
{
	return __bch2_write_inode(c, inode, inode_set_size, &new_size);
}
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, int sectors)
{
	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
/* i_sectors accounting: */

static enum btree_insert_ret
i_sectors_hook_fn(struct extent_insert_hook *hook,
		  struct bpos committed_pos,
		  struct bpos next_pos,
		  struct bkey_s_c k,
		  const struct bkey_i *insert)
{
	struct i_sectors_hook *h = container_of(hook,
				struct i_sectors_hook, hook);
	s64 sectors = next_pos.offset - committed_pos.offset;
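	/*
	 * sign is +1 when the inserted key newly allocates space, -1 when an
	 * existing allocation is overwritten by a non-allocation, and 0 when
	 * the allocation status of the range doesn't change:
	 */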
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));

	EBUG_ON(!(h->inode->ei_inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY));

	h->sectors += sectors * sign;

	return BTREE_INSERT_OK;
}
static int i_sectors_dirty_finish_fn(struct bch_inode_info *inode,
				     struct bch_inode_unpacked *bi,
				     void *p)
{
	struct i_sectors_hook *h = p;

	if (h->new_i_size != U64_MAX &&
	    (h->new_i_size > bi->bi_size))
		bi->bi_size = h->new_i_size;
	bi->bi_sectors	+= h->sectors;
	bi->bi_flags	&= ~h->flags;
	return 0;
}
static int i_sectors_dirty_finish(struct bch_fs *c, struct i_sectors_hook *h)
{
	int ret;

	mutex_lock(&h->inode->ei_update_lock);
	if (h->new_i_size != U64_MAX)
		i_size_write(&h->inode->v, h->new_i_size);

	i_sectors_acct(c, h->inode, &h->quota_res, h->sectors);

	ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_finish_fn, h);
	mutex_unlock(&h->inode->ei_update_lock);

	bch2_quota_reservation_put(c, h->inode, &h->quota_res);

	return ret;
}
static int i_sectors_dirty_start_fn(struct bch_inode_info *inode,
				    struct bch_inode_unpacked *bi, void *p)
{
	struct i_sectors_hook *h = p;

	if (h->flags & BCH_INODE_I_SIZE_DIRTY)
		bi->bi_size = h->new_i_size;

	bi->bi_flags |= h->flags;
	return 0;
}

static int i_sectors_dirty_start(struct bch_fs *c, struct i_sectors_hook *h)
{
	int ret;

	mutex_lock(&h->inode->ei_update_lock);
	ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_start_fn, h);
	mutex_unlock(&h->inode->ei_update_lock);

	return ret;
}
static inline struct i_sectors_hook
i_sectors_hook_init(struct bch_inode_info *inode, unsigned flags)
{
	return (struct i_sectors_hook) {
		.hook.fn	= i_sectors_hook_fn,
		.inode		= inode,
		.sectors	= 0,
		.new_i_size	= U64_MAX,
		.flags		= flags|BCH_INODE_I_SECTORS_DIRTY,
	};
}
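/*
 * Illustrative sketch, not part of the original file (the example_* name is
 * hypothetical): the i_sectors hook lifecycle - mark the inode
 * I_SECTORS_DIRTY and start, pass &h.hook to the btree update(s) so
 * i_sectors_hook_fn() accumulates the sector delta, then finish to fold the
 * delta back into the on-disk inode:
 */
static int __maybe_unused example_i_sectors_update(struct bch_fs *c,
						   struct bch_inode_info *inode)
{
	struct i_sectors_hook h = i_sectors_hook_init(inode, 0);
	int ret;

	ret = i_sectors_dirty_start(c, &h);
	if (ret)
		return ret;

	/* ... btree updates here, passing &h.hook as the insert hook ... */

	return i_sectors_dirty_finish(c, &h) ?: ret;
}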
/* normal i_size/i_sectors update machinery: */

struct bchfs_extent_trans_hook {
	struct bchfs_write_op		*op;
	struct extent_insert_hook	hook;

	struct bch_inode_unpacked	inode_u;
	struct bkey_inode_buf		inode_p;

	bool				need_inode_update;
};
static enum btree_insert_ret
bchfs_extent_update_hook(struct extent_insert_hook *hook,
			 struct bpos committed_pos,
			 struct bpos next_pos,
			 struct bkey_s_c k,
			 const struct bkey_i *insert)
{
	struct bchfs_extent_trans_hook *h = container_of(hook,
			struct bchfs_extent_trans_hook, hook);
	struct bch_inode_info *inode = h->op->inode;
	int sign = bkey_extent_is_allocation(&insert->k) -
		(k.k && bkey_extent_is_allocation(k.k));
	s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
	u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
	bool do_pack = false;

	if (h->op->unalloc &&
	    !bch2_extent_is_fully_allocated(k))
		return BTREE_INSERT_ENOSPC;

	BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));

	/* XXX: inode->i_size locking */
	if (offset > inode->ei_inode.bi_size) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_INSERT_NEED_TRAVERSE;
		}

		BUG_ON(h->inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY);

		h->inode_u.bi_size = offset;
		do_pack = true;

		inode->ei_inode.bi_size = offset;

		i_size_write(&inode->v, offset);
	}

	if (sectors) {
		if (!h->need_inode_update) {
			h->need_inode_update = true;
			return BTREE_INSERT_NEED_TRAVERSE;
		}

		h->inode_u.bi_sectors += sectors;
		do_pack = true;

		h->op->sectors_added += sectors;
	}

	if (do_pack)
		bch2_inode_pack(&h->inode_p, &h->inode_u);

	return BTREE_INSERT_OK;
}
static int bchfs_write_index_update(struct bch_write_op *wop)
{
	struct bchfs_write_op *op = container_of(wop,
				struct bchfs_write_op, op);
	struct keylist *keys = &op->op.insert_keys;
	struct btree_iter extent_iter, inode_iter;
	struct bchfs_extent_trans_hook hook;
	struct bkey_i *k = bch2_keylist_front(keys);
	s64 orig_sectors_added = op->sectors_added;
	int ret;

	BUG_ON(k->k.p.inode != op->inode->v.i_ino);

	bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_INTENT);
	bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
			     POS(extent_iter.pos.inode, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	hook.hook.fn		= bchfs_extent_update_hook;
	hook.need_inode_update	= false;
	hook.op			= op;

	do {
		/* XXX: inode->i_size locking */
		k = bch2_keylist_front(keys);
		if (min(k->k.p.offset << 9, op->new_i_size) >
		    op->inode->ei_inode.bi_size)
			hook.need_inode_update = true;

		if (hook.need_inode_update) {
			struct bkey_s_c inode;

			if (!btree_iter_linked(&inode_iter))
				bch2_btree_iter_link(&extent_iter, &inode_iter);

			inode = bch2_btree_iter_peek_slot(&inode_iter);
			if ((ret = btree_iter_err(inode)))
				break;

			if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
				      "inode %llu not found when updating",
				      extent_iter.pos.inode)) {
				ret = -ENOENT;
				break;
			}

			if (WARN_ONCE(bkey_bytes(inode.k) >
				      sizeof(hook.inode_p),
				      "inode %llu too big (%zu bytes, buf %zu)",
				      extent_iter.pos.inode,
				      bkey_bytes(inode.k),
				      sizeof(hook.inode_p))) {
				ret = -ENOENT;
				break;
			}

			bkey_reassemble(&hook.inode_p.inode.k_i, inode);
			ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
						&hook.inode_u);
			if (WARN_ONCE(ret,
				      "error %i unpacking inode %llu",
				      ret, extent_iter.pos.inode)) {
				ret = -ENOENT;
				break;
			}

			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_USE_RESERVE,
					BTREE_INSERT_ENTRY(&extent_iter, k),
					BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
							&hook.inode_p.inode.k_i, 2));
		} else {
			ret = bch2_btree_insert_at(wop->c, &wop->res,
					&hook.hook, op_journal_seq(wop),
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_USE_RESERVE,
					BTREE_INSERT_ENTRY(&extent_iter, k));
		}

		BUG_ON(bkey_cmp(extent_iter.pos, bkey_start_pos(&k->k)));

		if (WARN_ONCE(!ret != !k->k.size,
			      "ret %i k->size %u", ret, k->k.size))
			ret = k->k.size ? -EINTR : 0;

		if (ret == -EINTR)
			continue;
		if (ret)
			break;

		BUG_ON(bkey_cmp(extent_iter.pos, k->k.p) < 0);
		bch2_keylist_pop_front(keys);
	} while (!bch2_keylist_empty(keys));

	bch2_btree_iter_unlock(&extent_iter);
	bch2_btree_iter_unlock(&inode_iter);

	if (op->is_dio) {
		struct dio_write *dio = container_of(op, struct dio_write, iop);

		i_sectors_acct(wop->c, op->inode, &dio->quota_res,
			       op->sectors_added - orig_sectors_added);
	}

	return ret;
}
static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
					struct bch_fs *c,
					struct bch_inode_info *inode,
					struct bch_io_opts opts,
					bool is_dio)
{
	op->inode		= inode;
	op->sectors_added	= 0;
	op->is_dio		= is_dio;
	op->unalloc		= false;
	op->new_i_size		= U64_MAX;

	bch2_write_op_init(&op->op, c, opts);
	op->op.target		= opts.foreground_target;
	op->op.index_update_fn	= bchfs_write_index_update;
	op_journal_seq_set(&op->op, &inode->ei_journal_seq);
}
static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
{
	struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);

	bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
	return opts;
}
/* stored in page->private: */

/*
 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we
 * could almost protect it with the page lock, except that
 * bch2_writepage_io_done has to update the sector counts (and from
 * interrupt/bottom half context).
 */
struct bch_page_state {
union { struct {
	/* existing data: */
	unsigned		sectors:PAGE_SECTOR_SHIFT + 1;
	unsigned		nr_replicas:4;
	unsigned		compressed:1;

	/* Owns PAGE_SECTORS sized reservation: */
	unsigned		reserved:1;
	unsigned		reservation_replicas:4;

	/* Owns PAGE_SECTORS sized quota reservation: */
	unsigned		quota_reserved:1;

	/*
	 * Number of sectors on disk - for i_blocks
	 * Uncompressed size, not compressed size:
	 */
	unsigned		dirty_sectors:PAGE_SECTOR_SHIFT + 1;
};
	/* for cmpxchg: */
	unsigned long		v;
};
};
#define page_state_cmpxchg(_ptr, _new, _expr)				\
({									\
	unsigned long _v = READ_ONCE((_ptr)->v);			\
	struct bch_page_state _old;					\
									\
	do {								\
		_old.v = _new.v = _v;					\
		_expr;							\
									\
		EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
	} while (_old.v != _new.v &&					\
		 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v);	\
									\
	_old;								\
})
static inline struct bch_page_state *page_state(struct page *page)
{
	struct bch_page_state *s = (void *) &page->private;

	BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));

	if (!PagePrivate(page))
		SetPagePrivate(page);

	return s;
}
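/*
 * Illustrative sketch, not part of the original file (the example_* name is
 * hypothetical): page_state_cmpxchg() retries until the new state commits,
 * and evaluates to the state observed before the update - e.g. atomically
 * clearing the dirty sector count:
 */
static unsigned __maybe_unused example_clear_dirty_sectors(struct page *page)
{
	struct bch_page_state *s = page_state(page);
	struct bch_page_state new, old;

	old = page_state_cmpxchg(s, new, {
		new.dirty_sectors = 0;
	});

	/* the state observed before the update is handed back: */
	return old.dirty_sectors;
}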
static inline unsigned page_res_sectors(struct bch_page_state s)
{
	return s.reserved ? s.reservation_replicas * PAGE_SECTORS : 0;
}

static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
					struct bch_page_state s)
{
	struct disk_reservation res = { .sectors = page_res_sectors(s) };
	struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };

	bch2_quota_reservation_put(c, inode, &quota_res);
	bch2_disk_reservation_put(c, &res);
}
static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
				      struct page *page)
{
	struct bch_page_state s;

	s = page_state_cmpxchg(page_state(page), s, {
		s.reserved = 0;
		s.quota_reserved = 0;
	});

	__bch2_put_page_reservation(c, inode, s);
}
static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
				     struct page *page, bool check_enospc)
{
	struct bch_page_state *s = page_state(page), new, old;

	/* XXX: this should not be open coded */
	unsigned nr_replicas = inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;

	struct disk_reservation disk_res = bch2_disk_reservation_init(c,
						nr_replicas);
	struct quota_res quota_res = { 0 };
	int ret = 0;

	/*
	 * XXX: this could likely be quite a bit simpler, page reservations
	 * _should_ only be manipulated with page locked:
	 */

	old = page_state_cmpxchg(s, new, {
		if (new.reserved
		    ? (new.reservation_replicas < disk_res.nr_replicas)
		    : (new.sectors < PAGE_SECTORS ||
		       new.nr_replicas < disk_res.nr_replicas ||
		       new.compressed)) {
			int sectors = (disk_res.nr_replicas * PAGE_SECTORS -
				       page_res_sectors(new) -
				       disk_res.sectors);

			if (sectors > 0) {
				ret = bch2_disk_reservation_add(c, &disk_res, sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
				if (unlikely(ret))
					goto err;
			}

			new.reserved = 1;
			new.reservation_replicas = disk_res.nr_replicas;
		}

		if (!new.quota_reserved &&
		    new.sectors + new.dirty_sectors < PAGE_SECTORS) {
			ret = bch2_quota_reservation_add(c, inode, &quota_res,
						PAGE_SECTORS - quota_res.sectors,
						check_enospc);
			if (unlikely(ret))
				goto err;

			new.quota_reserved = 1;
		}
	});

	quota_res.sectors -= (new.quota_reserved - old.quota_reserved) * PAGE_SECTORS;
	disk_res.sectors -= page_res_sectors(new) - page_res_sectors(old);
err:
	bch2_quota_reservation_put(c, inode, &quota_res);
	bch2_disk_reservation_put(c, &disk_res);
	return ret;
}
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state s;

	if (!PagePrivate(page))
		return;

	s.v = xchg(&page_state(page)->v, 0);
	ClearPagePrivate(page);

	if (s.dirty_sectors)
		i_sectors_acct(c, inode, NULL, -s.dirty_sectors);

	__bch2_put_page_reservation(c, inode, s);
}
int bch2_set_page_dirty(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	struct bch_page_state old, new;

	old = page_state_cmpxchg(page_state(page), new,
		new.dirty_sectors = PAGE_SECTORS - new.sectors;
		new.quota_reserved = 0;
	);

	quota_res.sectors += old.quota_reserved * PAGE_SECTORS;

	if (old.dirty_sectors != new.dirty_sectors)
		i_sectors_acct(c, inode, &quota_res,
			       new.dirty_sectors - old.dirty_sectors);
	bch2_quota_reservation_put(c, inode, &quota_res);

	return __set_page_dirty_nobuffers(page);
}
int bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	lock_page(page);
	if (page->mapping != mapping ||
	    page_offset(page) > i_size_read(&inode->v)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (bch2_get_page_reservation(c, inode, page, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (!PageDirty(page))
		set_page_dirty(page);
	wait_for_stable_page(page);
out:
	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}
void bch2_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (offset || length < PAGE_SIZE)
		return;

	bch2_clear_page_bits(page);
}
int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* XXX: this can't take locks that are held while we allocate memory */
	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	if (PageDirty(page))
		return 0;

	bch2_clear_page_bits(page);
	return 1;
}
#ifdef CONFIG_MIGRATION
int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
		      struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (PagePrivate(page)) {
		*page_state(newpage) = *page_state(page);
		ClearPagePrivate(page);
	}

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif
/* readpages/writepages: */

static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	return bio->bi_vcnt < bio->bi_max_vecs &&
		bio_end_sector(bio) == offset;
}

static void __bio_add_page(struct bio *bio, struct page *page)
{
	bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	bio->bi_iter.bi_size += PAGE_SIZE;
}

static int bio_add_page_contig(struct bio *bio, struct page *page)
{
	sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;

	EBUG_ON(!bio->bi_max_vecs);

	if (!bio->bi_vcnt)
		bio->bi_iter.bi_sector = offset;
	else if (!bio_can_add_page_contig(bio, page))
		return -1;

	__bio_add_page(bio, page);
	return 0;
}
static void bch2_readpages_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_page(page);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	struct list_head	pages;
	unsigned		nr_pages;
};
static inline void page_state_init_for_read(struct page *page)
{
	struct bch_page_state *s = page_state(page);

	BUG_ON(s->reserved);
	s->sectors	= 0;
	s->compressed	= 0;
}

static int readpage_add_page(struct readpages_iter *iter, struct page *page)
{
	int ret;

	prefetchw(&page->flags);

	ret = add_to_page_cache_lru(page, iter->mapping,
				    page->index, GFP_NOFS);
	if (!ret)
		page_state_init_for_read(page);
	return ret;
}
static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	while (iter->nr_pages) {
		struct page *page =
			list_last_entry(&iter->pages, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		iter->nr_pages--;

		if (!readpage_add_page(iter, page))
			return page;

		put_page(page);
	}

	return NULL;
}

#define for_each_readpage_page(_iter, _page)				\
	for (;								\
	     ((_page) = __readpage_next_page(&(_iter)));)
static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	bool compressed = bch2_extent_is_compressed(k);
	unsigned nr_ptrs = bch2_extent_nr_dirty_ptrs(k);

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = page_state(bv.bv_page);

		/* sectors in @k from the start of this page: */
		unsigned k_sectors = k.k->size - (iter.bi_sector - bkey_start_offset(k.k));
		unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);

		s->nr_replicas = !s->sectors
			? nr_ptrs
			: min_t(unsigned, s->nr_replicas, nr_ptrs);

		BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
		s->sectors += page_sectors;

		s->compressed |= compressed;
	}
}
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio, u64 offset,
				bool get_more)
{
	struct page *page;
	pgoff_t page_offset;
	int ret;

	while (bio_end_sector(bio) < offset &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;

		if (iter->nr_pages) {
			page = list_last_entry(&iter->pages, struct page, lru);
			if (page->index != page_offset)
				break;

			list_del(&page->lru);
			iter->nr_pages--;
		} else if (get_more) {
			rcu_read_lock();
			page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);
			rcu_read_unlock();

			if (page && !radix_tree_exceptional_entry(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			page->index = page_offset;
			ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
		} else {
			break;
		}

		ret = readpage_add_page(iter, page);
		if (ret)
			break;

		__bio_add_page(bio, page);
	}

	SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
}
static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bio *bio = &rbio->bio;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;

	rbio->start_time = local_clock();

	while (1) {
		BKEY_PADDED(k) tmp;
		struct bkey_s_c k;
		unsigned bytes;

		bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		BUG_ON(!k.k);

		if (IS_ERR(k.k)) {
			int ret = bch2_btree_iter_unlock(iter);
			BUG_ON(!ret);
			bcache_io_error(c, bio, "btree IO error %i", ret);
			bio_endio(bio);
			return;
		}

		bkey_reassemble(&tmp.k, k);
		bch2_btree_iter_unlock(iter);
		k = bkey_i_to_s_c(&tmp.k);

		if (readpages_iter) {
			bool want_full_extent = false;

			if (bkey_extent_is_data(k.k)) {
				struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
				struct bch_extent_crc_unpacked crc;
				const union bch_extent_entry *i;

				extent_for_each_crc(e, crc, i)
					want_full_extent |= ((crc.csum_type != 0) |
							     (crc.compression_type != 0));
			}

			readpage_bio_extend(readpages_iter,
					    bio, k.k->p.offset,
					    want_full_extent);
		}

		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
			 bio->bi_iter.bi_sector) << 9;
		swap(bio->bi_iter.bi_size, bytes);

		if (bytes == bio->bi_iter.bi_size)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(bio, k);

		bch2_read_extent(c, rbio, k, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			return;

		swap(bio->bi_iter.bi_size, bytes);
		bio_advance(bio, bytes);
	}
}
int bch2_readpages(struct file *file, struct address_space *mapping,
		   struct list_head *pages, unsigned nr_pages)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct btree_iter iter;
	struct page *page;
	struct readpages_iter readpages_iter = {
		.mapping = mapping, .nr_pages = nr_pages
	};

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_SLOTS);

	INIT_LIST_HEAD(&readpages_iter.pages);
	list_add(&readpages_iter.pages, pages);
	list_del_init(pages);

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_get(&mapping->add_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		unsigned n = max_t(unsigned,
				   min_t(unsigned, readpages_iter.nr_pages + 1,
					 BIO_MAX_PAGES),
				   c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);

		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
				  opts);

		rbio->bio.bi_end_io = bch2_readpages_end_io;
		bio_add_page_contig(&rbio->bio, page);
		bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
	}

	if (current->pagecache_lock != &mapping->add_lock)
		pagecache_add_put(&mapping->add_lock);

	return 0;
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_iter iter;

	page_state_init_for_read(page);

	bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
	bio_add_page_contig(&rbio->bio, page);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_SLOTS);
	bchfs_read(c, &iter, rbio, inum, NULL);
}
int bch2_readpage(struct file *file, struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct bch_read_bio *rbio;

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
	rbio->bio.bi_end_io = bch2_readpages_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	return 0;
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
			 io_opts(c, inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}
struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
}
static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.op.wbio.bio);
}

static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.op.c;
	struct bio *bio = &io->op.op.wbio.bio;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.op.error) {
		bio_for_each_segment_all(bvec, bio, i)
			SetPageError(bvec->bv_page);
		set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.sectors_added > (s64) io->new_sectors);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 */
	BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	if (io->op.sectors_added != io->new_sectors)
		i_sectors_acct(c, io->op.inode, NULL,
			       io->op.sectors_added - (s64) io->new_sectors);

	bio_for_each_segment_all(bvec, bio, i)
		end_page_writeback(bvec->bv_page);

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    struct page *page,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;
	u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;

	w->io = container_of(bio_alloc_bioset(GFP_NOFS,
					      BIO_MAX_PAGES,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.op.wbio.bio);
	op = &w->io->op.op;

	closure_init(&w->io->cl, NULL);
	w->io->new_sectors	= 0;
	bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, offset);
	op->wbio.bio.bi_iter.bi_sector = offset;
}
static int __bch2_writepage(struct page *page,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state new, old;
	unsigned offset;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	/* Before unlocking the page, transfer reservation to w->io: */
	old = page_state_cmpxchg(page_state(page), new, {
		EBUG_ON(!new.reserved &&
			(new.sectors != PAGE_SECTORS ||
			 new.compressed));

		if (new.reserved)
			new.nr_replicas = new.reservation_replicas;
		new.reserved = 0;

		new.compressed |= w->opts.compression != 0;

		new.sectors += new.dirty_sectors;
		new.dirty_sectors = 0;
	});

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	if (w->io &&
	    (w->io->op.op.res.nr_replicas != new.nr_replicas ||
	     !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
		bch2_writepage_do_io(w);

	if (!w->io)
		bch2_writepage_io_alloc(c, w, inode, page, new.nr_replicas);

	w->io->new_sectors += new.sectors - old.sectors;

	BUG_ON(inode != w->io->op.inode);
	BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));

	if (old.reserved)
		w->io->op.op.res.sectors += old.reservation_replicas * PAGE_SECTORS;

	w->io->op.new_i_size = i_size;

	if (wbc->sync_mode == WB_SYNC_ALL)
		w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page, wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);

	return ret;
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	BUG_ON(inode_unhashed(&inode->v));

	/* Not strictly necessary - same reason as mkwrite(): */
	pagecache_add_get(&mapping->add_lock);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_get_page_reservation(c, inode, page, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	pagecache_add_put(&mapping->add_lock);
	return ret;
}

int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);
		if (!PageDirty(page))
			set_page_dirty(page);

		inode->ei_last_dirtied = (unsigned long) current;
	} else {
		bch2_put_page_reservation(c, inode, page);
	}

	unlock_page(page);
	put_page(page);
	pagecache_add_put(&mapping->add_lock);

	return copied;
}
#define WRITE_BATCH_PAGES	32

static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
		if (!pages[i]) {
			nr_pages = i;
			ret = -ENOMEM;
			goto out;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		ret = bch2_get_page_reservation(c, inode, pages[i], true);

		if (ret && !PageUptodate(pages[i])) {
			ret = bch2_read_single_page(pages[i], mapping);
			if (ret)
				goto out;

			ret = bch2_get_page_reservation(c, inode, pages[i], true);
		}

		if (ret)
			goto out;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_bytes = min_t(unsigned, len - copied,
					  PAGE_SIZE - pg_offset);
		unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
						iter, pg_offset, pg_bytes);

		if (!pg_copied)
			break;

		flush_dcache_page(page);
		iov_iter_advance(iter, pg_copied);
		copied += pg_copied;
	}

	if (!copied)
		goto out;

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;

	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);

	if (copied < len &&
	    ((offset + copied) & (PAGE_SIZE - 1))) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];

		if (!PageUptodate(page)) {
			zero_user(page, 0, PAGE_SIZE);
			copied -= (offset + copied) & (PAGE_SIZE - 1);
		}
	}

	for (i = 0; i < nr_pages_copied; i++) {
		if (!PageUptodate(pages[i]))
			SetPageUptodate(pages[i]);
		if (!PageDirty(pages[i]))
			set_page_dirty(pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		if (!PageDirty(pages[i]))
			bch2_put_page_reservation(c, inode, pages[i]);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	return copied ?: ret;
}
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	pagecache_add_get(&mapping->add_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
				       PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}

		pos += ret;
		written += ret;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	pagecache_add_put(&mapping->add_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret, 0);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));
	iov_iter_truncate(iter, round_up(ret, block_bytes(c)));

	if (!ret)
		return ret;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}
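	/*
	 * Each split bio issued below takes its own closure_get(); with the
	 * remaining count biased as above, the final closure_put() from the
	 * last bio's endio either fires bch2_dio_read_complete() (async) or
	 * wakes the closure_sync() waiter (sync) without an extra atomic op
	 * here.
	 */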
	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(GFP_KERNEL,
				       iov_iter_npages(iter, BIO_MAX_PAGES),
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
/* O_DIRECT writes */

static void bch2_dio_write_loop_async(struct closure *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = dio->iop.inode;
	struct bio *bio = &dio->iop.op.wbio.bio;
	struct bio_vec *bv;
	bool sync;
	long ret;
	int i;

	if (dio->loop)
		goto loop;

	inode_dio_begin(&inode->v);
	__pagecache_block_get(&mapping->add_lock);

	/* Write and invalidate pagecache range that we're writing to: */
	ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
				req->ki_pos + iov_iter_count(&dio->iter) - 1);
	if (unlikely(ret))
		goto err;

	while (1) {
		BUG_ON(current->pagecache_lock);
		current->pagecache_lock = &mapping->add_lock;
		if (current != dio->task)
			use_mm(dio->task->mm);

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		if (current != dio->task)
			unuse_mm(dio->task->mm);
		current->pagecache_lock = NULL;

		if (unlikely(ret < 0))
			goto err;

		dio->iop.op.pos = POS(inode->v.i_ino,
				(req->ki_pos >> 9) + dio->iop.op.written);

		task_io_account_write(bio->bi_iter.bi_size);

		closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			struct iovec *iov = dio->inline_vecs;

			if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
				iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
					      GFP_KERNEL);
				if (unlikely(!iov)) {
					dio->iop.op.error = -ENOMEM;
					goto err_wait_io;
				}

				dio->free_iov = true;
			}

			memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
			dio->iter.iov = iov;
		}
err_wait_io:
		dio->loop = true;

		if (!dio->sync) {
			continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
			return -EIOCBQUEUED;
		}

		closure_sync(&dio->cl);
loop:
		bio_for_each_segment_all(bv, bio, i)
			put_page(bv->bv_page);
		if (!dio->iter.count || dio->iop.op.error)
			break;
		bio_reset(bio);
	}

	ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
err:
	__pagecache_block_put(&mapping->add_lock);
	bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
	bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.iov);

	closure_debug_destroy(&dio->cl);

	sync = dio->sync;
	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret, 0);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

static void bch2_dio_write_loop_async(struct closure *cl)
{
	struct dio_write *dio = container_of(cl, struct dio_write, cl);

	bch2_dio_write_loop(dio);
}
static int bch2_direct_IO_write(struct kiocb *req,
				struct iov_iter *iter,
				bool swap)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	ssize_t ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(!iter->count))
		return 0;

	if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL,
			       iov_iter_npages(iter, BIO_MAX_PAGES),
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
	closure_init(&dio->cl, NULL);
	dio->req		= req;
	dio->task		= current;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) ||
		offset + iter->count > inode->v.i_size;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->iter		= *iter;
	bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
	dio->iop.op.write_point	= writepoint_hashed((unsigned long) dio->task);
	dio->iop.op.flags	|= BCH_WRITE_NOPUT_RESERVATION;

	if ((req->ki_flags & IOCB_DSYNC) &&
	    !c->opts.journal_flush_disabled)
		dio->iop.op.flags |= BCH_WRITE_FLUSH;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err;

	ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
					dio->iop.op.opts.data_replicas, 0);
	if (unlikely(ret)) {
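		/*
		 * The full disk reservation failed - but if the range being
		 * written is already entirely allocated on disk, proceed
		 * without one: the write will only overwrite existing
		 * extents, and bchfs_extent_update_hook() fails the update
		 * with ENOSPC if it ever sees an extent that isn't fully
		 * allocated.
		 */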
		if (bch2_check_range_allocated(c, POS(inode->v.i_ino,
						      offset >> 9),
					       iter->count >> 9))
			goto err;

		dio->iop.unalloc = true;
	}

	dio->iop.op.nr_replicas	= dio->iop.op.res.nr_replicas;

	return bch2_dio_write_loop(dio);
err:
	bch2_disk_reservation_put(c, &dio->iop.op.res);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	closure_debug_destroy(&dio->cl);
	bio_put(bio);
	return ret;
}
ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
	struct blk_plug plug;
	ssize_t ret;

	blk_start_plug(&plug);
	ret = iov_iter_rw(iter) == WRITE
		? bch2_direct_IO_write(req, iter, false)
		: bch2_direct_IO_read(req, iter);
	blk_finish_plug(&plug);

	return ret;
}

static ssize_t
bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	return bch2_direct_IO_write(iocb, iter, true);
}
static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(&inode->v);
	ret = file_remove_privs(file);
	if (ret)
		goto out;

	ret = file_update_time(file);
	if (ret)
		goto out;

	ret = iocb->ki_flags & IOCB_DIRECT
		? bch2_direct_write(iocb, from)
		: bch2_buffered_write(iocb, from);

	if (likely(ret > 0))
		iocb->ki_pos += ret;
out:
	current->backing_dev_info = NULL;
	return ret;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
	bool direct = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;

	inode_lock(&inode->v);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __bch2_write_iter(iocb, from);
	inode_unlock(&inode->v);

	if (ret > 0 && !direct)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);
	if (ret)
		return ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
}
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		struct btree_iter iter;
		struct bkey_s_c k = bkey_s_c_null;

		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
				   POS(inode->v.i_ino,
				       index << PAGE_SECTOR_SHIFT), 0, k) {
			if (bkey_cmp(bkey_start_pos(k.k),
				     POS(inode->v.i_ino,
					 (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)
				break;

			if (k.k->type != KEY_TYPE_DISCARD &&
			    k.k->type != BCH_RESERVATION) {
				bch2_btree_iter_unlock(&iter);
				goto create;
			}
		}
		bch2_btree_iter_unlock(&iter);
		return 0;
create:
		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_reservation(c, inode, page, false);
	BUG_ON(ret);

	if (index == start >> PAGE_SHIFT &&
	    index == end >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, end_offset);
	else if (index == start >> PAGE_SHIFT)
		zero_user_segment(page, start_offset, PAGE_SIZE);
	else if (index == end >> PAGE_SHIFT)
		zero_user_segment(page, 0, end_offset);

	if (!PageDirty(page))
		set_page_dirty(page);
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, from + PAGE_SIZE);
}
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	bool shrink = iattr->ia_size <= inode->v.i_size;
	struct i_sectors_hook i_sectors_hook =
		i_sectors_hook_init(inode, BCH_INODE_I_SIZE_DIRTY);
	int ret = 0;

	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	truncate_setsize(&inode->v, iattr->ia_size);

	/* sync appends.. */
	/* XXX what protects inode->i_size? */
	if (iattr->ia_size > inode->ei_inode.bi_size)
		ret = filemap_write_and_wait_range(mapping,
						   inode->ei_inode.bi_size, S64_MAX);
	if (ret)
		goto err_put_pagecache;

	i_sectors_hook.new_i_size = iattr->ia_size;

	ret = i_sectors_dirty_start(c, &i_sectors_hook);
	if (unlikely(ret))
		goto err_put_pagecache;

	/*
	 * There might be persistent reservations (from fallocate())
	 * above i_size, which bch2_inode_truncate() will discard - we're
	 * only supposed to discard them if we're doing a real truncate
	 * here (new i_size < current i_size):
	 */
	if (shrink) {
		ret = bch2_truncate_page(inode, iattr->ia_size);
		if (unlikely(ret))
			goto err;

		ret = bch2_inode_truncate(c, inode->v.i_ino,
					  round_up(iattr->ia_size, PAGE_SIZE) >> 9,
					  &i_sectors_hook.hook,
					  &inode->ei_journal_seq);
		if (unlikely(ret))
			goto err;
	}

	setattr_copy(&inode->v, iattr);
	inode->v.i_mtime = inode->v.i_ctime = current_time(&inode->v);
err:
	/*
	 * On error - in particular, bch2_truncate_page() error - don't clear
	 * I_SIZE_DIRTY, as we've left data above i_size!:
	 */
	if (unlikely(ret))
		i_sectors_hook.flags &= ~BCH_INODE_I_SIZE_DIRTY;

	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
err_put_pagecache:
	pagecache_block_put(&mapping->add_lock);

	return ret;
}
static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	u64 ino = inode->v.i_ino;
	u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
	u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		/*
		 * We need to pass in a disk reservation here because we might
		 * be splitting a compressed extent into two. This isn't a
		 * problem with truncate because truncate will never split an
		 * extent, only truncate it...
		 */
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct i_sectors_hook i_sectors_hook =
			i_sectors_hook_init(inode, 0);

		ret = i_sectors_dirty_start(c, &i_sectors_hook);
		if (unlikely(ret))
			goto err;

		ret = bch2_btree_delete_range(c,
				BTREE_ID_EXTENTS,
				POS(ino, discard_start),
				POS(ino, discard_end),
				ZERO_VERSION,
				&disk_res,
				&i_sectors_hook.hook,
				&inode->ei_journal_seq);

		ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
	}
err:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;
}
static long bch2_fcollapse(struct bch_inode_info *inode,
			   loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct btree_iter src;
	struct btree_iter dst;
	BKEY_PADDED(k) copy;
	struct bkey_s_c k;
	struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
	loff_t new_size;
	int ret;

	if ((offset | len) & (PAGE_SIZE - 1))
		return -EINVAL;

	bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
			     POS(inode->v.i_ino, offset >> 9),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	/* position will be set from dst iter's position: */
	bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_SLOTS);
	bch2_btree_iter_link(&src, &dst);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	ret = -EINVAL;
	if (offset + len >= inode->v.i_size)
		goto err;

	if (inode->v.i_size < len)
		goto err;

	new_size = inode->v.i_size - len;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		goto err;

	ret = i_sectors_dirty_start(c, &i_sectors_hook);
	if (ret)
		goto err;

	while (bkey_cmp(dst.pos,
			POS(inode->v.i_ino,
			    round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
		struct disk_reservation disk_res;

		bch2_btree_iter_set_pos(&src,
			POS(dst.pos.inode, dst.pos.offset + (len >> 9)));

		k = bch2_btree_iter_peek_slot(&src);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		bkey_reassemble(&copy.k, k);

		if (bkey_deleted(&copy.k.k))
			copy.k.k.type = KEY_TYPE_DISCARD;

		bch2_cut_front(src.pos, &copy.k);
		copy.k.k.p.offset -= len >> 9;

		BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));

		ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
				bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
				BCH_DISK_RESERVATION_NOFAIL);
		BUG_ON(ret);

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
				&inode->ei_journal_seq,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL,
				BTREE_INSERT_ENTRY(&dst, &copy.k));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
		/*
		 * XXX: if we error here we've left data with multiple
		 * pointers... which isn't a _super_ serious problem...
		 */

		bch2_btree_iter_cond_resched(&src);
	}

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);

	ret = bch2_inode_truncate(c, inode->v.i_ino,
				  round_up(new_size, PAGE_SIZE) >> 9,
				  &i_sectors_hook.hook,
				  &inode->ei_journal_seq);
	if (ret)
		goto err_put_sectors_dirty;

	i_size_write(&inode->v, new_size);
	i_sectors_hook.new_i_size = new_size;
err_put_sectors_dirty:
	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
err:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	bch2_btree_iter_unlock(&src);
	bch2_btree_iter_unlock(&dst);
	return ret;
}
static long bch2_fallocate(struct bch_inode_info *inode, int mode,
			   loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
	struct btree_iter iter;
	struct bpos end_pos;
	loff_t block_start, block_end;
	loff_t end = offset + len;
	unsigned sectors;
	unsigned replicas = io_opts(c, inode).data_replicas;
	int ret;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	pagecache_block_get(&mapping->add_lock);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			goto err;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = __bch2_truncate_page(inode,
					   offset >> PAGE_SHIFT,
					   offset, end);

		if (!ret &&
		    offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
			ret = __bch2_truncate_page(inode,
						   end >> PAGE_SHIFT,
						   offset, end);

		if (unlikely(ret))
			goto err;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start	= round_up(offset, PAGE_SIZE);
		block_end	= round_down(end, PAGE_SIZE);
	} else {
		block_start	= round_down(offset, PAGE_SIZE);
		block_end	= round_up(end, PAGE_SIZE);
	}

	bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
	end_pos = POS(inode->v.i_ino, block_end >> 9);

	ret = i_sectors_dirty_start(c, &i_sectors_hook);
	if (unlikely(ret))
		goto err;

	while (bkey_cmp(iter.pos, end_pos) < 0) {
		struct disk_reservation disk_res = { 0 };
		struct bkey_i_reservation reservation;
		struct bkey_s_c k;

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = btree_iter_err(k)))
			goto btree_iter_err;

		/* already reserved */
		if (k.k->type == BCH_RESERVATION &&
		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
			bch2_btree_iter_next_slot(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k)) {
			if (!(mode & FALLOC_FL_ZERO_RANGE)) {
				bch2_btree_iter_next_slot(&iter);
				continue;
			}
		}

		bkey_reservation_init(&reservation.k_i);
		reservation.k.type	= BCH_RESERVATION;
		reservation.k.p		= k.k->p;
		reservation.k.size	= k.k->size;

		bch2_cut_front(iter.pos, &reservation.k_i);
		bch2_cut_back(end_pos, &reservation.k);

		sectors = reservation.k.size;
		reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);

		if (!bkey_extent_is_allocation(k.k)) {
			ret = bch2_quota_reservation_add(c, inode,
					&i_sectors_hook.quota_res,
					sectors, true);
			if (unlikely(ret))
				goto err_put_sectors_dirty;
		}

		if (reservation.v.nr_replicas < replicas ||
		    bch2_extent_is_compressed(k)) {
			ret = bch2_disk_reservation_get(c, &disk_res, sectors,
							replicas, 0);
			if (unlikely(ret))
				goto err_put_sectors_dirty;

			reservation.v.nr_replicas = disk_res.nr_replicas;
		}

		ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
					   &inode->ei_journal_seq,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
		bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
		if (ret < 0 && ret != -EINTR)
			goto err_put_sectors_dirty;
	}
	bch2_btree_iter_unlock(&iter);

	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    end > inode->v.i_size) {
		i_size_write(&inode->v, end);

		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, inode->v.i_size);
		mutex_unlock(&inode->ei_update_lock);
	}

	if ((mode & FALLOC_FL_KEEP_SIZE) &&
	    (mode & FALLOC_FL_ZERO_RANGE) &&
	    inode->ei_inode.bi_size != inode->v.i_size) {
		/* sync appends.. */
		ret = filemap_write_and_wait_range(mapping,
					inode->ei_inode.bi_size, S64_MAX);
		if (ret)
			goto err;

		if (inode->ei_inode.bi_size != inode->v.i_size) {
			mutex_lock(&inode->ei_update_lock);
			ret = bch2_write_inode_size(c, inode, inode->v.i_size);
			mutex_unlock(&inode->ei_update_lock);
		}
	}
err:
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);

	return ret;
err_put_sectors_dirty:
	ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;

	bch2_btree_iter_unlock(&iter);
	pagecache_block_put(&mapping->add_lock);
	inode_unlock(&inode->v);
	return ret;
}
long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		return bch2_fallocate(inode, mode, offset, len);

	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		return bch2_fpunch(inode, offset, len);

	if (mode == FALLOC_FL_COLLAPSE_RANGE)
		return bch2_fcollapse(inode, offset, len);

	return -EOPNOTSUPP;
}
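/*
 * Illustrative userspace sketch, not part of this file (kept under #if 0
 * since it isn't kernel code; the helper names are hypothetical): the mode
 * combinations accepted above map onto fallocate(2) calls like these -
 * punch_hole() ends up in bch2_fpunch(), collapse() in bch2_fcollapse():
 */
#if 0
#include <fcntl.h>

static int punch_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE,
			 offset, len);
}

static int collapse(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
}
#endif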
static bool page_is_data(struct page *page)
{
	/* XXX: should only have to check PageDirty */
	return PagePrivate(page) &&
		(page_state(page)->sectors ||
		 page_state(page)->dirty_sectors);
}

static loff_t bch2_next_pagecache_data(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++) {
		if (find_get_pages(mapping, &index, 1, &page)) {
			lock_page(page);

			if (page_is_data(page))
				end_offset =
					min(end_offset,
					    max(start_offset,
						((loff_t) index) << PAGE_SHIFT));

			unlock_page(page);
			put_page(page);
		} else {
			break;
		}
	}

	return end_offset;
}
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_data = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9), 0, k) {
		if (k.k->p.inode != inode->v.i_ino) {
			break;
		} else if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset << 9 > isize)
			break;
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_next_pagecache_data(&inode->v,
						     offset, next_data);

	if (next_data > isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	bool ret;

	page = find_lock_entry(mapping, index);
	if (!page || radix_tree_exception(page))
		return false;

	ret = page_is_data(page);
	unlock_page(page);
	put_page(page);

	return ret;
}

static loff_t bch2_next_pagecache_hole(struct inode *vinode,
				       loff_t start_offset,
				       loff_t end_offset)
{
	struct address_space *mapping = vinode->i_mapping;
	pgoff_t index;

	for (index = start_offset >> PAGE_SHIFT;
	     index < end_offset >> PAGE_SHIFT;
	     index++)
		if (!page_slot_is_data(mapping, index))
			end_offset = max(start_offset,
					 ((loff_t) index) << PAGE_SHIFT);

	return end_offset;
}
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
			   POS(inode->v.i_ino, offset >> 9),
			   BTREE_ITER_SLOTS, k) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_next_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
		return bch2_seek_data(file, offset);
	case SEEK_HOLE:
		return bch2_seek_hole(file, offset);
	}

	return -EINVAL;
}
void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS) ||
	    bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, iop.op.wbio.bio),
			BIOSET_NEED_BVECS))
		ret = -ENOMEM;

	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

#endif /* NO_BCACHEFS_FS */