4 #include "btree_update.h"
17 #include <linux/aio.h>
18 #include <linux/backing-dev.h>
19 #include <linux/falloc.h>
20 #include <linux/migrate.h>
21 #include <linux/mmu_context.h>
22 #include <linux/pagevec.h>
23 #include <linux/task_io_accounting_ops.h>
24 #include <linux/uio.h>
25 #include <linux/writeback.h>
27 #include <trace/events/bcachefs.h>
28 #include <trace/events/writeback.h>
34 struct i_sectors_hook {
35 struct extent_insert_hook hook;
36 struct bch_inode_info *inode;
37 struct quota_res quota_res;
44 struct bchfs_write_op {
45 struct bch_inode_info *inode;
52 struct bch_write_op op;
55 struct bch_writepage_io {
60 struct bchfs_write_op op;
66 struct task_struct *task;
70 struct quota_res quota_res;
73 struct iovec inline_vecs[2];
76 struct bchfs_write_op iop;
83 struct bch_read_bio rbio;
86 /* pagecache_block must be held */
87 static int write_invalidate_inode_pages_range(struct address_space *mapping,
88 loff_t start, loff_t end)
93 * XXX: the way this is currently implemented, we can spin if a process
94 * is continually redirtying a specific page
97 if (!mapping->nrpages &&
98 !mapping->nrexceptional)
101 ret = filemap_write_and_wait_range(mapping, start, end);
105 if (!mapping->nrpages)
108 ret = invalidate_inode_pages2_range(mapping,
111 } while (ret == -EBUSY);
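/*
 * (pagecache_block, per the comment above, is presumably what prevents new
 * pages from being added to the mapping while we do this - so the -EBUSY
 * retry should only spin in the redirtying case noted above.)
 */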
118 #ifdef CONFIG_BCACHEFS_QUOTA
120 static void bch2_quota_reservation_put(struct bch_fs *c,
121 struct bch_inode_info *inode,
122 struct quota_res *res)
127 mutex_lock(&inode->ei_update_lock);
128 BUG_ON(res->sectors > inode->ei_quota_reserved);
130 bch2_quota_acct(c, inode->ei_qid, Q_SPC,
131 -((s64) res->sectors), BCH_QUOTA_PREALLOC);
132 inode->ei_quota_reserved -= res->sectors;
133 mutex_unlock(&inode->ei_update_lock);
138 static int bch2_quota_reservation_add(struct bch_fs *c,
139 struct bch_inode_info *inode,
140 struct quota_res *res,
146 mutex_lock(&inode->ei_update_lock);
147 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
148 check_enospc ? BCH_QUOTA_PREALLOC : BCH_QUOTA_NOCHECK);
150 inode->ei_quota_reserved += sectors;
151 res->sectors += sectors;
153 mutex_unlock(&inode->ei_update_lock);
160 static void bch2_quota_reservation_put(struct bch_fs *c,
161 struct bch_inode_info *inode,
162 struct quota_res *res)
166 static int bch2_quota_reservation_add(struct bch_fs *c,
167 struct bch_inode_info *inode,
168 struct quota_res *res,
177 /* i_size updates: */
179 static int inode_set_size(struct bch_inode_info *inode,
180 struct bch_inode_unpacked *bi,
183 loff_t *new_i_size = p;
185 lockdep_assert_held(&inode->ei_update_lock);
187 bi->bi_size = *new_i_size;
191 static int __must_check bch2_write_inode_size(struct bch_fs *c,
192 struct bch_inode_info *inode,
195 return __bch2_write_inode(c, inode, inode_set_size, &new_size);
198 static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
199 struct quota_res *quota_res, int sectors)
201 #ifdef CONFIG_BCACHEFS_QUOTA
202 if (quota_res && sectors > 0) {
203 BUG_ON(sectors > quota_res->sectors);
204 BUG_ON(sectors > inode->ei_quota_reserved);
206 quota_res->sectors -= sectors;
207 inode->ei_quota_reserved -= sectors;
209 bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, BCH_QUOTA_WARN);
212 inode->v.i_blocks += sectors;
215 static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
216 struct quota_res *quota_res, int sectors)
218 mutex_lock(&inode->ei_update_lock);
219 __i_sectors_acct(c, inode, quota_res, sectors);
220 mutex_unlock(&inode->ei_update_lock);
223 /* i_sectors accounting: */
225 static enum btree_insert_ret
226 i_sectors_hook_fn(struct extent_insert_hook *hook,
227 struct bpos committed_pos,
228 struct bpos next_pos,
230 const struct bkey_i *insert)
232 struct i_sectors_hook *h = container_of(hook,
233 struct i_sectors_hook, hook);
234 s64 sectors = next_pos.offset - committed_pos.offset;
235 int sign = bkey_extent_is_allocation(&insert->k) -
236 (k.k && bkey_extent_is_allocation(k.k));
238 EBUG_ON(!(h->inode->ei_inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY));
240 h->sectors += sectors * sign;
242 return BTREE_INSERT_OK;
245 static int i_sectors_dirty_finish_fn(struct bch_inode_info *inode,
246 struct bch_inode_unpacked *bi,
249 struct i_sectors_hook *h = p;
251 if (h->new_i_size != U64_MAX &&
253 h->new_i_size > bi->bi_size))
254 bi->bi_size = h->new_i_size;
255 bi->bi_sectors += h->sectors;
256 bi->bi_flags &= ~h->flags;
260 static int i_sectors_dirty_finish(struct bch_fs *c, struct i_sectors_hook *h)
264 mutex_lock(&h->inode->ei_update_lock);
265 if (h->new_i_size != U64_MAX)
266 i_size_write(&h->inode->v, h->new_i_size);
268 __i_sectors_acct(c, h->inode, &h->quota_res, h->sectors);
270 ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_finish_fn, h);
271 mutex_unlock(&h->inode->ei_update_lock);
273 bch2_quota_reservation_put(c, h->inode, &h->quota_res);
280 static int i_sectors_dirty_start_fn(struct bch_inode_info *inode,
281 struct bch_inode_unpacked *bi, void *p)
283 struct i_sectors_hook *h = p;
285 if (h->flags & BCH_INODE_I_SIZE_DIRTY)
286 bi->bi_size = h->new_i_size;
288 bi->bi_flags |= h->flags;
292 static int i_sectors_dirty_start(struct bch_fs *c, struct i_sectors_hook *h)
296 mutex_lock(&h->inode->ei_update_lock);
297 ret = __bch2_write_inode(c, h->inode, i_sectors_dirty_start_fn, h);
298 mutex_unlock(&h->inode->ei_update_lock);
303 static inline struct i_sectors_hook
304 i_sectors_hook_init(struct bch_inode_info *inode, unsigned flags)
306 return (struct i_sectors_hook) {
307 .hook.fn = i_sectors_hook_fn,
310 .new_i_size = U64_MAX,
311 .flags = flags|BCH_INODE_I_SECTORS_DIRTY,
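/*
 * Usage sketch, as in bch2_truncate()/bch2_fpunch() below: initialize the
 * hook, arm it with i_sectors_dirty_start(), pass &hook.hook into extent
 * btree updates so i_sectors_hook_fn() can tally allocation changes, then
 * settle i_size/i_sectors with i_sectors_dirty_finish():
 *
 *	struct i_sectors_hook h = i_sectors_hook_init(inode, 0);
 *
 *	ret = i_sectors_dirty_start(c, &h);
 *	... extent btree updates, passing &h.hook ...
 *	ret = i_sectors_dirty_finish(c, &h) ?: ret;
 */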
315 /* normal i_size/i_sectors update machinery: */
317 struct bchfs_extent_trans_hook {
318 struct bchfs_write_op *op;
319 struct extent_insert_hook hook;
321 struct bch_inode_unpacked inode_u;
322 struct bkey_inode_buf inode_p;
324 bool need_inode_update;
327 static enum btree_insert_ret
328 bchfs_extent_update_hook(struct extent_insert_hook *hook,
329 struct bpos committed_pos,
330 struct bpos next_pos,
332 const struct bkey_i *insert)
334 struct bchfs_extent_trans_hook *h = container_of(hook,
335 struct bchfs_extent_trans_hook, hook);
336 struct bch_inode_info *inode = h->op->inode;
337 int sign = bkey_extent_is_allocation(&insert->k) -
338 (k.k && bkey_extent_is_allocation(k.k));
339 s64 sectors = (s64) (next_pos.offset - committed_pos.offset) * sign;
340 u64 offset = min(next_pos.offset << 9, h->op->new_i_size);
341 bool do_pack = false;
343 if (h->op->unalloc &&
344 !bch2_extent_is_fully_allocated(k))
345 return BTREE_INSERT_ENOSPC;
347 BUG_ON((next_pos.offset << 9) > round_up(offset, PAGE_SIZE));
349 /* XXX: inode->i_size locking */
350 if (offset > inode->ei_inode.bi_size) {
351 if (!h->need_inode_update) {
352 h->need_inode_update = true;
353 return BTREE_INSERT_NEED_TRAVERSE;
356 BUG_ON(h->inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY);
358 h->inode_u.bi_size = offset;
361 inode->ei_inode.bi_size = offset;
364 i_size_write(&inode->v, offset);
368 if (!h->need_inode_update) {
369 h->need_inode_update = true;
370 return BTREE_INSERT_NEED_TRAVERSE;
373 h->inode_u.bi_sectors += sectors;
376 h->op->sectors_added += sectors;
380 bch2_inode_pack(&h->inode_p, &h->inode_u);
382 return BTREE_INSERT_OK;
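/*
 * Note the two pass structure: the hook runs with btree locks held and can't
 * look up the inode itself, so the first time an i_size/i_sectors update
 * turns out to be needed it returns BTREE_INSERT_NEED_TRAVERSE -
 * bchfs_write_index_update() below then reads and unpacks the inode and
 * retries the insert with the packed inode as a second entry.
 */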
385 static int bchfs_write_index_update(struct bch_write_op *wop)
387 struct bchfs_write_op *op = container_of(wop,
388 struct bchfs_write_op, op);
389 struct keylist *keys = &op->op.insert_keys;
390 struct btree_iter extent_iter, inode_iter;
391 struct bchfs_extent_trans_hook hook;
392 struct bkey_i *k = bch2_keylist_front(keys);
393 s64 orig_sectors_added = op->sectors_added;
396 BUG_ON(k->k.p.inode != op->inode->v.i_ino);
398 bch2_btree_iter_init(&extent_iter, wop->c, BTREE_ID_EXTENTS,
399 bkey_start_pos(&bch2_keylist_front(keys)->k),
401 bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
402 POS(extent_iter.pos.inode, 0),
403 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
406 hook.hook.fn = bchfs_extent_update_hook;
407 hook.need_inode_update = false;
410 /* XXX: inode->i_size locking */
411 k = bch2_keylist_front(keys);
412 if (min(k->k.p.offset << 9, op->new_i_size) >
413 op->inode->ei_inode.bi_size)
414 hook.need_inode_update = true;
416 if (hook.need_inode_update) {
417 struct bkey_s_c inode;
419 if (!btree_iter_linked(&inode_iter))
420 bch2_btree_iter_link(&extent_iter, &inode_iter);
422 inode = bch2_btree_iter_peek_slot(&inode_iter);
423 if ((ret = btree_iter_err(inode)))
426 if (WARN_ONCE(inode.k->type != BCH_INODE_FS,
427 "inode %llu not found when updating",
428 extent_iter.pos.inode)) {
433 if (WARN_ONCE(bkey_bytes(inode.k) >
434 sizeof(hook.inode_p),
435 "inode %llu too big (%zu bytes, buf %zu)",
436 extent_iter.pos.inode,
438 sizeof(hook.inode_p))) {
443 bkey_reassemble(&hook.inode_p.inode.k_i, inode);
444 ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
447 "error %i unpacking inode %llu",
448 ret, extent_iter.pos.inode)) {
453 ret = bch2_btree_insert_at(wop->c, &wop->res,
454 &hook.hook, op_journal_seq(wop),
457 BTREE_INSERT_USE_RESERVE,
458 BTREE_INSERT_ENTRY(&extent_iter, k),
459 BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
460 &hook.inode_p.inode.k_i, 2));
462 ret = bch2_btree_insert_at(wop->c, &wop->res,
463 &hook.hook, op_journal_seq(wop),
466 BTREE_INSERT_USE_RESERVE,
467 BTREE_INSERT_ENTRY(&extent_iter, k));
470 BUG_ON(bkey_cmp(extent_iter.pos, bkey_start_pos(&k->k)));
471 BUG_ON(!ret != !k->k.size);
478 BUG_ON(bkey_cmp(extent_iter.pos, k->k.p) < 0);
479 bch2_keylist_pop_front(keys);
480 } while (!bch2_keylist_empty(keys));
482 bch2_btree_iter_unlock(&extent_iter);
483 bch2_btree_iter_unlock(&inode_iter);
486 struct dio_write *dio = container_of(op, struct dio_write, iop);
488 i_sectors_acct(wop->c, op->inode, &dio->quota_res,
489 op->sectors_added - orig_sectors_added);
495 static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
497 struct bch_inode_info *inode,
498 struct bch_io_opts opts,
502 op->sectors_added = 0;
505 op->new_i_size = U64_MAX;
507 bch2_write_op_init(&op->op, c);
508 op->op.csum_type = bch2_data_checksum_type(c, opts.data_checksum);
509 op->op.compression_type = bch2_compression_opt_to_type[opts.compression];
510 op->op.devs = c->fastest_devs;
511 op->op.index_update_fn = bchfs_write_index_update;
512 op_journal_seq_set(&op->op, &inode->ei_journal_seq);
515 static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
517 struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
519 bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
525 /* stored in page->private: */
528 * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
529 * almost protect it with the page lock, except that bch2_writepage_io_done has
530 * to update the sector counts (and does so from interrupt/bottom half context).
532 struct bch_page_state {
535 unsigned sectors:PAGE_SECTOR_SHIFT + 1;
536 unsigned nr_replicas:4;
537 unsigned compressed:1;
539 /* Owns PAGE_SECTORS sized reservation: */
541 unsigned reservation_replicas:4;
543 /* Owns PAGE_SECTORS sized quota reservation: */
544 unsigned quota_reserved:1;
547 * Number of sectors on disk - for i_blocks
548 * Uncompressed size, not compressed size:
550 unsigned dirty_sectors:PAGE_SECTOR_SHIFT + 1;
557 #define page_state_cmpxchg(_ptr, _new, _expr) \
559 unsigned long _v = READ_ONCE((_ptr)->v); \
560 struct bch_page_state _old; \
563 _old.v = _new.v = _v; \
566 EBUG_ON(_new.sectors + _new.dirty_sectors > PAGE_SECTORS);\
567 } while (_old.v != _new.v && \
568 (_v = cmpxchg(&(_ptr)->v, _old.v, _new.v)) != _old.v); \
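/*
 * Example usage (cf. bch2_put_page_reservation()/bch2_set_page_dirty()
 * below) - the statement block edits @_new, and the loop retries until the
 * cmpxchg succeeds without another thread racing:
 *
 *	old = page_state_cmpxchg(page_state(page), new, {
 *		new.dirty_sectors = PAGE_SECTORS - new.sectors;
 *		new.quota_reserved = 0;
 *	});
 */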
573 static inline struct bch_page_state *page_state(struct page *page)
575 struct bch_page_state *s = (void *) &page->private;
577 BUILD_BUG_ON(sizeof(*s) > sizeof(page->private));
579 if (!PagePrivate(page))
580 SetPagePrivate(page);
585 static inline unsigned page_res_sectors(struct bch_page_state s)
588 return s.reserved ? s.reservation_replicas * PAGE_SECTORS : 0;
591 static void __bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
592 struct bch_page_state s)
594 struct disk_reservation res = { .sectors = page_res_sectors(s) };
595 struct quota_res quota_res = { .sectors = s.quota_reserved ? PAGE_SECTORS : 0 };
597 bch2_quota_reservation_put(c, inode, &quota_res);
598 bch2_disk_reservation_put(c, &res);
601 static void bch2_put_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
604 struct bch_page_state s;
606 s = page_state_cmpxchg(page_state(page), s, {
608 s.quota_reserved = 0;
611 __bch2_put_page_reservation(c, inode, s);
614 static int bch2_get_page_reservation(struct bch_fs *c, struct bch_inode_info *inode,
615 struct page *page, bool check_enospc)
617 struct bch_page_state *s = page_state(page), new, old;
618 struct disk_reservation disk_res = bch2_disk_reservation_init(c,
619 READ_ONCE(c->opts.data_replicas));
620 struct quota_res quota_res = { 0 };
624 * XXX: this could likely be quite a bit simpler, page reservations
625 * _should_ only be manipulated with the page locked:
628 old = page_state_cmpxchg(s, new, {
630 ? (new.reservation_replicas < disk_res.nr_replicas)
631 : (new.sectors < PAGE_SECTORS ||
632 new.nr_replicas < disk_res.nr_replicas ||
634 int sectors = (disk_res.nr_replicas * PAGE_SECTORS -
635 page_res_sectors(new) -
639 ret = bch2_disk_reservation_add(c, &disk_res, sectors,
641 ? BCH_DISK_RESERVATION_NOFAIL : 0);
647 new.reservation_replicas = disk_res.nr_replicas;
650 if (!new.quota_reserved &&
651 new.sectors + new.dirty_sectors < PAGE_SECTORS) {
652 ret = bch2_quota_reservation_add(c, inode, &quota_res,
653 PAGE_SECTORS - quota_res.sectors,
658 new.quota_reserved = 1;
662 quota_res.sectors -= (new.quota_reserved - old.quota_reserved) * PAGE_SECTORS;
663 disk_res.sectors -= page_res_sectors(new) - page_res_sectors(old);
665 bch2_quota_reservation_put(c, inode, &quota_res);
666 bch2_disk_reservation_put(c, &disk_res);
670 static void bch2_clear_page_bits(struct page *page)
672 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
673 struct bch_fs *c = inode->v.i_sb->s_fs_info;
674 struct bch_page_state s;
676 if (!PagePrivate(page))
679 s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
680 ClearPagePrivate(page);
683 i_sectors_acct(c, inode, NULL, -s.dirty_sectors);
685 __bch2_put_page_reservation(c, inode, s);
688 int bch2_set_page_dirty(struct page *page)
690 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
691 struct bch_fs *c = inode->v.i_sb->s_fs_info;
692 struct quota_res quota_res = { 0 };
693 struct bch_page_state old, new;
695 old = page_state_cmpxchg(page_state(page), new,
696 new.dirty_sectors = PAGE_SECTORS - new.sectors;
697 new.quota_reserved = 0;
700 quota_res.sectors += old.quota_reserved * PAGE_SECTORS;
702 if (old.dirty_sectors != new.dirty_sectors)
703 i_sectors_acct(c, inode, &quota_res,
704 new.dirty_sectors - old.dirty_sectors);
705 bch2_quota_reservation_put(c, inode, &quota_res);
707 return __set_page_dirty_nobuffers(page);
710 int bch2_page_mkwrite(struct vm_fault *vmf)
712 struct page *page = vmf->page;
713 struct file *file = vmf->vma->vm_file;
714 struct bch_inode_info *inode = file_bch_inode(file);
715 struct address_space *mapping = inode->v.i_mapping;
716 struct bch_fs *c = inode->v.i_sb->s_fs_info;
717 int ret = VM_FAULT_LOCKED;
719 sb_start_pagefault(inode->v.i_sb);
720 file_update_time(file);
723 * Not strictly necessary, but helps avoid dio writes livelocking in
724 * write_invalidate_inode_pages_range() - can drop this if/when we get
725 * a write_invalidate_inode_pages_range() that works without dropping
726 * page lock before invalidating page
728 if (current->pagecache_lock != &mapping->add_lock)
729 pagecache_add_get(&mapping->add_lock);
732 if (page->mapping != mapping ||
733 page_offset(page) > i_size_read(&inode->v)) {
735 ret = VM_FAULT_NOPAGE;
739 if (bch2_get_page_reservation(c, inode, page, true)) {
741 ret = VM_FAULT_SIGBUS;
745 if (!PageDirty(page))
746 set_page_dirty(page);
747 wait_for_stable_page(page);
749 if (current->pagecache_lock != &mapping->add_lock)
750 pagecache_add_put(&mapping->add_lock);
751 sb_end_pagefault(inode->v.i_sb);
755 void bch2_invalidatepage(struct page *page, unsigned int offset,
758 EBUG_ON(!PageLocked(page));
759 EBUG_ON(PageWriteback(page));
761 if (offset || length < PAGE_SIZE)
764 bch2_clear_page_bits(page);
767 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
769 EBUG_ON(!PageLocked(page));
770 EBUG_ON(PageWriteback(page));
775 bch2_clear_page_bits(page);
779 #ifdef CONFIG_MIGRATION
780 int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
781 struct page *page, enum migrate_mode mode)
785 ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
786 if (ret != MIGRATEPAGE_SUCCESS)
789 if (PagePrivate(page)) {
790 *page_state(newpage) = *page_state(page);
791 ClearPagePrivate(page);
794 migrate_page_copy(newpage, page);
795 return MIGRATEPAGE_SUCCESS;
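/*
 * Page state (reservations, sector counts) lives in page->private, so it is
 * transferred wholesale to the new page here before PagePrivate is cleared
 * on the old one.
 */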
799 /* readpages/writepages: */
801 static bool bio_can_add_page_contig(struct bio *bio, struct page *page)
803 sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
805 return bio->bi_vcnt < bio->bi_max_vecs &&
806 bio_end_sector(bio) == offset;
809 static void __bio_add_page(struct bio *bio, struct page *page)
811 bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
817 bio->bi_iter.bi_size += PAGE_SIZE;
820 static int bio_add_page_contig(struct bio *bio, struct page *page)
822 sector_t offset = (sector_t) page->index << PAGE_SECTOR_SHIFT;
824 EBUG_ON(!bio->bi_max_vecs);
827 bio->bi_iter.bi_sector = offset;
828 else if (!bio_can_add_page_contig(bio, page))
831 __bio_add_page(bio, page);
837 static void bch2_readpages_end_io(struct bio *bio)
842 bio_for_each_segment_all(bv, bio, i) {
843 struct page *page = bv->bv_page;
845 if (!bio->bi_status) {
846 SetPageUptodate(page);
848 ClearPageUptodate(page);
857 struct readpages_iter {
858 struct address_space *mapping;
859 struct list_head pages;
863 static inline void page_state_init_for_read(struct page *page)
865 struct bch_page_state *s = page_state(page);
872 static int readpage_add_page(struct readpages_iter *iter, struct page *page)
876 prefetchw(&page->flags);
877 page_state_init_for_read(page);
879 ret = add_to_page_cache_lru(page, iter->mapping,
880 page->index, GFP_NOFS);
885 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
887 while (iter->nr_pages) {
889 list_last_entry(&iter->pages, struct page, lru);
891 prefetchw(&page->flags);
892 list_del(&page->lru);
895 if (!readpage_add_page(iter, page))
902 #define for_each_readpage_page(_iter, _page) \
904 ((_page) = __readpage_next_page(&(_iter)));) \
906 static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
908 struct bvec_iter iter;
910 bool compressed = bch2_extent_is_compressed(k);
911 unsigned nr_ptrs = bch2_extent_nr_dirty_ptrs(k);
913 bio_for_each_segment(bv, bio, iter) {
914 struct bch_page_state *s = page_state(bv.bv_page);
916 /* sectors in @k from the start of this page: */
917 unsigned k_sectors = k.k->size - (iter.bi_sector - k.k->p.offset);
919 unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
921 s->nr_replicas = !s->sectors
923 : min_t(unsigned, s->nr_replicas, nr_ptrs);
925 BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
926 s->sectors += page_sectors;
928 s->compressed |= compressed;
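/*
 * Note that a page ends up with the _minimum_ nr_replicas of any extent
 * backing it, and counts as compressed if _any_ backing extent is - both
 * conservative choices for the reservation logic in
 * bch2_get_page_reservation().
 */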
932 static void readpage_bio_extend(struct readpages_iter *iter,
933 struct bio *bio, u64 offset,
940 while (bio_end_sector(bio) < offset &&
941 bio->bi_vcnt < bio->bi_max_vecs) {
942 page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
944 if (iter->nr_pages) {
945 page = list_last_entry(&iter->pages, struct page, lru);
946 if (page->index != page_offset)
949 list_del(&page->lru);
951 } else if (get_more) {
953 page = radix_tree_lookup(&iter->mapping->page_tree, page_offset);
956 if (page && !radix_tree_exceptional_entry(page))
959 page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
963 page->index = page_offset;
964 ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
969 ret = readpage_add_page(iter, page);
973 __bio_add_page(bio, page);
977 SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page);
980 static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
981 struct bch_read_bio *rbio, u64 inum,
982 struct readpages_iter *readpages_iter)
984 struct bio *bio = &rbio->bio;
985 int flags = BCH_READ_RETRY_IF_STALE|
986 BCH_READ_MAY_PROMOTE;
989 struct extent_pick_ptr pick;
995 bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
997 k = bch2_btree_iter_peek_slot(iter);
1001 int ret = bch2_btree_iter_unlock(iter);
1003 bcache_io_error(c, bio, "btree IO error %i", ret);
1008 bkey_reassemble(&tmp.k, k);
1009 bch2_btree_iter_unlock(iter);
1010 k = bkey_i_to_s_c(&tmp.k);
1012 bch2_extent_pick_ptr(c, k, NULL, &pick);
1013 if (IS_ERR(pick.ca)) {
1014 bcache_io_error(c, bio, "no device to read from");
1020 readpage_bio_extend(readpages_iter,
1023 (pick.crc.csum_type ||
1024 pick.crc.compression_type));
1026 bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
1027 bio->bi_iter.bi_sector) << 9;
1028 is_last = bytes == bio->bi_iter.bi_size;
1029 swap(bio->bi_iter.bi_size, bytes);
1031 if (bkey_extent_is_allocation(k.k))
1032 bch2_add_page_sectors(bio, k);
1036 bio_inc_remaining(&rbio->bio);
1037 flags |= BCH_READ_MUST_CLONE;
1038 trace_read_split(&rbio->bio);
1041 bch2_read_extent(c, rbio, bkey_s_c_to_extent(k),
1053 swap(bio->bi_iter.bi_size, bytes);
1054 bio_advance(bio, bytes);
1058 int bch2_readpages(struct file *file, struct address_space *mapping,
1059 struct list_head *pages, unsigned nr_pages)
1061 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1062 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1063 struct bch_io_opts opts = io_opts(c, inode);
1064 struct btree_iter iter;
1066 struct readpages_iter readpages_iter = {
1067 .mapping = mapping, .nr_pages = nr_pages
1070 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1073 INIT_LIST_HEAD(&readpages_iter.pages);
1074 list_add(&readpages_iter.pages, pages);
1075 list_del_init(pages);
1077 if (current->pagecache_lock != &mapping->add_lock)
1078 pagecache_add_get(&mapping->add_lock);
1080 while ((page = readpage_iter_next(&readpages_iter))) {
1081 unsigned n = max_t(unsigned,
1082 min_t(unsigned, readpages_iter.nr_pages + 1,
1084 c->sb.encoded_extent_max >> PAGE_SECTOR_SHIFT);
1086 struct bch_read_bio *rbio =
1087 rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
1090 rbio->bio.bi_end_io = bch2_readpages_end_io;
1091 bio_add_page_contig(&rbio->bio, page);
1092 bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter);
1095 if (current->pagecache_lock != &mapping->add_lock)
1096 pagecache_add_put(&mapping->add_lock);
1101 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
1102 u64 inum, struct page *page)
1104 struct btree_iter iter;
1106 page_state_init_for_read(page);
1108 bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
1109 bio_add_page_contig(&rbio->bio, page);
1111 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
1113 bchfs_read(c, &iter, rbio, inum, NULL);
1116 int bch2_readpage(struct file *file, struct page *page)
1118 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1119 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1120 struct bch_io_opts opts = io_opts(c, inode);
1121 struct bch_read_bio *rbio;
1123 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
1124 rbio->bio.bi_end_io = bch2_readpages_end_io;
1126 __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1130 static void bch2_read_single_page_end_io(struct bio *bio)
1132 complete(bio->bi_private);
1135 static int bch2_read_single_page(struct page *page,
1136 struct address_space *mapping)
1138 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1139 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1140 struct bch_read_bio *rbio;
1142 DECLARE_COMPLETION_ONSTACK(done);
1144 rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
1146 rbio->bio.bi_private = &done;
1147 rbio->bio.bi_end_io = bch2_read_single_page_end_io;
1149 __bchfs_readpage(c, rbio, inode->v.i_ino, page);
1150 wait_for_completion(&done);
1152 ret = blk_status_to_errno(rbio->bio.bi_status);
1153 bio_put(&rbio->bio);
1158 SetPageUptodate(page);
1164 struct bch_writepage_state {
1165 struct bch_writepage_io *io;
1166 struct bch_io_opts opts;
1169 static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
1170 struct bch_inode_info *inode)
1172 return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
1175 static void bch2_writepage_io_free(struct closure *cl)
1177 struct bch_writepage_io *io = container_of(cl,
1178 struct bch_writepage_io, cl);
1180 bio_put(&io->op.op.wbio.bio);
1183 static void bch2_writepage_io_done(struct closure *cl)
1185 struct bch_writepage_io *io = container_of(cl,
1186 struct bch_writepage_io, cl);
1187 struct bch_fs *c = io->op.op.c;
1188 struct bio *bio = &io->op.op.wbio.bio;
1189 struct bio_vec *bvec;
1192 atomic_sub(bio->bi_vcnt, &c->writeback_pages);
1193 wake_up(&c->writeback_wait);
1195 if (io->op.op.error) {
1196 bio_for_each_segment_all(bvec, bio, i)
1197 SetPageError(bvec->bv_page);
1198 set_bit(AS_EIO, &io->op.inode->v.i_mapping->flags);
1202 * racing with fallocate can cause us to add fewer sectors than
1203 * expected - but we shouldn't add more sectors than expected:
1205 BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
1208 * (error (due to going RO) halfway through a page can screw that up
1211 BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
1215 * PageWriteback is effectively our ref on the inode - fixup i_blocks
1216 * before calling end_page_writeback:
1218 if (io->op.sectors_added != io->new_sectors)
1219 i_sectors_acct(c, io->op.inode, NULL,
1220 io->op.sectors_added - (s64) io->new_sectors);
1222 bio_for_each_segment_all(bvec, bio, i)
1223 end_page_writeback(bvec->bv_page);
1225 closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
1228 static void bch2_writepage_do_io(struct bch_writepage_state *w)
1230 struct bch_writepage_io *io = w->io;
1231 struct bio *bio = &io->op.op.wbio.bio;
1234 atomic_add(bio->bi_vcnt, &io->op.op.c->writeback_pages);
1236 closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
1237 continue_at(&io->cl, bch2_writepage_io_done, NULL);
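/*
 * bch2_write runs as a child of io->cl here, so once the write (and all of
 * its split bios) completes, bch2_writepage_io_done() runs - that's where
 * writeback bits are cleared and i_blocks fixed up.
 */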
1241 * Get a bch_writepage_io and add @page to it - appending to an existing one if
1242 * possible, else allocating a new one:
1244 static void bch2_writepage_io_alloc(struct bch_fs *c,
1245 struct bch_writepage_state *w,
1246 struct bch_inode_info *inode,
1248 unsigned nr_replicas)
1250 struct bch_write_op *op;
1251 u64 offset = (u64) page->index << PAGE_SECTOR_SHIFT;
1253 w->io = container_of(bio_alloc_bioset(GFP_NOFS,
1255 &c->writepage_bioset),
1256 struct bch_writepage_io, op.op.wbio.bio);
1258 closure_init(&w->io->cl, NULL);
1259 w->io->new_sectors = 0;
1260 bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
1262 op->nr_replicas = nr_replicas;
1263 op->res.nr_replicas = nr_replicas;
1264 op->write_point = writepoint_hashed(inode->ei_last_dirtied);
1265 op->pos = POS(inode->v.i_ino, offset);
1266 op->wbio.bio.bi_iter.bi_sector = offset;
1269 static int __bch2_writepage(struct bch_fs *c, struct page *page,
1270 struct writeback_control *wbc,
1271 struct bch_writepage_state *w)
1273 struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
1274 struct bch_page_state new, old;
1276 loff_t i_size = i_size_read(&inode->v);
1277 pgoff_t end_index = i_size >> PAGE_SHIFT;
1279 EBUG_ON(!PageUptodate(page));
1281 /* Is the page fully inside i_size? */
1282 if (page->index < end_index)
1285 /* Is the page fully outside i_size? (truncate in progress) */
1286 offset = i_size & (PAGE_SIZE - 1);
1287 if (page->index > end_index || !offset) {
1293 * The page straddles i_size. It must be zeroed out on each and every
1294 * writepage invocation because it may be mmapped. "A file is mapped
1295 * in multiples of the page size. For a file that is not a multiple of
1296 * the page size, the remaining memory is zeroed when mapped, and
1297 * writes to that region are not written out to the file."
1299 zero_user_segment(page, offset, PAGE_SIZE);
1301 /* Before unlocking the page, transfer reservation to w->io: */
1302 old = page_state_cmpxchg(page_state(page), new, {
1303 EBUG_ON(!new.reserved &&
1304 (new.sectors != PAGE_SECTORS ||
1308 new.nr_replicas = new.reservation_replicas;
1311 new.compressed |= w->opts.compression != 0;
1313 new.sectors += new.dirty_sectors;
1314 new.dirty_sectors = 0;
1318 (w->io->op.op.res.nr_replicas != new.nr_replicas ||
1319 !bio_can_add_page_contig(&w->io->op.op.wbio.bio, page)))
1320 bch2_writepage_do_io(w);
1323 bch2_writepage_io_alloc(c, w, inode, page, new.nr_replicas);
1325 w->io->new_sectors += new.sectors - old.sectors;
1327 BUG_ON(inode != w->io->op.inode);
1328 BUG_ON(bio_add_page_contig(&w->io->op.op.wbio.bio, page));
1331 w->io->op.op.res.sectors += old.reservation_replicas * PAGE_SECTORS;
1333 /* while page is locked: */
1334 w->io->op.new_i_size = i_size;
1336 if (wbc->sync_mode == WB_SYNC_ALL)
1337 w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
1339 BUG_ON(PageWriteback(page));
1340 set_page_writeback(page);
1346 int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
1348 struct bch_fs *c = mapping->host->i_sb->s_fs_info;
1349 struct bch_writepage_state w =
1350 bch_writepage_state_init(c, to_bch_ei(mapping->host));
1351 struct pagecache_iter iter;
1355 pgoff_t uninitialized_var(writeback_index);
1357 pgoff_t end; /* Inclusive */
1360 int range_whole = 0;
1363 if (wbc->range_cyclic) {
1364 writeback_index = mapping->writeback_index; /* prev offset */
1365 index = writeback_index;
1372 index = wbc->range_start >> PAGE_SHIFT;
1373 end = wbc->range_end >> PAGE_SHIFT;
1374 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1376 cycled = 1; /* ignore range_cyclic tests */
1378 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1379 tag = PAGECACHE_TAG_TOWRITE;
1381 tag = PAGECACHE_TAG_DIRTY;
1383 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1384 tag_pages_for_writeback(mapping, index, end);
1388 for_each_pagecache_tag(&iter, mapping, tag, index, end, page) {
1389 done_index = page->index;
1392 !bio_can_add_page_contig(&w.io->op.op.wbio.bio, page))
1393 bch2_writepage_do_io(&w);
1396 atomic_read(&c->writeback_pages) >=
1397 c->writeback_pages_max) {
1398 /* don't sleep with pages pinned: */
1399 pagecache_iter_release(&iter);
1401 __wait_event(c->writeback_wait,
1402 atomic_read(&c->writeback_pages) <
1403 c->writeback_pages_max);
1410 * Page truncated or invalidated. We can freely skip it
1411 * then, even for data integrity operations: the page
1412 * has disappeared concurrently, so there could be no
1413 * real expectation of this data integrity operation
1414 * even if there is now a new, dirty page at the same
1415 * pagecache address.
1417 if (unlikely(page->mapping != mapping)) {
1423 if (!PageDirty(page)) {
1424 /* someone wrote it for us */
1425 goto continue_unlock;
1428 if (PageWriteback(page)) {
1429 if (wbc->sync_mode != WB_SYNC_NONE)
1430 wait_on_page_writeback(page);
1432 goto continue_unlock;
1435 BUG_ON(PageWriteback(page));
1436 if (!clear_page_dirty_for_io(page))
1437 goto continue_unlock;
1439 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
1440 ret = __bch2_writepage(c, page, wbc, &w);
1441 if (unlikely(ret)) {
1442 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1447 * done_index is set past this page,
1448 * so media errors will not choke
1449 * background writeout for the entire
1450 * file. This has consequences for
1451 * range_cyclic semantics (ie. it may
1452 * not be suitable for data integrity
1455 done_index = page->index + 1;
1462 * We stop writing back only if we are not doing
1463 * integrity sync. In case of integrity sync we have to
1464 * keep going until we have written all the pages
1465 * we tagged for writeback prior to entering this loop.
1467 if (--wbc->nr_to_write <= 0 &&
1468 wbc->sync_mode == WB_SYNC_NONE) {
1473 pagecache_iter_release(&iter);
1476 bch2_writepage_do_io(&w);
1478 if (!cycled && !done) {
1481 * We hit the last page and there is more work to be done: wrap
1482 * back to the start of the file
1486 end = writeback_index - 1;
1489 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1490 mapping->writeback_index = done_index;
1495 int bch2_writepage(struct page *page, struct writeback_control *wbc)
1497 struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
1498 struct bch_writepage_state w =
1499 bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
1502 ret = __bch2_writepage(c, page, wbc, &w);
1504 bch2_writepage_do_io(&w);
1509 /* buffered writes: */
1511 int bch2_write_begin(struct file *file, struct address_space *mapping,
1512 loff_t pos, unsigned len, unsigned flags,
1513 struct page **pagep, void **fsdata)
1515 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1516 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1517 pgoff_t index = pos >> PAGE_SHIFT;
1518 unsigned offset = pos & (PAGE_SIZE - 1);
1522 BUG_ON(inode_unhashed(&inode->v));
1524 /* Not strictly necessary - same reason as mkwrite(): */
1525 pagecache_add_get(&mapping->add_lock);
1527 page = grab_cache_page_write_begin(mapping, index, flags);
1531 if (PageUptodate(page))
1534 /* If we're writing entire page, don't need to read it in first: */
1535 if (len == PAGE_SIZE)
1538 if (!offset && pos + len >= inode->v.i_size) {
1539 zero_user_segment(page, len, PAGE_SIZE);
1540 flush_dcache_page(page);
1544 if (index > inode->v.i_size >> PAGE_SHIFT) {
1545 zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
1546 flush_dcache_page(page);
1550 ret = bch2_read_single_page(page, mapping);
1554 ret = bch2_get_page_reservation(c, inode, page, true);
1556 if (!PageUptodate(page)) {
1558 * If the page hasn't been read in, we won't know if we
1559 * actually need a reservation - we don't actually need
1560 * to read here, we just need to check if the page is
1561 * fully backed by uncompressed data:
1576 pagecache_add_put(&mapping->add_lock);
1580 int bch2_write_end(struct file *file, struct address_space *mapping,
1581 loff_t pos, unsigned len, unsigned copied,
1582 struct page *page, void *fsdata)
1584 struct bch_inode_info *inode = to_bch_ei(mapping->host);
1585 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1587 lockdep_assert_held(&inode->v.i_rwsem);
1589 if (unlikely(copied < len && !PageUptodate(page))) {
1591 * The page needs to be read in, but that would destroy
1592 * our partial write - simplest thing is to just force
1593 * userspace to redo the write:
1595 zero_user(page, 0, PAGE_SIZE);
1596 flush_dcache_page(page);
1600 if (pos + copied > inode->v.i_size)
1601 i_size_write(&inode->v, pos + copied);
1604 if (!PageUptodate(page))
1605 SetPageUptodate(page);
1606 if (!PageDirty(page))
1607 set_page_dirty(page);
1609 inode->ei_last_dirtied = (unsigned long) current;
1611 bch2_put_page_reservation(c, inode, page);
1616 pagecache_add_put(&mapping->add_lock);
1621 /* O_DIRECT reads */
1623 static void bch2_dio_read_complete(struct closure *cl)
1625 struct dio_read *dio = container_of(cl, struct dio_read, cl);
1627 dio->req->ki_complete(dio->req, dio->ret, 0);
1628 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1631 static void bch2_direct_IO_read_endio(struct bio *bio)
1633 struct dio_read *dio = bio->bi_private;
1636 dio->ret = blk_status_to_errno(bio->bi_status);
1638 closure_put(&dio->cl);
1641 static void bch2_direct_IO_read_split_endio(struct bio *bio)
1643 bch2_direct_IO_read_endio(bio);
1644 bio_check_pages_dirty(bio); /* transfers ownership */
1647 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
1649 struct file *file = req->ki_filp;
1650 struct bch_inode_info *inode = file_bch_inode(file);
1651 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1652 struct bch_io_opts opts = io_opts(c, inode);
1653 struct dio_read *dio;
1655 loff_t offset = req->ki_pos;
1656 bool sync = is_sync_kiocb(req);
1659 if ((offset|iter->count) & (block_bytes(c) - 1))
1662 ret = min_t(loff_t, iter->count,
1663 max_t(loff_t, 0, i_size_read(&inode->v) - offset));
1664 iov_iter_truncate(iter, round_up(ret, block_bytes(c)));
1669 bio = bio_alloc_bioset(GFP_KERNEL,
1670 iov_iter_npages(iter, BIO_MAX_PAGES),
1671 &c->dio_read_bioset);
1673 bio->bi_end_io = bch2_direct_IO_read_endio;
1675 dio = container_of(bio, struct dio_read, rbio.bio);
1676 closure_init(&dio->cl, NULL);
1679 * this is a _really_ horrible hack just to avoid an atomic sub at the
1683 set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
1684 atomic_set(&dio->cl.remaining,
1685 CLOSURE_REMAINING_INITIALIZER -
1687 CLOSURE_DESTRUCTOR);
1689 atomic_set(&dio->cl.remaining,
1690 CLOSURE_REMAINING_INITIALIZER + 1);
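/*
 * i.e. in the async case the CLOSURE_DESTRUCTOR bit makes
 * bch2_dio_read_complete() run as soon as the last split bio drops its ref;
 * in the sync case we instead hold one extra ref so we can closure_sync()
 * below.
 */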
1697 while (iter->count) {
1698 bio = bio_alloc_bioset(GFP_KERNEL,
1699 iov_iter_npages(iter, BIO_MAX_PAGES),
1701 bio->bi_end_io = bch2_direct_IO_read_split_endio;
1703 bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
1704 bio->bi_iter.bi_sector = offset >> 9;
1705 bio->bi_private = dio;
1707 ret = bio_iov_iter_get_pages(bio, iter);
1709 /* XXX: fault inject this path */
1710 bio->bi_status = BLK_STS_RESOURCE;
1715 offset += bio->bi_iter.bi_size;
1716 bio_set_pages_dirty(bio);
1719 closure_get(&dio->cl);
1721 bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
1725 closure_sync(&dio->cl);
1726 closure_debug_destroy(&dio->cl);
1728 bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
1731 return -EIOCBQUEUED;
1735 /* O_DIRECT writes */
1737 static void bch2_dio_write_loop_async(struct closure *);
1739 static long bch2_dio_write_loop(struct dio_write *dio)
1741 struct kiocb *req = dio->req;
1742 struct address_space *mapping = req->ki_filp->f_mapping;
1743 struct bch_inode_info *inode = dio->iop.inode;
1744 struct bio *bio = &dio->iop.op.wbio.bio;
1753 inode_dio_begin(&inode->v);
1754 __pagecache_block_get(&mapping->add_lock);
1756 /* Write and invalidate pagecache range that we're writing to: */
1757 ret = write_invalidate_inode_pages_range(mapping, req->ki_pos,
1758 req->ki_pos + iov_iter_count(&dio->iter) - 1);
1763 BUG_ON(current->pagecache_lock);
1764 current->pagecache_lock = &mapping->add_lock;
1765 if (current != dio->task)
1766 use_mm(dio->task->mm);
1768 ret = bio_iov_iter_get_pages(bio, &dio->iter);
1770 if (current != dio->task)
1771 unuse_mm(dio->task->mm);
1772 current->pagecache_lock = NULL;
1774 if (unlikely(ret < 0))
1777 dio->iop.op.pos = POS(inode->v.i_ino,
1778 (req->ki_pos >> 9) + dio->iop.op.written);
1780 task_io_account_write(bio->bi_iter.bi_size);
1782 closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
1784 if (!dio->sync && !dio->loop && dio->iter.count) {
1785 struct iovec *iov = dio->inline_vecs;
1787 if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
1788 iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
1790 if (unlikely(!iov)) {
1791 dio->iop.op.error = -ENOMEM;
1795 dio->free_iov = true;
1798 memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
1799 dio->iter.iov = iov;
1805 continue_at_noreturn(&dio->cl,
1806 bch2_dio_write_loop_async, NULL);
1807 return -EIOCBQUEUED;
1810 closure_sync(&dio->cl);
1812 bio_for_each_segment_all(bv, bio, i)
1813 put_page(bv->bv_page);
1814 if (!dio->iter.count || dio->iop.op.error)
1819 ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
1821 __pagecache_block_put(&mapping->add_lock);
1822 bch2_disk_reservation_put(dio->iop.op.c, &dio->iop.op.res);
1823 bch2_quota_reservation_put(dio->iop.op.c, inode, &dio->quota_res);
1826 kfree(dio->iter.iov);
1828 closure_debug_destroy(&dio->cl);
1833 /* inode->i_dio_count is our ref on inode and thus bch_fs */
1834 inode_dio_end(&inode->v);
1837 req->ki_complete(req, ret, 0);
1843 static void bch2_dio_write_loop_async(struct closure *cl)
1845 struct dio_write *dio = container_of(cl, struct dio_write, cl);
1847 bch2_dio_write_loop(dio);
1850 static int bch2_direct_IO_write(struct kiocb *req,
1851 struct iov_iter *iter,
1854 struct file *file = req->ki_filp;
1855 struct bch_inode_info *inode = file_bch_inode(file);
1856 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1857 struct dio_write *dio;
1859 loff_t offset = req->ki_pos;
1862 lockdep_assert_held(&inode->v.i_rwsem);
1864 if (unlikely(!iter->count))
1867 if (unlikely((offset|iter->count) & (block_bytes(c) - 1)))
1870 bio = bio_alloc_bioset(GFP_KERNEL,
1871 iov_iter_npages(iter, BIO_MAX_PAGES),
1872 &c->dio_write_bioset);
1873 dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
1874 closure_init(&dio->cl, NULL);
1876 dio->task = current;
1878 dio->sync = is_sync_kiocb(req) ||
1879 offset + iter->count > inode->v.i_size;
1880 dio->free_iov = false;
1881 dio->quota_res.sectors = 0;
1883 bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
1884 dio->iop.op.write_point = writepoint_hashed((unsigned long) dio->task);
1885 dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
1887 if ((req->ki_flags & IOCB_DSYNC) &&
1888 !c->opts.journal_flush_disabled)
1889 dio->iop.op.flags |= BCH_WRITE_FLUSH;
1891 ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
1892 iter->count >> 9, true);
1896 ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
1897 c->opts.data_replicas, 0);
1898 if (unlikely(ret)) {
1899 if (bch2_check_range_allocated(c, POS(inode->v.i_ino,
1904 dio->iop.unalloc = true;
1907 dio->iop.op.nr_replicas = dio->iop.op.res.nr_replicas;
1909 return bch2_dio_write_loop(dio);
1911 bch2_disk_reservation_put(c, &dio->iop.op.res);
1912 bch2_quota_reservation_put(c, inode, &dio->quota_res);
1913 closure_debug_destroy(&dio->cl);
1918 ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
1920 struct blk_plug plug;
1923 blk_start_plug(&plug);
1924 ret = iov_iter_rw(iter) == WRITE
1925 ? bch2_direct_IO_write(req, iter, false)
1926 : bch2_direct_IO_read(req, iter);
1927 blk_finish_plug(&plug);
1933 bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
1935 return bch2_direct_IO_write(iocb, iter, true);
1938 static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1940 struct file *file = iocb->ki_filp;
1941 struct bch_inode_info *inode = file_bch_inode(file);
1944 /* We can write back this queue in page reclaim */
1945 current->backing_dev_info = inode_to_bdi(&inode->v);
1946 ret = file_remove_privs(file);
1950 ret = file_update_time(file);
1954 ret = iocb->ki_flags & IOCB_DIRECT
1955 ? bch2_direct_write(iocb, from)
1956 : generic_perform_write(file, from, iocb->ki_pos);
1958 if (likely(ret > 0))
1959 iocb->ki_pos += ret;
1961 current->backing_dev_info = NULL;
1965 ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
1967 struct bch_inode_info *inode = file_bch_inode(iocb->ki_filp);
1968 bool direct = iocb->ki_flags & IOCB_DIRECT;
1971 inode_lock(&inode->v);
1972 ret = generic_write_checks(iocb, from);
1974 ret = __bch2_write_iter(iocb, from);
1975 inode_unlock(&inode->v);
1977 if (ret > 0 && !direct)
1978 ret = generic_write_sync(iocb, ret);
1985 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1987 struct bch_inode_info *inode = file_bch_inode(file);
1988 struct bch_fs *c = inode->v.i_sb->s_fs_info;
1991 ret = filemap_write_and_wait_range(inode->v.i_mapping, start, end);
1995 if (c->opts.journal_flush_disabled)
1998 return bch2_journal_flush_seq(&c->journal, inode->ei_journal_seq);
2003 static int __bch2_truncate_page(struct bch_inode_info *inode,
2004 pgoff_t index, loff_t start, loff_t end)
2006 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2007 struct address_space *mapping = inode->v.i_mapping;
2008 unsigned start_offset = start & (PAGE_SIZE - 1);
2009 unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
2013 /* Page boundary? Nothing to do */
2014 if (!((index == start >> PAGE_SHIFT && start_offset) ||
2015 (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
2019 if (index << PAGE_SHIFT >= inode->v.i_size)
2022 page = find_lock_page(mapping, index);
2024 struct btree_iter iter;
2025 struct bkey_s_c k = bkey_s_c_null;
2028 * XXX: we're doing two index lookups when we end up reading the
2031 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2033 index << PAGE_SECTOR_SHIFT), 0, k) {
2034 if (bkey_cmp(bkey_start_pos(k.k),
2036 (index + 1) << PAGE_SECTOR_SHIFT)) >= 0)
2039 if (k.k->type != KEY_TYPE_DISCARD &&
2040 k.k->type != BCH_RESERVATION) {
2041 bch2_btree_iter_unlock(&iter);
2045 bch2_btree_iter_unlock(&iter);
2048 page = find_or_create_page(mapping, index, GFP_KERNEL);
2049 if (unlikely(!page)) {
2055 if (!PageUptodate(page)) {
2056 ret = bch2_read_single_page(page, mapping);
2062 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
2064 * XXX: because we aren't currently tracking whether the page has actual
2065 * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
2067 ret = bch2_get_page_reservation(c, inode, page, false);
2070 if (index == start >> PAGE_SHIFT &&
2071 index == end >> PAGE_SHIFT)
2072 zero_user_segment(page, start_offset, end_offset);
2073 else if (index == start >> PAGE_SHIFT)
2074 zero_user_segment(page, start_offset, PAGE_SIZE);
2075 else if (index == end >> PAGE_SHIFT)
2076 zero_user_segment(page, 0, end_offset);
2078 if (!PageDirty(page))
2079 set_page_dirty(page);
2087 static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
2089 return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
2090 from, from + PAGE_SIZE);
2093 int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
2095 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2096 struct address_space *mapping = inode->v.i_mapping;
2097 bool shrink = iattr->ia_size <= inode->v.i_size;
2098 struct i_sectors_hook i_sectors_hook =
2099 i_sectors_hook_init(inode, BCH_INODE_I_SIZE_DIRTY);
2102 inode_dio_wait(&inode->v);
2103 pagecache_block_get(&mapping->add_lock);
2105 truncate_setsize(&inode->v, iattr->ia_size);
2107 /* sync appends.. */
2108 /* XXX what protects inode->i_size? */
2109 if (iattr->ia_size > inode->ei_inode.bi_size)
2110 ret = filemap_write_and_wait_range(mapping,
2111 inode->ei_inode.bi_size, S64_MAX);
2113 goto err_put_pagecache;
2115 i_sectors_hook.new_i_size = iattr->ia_size;
2117 ret = i_sectors_dirty_start(c, &i_sectors_hook);
2122 * There might be persistent reservations (from fallocate())
2123 * above i_size, which bch2_inode_truncate() will discard - we're
2124 * only supposed to discard them if we're doing a real truncate
2125 * here (new i_size < current i_size):
2128 ret = bch2_truncate_page(inode, iattr->ia_size);
2132 ret = bch2_inode_truncate(c, inode->v.i_ino,
2133 round_up(iattr->ia_size, PAGE_SIZE) >> 9,
2134 &i_sectors_hook.hook,
2135 &inode->ei_journal_seq);
2140 setattr_copy(&inode->v, iattr);
2141 inode->v.i_mtime = inode->v.i_ctime = current_time(&inode->v);
2144 * On error - in particular, bch2_truncate_page() error - don't clear
2145 * I_SIZE_DIRTY, as we've left data above i_size!
2148 i_sectors_hook.flags &= ~BCH_INODE_I_SIZE_DIRTY;
2150 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2152 pagecache_block_put(&mapping->add_lock);
2158 static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
2160 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2161 struct address_space *mapping = inode->v.i_mapping;
2162 u64 ino = inode->v.i_ino;
2163 u64 discard_start = round_up(offset, PAGE_SIZE) >> 9;
2164 u64 discard_end = round_down(offset + len, PAGE_SIZE) >> 9;
2167 inode_lock(&inode->v);
2168 inode_dio_wait(&inode->v);
2169 pagecache_block_get(&mapping->add_lock);
2171 ret = __bch2_truncate_page(inode,
2172 offset >> PAGE_SHIFT,
2173 offset, offset + len);
2177 if (offset >> PAGE_SHIFT !=
2178 (offset + len) >> PAGE_SHIFT) {
2179 ret = __bch2_truncate_page(inode,
2180 (offset + len) >> PAGE_SHIFT,
2181 offset, offset + len);
2186 truncate_pagecache_range(&inode->v, offset, offset + len - 1);
2188 if (discard_start < discard_end) {
2190 * We need to pass in a disk reservation here because we might
2191 * be splitting a compressed extent into two. This isn't a
2192 * problem with truncate because truncate will never split an
2193 * extent, only truncate it...
2195 struct disk_reservation disk_res =
2196 bch2_disk_reservation_init(c, 0);
2197 struct i_sectors_hook i_sectors_hook =
2198 i_sectors_hook_init(inode, 0);
2201 ret = i_sectors_dirty_start(c, &i_sectors_hook);
2205 ret = bch2_btree_delete_range(c,
2207 POS(ino, discard_start),
2208 POS(ino, discard_end),
2211 &i_sectors_hook.hook,
2212 &inode->ei_journal_seq);
2214 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2217 pagecache_block_put(&mapping->add_lock);
2218 inode_unlock(&inode->v);
2223 static long bch2_fcollapse(struct bch_inode_info *inode,
2224 loff_t offset, loff_t len)
2226 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2227 struct address_space *mapping = inode->v.i_mapping;
2228 struct btree_iter src;
2229 struct btree_iter dst;
2230 BKEY_PADDED(k) copy;
2232 struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
2236 if ((offset | len) & (PAGE_SIZE - 1))
2239 bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
2240 POS(inode->v.i_ino, offset >> 9),
2241 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2242 /* position will be set from dst iter's position: */
2243 bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN,
2245 bch2_btree_iter_link(&src, &dst);
2248 * We need i_mutex to keep the page cache consistent with the extents
2249 * btree, and the btree consistent with i_size - we don't need outside
2250 * locking for the extents btree itself, because we're using linked
2253 inode_lock(&inode->v);
2254 inode_dio_wait(&inode->v);
2255 pagecache_block_get(&mapping->add_lock);
2258 if (offset + len >= inode->v.i_size)
2261 if (inode->v.i_size < len)
2264 new_size = inode->v.i_size - len;
2266 ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
2270 ret = i_sectors_dirty_start(c, &i_sectors_hook);
2274 while (bkey_cmp(dst.pos,
2276 round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
2277 struct disk_reservation disk_res;
2279 bch2_btree_iter_set_pos(&src,
2280 POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
2282 k = bch2_btree_iter_peek_slot(&src);
2283 if ((ret = btree_iter_err(k)))
2284 goto btree_iter_err;
2286 bkey_reassemble(&copy.k, k);
2288 if (bkey_deleted(©.k.k))
2289 copy.k.k.type = KEY_TYPE_DISCARD;
2291 bch2_cut_front(src.pos, &copy.k);
2292 copy.k.k.p.offset -= len >> 9;
2294 BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));
2296 ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
2297 bch2_extent_nr_dirty_ptrs(bkey_i_to_s_c(&copy.k)),
2298 BCH_DISK_RESERVATION_NOFAIL);
2301 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2302 &inode->ei_journal_seq,
2303 BTREE_INSERT_ATOMIC|
2304 BTREE_INSERT_NOFAIL,
2305 BTREE_INSERT_ENTRY(&dst, &copy.k));
2306 bch2_disk_reservation_put(c, &disk_res);
2311 goto err_put_sectors_dirty;
2313 * XXX: if we error here we've left data with multiple
2314 * pointers... which isn't a _super_ serious problem...
2317 bch2_btree_iter_cond_resched(&src);
2320 bch2_btree_iter_unlock(&src);
2321 bch2_btree_iter_unlock(&dst);
2323 ret = bch2_inode_truncate(c, inode->v.i_ino,
2324 round_up(new_size, PAGE_SIZE) >> 9,
2325 &i_sectors_hook.hook,
2326 &inode->ei_journal_seq);
2328 goto err_put_sectors_dirty;
2330 i_size_write(&inode->v, new_size);
2331 i_sectors_hook.new_i_size = new_size;
2332 err_put_sectors_dirty:
2333 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2335 pagecache_block_put(&mapping->add_lock);
2336 inode_unlock(&inode->v);
2338 bch2_btree_iter_unlock(&src);
2339 bch2_btree_iter_unlock(&dst);
2343 static long bch2_fallocate(struct bch_inode_info *inode, int mode,
2344 loff_t offset, loff_t len)
2346 struct address_space *mapping = inode->v.i_mapping;
2347 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2348 struct i_sectors_hook i_sectors_hook = i_sectors_hook_init(inode, 0);
2349 struct btree_iter iter;
2350 struct bpos end_pos;
2351 loff_t block_start, block_end;
2352 loff_t end = offset + len;
2354 unsigned replicas = READ_ONCE(c->opts.data_replicas);
2357 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
2358 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2360 inode_lock(&inode->v);
2361 inode_dio_wait(&inode->v);
2362 pagecache_block_get(&mapping->add_lock);
2364 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2365 ret = inode_newsize_ok(&inode->v, end);
2370 if (mode & FALLOC_FL_ZERO_RANGE) {
2371 ret = __bch2_truncate_page(inode,
2372 offset >> PAGE_SHIFT,
2376 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2377 ret = __bch2_truncate_page(inode,
2384 truncate_pagecache_range(&inode->v, offset, end - 1);
2386 block_start = round_up(offset, PAGE_SIZE);
2387 block_end = round_down(end, PAGE_SIZE);
2389 block_start = round_down(offset, PAGE_SIZE);
2390 block_end = round_up(end, PAGE_SIZE);
2393 bch2_btree_iter_set_pos(&iter, POS(inode->v.i_ino, block_start >> 9));
2394 end_pos = POS(inode->v.i_ino, block_end >> 9);
2396 ret = i_sectors_dirty_start(c, &i_sectors_hook);
2400 while (bkey_cmp(iter.pos, end_pos) < 0) {
2401 struct disk_reservation disk_res = { 0 };
2402 struct bkey_i_reservation reservation;
2405 k = bch2_btree_iter_peek_slot(&iter);
2406 if ((ret = btree_iter_err(k)))
2407 goto btree_iter_err;
2409 /* already reserved */
2410 if (k.k->type == BCH_RESERVATION &&
2411 bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2412 bch2_btree_iter_next_slot(&iter);
2416 if (bkey_extent_is_data(k.k)) {
2417 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
2418 bch2_btree_iter_next_slot(&iter);
2423 bkey_reservation_init(&reservation.k_i);
2424 reservation.k.type = BCH_RESERVATION;
2425 reservation.k.p = k.k->p;
2426 reservation.k.size = k.k->size;
2428 bch2_cut_front(iter.pos, &reservation.k_i);
2429 bch2_cut_back(end_pos, &reservation.k);
2431 sectors = reservation.k.size;
2432 reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
2434 if (!bkey_extent_is_allocation(k.k)) {
2435 ret = bch2_quota_reservation_add(c, inode,
2436 &i_sectors_hook.quota_res,
2439 goto err_put_sectors_dirty;
2442 if (reservation.v.nr_replicas < replicas ||
2443 bch2_extent_is_compressed(k)) {
2444 ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2447 goto err_put_sectors_dirty;
2449 reservation.v.nr_replicas = disk_res.nr_replicas;
2452 ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
2453 &inode->ei_journal_seq,
2454 BTREE_INSERT_ATOMIC|
2455 BTREE_INSERT_NOFAIL,
2456 BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
2457 bch2_disk_reservation_put(c, &disk_res);
2459 if (ret < 0 && ret != -EINTR)
2460 goto err_put_sectors_dirty;
2463 bch2_btree_iter_unlock(&iter);
2465 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2467 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2468 end > inode->v.i_size) {
2469 i_size_write(&inode->v, end);
2471 mutex_lock(&inode->ei_update_lock);
2472 ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2473 mutex_unlock(&inode->ei_update_lock);
2477 if ((mode & FALLOC_FL_KEEP_SIZE) &&
2478 (mode & FALLOC_FL_ZERO_RANGE) &&
2479 inode->ei_inode.bi_size != inode->v.i_size) {
2480 /* sync appends.. */
2481 ret = filemap_write_and_wait_range(mapping,
2482 inode->ei_inode.bi_size, S64_MAX);
2486 if (inode->ei_inode.bi_size != inode->v.i_size) {
2487 mutex_lock(&inode->ei_update_lock);
2488 ret = bch2_write_inode_size(c, inode, inode->v.i_size);
2489 mutex_unlock(&inode->ei_update_lock);
2493 pagecache_block_put(&mapping->add_lock);
2494 inode_unlock(&inode->v);
2497 err_put_sectors_dirty:
2498 ret = i_sectors_dirty_finish(c, &i_sectors_hook) ?: ret;
2500 bch2_btree_iter_unlock(&iter);
2501 pagecache_block_put(&mapping->add_lock);
2502 inode_unlock(&inode->v);
2506 long bch2_fallocate_dispatch(struct file *file, int mode,
2507 loff_t offset, loff_t len)
2509 struct bch_inode_info *inode = file_bch_inode(file);
2511 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2512 return bch2_fallocate(inode, mode, offset, len);
2514 if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2515 return bch2_fpunch(inode, offset, len);
2517 if (mode == FALLOC_FL_COLLAPSE_RANGE)
2518 return bch2_fcollapse(inode, offset, len);
2525 static bool page_is_data(struct page *page)
2527 /* XXX: should only have to check PageDirty */
2528 return PagePrivate(page) &&
2529 (page_state(page)->sectors ||
2530 page_state(page)->dirty_sectors);
2533 static loff_t bch2_next_pagecache_data(struct inode *vinode,
2534 loff_t start_offset,
2537 struct address_space *mapping = vinode->i_mapping;
2541 for (index = start_offset >> PAGE_SHIFT;
2542 index < end_offset >> PAGE_SHIFT;
2544 if (find_get_pages(mapping, &index, 1, &page)) {
2547 if (page_is_data(page))
2551 ((loff_t) index) << PAGE_SHIFT));
2562 static loff_t bch2_seek_data(struct file *file, u64 offset)
2564 struct bch_inode_info *inode = file_bch_inode(file);
2565 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2566 struct btree_iter iter;
2568 u64 isize, next_data = MAX_LFS_FILESIZE;
2571 isize = i_size_read(&inode->v);
2572 if (offset >= isize)
2575 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2576 POS(inode->v.i_ino, offset >> 9), 0, k) {
2577 if (k.k->p.inode != inode->v.i_ino) {
2579 } else if (bkey_extent_is_data(k.k)) {
2580 next_data = max(offset, bkey_start_offset(k.k) << 9);
2582 } else if (k.k->p.offset >> 9 > isize)
2586 ret = bch2_btree_iter_unlock(&iter);
2590 if (next_data > offset)
2591 next_data = bch2_next_pagecache_data(&inode->v,
2594 if (next_data > isize)
2597 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
2600 static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
2605 page = find_lock_entry(mapping, index);
2606 if (!page || radix_tree_exception(page))
2609 ret = page_is_data(page);
2615 static loff_t bch2_next_pagecache_hole(struct inode *vinode,
2616 loff_t start_offset,
2619 struct address_space *mapping = vinode->i_mapping;
2622 for (index = start_offset >> PAGE_SHIFT;
2623 index < end_offset >> PAGE_SHIFT;
2625 if (!page_slot_is_data(mapping, index))
2626 end_offset = max(start_offset,
2627 ((loff_t) index) << PAGE_SHIFT);
2632 static loff_t bch2_seek_hole(struct file *file, u64 offset)
2634 struct bch_inode_info *inode = file_bch_inode(file);
2635 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2636 struct btree_iter iter;
2638 u64 isize, next_hole = MAX_LFS_FILESIZE;
2641 isize = i_size_read(&inode->v);
2642 if (offset >= isize)
2645 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
2646 POS(inode->v.i_ino, offset >> 9),
2647 BTREE_ITER_SLOTS, k) {
2648 if (k.k->p.inode != inode->v.i_ino) {
2649 next_hole = bch2_next_pagecache_hole(&inode->v,
2650 offset, MAX_LFS_FILESIZE);
2652 } else if (!bkey_extent_is_data(k.k)) {
2653 next_hole = bch2_next_pagecache_hole(&inode->v,
2654 max(offset, bkey_start_offset(k.k) << 9),
2655 k.k->p.offset << 9);
2657 if (next_hole < k.k->p.offset << 9)
2660 offset = max(offset, bkey_start_offset(k.k) << 9);
2664 ret = bch2_btree_iter_unlock(&iter);
2668 if (next_hole > isize)
2671 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
2674 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
2680 return generic_file_llseek(file, offset, whence);
2682 return bch2_seek_data(file, offset);
2684 return bch2_seek_hole(file, offset);
2690 void bch2_fs_fsio_exit(struct bch_fs *c)
2692 bioset_exit(&c->dio_write_bioset);
2693 bioset_exit(&c->dio_read_bioset);
2694 bioset_exit(&c->writepage_bioset);
2697 int bch2_fs_fsio_init(struct bch_fs *c)
2701 pr_verbose_init(c->opts, "");
2703 if (bioset_init(&c->writepage_bioset,
2704 4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
2705 BIOSET_NEED_BVECS) ||
2706 bioset_init(&c->dio_read_bioset,
2707 4, offsetof(struct dio_read, rbio.bio),
2708 BIOSET_NEED_BVECS) ||
2709 bioset_init(&c->dio_write_bioset,
2710 4, offsetof(struct dio_write, iop.op.wbio.bio),
2714 pr_verbose_init(c->opts, "ret %i", ret);
2718 #endif /* NO_BCACHEFS_FS */